code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
|---|---|---|---|---|---|
defmodule Ueberauth.Strategy.Exact do
  @moduledoc """
  Provides an Ueberauth strategy for authenticating with Exact Online.

  ### Setup

  Create an application in Exact for you to use.

  Register a new application at: [Exact App Center](https://apps.exactonline.com)
  and get the `client_id` and `client_secret`.

  Include the provider in your configuration for Ueberauth:

      config :ueberauth, Ueberauth,
        providers: [
          exact: { Ueberauth.Strategy.Exact, [] }
        ]

  Then include the configuration for Exact Online:

      config :ueberauth, Ueberauth.Strategy.Exact.OAuth,
        client_id: System.get_env("EXACT_CLIENT_ID"),
        client_secret: System.get_env("EXACT_CLIENT_SECRET"),
        redirect_uri: "https://gqgh.localtunnel.me/auth/exact/callback" # <-- note that Exact needs HTTPS for a callback URL scheme, even in test apps.

  If you haven't already, create a pipeline and setup routes for your callback handler

      pipeline :auth do
        Ueberauth.plug "/auth"
      end

      scope "/auth" do
        pipe_through [:browser, :auth]
        get "/:provider/callback", AuthController, :callback
      end

  Create an endpoint for the callback where you will handle the
  `Ueberauth.Auth` struct:

      defmodule MyApp.AuthController do
        use MyApp.Web, :controller

        def callback_phase(%{ assigns: %{ ueberauth_failure: fails } } = conn, _params) do
          # do things with the failure
        end

        def callback_phase(%{ assigns: %{ ueberauth_auth: auth } } = conn, params) do
          # do things with the auth
        end
      end

  You can edit the behaviour of the Strategy by including some options when you
  register your provider.

  To set the `uid_field`:

      config :ueberauth, Ueberauth,
        providers: [
          exact: { Ueberauth.Strategy.Exact, [uid_field: :Email] } # Default is `:UserID`, a string UUID."
        ]

  ## Usage

  Once you obtained a token, you may use the OAuth client directly:

      Ueberauth.Strategy.Exact.OAuth.get("/current/Me")

  See the [Exact Online API Docs](https://start.exactonline.nl/docs/HlpRestAPIResources.aspx) for more information. Note that the provided client knows about the `/api/v1` prefix already.
  """

  use Ueberauth.Strategy,
    uid_field: :UserID,
    default_scope: "",
    oauth2_module: Ueberauth.Strategy.Exact.OAuth

  alias Ueberauth.Auth.Credentials
  alias Ueberauth.Auth.Extra
  alias Ueberauth.Auth.Info
  alias Ueberauth.Strategy.Exact

  @doc """
  Handles the initial redirect to the exact authentication page.

  To customize the scope (permissions) that are requested by exact include
  them as part of your url:

      "/auth/exact"

  You can also include a `:state` param that exact will return to you.
  """
  def handle_request!(conn) do
    send_redirect_uri = Keyword.get(options(conn), :send_redirect_uri, true)

    opts =
      if send_redirect_uri do
        [redirect_uri: callback_url(conn)]
      else
        []
      end

    # Pass the caller-supplied OAuth `state` through so Exact echoes it back.
    opts =
      if conn.params["state"], do: Keyword.put(opts, :state, conn.params["state"]), else: opts

    module = option(conn, :oauth2_module)
    redirect!(conn, apply(module, :authorize_url!, [opts]))
  end

  @doc """
  Handles the callback from Exact Online.

  When there is a failure from Exact the failure is included in the
  `ueberauth_failure` struct. Otherwise the information returned from Exact is
  returned in the `Ueberauth.Auth` struct.
  """
  def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
    module = option(conn, :oauth2_module)
    token = apply(module, :get_token!, [[code: code]])

    # A nil access token means the code exchange failed; Exact reports the
    # reason in the token's `other_params`.
    if token.access_token == nil do
      set_errors!(conn, [
        error(token.other_params["error"], token.other_params["error_description"])
      ])
    else
      fetch_user(conn, token)
    end
  end

  @doc false
  def handle_callback!(conn) do
    set_errors!(conn, [error("missing_code", "No code received")])
  end

  @doc """
  Cleans up the private area of the connection used for passing the raw Exact Online
  response around during the callback.
  """
  def handle_cleanup!(conn) do
    conn
    |> put_private(:exact_user, nil)
    |> put_private(:exact_token, nil)
  end

  @doc """
  Fetches the `:uid` field from the Exact Online response.

  This defaults to the option `:uid_field` which in-turn defaults to `:UserID`.
  """
  def uid(conn) do
    conn |> option(:uid_field) |> to_string() |> fetch_uid(conn)
  end

  @doc """
  Includes the credentials from the Exact Online response.
  """
  def credentials(conn) do
    token = conn.private.exact_token

    %Credentials{
      token: token.access_token,
      refresh_token: token.refresh_token,
      expires_at: token.expires_at,
      token_type: token.token_type,
      expires: true
    }
  end

  @doc """
  Fetches the fields to populate the info section of the `Ueberauth.Auth`
  struct.
  """
  def info(conn) do
    user = conn.private.exact_user

    %Info{
      name: user["FullName"],
      nickname: user["UserName"] || user["FirstName"],
      email: user["Email"],
      image: user["PictureUrl"]
    }
  end

  @doc """
  Stores the raw information (including the token) obtained from the Exact Online
  callback.
  """
  def extra(conn) do
    %Extra{
      raw_info: %{
        token: conn.private.exact_token,
        user: conn.private.exact_user
      }
    }
  end

  defp fetch_uid(field, conn) do
    conn.private.exact_user[field]
  end

  defp fetch_user(conn, token) do
    conn = put_private(conn, :exact_token, token)

    case Exact.OAuth.get(token, "/current/Me") do
      {:ok, %OAuth2.Response{status_code: 401, body: _body}} ->
        set_errors!(conn, [error("token", "unauthorized")])

      {:ok, %OAuth2.Response{status_code: status_code, body: user}}
      when status_code in 200..399 ->
        # The /current/Me endpoint wraps the user in an OData envelope;
        # assumes `d.results` has at least one entry — TODO confirm.
        user_data = user["d"]["results"] |> List.first()
        put_private(conn, :exact_user, user_data)

      {:error, %OAuth2.Error{reason: reason}} ->
        set_errors!(conn, [error("OAuth2", reason)])

      {:error, %OAuth2.Response{body: %{"message" => reason}}} ->
        set_errors!(conn, [error("OAuth2", reason)])

      {:error, _} ->
        # BUGFIX: was misspelled "uknown error".
        set_errors!(conn, [error("OAuth2", "unknown error")])
    end
  end

  defp option(conn, key) do
    Keyword.get(options(conn), key, Keyword.get(default_options(), key))
  end
end
|
lib/ueberauth/strategy/exact.ex
| 0.805173
| 0.466603
|
exact.ex
|
starcoder
|
defmodule RayTracer.Sphere do
  @moduledoc """
  This module defines sphere operations.
  """
  alias RayTracer.Shape
  alias RayTracer.RTuple
  alias RayTracer.Matrix
  alias RayTracer.Material
  alias RayTracer.Intersection

  use Shape, [
    center: RTuple.point(0, 0, 0),
    r: 1
  ]

  import RTuple, only: [sub: 2, dot: 2]

  @type t :: %__MODULE__{
    center: RTuple.point,
    r: number,
    transform: Matrix.matrix,
    inv_transform: Matrix.matrix,
    trans_inv_transform: Matrix.matrix,
    material: Material.t
  }

  defimpl Shape.Shadeable do
    alias RayTracer.{Sphere, RTuple, Intersection, Ray}

    # The normal on a sphere is the normalized vector from its center to the
    # surface point (all in object space).
    @spec local_normal_at(Sphere.t, RTuple.point) :: RTuple.vector
    def local_normal_at(sphere, object_point) do
      object_point
      |> RTuple.sub(sphere.center)
      |> RTuple.normalize
    end

    # Solves |origin + t * direction - center|^2 = r^2 for t; 0, 1 (tangent,
    # returned twice) or 2 intersection distances.
    @spec local_intersect(Sphere.t, Ray.t) :: list(Intersection.t)
    def local_intersect(sphere, object_space_ray) do
      sphere_to_ray = sub(object_space_ray.origin, sphere.center)
      a = dot(object_space_ray.direction, object_space_ray.direction)
      b = 2 * dot(object_space_ray.direction, sphere_to_ray)
      # BUGFIX: the constant term of the quadratic is r squared, not r.
      # The previous `- sphere.r` only agreed for the default unit sphere.
      c = dot(sphere_to_ray, sphere_to_ray) - sphere.r * sphere.r
      discriminant = b * b - 4 * a * c

      if discriminant < 0 do
        []
      else
        dsqrt = :math.sqrt(discriminant)
        [(-b - dsqrt) / (2 * a), (-b + dsqrt) / (2 * a)]
      end
      |> Enum.map(&Intersection.new(&1, sphere))
    end
  end

  @doc """
  Builds a sphere with given `transform` matrix and `material`.
  """
  @spec new(Matrix.matrix, Material.t) :: t
  def new(transform \\ Matrix.ident, material \\ Material.new) do
    %__MODULE__{material: material} |> Shape.set_transform(transform)
  end

  @doc """
  Builds a glassy sphere (fully transparent, refractive index 1.5).
  """
  @spec glass_sphere() :: t
  def glass_sphere() do
    s = put_in(new().material.transparency, 1.0)
    put_in(s.material.refractive_index, 1.5)
  end

  @doc """
  Necessary to keep older specs passing.
  """
  def normal_at(s, point) do
    Shape.normal_at(s, point)
  end
end
|
lib/sphere.ex
| 0.91256
| 0.606003
|
sphere.ex
|
starcoder
|
defmodule ARI.HTTP.Recordings do
  @moduledoc """
  HTTP Interface for CRUD operations on Recording objects
  REST Reference: https://wiki.asterisk.org/wiki/display/AST/Asterisk+18+Recordings+REST+API
  Live Recording Object: https://wiki.asterisk.org/wiki/display/AST/Asterisk+18+REST+Data+Models#Asterisk18RESTDataModels-LiveRecording
  Stored Recording Object: https://wiki.asterisk.org/wiki/display/AST/Asterisk+18+REST+Data+Models#Asterisk18RESTDataModels-StoredRecording
  """
  # `use ARI.HTTPClient` presumably injects the GenServer plumbing and the
  # `request/5` + `encode_params/1` helpers, rooted at "/recordings" —
  # confirm against ARI.HTTPClient.
  use ARI.HTTPClient, "/recordings"
  alias ARI.HTTPClient.Response
  # --- Public API: each function below maps 1:1 onto a handle_call clause. ---
  @doc """
  Retrieve list of recordings that are complete
  """
  @spec list_stored :: Response.t()
  def list_stored do
    GenServer.call(__MODULE__, :list_stored)
  end
  @doc """
  Get a stored recording's details
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec get_stored(String.t()) :: Response.t()
  def get_stored(name) do
    GenServer.call(__MODULE__, {:get_stored, name})
  end
  @doc """
  Get the file associated with the stored recording.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec get_stored_file(String.t()) :: Response.t()
  def get_stored_file(name) do
    GenServer.call(__MODULE__, {:get_stored_file, name})
  end
  @doc """
  Copy a stored recording.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
    payload: map of the parameters and values to pass to Asterisk
      destinationRecordingName: (required) the destination name of the recording
  """
  # Pattern match enforces the required destinationRecordingName key at the
  # client boundary; callers without it get a FunctionClauseError.
  @spec copy_stored(String.t(), map()) :: Response.t()
  def copy_stored(name, %{destinationRecordingName: _} = payload) do
    GenServer.call(__MODULE__, {:copy_stored, name, payload})
  end
  @doc """
  Deletes a stored recording
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec delete_stored(String.t()) :: Response.t()
  def delete_stored(name) do
    GenServer.call(__MODULE__, {:delete_stored, name})
  end
  @doc """
  Retrieve list of live recordings
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec get_live(String.t()) :: Response.t()
  def get_live(name) do
    GenServer.call(__MODULE__, {:get_live, name})
  end
  @doc """
  Stop a live recording and discard it
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec cancel_live(String.t()) :: Response.t()
  def cancel_live(name) do
    GenServer.call(__MODULE__, {:cancel_live, name})
  end
  @doc """
  Stop a live recording and store it
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec stop_live(String.t()) :: Response.t()
  def stop_live(name) do
    GenServer.call(__MODULE__, {:stop_live, name})
  end
  @doc """
  Pause a live recording.
  Pausing a recording suspends silence detection, which will be restarted when the recording is unpaused.
  Paused time is not included in the accounting for maxDurationSeconds.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec pause_live(String.t()) :: Response.t()
  def pause_live(name) do
    GenServer.call(__MODULE__, {:pause_live, name})
  end
  @doc """
  Unpause a live recording.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec unpause_live(String.t()) :: Response.t()
  def unpause_live(name) do
    GenServer.call(__MODULE__, {:unpause_live, name})
  end
  @doc """
  Mute a live recording.
  Muting a recording suspends silence detection, which will be restarted when the recording is unmuted.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec mute_live(String.t()) :: Response.t()
  def mute_live(name) do
    GenServer.call(__MODULE__, {:mute_live, name})
  end
  @doc """
  Unmute a live recording.
  ## Parameters
    name: String (UTF-8) that represents the recording's name
  """
  @spec unmute_live(String.t()) :: Response.t()
  def unmute_live(name) do
    GenServer.call(__MODULE__, {:unmute_live, name})
  end
  # --- GenServer callbacks: translate each call into an async HTTP request.
  # `request/5` replies to `from` when the HTTP response arrives, hence
  # `:noreply` here.
  @impl true
  def handle_call(:list_stored, from, state) do
    {:noreply, request("GET", "/stored", from, state)}
  end
  @impl true
  def handle_call({:get_stored, name}, from, state) do
    {:noreply, request("GET", "/stored/#{name}", from, state)}
  end
  @impl true
  def handle_call({:get_stored_file, name}, from, state) do
    {:noreply, request("GET", "/stored/#{name}/file", from, state)}
  end
  @impl true
  def handle_call({:copy_stored, name, payload}, from, state) do
    {:noreply,
     request("POST", "/stored/#{name}/copy?#{encode_params(payload)}", from, state)}
  end
  @impl true
  def handle_call({:delete_stored, name}, from, state) do
    {:noreply, request("DELETE", "/stored/#{name}", from, state)}
  end
  @impl true
  def handle_call({:get_live, name}, from, state) do
    {:noreply, request("GET", "/live/#{name}", from, state)}
  end
  @impl true
  def handle_call({:cancel_live, name}, from, state) do
    {:noreply, request("DELETE", "/live/#{name}", from, state)}
  end
  @impl true
  def handle_call({:stop_live, name}, from, state) do
    {:noreply, request("POST", "/live/#{name}/stop", from, state)}
  end
  @impl true
  def handle_call({:pause_live, name}, from, state) do
    {:noreply, request("POST", "/live/#{name}/pause", from, state)}
  end
  # Unpausing is a DELETE of the /pause sub-resource (POST created it above).
  @impl true
  def handle_call({:unpause_live, name}, from, state) do
    {:noreply, request("DELETE", "/live/#{name}/pause", from, state)}
  end
  @impl true
  def handle_call({:mute_live, name}, from, state) do
    {:noreply, request("POST", "/live/#{name}/mute", from, state)}
  end
  # Unmuting is a DELETE of the /mute sub-resource.
  @impl true
  def handle_call({:unmute_live, name}, from, state) do
    {:noreply, request("DELETE", "/live/#{name}/mute", from, state)}
  end
end
|
lib/ex_ari/http/recordings.ex
| 0.777722
| 0.406626
|
recordings.ex
|
starcoder
|
defmodule Data.Stats do
  @moduledoc """
  Item statistics

  An `Ecto.Type` that stores stat maps as JSON (`:map`) and restores atom
  keys / atom slot values on load.
  """

  import Data.Type

  @type character :: %{
    health_points: integer(),
    max_health_points: integer(),
    skill_points: integer(),
    max_skill_points: integer(),
    endurance_points: integer(),
    max_endurance_points: integer(),
    strength: integer(),
    agility: integer(),
    intelligence: integer(),
    awareness: integer(),
    vitality: integer(),
    willpower: integer()
  }

  @type armor :: %{
    slot: :atom
  }

  @type weapon :: %{}

  @behaviour Ecto.Type

  @impl Ecto.Type
  def type, do: :map

  @impl Ecto.Type
  def cast(stats) when is_map(stats), do: {:ok, stats}
  def cast(_), do: :error

  @doc """
  Load stats from the database, converting string keys back to atoms.
  """
  @impl Ecto.Type
  def load(stats) do
    # Keys come back from the database as strings. Use `to_existing_atom`
    # so malformed rows cannot mint unbounded new atoms (atoms are never
    # garbage collected); every legitimate key already exists as an atom
    # in this module (`fields/0`, `slots/0`, `migrate/1`, etc.).
    stats = for {key, val} <- stats, into: %{}, do: {String.to_existing_atom(key), val}
    stats = Enum.into(stats, %{}, &cast_val/1)
    {:ok, stats}
  end

  # Slot values are stored as strings; restore them to the closed set of
  # atoms enumerated by `slots/0`.
  defp cast_val({key, val}) do
    case key do
      :slot ->
        {key, String.to_existing_atom(val)}

      _ ->
        {key, val}
    end
  end

  @impl Ecto.Type
  def dump(stats) when is_map(stats), do: {:ok, Map.delete(stats, :__struct__)}
  def dump(_), do: :error

  @impl true
  def embed_as(_), do: :self

  @impl true
  def equal?(term1, term2), do: term1 == term2

  @doc """
  Set defaults for new statistics

  A "migration" of stats to ensure new ones are always available. They should be
  saved back in after the user loads their account.
  """
  @spec default(map()) :: map()
  def default(stats) do
    stats
    |> migrate()
    |> ensure(:health_points, 50)
    |> ensure(:max_health_points, 50)
    |> ensure(:skill_points, 50)
    |> ensure(:max_skill_points, 50)
    |> ensure(:endurance_points, 20)
    |> ensure(:max_endurance_points, 20)
    |> ensure(:agility, 10)
    |> ensure(:awareness, 10)
    |> ensure(:intelligence, 10)
    |> ensure(:strength, 10)
    |> ensure(:vitality, 10)
    |> ensure(:willpower, 10)
  end

  # Rename the legacy :health/:max_health keys to their *_points form.
  defp migrate(stats = %{health: health, max_health: max_health}) do
    stats
    |> Map.put(:health_points, health)
    |> Map.put(:max_health_points, max_health)
    |> Map.delete(:health)
    |> Map.delete(:max_health)
  end

  defp migrate(stats), do: stats

  @doc """
  Slots on a character
  """
  @spec slots() :: [atom]
  def slots(),
    do: [:chest, :head, :shoulders, :neck, :back, :hands, :waist, :legs, :feet, :finger]

  @doc """
  Fields in the statistics map
  """
  @spec basic_fields() :: [atom]
  def basic_fields(),
    do: [
      :agility,
      :awareness,
      :intelligence,
      :strength,
      :vitality,
      :willpower
    ]

  @doc """
  Fields in the statistics map
  """
  @spec fields() :: [atom]
  def fields(),
    do: [
      :agility,
      :awareness,
      :endurance_points,
      :health_points,
      :intelligence,
      :max_endurance_points,
      :max_health_points,
      :max_skill_points,
      :skill_points,
      :strength,
      :vitality,
      :willpower
    ]

  @doc """
  Validate a character's stats

      iex> Data.Stats.valid_character?(%{health_points: 50, strength: 10})
      false

      iex> Data.Stats.valid_character?(%{})
      false
  """
  @spec valid_character?(character()) :: boolean()
  def valid_character?(stats) do
    keys(stats) == fields() && _integer_fields(stats)
  end

  # Kept public (without an underscore rename) for backwards compatibility,
  # but hidden from the docs; checks every stat field holds an integer.
  @doc false
  def _integer_fields(stats) do
    Enum.all?(fields(), fn field ->
      is_integer(Map.get(stats, field))
    end)
  end

  @doc """
  Validate an armor item

      iex> Data.Stats.valid_armor?(%{slot: :chest, armor: 10})
      true

      iex> Data.Stats.valid_armor?(%{slot: :chest, armor: :none})
      false

      iex> Data.Stats.valid_armor?(%{slot: :eye, armor: 10})
      false

      iex> Data.Stats.valid_armor?(%{})
      false
  """
  @spec valid_armor?(armor()) :: boolean()
  def valid_armor?(stats) do
    keys(stats) == [:armor, :slot] && valid_slot?(stats) && is_integer(stats.armor)
  end

  @doc """
  Validate a weapon item

      iex> Data.Stats.valid_weapon?(%{})
      true

      iex> Data.Stats.valid_weapon?(%{anything: true})
      false
  """
  @spec valid_weapon?(weapon()) :: boolean()
  def valid_weapon?(stats) do
    keys(stats) == []
  end

  @doc """
  Validate an item stats based on type

      iex> Data.Stats.valid?("basic", %{})
      true

      iex> Data.Stats.valid?("basic", %{slot: :chest})
      false
  """
  @spec valid?(String.t(), map()) :: boolean()
  def valid?(type, stats)

  def valid?("armor", stats) do
    valid_armor?(stats)
  end

  def valid?("weapon", stats) do
    valid_weapon?(stats)
  end

  def valid?("basic", stats) do
    keys(stats) == []
  end

  def valid?(_, _), do: false

  @doc """
  Validate if the slot is right

      iex> Data.Stats.valid_slot?(%{slot: :chest})
      true

      iex> Data.Stats.valid_slot?(%{slot: :eye})
      false
  """
  @spec valid_slot?(map()) :: boolean()
  def valid_slot?(stats)

  def valid_slot?(%{slot: slot}) do
    slot in slots()
  end
end
|
lib/data/stats.ex
| 0.8415
| 0.485722
|
stats.ex
|
starcoder
|
defmodule VintageNet.Interface.IfupDaemon do
  @moduledoc """
  Wrap MuonTrap.Daemon to start and stop a program based on whether the network is up
  Unlike MuonTrap.Daemon, the arguments are called out in the child_spec so it looks like
  this:
  ```
  {VintageNet.Interface.IfupDaemon, ifname: ifname, command: program, args: arguments, opts: options]}
  ```
  """
  use GenServer
  require Logger
  @typedoc false
  @type init_args :: [
    ifname: VintageNet.ifname(),
    command: binary(),
    args: [binary()],
    opts: keyword()
  ]
  # :pid holds the running MuonTrap.Daemon process, or nil when stopped.
  @enforce_keys [:ifname, :command, :args]
  defstruct [:ifname, :command, :args, :opts, :pid]
  @doc """
  Start the IfupDaemon
  """
  @spec start_link(init_args()) :: GenServer.on_start()
  def start_link(init_args) do
    GenServer.start_link(__MODULE__, init_args)
  end
  @doc """
  Return whether the daemon is running
  """
  @spec running?(GenServer.server()) :: boolean()
  def running?(server) do
    GenServer.call(server, :running?)
  end
  @impl GenServer
  def init(init_args) do
    # struct!/2 raises if :ifname, :command or :args are missing.
    # Defer the subscription/initial check so init/1 returns quickly.
    state = struct!(__MODULE__, init_args)
    {:ok, state, {:continue, :continue}}
  end
  @impl GenServer
  def handle_continue(:continue, %{ifname: ifname} = state) do
    # Subscribe first, then read the current value, so a transition between
    # the two calls is not missed.
    VintageNet.subscribe(lower_up_property(ifname))
    new_state =
      case VintageNet.get(lower_up_property(ifname)) do
        true ->
          start_daemon(state)
        _not_true ->
          # If the physical layer isn't up, don't start until
          # we're notified that it is available.
          state
      end
    {:noreply, new_state}
  end
  @impl GenServer
  def handle_call(:running?, _from, state) do
    {:reply, state.pid != nil and Process.alive?(state.pid), state}
  end
  @impl GenServer
  def handle_info(
        {VintageNet, ["interface", ifname, "lower_up"], _old_value, true, _meta},
        %{ifname: ifname} = state
      ) do
    # Physical layer is up. Optimistically assume that the LAN is accessible.
    {:noreply, start_daemon(state)}
  end
  def handle_info(
        {VintageNet, ["interface", ifname, "lower_up"], _old_value, _false_or_nil, _meta},
        %{ifname: ifname} = state
      ) do
    # Physical layer is down or disconnected. We're definitely disconnected.
    {:noreply, stop_daemon(state)}
  end
  # Only starts when no daemon is tracked; the second clause makes repeated
  # "up" notifications idempotent.
  defp start_daemon(%{pid: nil} = state) do
    Logger.debug("[vintage_net(#{state.ifname})] starting #{state.command}")
    {:ok, pid} = MuonTrap.Daemon.start_link(state.command, state.args, state.opts)
    %{state | pid: pid}
  end
  defp start_daemon(state), do: state
  defp stop_daemon(%{pid: pid} = state) when is_pid(pid) do
    Logger.debug("[vintage_net(#{state.ifname})] stopping #{state.command}")
    # MuonTrap.Daemon is a GenServer, so a normal GenServer.stop/1 suffices;
    # guard with alive? in case it already exited.
    if Process.alive?(pid), do: GenServer.stop(pid)
    %{state | pid: nil}
  end
  defp stop_daemon(state), do: state
  defp lower_up_property(ifname) do
    ["interface", ifname, "lower_up"]
  end
end
|
lib/vintage_net/interface/ifup_daemon.ex
| 0.730578
| 0.659816
|
ifup_daemon.ex
|
starcoder
|
Code.require_file("../util/util.ex", __DIR__)
# Advent of Code day 3: crossed wires on a grid.
defmodule Day03 do
  # Parses one token like "R75" into {:R, 75}.
  # Uses to_existing_atom: only the :U/:D/:L/:R atoms below are valid.
  def parse_pathdir(tok) do
    {dir, num} = String.split_at(tok, 1)
    {String.to_existing_atom(dir), Integer.parse(num) |> elem(0)}
  end
  # Parses a comma-separated line of direction tokens into a list of
  # {direction, distance} tuples.
  def parse_path(line) do
    line
    |> String.codepoints
    |> Util.stream_comma_seperated
    |> Stream.map(&parse_pathdir/1)
    |> Enum.to_list
  end
  # Reads a file with one wire path per line.
  def read_paths(file_path) do
    File.stream!(file_path, [encoding: :utf8], :line)
    |> Stream.map(&parse_path/1)
    |> Enum.to_list
  end
  # Applies one instruction to an {x, y} coordinate.
  def next_coordinate({x, y}, instruction) do
    case instruction do
      {:U, up} -> {x, y + up}
      {:D, down} -> {x, y - down}
      {:R, right} -> {x + right, y}
      {:L, left} -> {x - left, y}
    end
  end
  # Expands a path into the list of corner coordinates it visits,
  # starting at {0, 0} (the starting coordinate is included).
  def path_to_coords(path, coord \\ {0, 0})
  def path_to_coords([], coord), do: [coord]
  def path_to_coords([head | path], coord) do
    [coord] ++ path_to_coords(path, next_coordinate(coord, head))
  end
  # Same as next_coordinate/2 but also accumulates the total wire
  # length travelled in the third tuple element.
  def next_coordinate_dist({x, y, dist}, instruction) do
    case instruction do
      {:U, up} -> {x, y + up, dist + up}
      {:D, down} -> {x, y - down, dist + down}
      {:R, right} -> {x + right, y, dist + right}
      {:L, left} -> {x - left, y, dist + left}
    end
  end
  # path_to_coords/2 with distance tracking; corners are {x, y, dist}.
  def path_to_coords_dist(path, coord \\ {0, 0, 0})
  def path_to_coords_dist([], coord), do: [coord]
  def path_to_coords_dist([head | path], coord) do
    [coord] ++ path_to_coords_dist(path, next_coordinate_dist(coord, head))
  end
  # Pairs consecutive corners into segments: [c0, c1, c2] -> [[c0, c1], [c1, c2]].
  def coords_to_segments(coords) do
    Stream.chunk_every(coords, 2, 1, :discard)
  end
  # Intersects two axis-aligned segments. Returns the {x, y} crossing point
  # or nil. Endpoint touches count on one side only because both interval
  # tests use `<=` (a strict/non-strict asymmetry inherited from `!=`).
  def intersect_segments([{a0x, a0y}, {a1x, a1y}] = _seg_a, [{b0x, b0y}, {b1x, b1y}] = _seg_b) do
    ax_straight = a0x == a1x
    bx_straight = b0x == b1x
    cond do
      # Early discard parallel segments (both vertical or both horizontal);
      # collinear overlaps are deliberately not reported.
      ax_straight == bx_straight
        -> nil
      # seg a is horizontal, seg b is vertical
      # * test both seg b x against constant seg a x
      # * test both seg a y against constant seg b y
      ax_straight
        and ((b0x <= a0x) != (b1x <= a0x))
        and ((a0y <= b0y) != (a1y <= b0y))
        -> {a0x, b0y}
      # seg a is vertical, seg b is horizontal
      # * test both seg a x against constant seg b x
      # * test both seg b y against constant seg a y
      bx_straight
        and ((a0x <= b0x) != (a1x <= b0x))
        and ((b0y <= a0y) != (b1y <= a0y))
        -> {b0x, a0y}
      # No intersect
      true -> nil
    end
  end
  # Distance-aware variant: returns {x, y, combined_wire_length} or nil
  # (the `unless` yields nil when the segments don't cross). The combined
  # length is each wire's length at its nearer endpoint plus the Manhattan
  # distance from that endpoint to the crossing point.
  def intersect_segments([{a0x, a0y, a0d}, {a1x, a1y, a1d}] = _seg_a, [{b0x, b0y, b0d}, {b1x, b1y, b1d}] = _seg_b) do
    p = intersect_segments([{a0x, a0y}, {a1x, a1y}], [{b0x, b0y}, {b1x, b1y}])
    unless is_nil(p) do
      {rx, ry} = p
      {adp, ad} = if a0d < a1d, do: {{a0x, a0y}, a0d}, else: {{a1x, a1y}, a1d}
      {bdp, bd} = if b0d < b1d, do: {{b0x, b0y}, b0d}, else: {{b1x, b1y}, b1d}
      {rx, ry, ad + bd + Util.manhatten_distance(p, adp) + Util.manhatten_distance(p, bdp)}
    end
  end
end
|
day03/el.ex
| 0.524882
| 0.602442
|
el.ex
|
starcoder
|
defmodule Alchemy.Voice do
  @moduledoc """
  Contains the types and functions related to voice communication with discord.
  To use the functions in this module, make sure to configure the paths
  to `ffmpeg`, as well as `youtube-dl`, like so:
  ```elixir
  config :alchemy,
    ffmpeg_path: "path/to/ffmpeg"
    youtube_dl_path: "path/to/youtube-dl"
  ```
  If these are not configured, the necessary supervisors for maintaining
  voice connections won't be started, and you'll run into errors when trying
  to use the functions in this module.
  """
  alias Alchemy.{VoiceState, VoiceRegion}
  alias Alchemy.Voice.Supervisor, as: VoiceSuper
  alias Alchemy.Discord.Gateway.RateLimiter
  @type snowflake :: String.t()
  @typedoc """
  Represents a voice region.
  - `id`
    Represent the unique ID for this region.
  - `name`
    The name of this region.
  - `sample_hostname`
    An example hostname for the region.
  - `sample_port`
    An example port for the region.
  - `vip`
    True if this is a vip-only server.
  - `optimal`
    True for a single server that is closest to the client.
  - `deprecated`
    Whether this is a deprecated voice region.
  - `custom`
    Whether this is a custom voice region.
  """
  @type region :: %VoiceRegion{
    id: snowflake,
    name: String.t(),
    sample_hostname: String.t(),
    sample_port: Integer,
    vip: Boolean,
    optimal: Boolean,
    deprecated: Boolean,
    custom: Boolean
  }
  @typedoc """
  Represents the state of a user's voice connection.
  - `guild_id`
    The guild id this state is for.
  - `channel_id`
    The channel id this user is connected to.
  - `user_id`
    The id of the user this state belongs to.
  - `session_id`
    The session id for this voice state.
  - `deaf`
    Whether this user is deafened by the server.
  - `mute`
    Whether this user is muted by the server.
  - `self_deaf`
    Whether this user is locally deafened.
  - `self_mute`
    Whether this user is locally muted.
  - `suppress`
    Whether this user is muted by the current user.
  """
  @type state :: %VoiceState{
    guild_id: snowflake | nil,
    channel_id: snowflake,
    user_id: snowflake,
    session_id: String.t(),
    deaf: Boolean,
    mute: Boolean,
    self_deaf: Boolean,
    self_mute: Boolean,
    suppress: Boolean
  }
  @typedoc """
  Represents the audio options that can be passed to different play methods.
  ## Options
  - `vol` audio volume, in `%`. Can go above 100 to multiply, e.g. `150`.
  """
  @type audio_options :: [{:vol, integer}]
  @doc """
  Joins a voice channel in a guild.
  Only one voice connection per guild is possible with the api.
  If you're already connected to the guild, this will not restart the
  voice connections, but instead just move you to the channel.
  This function also checks if you're already connected to this channel,
  and does nothing if that is the case.
  The timeout will be spread across 2 different message receptions,
  i.e. a timeout of `6000` will only wait 3s at every reception.
  """
  @spec join(snowflake, snowflake, integer) :: :ok | {:error, String.t()}
  def join(guild, channel, timeout \\ 6000) do
    # Registry entry value holds the channel currently joined; the pin (^)
    # makes the first clause match only when already in that channel.
    case Registry.lookup(Registry.Voice, {guild, :gateway}) do
      [{_, ^channel} | _] -> :ok
      _ -> VoiceSuper.start_client(guild, channel, timeout)
    end
  end
  @doc """
  Disconnects from voice in a guild.
  Returns an error if the connection hadn’t been established.
  """
  @spec leave(snowflake) :: :ok | {:error, String.t()}
  def leave(guild) do
    case Registry.lookup(Registry.Voice, {guild, :gateway}) do
      [] ->
        {:error, "You're not joined to voice in this guild"}
      [{pid, _} | _] ->
        Supervisor.terminate_child(VoiceSuper.Gateway, pid)
        # Tell Discord we left by clearing the voice state (nil channel).
        RateLimiter.change_voice_state(guild, nil)
    end
  end
  @doc """
  Starts playing a music file on a guild's voice connection.
  Returns an error if the client isn't connected to the guild,
  or if the file does not exist.
  ## Examples
  ```elixir
  Voice.join("666", "666")
  Voice.play_file("666", "cool_song.mp3")
  ```
  """
  @spec play_file(snowflake, Path.t(), audio_options) :: :ok | {:error, String.t()}
  def play_file(guild, file_path, options \\ []) do
    with [{pid, _} | _] <- Registry.lookup(Registry.Voice, {guild, :controller}),
         true <- File.exists?(file_path) do
      GenServer.call(pid, {:play, file_path, :file, options})
    else
      [] -> {:error, "You're not joined to voice in this guild"}
      false -> {:error, "This file does not exist"}
    end
  end
  # Shared dispatch for the play_* variants: looks up the guild's audio
  # controller and forwards {:play, data, type, options} to it.
  defp play_type(guild, type, data, options) do
    case Registry.lookup(Registry.Voice, {guild, :controller}) do
      [] -> {:error, "You're not joined to voice in this guild"}
      [{pid, _} | _] -> GenServer.call(pid, {:play, data, type, options})
    end
  end
  # Undocumented variant that plays with the :url_direct type.
  # NOTE(review): `pid` appears to be a data source handed straight to the
  # controller (bypassing youtube-dl) — confirm against the controller impl.
  def play_data_direct(guild, pid, options \\ []) do
    play_type(guild, :url_direct, pid, options)
  end
  @doc """
  Starts playing audio from a url.
  For this to work, the url must be one of the
  [supported sites](https://rg3.github.io/youtube-dl/supportedsites.html).
  This function does not check the validity of this url, so if it's invalid,
  an error will get logged, and no audio will be played.
  """
  @spec play_url(snowflake, String.t(), audio_options) :: :ok | {:error, String.t()}
  def play_url(guild, url, options \\ []) do
    play_type(guild, :url, url, options)
  end
  @doc """
  Starts playing audio from an `iodata`, or a stream of `iodata`.
  Similar to `play_url/2` except it doesn't create a stream from
  `youtube-dl` for you.
  """
  @spec play_iodata(snowflake, iodata | Enumerable.t(), audio_options) ::
          :ok | {:error, String.t()}
  def play_iodata(guild, data, options \\ []) do
    play_type(guild, :iodata, data, options)
  end
  @doc """
  Stops playing audio on a guild's voice connection.
  Returns an error if the connection hadn't been established.
  """
  @spec stop_audio(snowflake) :: :ok | {:error, String.t()}
  def stop_audio(guild) do
    case Registry.lookup(Registry.Voice, {guild, :controller}) do
      [] -> {:error, "You're not joined to voice in this guild"}
      [{pid, _} | _] -> GenServer.call(pid, {:stop_playing, :stop_audio_function})
    end
  end
  @doc """
  Lets this process listen for the end of an audio track in a guild.
  This will subscribe this process up until the next time an audio track
  ends, to react to this, you'll want to handle the message in some way, e.g.
  ```elixir
  Voice.listen_for_end(guild)
  receive do
    {:audio_stopped, ^guild} -> IO.puts "audio has stopped"
  end
  ```
  This is mainly designed for use in genservers, or other places where you don't
  want to block. If you do want to block and wait immediately, try
  `wait_for_end/2` instead.
  ## Examples
  Use in a genserver:
  ```elixir
  def handle_info({:audio_stopped, guild}, state) do
    IO.puts "audio has stopped in \#{guild}"
    Voice.listen_for_end(guild)
    {:noreply, state}
  end
  ```
  """
  @spec listen_for_end(snowflake) :: :ok | {:error, String.t()}
  def listen_for_end(guild) do
    case Registry.lookup(Registry.Voice, {guild, :controller}) do
      [] -> {:error, "You're not joined to voice in this guild"}
      [{pid, _} | _] -> GenServer.call(pid, :add_listener)
    end
  end
  # Undocumented arity-2 variant: subscribes the process registered under
  # `global_name` (resolved via :global.whereis_name/1) instead of the caller.
  def listen_for_end(guild, global_name) do
    case Registry.lookup(Registry.Voice, {guild, :controller}) do
      [] -> {:error, "You're not joined to voice in this guild"}
      [{pid, _}|_] -> GenServer.call(pid, {:add_listener, :global.whereis_name(global_name)})
    end
  end
  @doc """
  Blocks the current process until audio has stopped playing in a guild.
  This is a combination of `listen_for_end/1` and a receive block,
  however this will return an error if the provided timeout is exceeded.
  This is useful for implementing automatic track listing, e.g.
  ```elixir
  def playlist(guild, tracks) do
    Enum.map(tracks, fn track ->
      Voice.play_file(guild, track)
      Voice.wait_for_end(guild)
    end)
  end
  ```
  """
  @spec wait_for_end(snowflake, integer | :infinity) :: :ok | {:error, String.t()}
  def wait_for_end(guild, timeout \\ :infinity) do
    listen_for_end(guild)
    receive do
      {:audio_stopped, ^guild, _source} -> :ok
    after
      timeout -> {:error, "Timed out waiting for audio"}
    end
  end
  @doc """
  Returns which channel the client is connected to in a guild.
  Returns `nil` if there is no connection.
  """
  @spec which_channel(snowflake) :: snowflake | nil
  def which_channel(guild) do
    case Registry.lookup(Registry.Voice, {guild, :gateway}) do
      [{_, channel} | _] -> channel
      _ -> nil
    end
  end
end
|
lib/Structs/Voice/voice.ex
| 0.89353
| 0.804866
|
voice.ex
|
starcoder
|
defmodule AWS.ElasticTranscoder do
@moduledoc """
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
@doc """
The CancelJob operation cancels an unfinished job.
Only jobs with a `Submitted` status can be canceled. To keep a pipeline from
picking up a job while you look up its identifier, temporarily pause the
pipeline with `UpdatePipelineStatus`.
"""
def cancel_job(client, id, input, options \\ []) do
  request(client, :delete, "/2012-09-25/jobs/#{URI.encode(id)}", [], [], input, options, 202)
end
@doc """
When you create a job, Elastic Transcoder returns JSON data that includes the
values that you specified plus information about the job that is created.
If a job has more than one output (for example, one output for the Kindle Fire
and another for the Apple iPhone 4s), listing those jobs currently requires the
Elastic Transcoder API rather than the AWS Console.
"""
def create_job(client, input, options \\ []) do
  request(client, :post, "/2012-09-25/jobs", [], [], input, options, 201)
end
@doc """
The CreatePipeline operation creates a pipeline with settings that you specify.
"""
def create_pipeline(client, input, options \\ []) do
path_ = "/2012-09-25/pipelines"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 201)
end
@doc """
The CreatePreset operation creates a preset with settings that you specify.
Elastic Transcoder checks the CreatePreset settings to ensure that they meet
Elastic Transcoder requirements and to determine whether they comply with H.264
standards. If your settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (`ValidationException`) and does not
create the preset. If the settings are valid for Elastic Transcoder but aren't
strictly compliant with the H.264 standard, Elastic Transcoder creates the
preset and returns a warning message in the response. This helps you determine
whether your settings comply with the H.264 standard while giving you greater
flexibility with respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format. For more
information, see the International Telecommunication Union publication
*Recommendation ITU-T H.264: Advanced video coding for generic audiovisual
services*.
"""
def create_preset(client, input, options \\ []) do
path_ = "/2012-09-25/presets"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 201)
end
@doc """
The DeletePipeline operation removes a pipeline.
You can only delete a pipeline that has never been used or that is not currently
in use (doesn't contain any active jobs). If the pipeline is currently in use,
`DeletePipeline` returns an error.
"""
def delete_pipeline(client, id, input, options \\ []) do
path_ = "/2012-09-25/pipelines/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 202)
end
@doc """
The DeletePreset operation removes a preset that you've added in an AWS region.
You can't delete the default presets that are included with Elastic Transcoder.
"""
def delete_preset(client, id, input, options \\ []) do
path_ = "/2012-09-25/presets/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 202)
end
@doc """
The ListJobsByPipeline operation gets a list of the jobs currently in a
pipeline.
Elastic Transcoder returns all of the jobs currently in the specified pipeline.
The response body contains one element for each job that satisfies the search
criteria.
"""
def list_jobs_by_pipeline(client, pipeline_id, ascending \\ nil, page_token \\ nil, options \\ []) do
path_ = "/2012-09-25/jobsByPipeline/#{URI.encode(pipeline_id)}"
headers = []
query_ = []
query_ = if !is_nil(page_token) do
[{"PageToken", page_token} | query_]
else
query_
end
query_ = if !is_nil(ascending) do
[{"Ascending", ascending} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ListJobsByStatus operation gets a list of jobs that have a specified status.
The response body contains one element for each job that satisfies the search
criteria.
"""
def list_jobs_by_status(client, status, ascending \\ nil, page_token \\ nil, options \\ []) do
path_ = "/2012-09-25/jobsByStatus/#{URI.encode(status)}"
headers = []
query_ = []
query_ = if !is_nil(page_token) do
[{"PageToken", page_token} | query_]
else
query_
end
query_ = if !is_nil(ascending) do
[{"Ascending", ascending} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ListPipelines operation gets a list of the pipelines associated with the
current AWS account.
"""
def list_pipelines(client, ascending \\ nil, page_token \\ nil, options \\ []) do
path_ = "/2012-09-25/pipelines"
headers = []
query_ = []
query_ = if !is_nil(page_token) do
[{"PageToken", page_token} | query_]
else
query_
end
query_ = if !is_nil(ascending) do
[{"Ascending", ascending} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ListPresets operation gets a list of the default presets included with
Elastic Transcoder and the presets that you've added in an AWS region.
"""
def list_presets(client, ascending \\ nil, page_token \\ nil, options \\ []) do
path_ = "/2012-09-25/presets"
headers = []
query_ = []
query_ = if !is_nil(page_token) do
[{"PageToken", page_token} | query_]
else
query_
end
query_ = if !is_nil(ascending) do
[{"Ascending", ascending} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ReadJob operation returns detailed information about a job.
"""
def read_job(client, id, options \\ []) do
path_ = "/2012-09-25/jobs/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ReadPipeline operation gets detailed information about a pipeline.
"""
def read_pipeline(client, id, options \\ []) do
path_ = "/2012-09-25/pipelines/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The ReadPreset operation gets detailed information about a preset.
"""
def read_preset(client, id, options \\ []) do
path_ = "/2012-09-25/presets/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
The TestRole operation tests the IAM role used to create the pipeline.
The `TestRole` action lets you determine whether the IAM role you are using has
sufficient permissions to let Elastic Transcoder perform tasks associated with
the transcoding process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries to send a test
notification to Amazon SNS topics that you specify.
"""
def test_role(client, input, options \\ []) do
path_ = "/2012-09-25/roleTests"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Use the `UpdatePipeline` operation to update settings for a pipeline.
When you change pipeline settings, your changes take effect immediately. Jobs
that you have already submitted and that Elastic Transcoder has not started to
process are affected in addition to jobs that you submit after you change
settings.
"""
def update_pipeline(client, id, input, options \\ []) do
path_ = "/2012-09-25/pipelines/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@doc """
With the UpdatePipelineNotifications operation, you can update Amazon Simple
Notification Service (Amazon SNS) notifications for a pipeline.
When you update notifications for a pipeline, Elastic Transcoder returns the
values that you specified in the request.
"""
def update_pipeline_notifications(client, id, input, options \\ []) do
path_ = "/2012-09-25/pipelines/#{URI.encode(id)}/notifications"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that the
pipeline stops or restarts the processing of jobs.
Changing the pipeline status is useful if you want to cancel one or more jobs.
You can't cancel jobs after Elastic Transcoder has started processing them; if
you pause the pipeline to which you submitted the jobs, you have more time to
get the job IDs for the jobs that you want to cancel, and to send a `CancelJob`
request.
"""
def update_pipeline_status(client, id, input, options \\ []) do
path_ = "/2012-09-25/pipelines/#{URI.encode(id)}/status"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "elastictranscoder"}
host = build_host("elastictranscoder", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/elastic_transcoder.ex
| 0.827166
| 0.452838
|
elastic_transcoder.ex
|
starcoder
|
defmodule Cldr.Calendar.Format do
  @moduledoc """
  Formatting functions for calendars
  """

  alias Cldr.Calendar.Formatter.Options

  @default_format_module Cldr.Calendar.Formatter.HTML.Basic

  @doc "Returns the formatter module used when none is configured."
  def default_formatter_module do
    @default_format_module
  end

  @default_calendar_css_class "cldr_calendar"

  @doc "Returns the default CSS class applied to formatted calendars."
  def default_calendar_css_class do
    @default_calendar_css_class
  end

  @doc """
  Returns `true` if `formatter` is a loaded module that exports the
  `format_year/3` callback, `false` otherwise.
  """
  def formatter_module?(formatter) do
    Code.ensure_loaded?(formatter) && function_exported?(formatter, :format_year, 3)
  end

  @doc """
  Format one calendar year
  ## Arguments
  * `year` is the year of the calendar
  to be formatted
  * `options` is a `Cldr.Calendar.Formatter.Options`
  struct or a `Keyword.t` list of options.
  ## Returns
  * The result of the `format_year/3` callback of
  the configured formatter
  ## Examples
  => Cldr.Calendar.Format.year(2019)
  => Cldr.Calendar.Format.year(2019, formatter: Cldr.Calendar.Formatter.Markdown)
  => Cldr.Calendar.Format.year(2019, formatter: Cldr.Calendar.Formatter.Markdown, locale: "fr")
  """
  @spec year(Calendar.year(), Options.t() | Keyword.t()) :: any()
  def year(year, options \\ [])

  # Keyword options are validated into an %Options{} struct, then re-dispatched.
  def year(year, options) when is_list(options) do
    with {:ok, options} <- Options.validate_options(options) do
      year(year, options)
    end
  end

  def year(year, %Options{} = options) do
    %Options{calendar: calendar, formatter: formatter} = options
    range = 1..calendar.months_in_year(year)

    year
    |> months(range, options)
    |> formatter.format_year(year, options)
  end

  # Formats every month in `range` for the given year.
  defp months(year, range, options) do
    for month <- range do
      month(year, month, options)
    end
  end

  @doc """
  Format one calendar year and month
  ## Arguments
  * `year` is the year of the calendar
  to be formatted
  * `month` is the month of the calendar
  to be formatted
  * `options` is a `Cldr.Calendar.Formatter.Options`
  struct or a `Keyword.t` list of options.
  ## Returns
  * The result of the `format_month/4` callback of
  the configured formatter
  ## Examples
  => Cldr.Calendar.Format.month(2019, 4)
  => Cldr.Calendar.Format.month(2019, 4, formatter: Cldr.Calendar.Formatter.HTML.Basic)
  => Cldr.Calendar.Format.month(2019, 4, formatter: Cldr.Calendar.Formatter.Markdown, locale: "fr")
  """
  @spec month(Calendar.year(), Calendar.month(), Options.t | Keyword.t) :: any()
  def month(year, month, options \\ [])

  def month(year, month, options) when is_list(options) do
    with {:ok, options} <- Options.validate_options(options) do
      month(year, month, options)
    end
  end

  def month(year, month, %Options{} = options) do
    %Options{calendar: calendar} = options

    with %Date.Range{first: date} <- calendar.month(year, month) do
      # Dispatch on the calendar's base unit (:month or :week based calendars).
    month(year, month, date, calendar.calendar_base, options)
    end
  end

  # Month-based calendars always render a fixed six-week grid.
  defp month(year, month, date, :month, options) do
    %Options{formatter: formatter} = options
    range = 0..5

    date
    |> weeks(range, year, month, options)
    |> formatter.format_month(year, month, options)
  end

  # Week-based calendars derive the number of weeks from the month's length.
  defp month(year, _month, date, :week, options) do
    %Options{calendar: calendar, formatter: formatter} = options
    month = Cldr.Calendar.month_of_year(date)

    weeks_in_month =
      date.year
      |> calendar.days_in_month(month)
      |> div(calendar.days_in_week())

    range = 0..(weeks_in_month - 1)

    date
    |> weeks(range, year, month, options)
    |> formatter.format_month(year, month, options)
  end

  # Formats each week in `range`, offset in whole weeks from `date`'s week.
  defp weeks(date, range, year, month, options) do
    week = Cldr.Calendar.Interval.week(date)

    for i <- range do
      week
      |> Cldr.Calendar.plus(:weeks, i)
      |> week(year, month, options)
    end
  end

  defp week(week, year, month, options) do
    %Options{formatter: formatter} = options
    week_number = Cldr.Calendar.week_of_year(week.first)

    days(week, year, month, options)
    |> formatter.format_week(year, month, week_number, options)
  end

  defp days(week, year, month, options) do
    %Options{formatter: formatter} = options

    for date <- week do
      formatter.format_day(date, year, month, options)
    end
  end

  @doc "Returns the error tuple raised/returned for an unknown formatter."
  def invalid_formatter_error(formatter) do
    {Cldr.Calendar.Formatter.UnknownFormatterError, "Invalid formatter #{inspect formatter}"}
  end

  @doc "Returns the error tuple raised/returned for an invalid date."
  def invalid_date_error(date) do
    # Bug fix: this message previously read "Invalid formatter", copied from
    # invalid_formatter_error/1, which mislabelled date errors.
    {Cldr.Calendar.Formatter.InvalidDateError, "Invalid date #{inspect date}"}
  end
end
|
lib/format.ex
| 0.924883
| 0.627595
|
format.ex
|
starcoder
|
defmodule Bitcoin.Protocol.Types.NetworkAddress do
  @moduledoc """
  Network address record as used in the Bitcoin P2P protocol.
  The `version` message variant omits the leading `time` field; addresses are
  16-byte IPv6-format fields where IPv4 addresses use the IPv4-mapped prefix.
  """

  defstruct [
    # (uint32) the Time (version >= 31402). Not present in version message.
    time: 0,
    # (uint64_t) bitfield of features to be enabled for this connection. See Version Message.
    services: <<0, 0, 0, 0, 0, 0, 0, 0>>,
    # Decoded address tuple: 4 elements for IPv4, 8 elements for IPv6 (see :inet).
    address: {0, 0, 0, 0},
    # (uint16_t) port number, network byte order.
    port: 8333
  ]

  @type t :: %Bitcoin.Protocol.Types.NetworkAddress{
    time: non_neg_integer,
    services: binary,
    address: tuple,
    port: non_neg_integer
  }

  @doc "Parses an exact 30-byte `addr`-style network address binary."
  def parse(<<time :: unsigned-native-integer-size(32),
              services :: bitstring-size(64),
              address :: bytes-size(16),
              port :: unsigned-big-integer-size(16)>>) do
    %Bitcoin.Protocol.Types.NetworkAddress{
      time: time,
      services: services,
      address: address |> addr_to_inet,
      port: port
    }
  end

  @doc "Like `parse/1`, but returns `[struct, remaining_stream]` for streamed input."
  def parse_stream(<<time :: unsigned-native-integer-size(32),
                     services :: bitstring-size(64),
                     address :: bytes-size(16),
                     port :: unsigned-big-integer-size(16),
                     remaining_stream :: binary>>) do
    [%Bitcoin.Protocol.Types.NetworkAddress{
      time: time,
      services: services,
      address: address |> addr_to_inet,
      port: port
    }, remaining_stream]
  end

  @doc "Parses the time-less address layout used inside the Version message."
  def parse_version(<<services :: bitstring-size(64),
                      address :: bytes-size(16),
                      port :: unsigned-big-integer-size(16)>>) do
    %Bitcoin.Protocol.Types.NetworkAddress{
      services: services,
      address: address |> addr_to_inet,
      port: port
    }
  end

  @doc "Like `parse_version/1`, but returns `[struct, remaining_stream]`."
  def parse_version_stream(<<services :: bitstring-size(64),
                             address :: bytes-size(16),
                             port :: unsigned-big-integer-size(16),
                             remaining_stream :: binary>>) do
    [%Bitcoin.Protocol.Types.NetworkAddress{
      services: services,
      address: address |> addr_to_inet,
      port: port
    }, remaining_stream]
  end

  @doc "Binary representation as it is used in the Addr message."
  def serialize(%Bitcoin.Protocol.Types.NetworkAddress{} = s) do
    <<
      s.time :: unsigned-native-integer-size(32),
      s.services :: bitstring-size(64),
      (s.address |> inet_to_addr) :: bytes-size(16),
      s.port :: unsigned-big-integer-size(16)
    >>
  end

  @doc "Binary representation as it is used in the Version message (no time)."
  def serialize_version(%Bitcoin.Protocol.Types.NetworkAddress{} = s) do
    <<
      s.services :: bitstring-size(64),
      (s.address |> inet_to_addr) :: bytes-size(16),
      s.port :: unsigned-big-integer-size(16)
    >>
  end

  @doc """
  Converts a 16-byte wire address to an Erlang `:inet` IP tuple.
  IPv4-mapped (`::ffff:a.b.c.d`) and IPv4-compatible (`::a.b.c.d`) forms decode
  to a 4-tuple; any other IPv6 address currently decodes to `{0, 0, 0, 0}`.
  """
  def addr_to_inet(<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, b1, b2, b3, b4>>), do: {b1, b2, b3, b4}
  def addr_to_inet(<<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, b1, b2, b3, b4>>), do: {b1, b2, b3, b4}
  # Bug fix: this fallback previously matched `binary-size(128)`; `binary-size`
  # counts *bytes*, so it demanded a 128-byte value and could never match the
  # 16-byte addresses the other clauses receive, crashing with a
  # FunctionClauseError on any genuine IPv6 address. TODO: real IPv6 support.
  def addr_to_inet(<<_ipv6 :: binary-size(16)>>), do: {0, 0, 0, 0}

  # Convert an Erlang :inet IP tuple to 16 wire bytes, IPv4 only (TODO IPv6).
  def inet_to_addr({b1, b2, b3, b4}), do: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, b1, b2, b3, b4>>
end
|
lib/bitcoin/protocol/types/network_address.ex
| 0.594669
| 0.426501
|
network_address.ex
|
starcoder
|
defmodule Makeup.Lexer.Combinators do
@moduledoc """
Common components useful in many lexers.
"""
import NimbleParsec
@doc """
Wraps the given combinator into a token of the given `ttype`.
Instead of a combinator, the first argument can also be a string literal.
"""
def token(literal, token_type) when is_binary(literal) do
replace(string(literal), {token_type, %{}, literal})
end
def token(combinator, token_type) do
# Non-literal combinators are wrapped after parsing via post_traverse.
combinator |> post_traverse({__MODULE__, :__token__, [token_type]})
end
def token(literal, token_type, attrs) when is_binary(literal) and is_map(attrs) do
replace(string(literal), {token_type, attrs, literal})
end
def token(combinator, token_type, attrs) when is_map(attrs) do
combinator |> post_traverse({__MODULE__, :__token__, [token_type, attrs]})
end
@doc """
Joins the result of the given combinator into a single string.
This is not usually necessary, but it can be useful if you want to match on the tokens.
It's easier to match on the token `{:keyword, %{}, "unquote"}` than on something like
`{:keyword, %{}, ["u", "nquote"]}`, even though both tokens will be treated the same way
by the formatter.
"""
def lexeme(combinator) do
combinator |> post_traverse({__MODULE__, :__lexeme__, []})
end
# post_traverse callback for token/2: wraps parser results in a token tuple.
# NimbleParsec accumulates args in reverse order, hence :lists.reverse below.
@doc false
def __token__(_rest, [arg], context, _line, _offset, token_type) do
{[{token_type, %{}, arg}], context}
end
def __token__(_rest, arg, context, _line, _offset, token_type) when is_binary(arg) do
{[{token_type, %{}, arg}], context}
end
def __token__(_rest, args, context, _line, _offset, token_type) do
{[{token_type, %{}, args |> :lists.reverse()}], context}
end
# Same as the 6-arity clauses above, but carrying explicit token attrs.
@doc false
def __token__(_rest, [arg], context, _line, _offset, token_type, attrs) do
{[{token_type, attrs, arg}], context}
end
def __token__(_rest, arg, context, _line, _offset, token_type, attrs) when is_binary(arg) do
{[{token_type, attrs, arg}], context}
end
def __token__(_rest, args, context, _line, _offset, token_type, attrs) do
{[{token_type, attrs, args |> :lists.reverse()}], context}
end
# post_traverse callback for lexeme/1: flattens parser output into one binary.
@doc false
def __lexeme__(_rest, args, context, _line, _offset) do
result = args |> List.wrap() |> :lists.reverse() |> IO.iodata_to_binary()
{[result], context}
end
# Sorts longest-first (with lexicographic tiebreak) so that choice/1 tries
# longer literals before their prefixes, e.g. "==" before "=".
defp reverse_sort(items) do
Enum.sort(items, fn a, b -> {byte_size(a), a} > {byte_size(b), b} end)
end
@doc """
Matches one of the literal strings in the list.
The strings aren't matched in order: they are automatically sorted in a way
that guarantees that the longest strings will be tried first.
## Examples
keywords = word_from_list(~w[do end catch after rescue])
"""
def word_from_list(words) do
choice(for word <- reverse_sort(words), do: string(word))
end
@doc """
Matches one of the literal strings in the list and wraps it in a token of the given type.
This is just a shorthand.
The strings aren't matched in order: they are automatically sorted in a way
that guarantees that the longest strings will be tried first.
## Examples
keywords = word_from_list(~w[do end catch after rescue], :keyword)
"""
def word_from_list(words, ttype) do
choice(for word <- reverse_sort(words), do: string(word)) |> token(ttype)
end
@doc """
Matches one of the literal strings in the list and wraps it in a token of the given `type`,
with the given `attrs`.
This is just a shorthand.
The strings aren't matched in order: they are automatically sorted in a way
that guarantees that the longest strings will be tried first.
"""
def word_from_list(words, ttype, attrs) do
choice(for word <- reverse_sort(words), do: string(word)) |> token(ttype, attrs)
end
@doc """
Matches a given combinator, repeated 0 or more times, surrounded by left and right delimiters.
Delimiters can be combinators or literal strings (either both combinators or both literal strings).
"""
def many_surrounded_by(combinator, left, right) when is_binary(left) and is_binary(right) do
token(left, :punctuation)
|> concat(
repeat(
lookahead_not(string(right))
|> concat(combinator)
)
)
|> concat(token(right, :punctuation))
end
def many_surrounded_by(combinator, left, right) do
left
|> concat(
repeat(
lookahead_not(right)
|> concat(combinator)
)
)
|> concat(right)
end
@doc """
Matches a given combinator, repeated 0 or more times, surrounded by left and right delimiters,
and wraps the `right` and `left` delimiters into a token of the given `ttype`.
"""
def many_surrounded_by(combinator, left, right, ttype) do
# NOTE(review): `string(right)` implies `right` must be a binary here,
# unlike many_surrounded_by/3 which also accepts combinators — confirm.
token(left, ttype)
|> concat(
repeat(
lookahead_not(string(right))
|> concat(combinator)
)
)
|> concat(token(right, ttype))
end
# post_traverse callback for string_like/5: merges runs of plain characters
# and binaries into single tokens, leaving pre-built tokens untouched.
@doc false
def collect_raw_chars_and_binaries(_rest, args, context, _line, _offset, ttype, attrs) do
result = merge_chars_helper(ttype, attrs, [], args)
{result, context}
end
defp merge_chars_helper(_ttype, _attrs, [], []), do: []
# Accumulate consecutive raw chars/binaries into `acc`.
defp merge_chars_helper(ttype, attrs, acc, [next | rest])
when is_integer(next) or is_binary(next) do
merge_chars_helper(ttype, attrs, [next | acc], rest)
end
# A non-char element (already a token) passes through unchanged.
defp merge_chars_helper(ttype, attrs, [], [element | rest]) do
[element | merge_chars_helper(ttype, attrs, [], rest)]
end
# Flush the accumulated run as one token before continuing.
defp merge_chars_helper(ttype, attrs, acc, list) do
tok = {ttype, attrs, acc}
[tok | merge_chars_helper(ttype, attrs, [], list)]
end
@doc """
A generic combinator for string-like syntactic structures.
It takes the following parameters:
* `left` - left delimiter for the string. Can be a binary or a general combinator.
* `right` - right delimiter for the string. Can be a binary or a general combinator
* `middle` - a list of parsers to run inside the string which parse entities
that aren't characters.
The most common example are special characters and string interpolation
for languages that support it like Elixir.
* `ttype` - the token type to use for the string delimiters and ordinary characters
(tokens parsed by the `middle` combinators keep their own types)
* `attrs` - metadata attributes for the string delimiters and ordinary characters
## Examples
single_quoted_heredocs = string_like(
"'''",
"'''",
combinators_inside_string,
:string_char
)
The above is equivalent to the following more explicit version:
single_quoted_heredocs = string_like(
string("'''"),
string("'''"),
combinators_inside_string,
:string_char
)
"""
def string_like(left, right, middle, ttype, attrs \\ %{}) when is_list(middle) do
left_combinator =
case is_binary(left) do
true -> string(left)
false -> left
end
right_combinator =
case is_binary(right) do
true -> string(right)
false -> right
end
# Any character is valid inside the string; `middle` entries get first shot.
choices = middle ++ [utf8_char([])]
left_combinator
|> repeat(lookahead_not(right_combinator) |> choice(choices))
|> concat(right_combinator)
|> post_traverse({__MODULE__, :collect_raw_chars_and_binaries, [ttype, attrs]})
end
end
|
lib/makeup/lexer/combinators.ex
| 0.91321
| 0.486575
|
combinators.ex
|
starcoder
|
defmodule Code.Typespec do
  @moduledoc false

  @doc """
  Converts a spec clause back to Elixir quoted expression.
  """
  @spec spec_to_quoted(atom, tuple) :: {atom, keyword, [Macro.t()]}
  def spec_to_quoted(name, spec)

  def spec_to_quoted(name, {:type, line, :fun, [{:type, _, :product, args}, result]})
      when is_atom(name) do
    meta = [line: line]
    body = {name, meta, Enum.map(args, &typespec_to_quoted/1)}

    # Free type variables become a `when` clause: `t when var: var`.
    vars =
      for type_expr <- args ++ [result],
          var <- collect_vars(type_expr),
          uniq: true,
          do: {var, {:var, meta, nil}}

    spec = {:::, meta, [body, typespec_to_quoted(result)]}

    if vars == [] do
      spec
    else
      {:when, meta, [spec, vars]}
    end
  end

  # A fun with no argument information: `name :: term`.
  def spec_to_quoted(name, {:type, line, :fun, []}) when is_atom(name) do
    {:::, [line: line], [{name, [line: line], []}, quote(do: term)]}
  end

  # A fun with `when` constraints (bounded_fun in the Erlang abstract format).
  def spec_to_quoted(name, {:type, line, :bounded_fun, [type, constrs]}) when is_atom(name) do
    {:type, _, :fun, [{:type, _, :product, args}, result]} = type

    guards =
      for {:type, _, :constraint, [{:atom, _, :is_subtype}, [{:var, _, var}, type]]} <- constrs do
        {var, typespec_to_quoted(type)}
      end

    meta = [line: line]
    # Vars already bound by constraints must not be re-listed as free vars.
    ignore_vars = Keyword.keys(guards)

    vars =
      for type_expr <- args ++ [result],
          var <- collect_vars(type_expr),
          var not in ignore_vars,
          uniq: true,
          do: {var, {:var, meta, nil}}

    args = for arg <- args, do: typespec_to_quoted(arg)

    when_args = [
      {:::, meta, [{name, [line: line], args}, typespec_to_quoted(result)]},
      guards ++ vars
    ]

    {:when, meta, when_args}
  end

  @doc """
  Converts a type clause back to Elixir AST.
  """
  def type_to_quoted(type)

  def type_to_quoted({{:record, record}, fields, args}) when is_atom(record) do
    fields = for field <- fields, do: typespec_to_quoted(field)
    args = for arg <- args, do: typespec_to_quoted(arg)
    type = {:{}, [], [record | fields]}
    quote(do: unquote(record)(unquote_splicing(args)) :: unquote(type))
  end

  def type_to_quoted({name, type, args}) when is_atom(name) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    quote(do: unquote(name)(unquote_splicing(args)) :: unquote(typespec_to_quoted(type)))
  end

  @doc """
  Returns all types available from the module's BEAM code.
  The result is returned as a list of tuples where the first
  element is the type (`:typep`, `:type` and `:opaque`).
  The module must have a corresponding BEAM file which can be
  located by the runtime system. The types will be in the Erlang
  Abstract Format.
  """
  @spec fetch_types(module | binary) :: {:ok, [tuple]} | :error
  def fetch_types(module) when is_atom(module) or is_binary(module) do
    case typespecs_abstract_code(module) do
      {:ok, abstract_code} ->
        exported_types = for {:attribute, _, :export_type, types} <- abstract_code, do: types
        exported_types = List.flatten(exported_types)

        types =
          for {:attribute, _, kind, {name, _, args} = type} <- abstract_code,
              kind in [:opaque, :type] do
            cond do
              kind == :opaque -> {:opaque, type}
              # Exported types are public; unexported ones are private (typep).
              {name, length(args)} in exported_types -> {:type, type}
              true -> {:typep, type}
            end
          end

        {:ok, types}

      _ ->
        :error
    end
  end

  @doc """
  Returns all specs available from the module's BEAM code.
  The result is returned as a list of tuples where the first
  element is spec name and arity and the second is the spec.
  The module must have a corresponding BEAM file which can be
  located by the runtime system. The types will be in the Erlang
  Abstract Format.
  """
  @spec fetch_specs(module) :: {:ok, [tuple]} | :error
  def fetch_specs(module) when is_atom(module) or is_binary(module) do
    case typespecs_abstract_code(module) do
      {:ok, abstract_code} ->
        {:ok, for({:attribute, _, :spec, value} <- abstract_code, do: value)}

      :error ->
        :error
    end
  end

  @doc """
  Returns all callbacks available from the module's BEAM code.
  The result is returned as a list of tuples where the first
  element is spec name and arity and the second is the spec.
  The module must have a corresponding BEAM file
  which can be located by the runtime system. The types will be
  in the Erlang Abstract Format.
  """
  @spec fetch_callbacks(module) :: {:ok, [tuple]} | :error
  def fetch_callbacks(module) when is_atom(module) or is_binary(module) do
    case typespecs_abstract_code(module) do
      {:ok, abstract_code} ->
        {:ok, for({:attribute, _, :callback, value} <- abstract_code, do: value)}

      :error ->
        :error
    end
  end

  # TODO: Do not rely on abstract_code when OTP 20+ support is dropped (v1.8).
  # We should then be able to simplify this code and use `with`.
  defp typespecs_abstract_code(module) do
    case get_module_and_beam(module) do
      {module, binary} ->
        case :beam_lib.chunks(binary, [:debug_info]) do
          {:ok, {_, [debug_info: {:debug_info_v1, backend, data}]}} ->
            case data do
              {:elixir_v1, %{}, specs} ->
                # Fast path to avoid translation to Erlang from Elixir.
                {:ok, specs}

              _ ->
                case backend.debug_info(:erlang_v1, module, data, []) do
                  {:ok, abstract_code} -> {:ok, abstract_code}
                  _ -> :error
                end
            end

          _ ->
            # Fall back to the legacy :abstract_code chunk (pre-OTP-20 beams).
            case :beam_lib.chunks(binary, [:abstract_code]) do
              {:ok, {_, [{:abstract_code, {_raw_abstract_v1, abstract_code}}]}} ->
                {:ok, abstract_code}

              _ ->
                :error
            end
        end

      :error ->
        :error
    end
  end

  defp get_module_and_beam(module) when is_atom(module) do
    case :code.get_object_code(module) do
      {^module, beam, _filename} -> {module, beam}
      :error -> :error
    end
  end

  defp get_module_and_beam(beam) when is_binary(beam) do
    case :beam_lib.info(beam) do
      [_ | _] = info -> {info[:module], beam}
      _ -> :error
    end
  end

  ## To AST conversion

  # Annotated types introduce no free variables of their own.
  defp collect_vars({:ann_type, _line, args}) when is_list(args) do
    []
  end

  defp collect_vars({:type, _line, _kind, args}) when is_list(args) do
    Enum.flat_map(args, &collect_vars/1)
  end

  defp collect_vars({:remote_type, _line, args}) when is_list(args) do
    Enum.flat_map(args, &collect_vars/1)
  end

  defp collect_vars({:typed_record_field, _line, type}) do
    collect_vars(type)
  end

  defp collect_vars({:paren_type, _line, [type]}) do
    collect_vars(type)
  end

  defp collect_vars({:var, _line, var}) do
    [erl_to_ex_var(var)]
  end

  defp collect_vars(_) do
    []
  end

  defp typespec_to_quoted({:user_type, line, name, args}) do
    typespec_to_quoted({:type, line, name, args})
  end

  defp typespec_to_quoted({:type, line, :tuple, :any}) do
    {:tuple, [line: line], []}
  end

  defp typespec_to_quoted({:type, line, :tuple, args}) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    {:{}, [line: line], args}
  end

  # A list of a union of 2-tuples may really be a keyword list: `[key: type]`.
  defp typespec_to_quoted({:type, _line, :list, [{:type, _, :union, unions} = arg]}) do
    case unpack_typespec_kw(unions, []) do
      {:ok, ast} -> ast
      :error -> [typespec_to_quoted(arg)]
    end
  end

  defp typespec_to_quoted({:type, line, :list, []}) do
    {:list, [line: line], []}
  end

  defp typespec_to_quoted({:type, _line, :list, [arg]}) do
    [typespec_to_quoted(arg)]
  end

  defp typespec_to_quoted({:type, line, :nonempty_list, []}) do
    [{:..., [line: line], nil}]
  end

  defp typespec_to_quoted({:type, line, :nonempty_list, [arg]}) do
    [typespec_to_quoted(arg), {:..., [line: line], nil}]
  end

  defp typespec_to_quoted({:type, line, :map, :any}) do
    {:map, [line: line], []}
  end

  defp typespec_to_quoted({:type, line, :map, fields}) do
    fields =
      Enum.map(fields, fn
        {:type, _, :map_field_assoc, :any} ->
          {{:optional, [], [{:any, [], []}]}, {:any, [], []}}

        {:type, _, :map_field_exact, [{:atom, _, k}, v]} ->
          {k, typespec_to_quoted(v)}

        {:type, _, :map_field_exact, [k, v]} ->
          {{:required, [], [typespec_to_quoted(k)]}, typespec_to_quoted(v)}

        {:type, _, :map_field_assoc, [k, v]} ->
          {{:optional, [], [typespec_to_quoted(k)]}, typespec_to_quoted(v)}
      end)

    # A `__struct__` key turns the map into a `%Struct{...}` literal.
    {struct, fields} = Keyword.pop(fields, :__struct__)
    map = {:%{}, [line: line], fields}

    if struct do
      {:%, [line: line], [struct, map]}
    else
      map
    end
  end

  defp typespec_to_quoted({:type, line, :binary, [arg1, arg2]}) do
    # Bug fix: the case below previously re-applied typespec_to_quoted/1 to
    # arguments that the comprehension had already converted. For integer
    # literals the second pass was a harmless no-op, but for any other form
    # it would degrade the already-converted AST.
    [arg1, arg2] = for arg <- [arg1, arg2], do: typespec_to_quoted(arg)

    case {arg1, arg2} do
      {arg1, 0} ->
        quote(line: line, do: <<_::unquote(arg1)>>)

      {0, arg2} ->
        quote(line: line, do: <<_::_*unquote(arg2)>>)

      {arg1, arg2} ->
        quote(line: line, do: <<_::unquote(arg1), _::_*unquote(arg2)>>)
    end
  end

  defp typespec_to_quoted({:type, line, :union, args}) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    # Right-fold so `a | b | c` nests as `a | (b | c)`.
    Enum.reduce(Enum.reverse(args), fn arg, expr -> {:|, [line: line], [arg, expr]} end)
  end

  defp typespec_to_quoted({:type, line, :fun, [{:type, _, :product, args}, result]}) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    [{:->, [line: line], [args, typespec_to_quoted(result)]}]
  end

  defp typespec_to_quoted({:type, line, :fun, [args, result]}) do
    [{:->, [line: line], [[typespec_to_quoted(args)], typespec_to_quoted(result)]}]
  end

  defp typespec_to_quoted({:type, line, :fun, []}) do
    typespec_to_quoted({:type, line, :fun, [{:type, line, :any}, {:type, line, :any, []}]})
  end

  defp typespec_to_quoted({:type, line, :range, [left, right]}) do
    {:.., [line: line], [typespec_to_quoted(left), typespec_to_quoted(right)]}
  end

  defp typespec_to_quoted({:type, _line, nil, []}) do
    []
  end

  defp typespec_to_quoted({:type, line, name, args}) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    {name, [line: line], args}
  end

  defp typespec_to_quoted({:var, line, var}) do
    {erl_to_ex_var(var), line, nil}
  end

  defp typespec_to_quoted({:op, line, op, arg}) do
    {op, [line: line], [typespec_to_quoted(arg)]}
  end

  defp typespec_to_quoted({:remote_type, line, [mod, name, args]}) do
    remote_type(line, mod, name, args)
  end

  defp typespec_to_quoted({:ann_type, line, [var, type]}) do
    {:::, [line: line], [typespec_to_quoted(var), typespec_to_quoted(type)]}
  end

  defp typespec_to_quoted(
         {:typed_record_field, {:record_field, line, {:atom, line1, name}}, type}
       ) do
    typespec_to_quoted({:ann_type, line, [{:var, line1, name}, type]})
  end

  defp typespec_to_quoted({:type, _, :any}) do
    quote(do: ...)
  end

  defp typespec_to_quoted({:paren_type, _, [type]}) do
    typespec_to_quoted(type)
  end

  # Literals such as {:atom, _, a} and {:integer, _, n} unwrap to their value.
  defp typespec_to_quoted({type, _line, atom}) when is_atom(type) do
    atom
  end

  defp typespec_to_quoted(other), do: other

  ## Helpers

  # Types the Elixir compiler stores under the :elixir pseudo-module are
  # rendered back as their builtin Elixir names.
  defp remote_type(line, {:atom, _, :elixir}, {:atom, _, :charlist}, []) do
    typespec_to_quoted({:type, line, :charlist, []})
  end

  defp remote_type(line, {:atom, _, :elixir}, {:atom, _, :nonempty_charlist}, []) do
    typespec_to_quoted({:type, line, :nonempty_charlist, []})
  end

  defp remote_type(line, {:atom, _, :elixir}, {:atom, _, :struct}, []) do
    typespec_to_quoted({:type, line, :struct, []})
  end

  defp remote_type(line, {:atom, _, :elixir}, {:atom, _, :as_boolean}, [arg]) do
    typespec_to_quoted({:type, line, :as_boolean, [arg]})
  end

  defp remote_type(line, {:atom, _, :elixir}, {:atom, _, :keyword}, args) do
    typespec_to_quoted({:type, line, :keyword, args})
  end

  defp remote_type(line, mod, name, args) do
    args = for arg <- args, do: typespec_to_quoted(arg)
    dot = {:., [line: line], [typespec_to_quoted(mod), typespec_to_quoted(name)]}
    {dot, [line: line], args}
  end

  # Erlang type vars are capitalized (X, Rest); downcase the first letter to
  # get the Elixir spelling, preserving a leading underscore.
  defp erl_to_ex_var(var) do
    case Atom.to_string(var) do
      <<"_", c::utf8, rest::binary>> ->
        String.to_atom("_#{String.downcase(<<c::utf8>>)}#{rest}")

      <<c::utf8, rest::binary>> ->
        String.to_atom("#{String.downcase(<<c::utf8>>)}#{rest}")
    end
  end

  # Attempts to render a union of {atom, type} 2-tuples as a keyword list.
  defp unpack_typespec_kw([{:type, _, :tuple, [{:atom, _, atom}, type]} | t], acc) do
    unpack_typespec_kw(t, [{atom, typespec_to_quoted(type)} | acc])
  end

  defp unpack_typespec_kw([], acc) do
    {:ok, Enum.reverse(acc)}
  end

  defp unpack_typespec_kw(_, _acc) do
    :error
  end
end
|
lib/elixir/lib/code/typespec.ex
| 0.73077
| 0.446133
|
typespec.ex
|
starcoder
|
defmodule Supervisor do
@moduledoc ~S"""
A behaviour module for implementing supervision functionality.
A supervisor is a process which supervises other processes, which we refer
to as *child processes*. Supervisors are used to build a hierarchical process
structure called a *supervision tree*. Supervision trees are a nice way to
structure fault-tolerant applications.
A supervisor implemented using this module has a standard set
of interface functions and includes functionality for tracing and error
reporting. It also fits into a supervision tree.
## Examples
In order to define a supervisor, we need to first define a child process
that is going to be supervised. In order to do so, we will define a GenServer
that represents a stack:
defmodule Stack do
use GenServer
def start_link(state, opts \\ []) do
GenServer.start_link(__MODULE__, state, opts)
end
def handle_call(:pop, _from, [h | t]) do
{:reply, h, t}
end
def handle_cast({:push, h}, t) do
{:noreply, [h | t]}
end
end
We can now define our supervisor and start it as follows:
# Import helpers for defining supervisors
import Supervisor.Spec
# Supervise the Stack server which will be started with
# two arguments. The initial stack, [:hello], and a
# keyword list containing the GenServer options that
# set the registered name of the server to MyStack.
children = [
worker(Stack, [[:hello], [name: MyStack]])
]
# Start the supervisor with our child
{:ok, pid} = Supervisor.start_link(children, strategy: :one_for_one)
# There is one child worker started
Supervisor.count_children(pid)
#=> %{active: 1, specs: 1, supervisors: 0, workers: 1}
Notice that when starting the GenServer, we are registering it
with name `MyStack`, which allows us to call it directly and
get what is on the stack:
GenServer.call(MyStack, :pop)
#=> :hello
GenServer.cast(MyStack, {:push, :world})
#=> :ok
GenServer.call(MyStack, :pop)
#=> :world
However, there is a bug in our stack server. If we call `:pop` and
the stack is empty, it is going to crash because no clause matches:
GenServer.call(MyStack, :pop)
** (exit) exited in: GenServer.call(MyStack, :pop, 5000)
Luckily, since the server is being supervised by a supervisor, the
supervisor will automatically start a new one, with the initial stack
of `[:hello]`:
GenServer.call(MyStack, :pop)
#=> :hello
Supervisors support different strategies; in the example above, we
have chosen `:one_for_one`. Furthermore, each supervisor can have many
workers and supervisors as children, each of them with their specific
configuration, shutdown values, and restart strategies.
The rest of this documentation will cover supervision strategies; also read
the documentation for the `Supervisor.Spec` module to learn about the
specification for workers and supervisors.
## Module-based supervisors
In the example above, a supervisor was started by passing the supervision
structure to `start_link/2`. However, supervisors can also be created by
explicitly defining a supervision module:
defmodule MyApp.Supervisor do
use Supervisor
def start_link do
Supervisor.start_link(__MODULE__, [])
end
def init([]) do
children = [
worker(Stack, [[:hello]])
]
# supervise/2 is imported from Supervisor.Spec
supervise(children, strategy: :one_for_one)
end
end
You may want to use a module-based supervisor if:
* You need to perform some particular action on supervisor
initialization, like setting up an ETS table.
* You want to perform partial hot-code swapping of the
tree. For example, if you add or remove children,
the module-based supervision will add and remove the
new children directly, while dynamic supervision
requires the whole tree to be restarted in order to
perform such swaps.
## Strategies
Supervisors support different supervision strategies (through the `:strategy`
option, as seen above):
* `:one_for_one` - if a child process terminates, only that
process is restarted.
* `:one_for_all` - if a child process terminates, all other child
processes are terminated and then all child processes (including
the terminated one) are restarted.
* `:rest_for_one` - if a child process terminates, the "rest" of
the child processes, i.e., the child processes after the terminated
one in start order, are terminated. Then the terminated child
process and the rest of the child processes are restarted.
* `:simple_one_for_one` - similar to `:one_for_one` but suits better
when dynamically attaching children. This strategy requires the
supervisor specification to contain only one child. Many functions
in this module behave slightly differently when this strategy is
used.
## Simple one for one
The `:simple_one_for_one` supervisor is useful when you want to dynamically
start and stop supervised children. For example, imagine you want to
dynamically create multiple stacks. We can do so by defining a `:simple_one_for_one`
supervisor:
# Import helpers for defining supervisors
import Supervisor.Spec
# This time, we don't pass any argument because
# the argument will be given when we start the child
children = [
worker(Stack, [], restart: :transient)
]
# Start the supervisor with our one child as a template
{:ok, sup_pid} = Supervisor.start_link(children, strategy: :simple_one_for_one)
# No child worker is active yet until start_child is called
Supervisor.count_children(sup_pid)
#=> %{active: 0, specs: 1, supervisors: 0, workers: 0}
There are a couple of differences here:
* the simple one for one specification can define only one child which
works as a template for when we call `start_child/2`
* we have defined the child to have a restart strategy of `:transient`. This
means that, if the child process exits due to a `:normal`, `:shutdown`,
or `{:shutdown, term}` reason, it won't be restarted. This is useful
as it allows our workers to politely shutdown and be removed from the
`:simple_one_for_one` supervisor, without being restarted. You can find
more information about restart strategies in the documentation for the
`Supervisor.Spec` module
With the supervisor defined, let's dynamically start stacks:
{:ok, pid} = Supervisor.start_child(sup_pid, [[:hello, :world], []])
GenServer.call(pid, :pop) #=> :hello
GenServer.call(pid, :pop) #=> :world
{:ok, pid} = Supervisor.start_child(sup_pid, [[:something, :else], []])
GenServer.call(pid, :pop) #=> :something
GenServer.call(pid, :pop) #=> :else
Supervisor.count_children(sup_pid)
#=> %{active: 2, specs: 1, supervisors: 0, workers: 2}
## Exit reasons
From the example above, you may have noticed that the `:transient` restart
strategy for the worker does not restart the child in case it exits with
reason `:normal`, `:shutdown` or `{:shutdown, term}`.
So one may ask: which exit reason should I choose when exiting my worker?
There are three options:
* `:normal` - in such cases, the exit won't be logged, there is no restart
in transient mode, and linked processes do not exit
* `:shutdown` or `{:shutdown, term}` - in such cases, the exit won't be
logged, there is no restart in transient mode, and linked processes exit
with the same reason unless they're trapping exits
* any other term - in such cases, the exit will be logged, there are
restarts in transient mode, and linked processes exit with the same reason
unless they're trapping exits
## Name registration
A supervisor is bound to the same name registration rules as a `GenServer`.
Read more about these rules in the documentation for `GenServer`.
"""
@doc false
# Injects the Supervisor behaviour and the Supervisor.Spec helpers
# (worker/2,3, supervise/2, ...) into the using module. `location: :keep`
# makes compile diagnostics point at the caller's file.
defmacro __using__(_) do
quote location: :keep do
@behaviour Supervisor
import Supervisor.Spec
end
end
@doc """
Callback invoked to start the supervisor and during hot code upgrades.
"""
# TODO: Support {:ok, [child_spec], Keyword.t}
# TODO: Document options here and update Supervisor.Spec
@callback init(args :: term) ::
{:ok, {:supervisor.sup_flags, [Supervisor.Spec.spec]}} |
:ignore
@typedoc "Return values of `start_link` functions"
@type on_start :: {:ok, pid} | :ignore |
{:error, {:already_started, pid} | {:shutdown, term} | term}
@typedoc "Return values of `start_child` functions"
@type on_start_child :: {:ok, child} | {:ok, child, info :: term} |
{:error, {:already_started, child} | :already_present | term}
@type child :: pid | :undefined
@typedoc "The Supervisor name"
@type name :: atom | {:global, term} | {:via, module, term}
@typedoc "Options used by the `start*` functions"
@type options :: [name: name,
strategy: Supervisor.Spec.strategy,
max_restarts: non_neg_integer,
max_seconds: non_neg_integer]
@typedoc "The supervisor reference"
@type supervisor :: pid | name | {atom, node}
@doc """
Starts a supervisor with the given children.
A strategy is required to be provided through the `:strategy` option.
Furthermore, the `:max_restarts` and `:max_seconds` options can be
configured as described in the documentation for `Supervisor.Spec.supervise/2`.
The options can also be used to register a supervisor name.
The supported values are described under the "Name registration"
section in the `GenServer` module docs.
If the supervisor and its child processes are successfully created
(i.e., if the start function of each child process returns `{:ok, child}`,
`{:ok, child, info}`, or `:ignore`) this function returns
`{:ok, pid}`, where `pid` is the PID of the supervisor. If a process with the
specified name already exists, the function returns `{:error,
{:already_started, pid}}`, where `pid` is the PID of that process.
If the start function of any of the child processes fails or returns an error
tuple or an erroneous value, the supervisor first terminates with reason
`:shutdown` all the child processes that have already been started, and then
terminates itself and returns `{:error, {:shutdown, reason}}`.
Note that a supervisor started with this function is linked to the parent
process and exits not only on crashes but also if the parent process exits
with `:normal` reason.
"""
@spec start_link([Supervisor.Spec.spec], options) :: on_start
# Convenience form: wraps the child list into a supervisor spec and starts
# it through the module-based variant with Supervisor.Default as callback.
def start_link(children, options) when is_list(children) do
spec = Supervisor.Spec.supervise(children, options)
start_link(Supervisor.Default, spec, options)
end
@doc """
Starts a supervisor module with the given `arg`.
To start the supervisor, the `c:init/1` callback will be invoked in the given
`module`, with `arg` as its argument. The `c:init/1` callback must return a
supervisor specification which can be created with the help of the functions
in the `Supervisor.Spec` module (especially `Supervisor.Spec.supervise/2`).
If the `c:init/1` callback returns `:ignore`, this function returns
`:ignore` as well and the supervisor terminates with reason `:normal`.
If it fails or returns an incorrect value, this function returns
`{:error, term}` where `term` is a term with information about the
error, and the supervisor terminates with reason `term`.
The `:name` option can also be given in order to register a supervisor
name, the supported values are described in the "Name registration"
section in the `GenServer` module docs.
"""
@spec start_link(module, term) :: on_start
@spec start_link(module, term, options) :: on_start
def start_link(module, arg, options \\ []) when is_list(options) do
# The :name option selects the registration mode understood by
# :supervisor.start_link/2,3; any other shape is rejected loudly.
case Keyword.get(options, :name) do
nil ->
:supervisor.start_link(module, arg)
atom when is_atom(atom) ->
:supervisor.start_link({:local, atom}, module, arg)
{:global, _term} = tuple ->
:supervisor.start_link(tuple, module, arg)
{:via, via_module, _term} = tuple when is_atom(via_module) ->
:supervisor.start_link(tuple, module, arg)
other ->
raise ArgumentError, """
expected :name option to be one of:
* nil
* atom
* {:global, term}
* {:via, module, term}
Got: #{inspect(other)}
"""
end
end
@doc """
Dynamically adds a child specification to `supervisor` and starts that child.
`child_spec` should be a valid child specification (unless the supervisor
is a `:simple_one_for_one` supervisor, see below). The child process will
be started as defined in the child specification.
In the case of `:simple_one_for_one`, the child specification defined in
the supervisor is used and instead of a `child_spec`, an arbitrary list
of terms is expected. The child process will then be started by appending
the given list to the existing function arguments in the child specification.
If a child specification with the specified id already exists, `child_spec` is
discarded and this function returns an error with `:already_started` or
`:already_present` if the corresponding child process is running or not,
respectively.
If the child process start function returns `{:ok, child}` or `{:ok, child,
info}`, then child specification and PID are added to the supervisor and
this function returns the same value.
If the child process start function returns `:ignore`, the child specification
is added to the supervisor, the PID is set to `:undefined` and this function
returns `{:ok, :undefined}`.
If the child process start function returns an error tuple or an erroneous
value, or if it fails, the child specification is discarded and this function
returns `{:error, error}` where `error` is a term containing information about
the error and child specification.
"""
@spec start_child(supervisor, Supervisor.Spec.spec | [term]) :: on_start_child
# Thin wrapper; the actual work happens inside the :supervisor process.
def start_child(supervisor, child_spec_or_args) do
call(supervisor, {:start_child, child_spec_or_args})
end
@doc """
Terminates the given children, identified by PID or child id.
If the supervisor is not a `:simple_one_for_one`, the child id is expected
and the process, if there's one, is terminated; the child specification is
kept unless the child is temporary.
In case of a `:simple_one_for_one` supervisor, a PID is expected. If the child
specification identifier is given instead of a `pid`, this function returns
`{:error, :simple_one_for_one}`.
A non-temporary child process may later be restarted by the supervisor. The child
process can also be restarted explicitly by calling `restart_child/2`. Use
`delete_child/2` to remove the child specification.
If successful, this function returns `:ok`. If there is no child specification
for the given child id or there is no process with the given PID, this
function returns `{:error, :not_found}`.
"""
@spec terminate_child(supervisor, pid | Supervisor.Spec.child_id) :: :ok | {:error, error}
when error: :not_found | :simple_one_for_one
# Accepts a PID for :simple_one_for_one supervisors, a child id otherwise.
def terminate_child(supervisor, pid_or_child_id) do
call(supervisor, {:terminate_child, pid_or_child_id})
end
@doc """
Deletes the child specification identified by `child_id`.
The corresponding child process must not be running; use `terminate_child/2`
to terminate it if it's running.
If successful, this function returns `:ok`. This function may return an error
with an appropriate error tuple if the `child_id` is not found, or if the
current process is running or being restarted.
This operation is not supported by `:simple_one_for_one` supervisors.
"""
@spec delete_child(supervisor, Supervisor.Spec.child_id) :: :ok | {:error, error}
when error: :not_found | :simple_one_for_one | :running | :restarting
# The child process must already be terminated (see terminate_child/2).
def delete_child(supervisor, child_id) do
call(supervisor, {:delete_child, child_id})
end
@doc """
Restarts a child process identified by `child_id`.
The child specification must exist and the corresponding child process must not
be running.
Note that for temporary children, the child specification is automatically deleted
when the child terminates, and thus it is not possible to restart such children.
If the child process start function returns `{:ok, child}` or `{:ok, child, info}`,
the PID is added to the supervisor and this function returns the same value.
If the child process start function returns `:ignore`, the PID remains set to
`:undefined` and this function returns `{:ok, :undefined}`.
This function may return an error with an appropriate error tuple if the
`child_id` is not found, or if the current process is running or being
restarted.
If the child process start function returns an error tuple or an erroneous value,
or if it fails, this function returns `{:error, error}`.
This operation is not supported by `:simple_one_for_one` supervisors.
"""
@spec restart_child(supervisor, Supervisor.Spec.child_id) ::
{:ok, child} | {:ok, child, term} | {:error, error}
when error: :not_found | :simple_one_for_one | :running | :restarting | term
# Restarts a previously-terminated, non-temporary child by its spec id.
def restart_child(supervisor, child_id) do
call(supervisor, {:restart_child, child_id})
end
@doc """
Returns a list with information about all children of the given supervisor.
Note that calling this function when supervising a large number of children
under low memory conditions can cause an out of memory exception.
This function returns a list of `{id, child, type, modules}` tuples, where:
* `id` - as defined in the child specification or `:undefined` in the case
of a `simple_one_for_one` supervisor
* `child` - the PID of the corresponding child process, `:restarting` if the
process is about to be restarted, or `:undefined` if there is no such
process
* `type` - `:worker` or `:supervisor`, as specified by the child specification
* `modules` - as specified by the child specification
"""
@spec which_children(supervisor) ::
[{Supervisor.Spec.child_id | :undefined,
child | :restarting,
Supervisor.Spec.worker,
Supervisor.Spec.modules}]
# Returns one {id, child, type, modules} tuple per child; the reply can be
# large for supervisors with many children.
def which_children(supervisor) do
call(supervisor, :which_children)
end
@doc """
Returns a map containing count values for the given supervisor.
The map contains the following keys:
* `:specs` - the total count of children, dead or alive
* `:active` - the count of all actively running child processes managed by
this supervisor
* `:supervisors` - the count of all supervisors whether or not these
child supervisors are still alive
* `:workers` - the count of all workers, whether or not these child workers
are still alive
"""
@spec count_children(supervisor) ::
%{specs: non_neg_integer, active: non_neg_integer,
supervisors: non_neg_integer, workers: non_neg_integer}
def count_children(supervisor) do
# :supervisor replies with a keyword list; convert it to a map for callers.
call(supervisor, :count_children) |> :maps.from_list
end
@doc """
Stops the given supervisor with the given `reason`.
It returns `:ok` if the supervisor terminates with the given
reason. If it terminates with another reason, the call exits.
This function keeps OTP semantics regarding error reporting.
If the reason is any other than `:normal`, `:shutdown` or
`{:shutdown, _}`, an error report is logged.
"""
@spec stop(supervisor, reason :: term, timeout) :: :ok
# Synchronous stop via :gen; the caller exits if the supervisor terminates
# with a reason other than the one given.
def stop(supervisor, reason \\ :normal, timeout \\ :infinity) do
:gen.stop(supervisor, reason, timeout)
end
# All supervisor requests funnel through this helper. The :infinity timeout
# mirrors :supervisor's own blocking semantics: restarts can legitimately
# take a long time and must not make the caller time out.
@compile {:inline, call: 2}
defp call(supervisor, req) do
GenServer.call(supervisor, req, :infinity)
end
end
|
lib/elixir/lib/supervisor.ex
| 0.766818
| 0.606265
|
supervisor.ex
|
starcoder
|
defmodule Drab.Core do
@moduledoc ~S"""
Drab module providing the base of communication between the browser and the server.
`Drab.Core` defines the method to declare client-side events, which are handled server-side in
the commander module. Also provides basic function for running JS code directly from Phoenix
on the browser.
## Commander
Commander is the module to keep your Drab functions (event handlers) in. See `Drab.Commander`
for more info, and just for this part of docs let's assume you have the following one defined:
defmodule DrabExample.PageCommander do
use Drab.Commander, modules: []
defhandler button_clicked(socket, payload) do
socket |> console("You've sent me this: #{payload |> inspect}")
end
end
## Events
Events are defined directly in the HTML by adding the `drab` attribute with the following pattern:
<button drab='event_name#options:event_handler_function_name(argument)'>clickme</button>
* `event_name` is the DOM event name, eg. "click", "blur"
* `event_handler_function_name` - the name of the event handler function in the commander on
the server side
* `options` - optional, so far the only available option is "debounce(milliseconds)" for
"keyup" event
* `argument` - optional, additional argument to be passed to the event handler function as
a third argument
Example:
<button drab='click:button_clicked'>clickme</button>
Clicking above button launches `DrabExample.PageCommander.button_clicked/2` on the server side.
<button drab='click:button_clicked(42)'>clickme</button>
Clicking the button above launches `DrabExample.PageCommander.button_clicked/3` on the server
side, with third argument of value 42. This is evaluated on the client side, so it could be
any valid JS expression:
<button drab='click:button_clicked({the_answer: 42})'>
<button drab='click:button_clicked(window.location)'>
You may have multiple events defined for a DOM object, but the specific event may appear there
only once (can't define two handlers for one event). Separate `event:handler` pairs with
whitespaces:
<button drab='click:button_clicked mouseover:prepare_button'>clickme</button>
### Shortcut form
There are few shortcuts for the most popular events: `click`, `keyup`, `keydown`, `change`.
For those events an attribute `drab-EVENTNAME` must be set. The following is an equivalent
for the previous one:
<button drab-click='button_clicked'>clickme</button>
As above, there is a possibility to define multiple event handlers for one DOM object, but
the only one handler for the event. The following form is valid:
<button drab-click='button_clicked' drab-mouseover='prepare_button(42)'>clickme</button>
But the next one is prohibited:
<button drab-click='handler1' drab-click='handler2'>INCORRECT</button>
In this case you may provide options with `drab-options` attribute, but only when you have
the only one event defined.
There is a possibility to configure the shortcut list:
config :drab, MyAppWeb.Endpoint,
events_shorthands: ["click", "keyup", "blur"]
Please keep this list short, as it affects client script performance.
#### Defining optional argument in multiple nodes with `drab-argument` attribute
If you add `drab-argument` attribute to any tag, all children of this tag will use this as
an optional attribute. Notice that the existing arguments are not overwritten, so this:
<div drab-argument='42'>
<button drab-click='button_clicked'>
<button drab-click='button_clicked(43)'>
</div>
is the equivalent to:
<button drab-click='button_clicked(42)'>
<button drab-click='button_clicked(43)'>
### Handling event in any commander (Shared Commander)
By default Drab runs the event handler in the commander module corresponding to the controller,
which rendered the current page. But it is possible to choose the module by simply provide
the full path to the commander:
<button drab-click='MyAppWeb.MyCommander.button_clicked'>clickme</button>
Notice that the module must be a commander module, ie. it must be marked with
`use Drab.Commander`, and the function must be marked as public with `Drab.Commander.public/1`
macro.
### Form values
If the sender object is inside a `<form>` tag, it sends the "form" map, which contains values
of all the inputs found within the form. Keys of that map are "name" attribute of the input or,
if not found, an "id" attribute. If neither "name" or "id" is given, the value of the form is
not included.
## Control of element enabled/disabled state of element
By default, Drab takes control of enabled/disabled state of the Drab element. It disables the
element when the handler is still running, to prevent multiple clicks. Element is back to the
previous (enabled) state after the handler finish. Also in case of disconnection, Drab-controlled
elements are disabled.
You may turn off this behaviour globally using the config options, see `Drab.Config`.
There is also a possibility to turn it off individually, using `drab-no-disable` attribute:
<button drab-click="clickety" drab-no-disable>Button</button>
## Running Elixir code from the Browser
There is the Javascript method
[`Drab.exec_elixir()`](Drab.Client.html#module-drab-exec_elixir-elixir_function_name-argument)
in the global `Drab` object, which allows you to run the Elixir function defined in the Commander.
## Store
Analogically to Plug, Drab can store the values in its own session. To avoid confusion with
the Plug Session session, it is called a Store. You can use functions: `put_store/3` and
`get_store/2` to read and write the values in the Store. It works exactly the same way as
a "normal", Phoenix session.
* By default, Drab Store is kept in browser Local Storage. This means it is gone when you close
the browser or the tab. You may set up where to keep the data with `drab_store_storage`
config entry, see Drab.Config
* Drab Store is not the Plug Session! This is a different entity. Anyway, you have an access
to the Plug Session (details below).
* Drab Store is stored on the client side and it is signed, but - as the Plug Session cookie -
not ciphered.
## Session
Although Drab Store is a different entity than Plug Session (used in Controllers), there is a way
to access the Session. First, you need to whitelist the keys you want to access in
`access_session/1` macro in the Commander (you may give it a list of atoms or a single atom).
Whitelisting is due to security: it is kept in Token, on the client side, and it is signed
but not encrypted.
defmodule DrabPoc.PageCommander do
use Drab.Commander
onload :page_loaded,
access_session :drab_test
def page_loaded(socket) do
socket
|> update(:val, set: get_session(socket, :drab_test), on: "#show_session_test")
end
end
There is no way to update the session from Drab. Session is read-only.
## Broadcasting
Normally Drab operates on the user interface of the browser which generated the event, but
you may use it for broadcasting changes to all connected browsers. Drab uses a *topic*
for distinguishing browsers, which are allowed to receive the change.
Broadcasting function receives `socket` or `topic` as the first argument. If `socket` is used,
function derives the `topic` from the commander configuration. See
`Drab.Commander.broadcasting/1` to learn how to configure the broadcasting options. It is also
possible to subscribe to the external topic in a runtime, using `Drab.Commander.subscribe/2`.
Broadcasting functions may be launched without the `socket` given. In this case, you need
to define it manually, using helper functions: `Drab.Core.same_path/1`, `Drab.Core.same_topic/1`
and `Drab.Core.same_controller/1`. See `broadcast_js/3` for more.
List of broadcasting functions:
* `Drab.Core`:
* `Drab.Core.broadcast_js/3`
* `Drab.Core.broadcast_js!/3`
* `Drab.Live`:
* `Drab.Live.broadcast_poke/2`
* `Drab.Element`:
* `Drab.Element.broadcast_insert/4`
* `Drab.Element.broadcast_prop/3`
* `Drab.Query`:
* `Drab.Query.delete!/2`
* `Drab.Query.execute/2`, `Drab.Query.execute/3`
* `Drab.Query.insert!/2`, `Drab.Query.insert!/3`
* `Drab.Query.update!/2`, `Drab.Query.update!/3`
"""
require Logger
use DrabModule
@typedoc "Returned status of all Core operations"
@type status :: :ok | :error
@typedoc "Types returned from the browser"
@type return :: String.t() | map | float | integer | list
@typedoc "Return value of `exec_js/2`"
@type result :: {status, return | :disconnected | :timeout}
@typedoc "Return value of `broadcast_js/2`"
@type bcast_result :: {:ok, term} | {:error, term}
@typedoc "Subject for broadcasting"
@type subject :: Phoenix.Socket.t() | String.t() | {atom, String.t()} | list
@typedoc "Input: binary string or safe"
@type input :: String.t() | Phoenix.HTML.safe()
@impl true
# Names of the JavaScript templates this module contributes to the client.
def js_templates do
  ["drab.core.js", "drab.events.js"]
end
@impl true
# When the event sender sits inside a <form>, the client attaches a "form"
# map with the input values; expose it additionally under :params in a
# Plug-style nested shape. Payloads without a form pass through untouched.
def transform_payload(payload, _state) do
case payload["form"] do
nil -> payload
form -> Map.put_new(payload, :params, normalize_params(form))
end
end
@doc false
@spec normalize_params(map) :: map
# Round-trips the flat form map through Plug's query encoder/decoder so
# bracketed keys (e.g. "user[name]") become nested maps.
def normalize_params(params) do
params |> Plug.Conn.Query.encode() |> Plug.Conn.Query.decode()
end
@doc """
Synchronously executes the given javascript on the client side.
Returns tuple `{status, return_value}`, where status could be `:ok` or `:error`,
and return value contains the output computed by the Javascript or the error message.
### Options
* `timeout` in milliseconds
### Examples
iex> socket |> exec_js("2 + 2")
{:ok, 4}
iex> socket |> exec_js("not_existing_function()")
{:error, "not_existing_function is not defined"}
iex> socket |> exec_js("for(i=0; i<1000000000; i++) {}")
{:error, :timeout}
iex> socket |> exec_js("alert('hello from IEx!')", timeout: 500)
{:error, :timeout}
"""
@spec exec_js(Phoenix.Socket.t(), input, Keyword.t()) :: result
def exec_js(socket, javascript, options \\ [])
# Phoenix.HTML safe tuples are unwrapped to a plain binary first.
def exec_js(socket, {:safe, _} = safe, options) do
exec_js(socket, Phoenix.HTML.safe_to_string(safe), options)
end
def exec_js(socket, js, options) do
# Blocks the caller until the browser responds or the timeout fires.
Drab.push_and_wait_for_response(socket, self(), "execjs", [js: js], options)
end
@doc """
Exception raising version of `exec_js/2`
### Examples
iex> socket |> exec_js!("2 + 2")
4
iex> socket |> exec_js!("nonexistent")
** (Drab.JSExecutionError) nonexistent is not defined
(drab) lib/drab/core.ex:100: Drab.Core.exec_js!/2
iex> socket |> exec_js!("for(i=0; i<1000000000; i++) {}")
** (Drab.JSExecutionError) timeout
(drab) lib/drab/core.ex:100: Drab.Core.exec_js!/2
iex> socket |> exec_js!("for(i=0; i<10000000; i++) {}", timeout: 1000)
** (Drab.JSExecutionError) timeout
lib/drab/core.ex:114: Drab.Core.exec_js!/3
"""
@spec exec_js!(Phoenix.Socket.t(), input, Keyword.t()) :: return | no_return
def exec_js!(socket, javascript, options \\ [])
# Phoenix.HTML safe tuples are unwrapped to a plain binary first.
def exec_js!(socket, {:safe, _} = safe, options) do
exec_js!(socket, Phoenix.HTML.safe_to_string(safe), options)
end
def exec_js!(socket, js, options) do
case exec_js(socket, js, options) do
# A dead connection gets its own exception type ...
{:error, :disconnected} -> raise Drab.ConnectionError
# ... everything else is either unwrapped or raised by the helper.
other -> Drab.JSExecutionError.result_or_raise(other)
end
end
@doc """
Asynchronously executes the javascript on all the browsers listening on the given subject.
The subject is derived from the first argument, which could be:
* socket - in this case broadcasting option is derived from the setup in the commander.
See `Drab.Commander.broadcasting/1` for the broadcasting options
* same_path(string) - sends the JS to browsers sharing (and configured as listening to same_path
in `Drab.Commander.broadcasting/1`) the same url
* same_commander(atom) - broadcast goes to all browsers configured with :same_commander
* same_topic(string) - broadcast goes to all browsers listening to this topic; notice: this
is internal Drab topic, not a Phoenix Socket topic
First argument may be a list of the above.
The second argument is a JavaScript string.
See `Drab.Commander.broadcasting/1` to find out how to change the listen subject.
iex> Drab.Core.broadcast_js(socket, "alert('Broadcasted!')")
{:ok, :broadcasted}
iex> Drab.Core.broadcast_js(same_path("/drab/live"), "alert('Broadcasted!')")
{:ok, :broadcasted}
iex> Drab.Core.broadcast_js(same_controller(MyApp.LiveController), "alert('Broadcasted!')")
{:ok, :broadcasted}
iex> Drab.Core.broadcast_js(same_topic("my_topic"), "alert('Broadcasted!')")
{:ok, :broadcasted}
iex> Drab.Core.broadcast_js([same_topic("my_topic"), same_path("/drab/live")],
"alert('Broadcasted!')")
{:ok, :broadcasted}
Returns `{:ok, :broadcasted}`
"""
@spec broadcast_js(subject, input, Keyword.t()) :: bcast_result
def broadcast_js(socket, javascript, options \\ [])
# Phoenix.HTML safe tuples are unwrapped to a plain binary first.
def broadcast_js(socket, {:safe, _} = safe, options) do
broadcast_js(socket, Phoenix.HTML.safe_to_string(safe), options)
end
def broadcast_js(subject, js, _options) do
# Asynchronous: no responses are awaited from the receiving browsers.
ret = Drab.broadcast(subject, self(), "broadcastjs", js: js)
{ret, :broadcasted}
end
@doc """
Helper for broadcasting functions, returns topic for a given URL path.
iex> same_path("/test/live")
"__drab:same_path:/test/live"
"""
@spec same_path(String.t()) :: String.t()
def same_path(url), do: "__drab:same_path:#{url}"
@doc """
Helper for broadcasting functions, returns topic for a given endpoint and URL path.
To be used in multiple endpoint environments only.
iex> same_path(DrabTestApp.Endpoint, "/test/live")
{DrabTestApp.Endpoint, "__drab:same_path:/test/live"}
"""
@spec same_path(atom, String.t()) :: {atom, String.t()}
def same_path(endpoint, url), do: {endpoint, same_path(url)}
@doc """
Helper for broadcasting functions, returns the Drab topic for a given controller.
    iex> same_controller(DrabTestApp.LiveController)
    "__drab:controller:Elixir.DrabTestApp.LiveController"
"""
@spec same_controller(String.t() | atom) :: String.t()
def same_controller(controller) do
  "__drab:controller:" <> to_string(controller)
end
@doc """
Helper for broadcasting functions, returns topic for a given endpoint and controller.
To be used in multiple endpoint environments only.
    iex> same_controller(DrabTestApp.Endpoint, DrabTestApp.LiveController)
    {DrabTestApp.Endpoint, "__drab:controller:Elixir.DrabTestApp.LiveController"}
"""
@spec same_controller(atom, String.t() | atom) :: {atom, String.t()}
def same_controller(endpoint, controller) do
  {endpoint, same_controller(controller)}
end
@doc """
Helper for broadcasting functions, returns topic for a given controller and action.
    iex> same_action(DrabTestApp.LiveController, :index)
    "__drab:action:Elixir.DrabTestApp.LiveController#index"
"""
# FIX: the doctest previously showed "controller:Elixir...#index", but the
# implementation produces "__drab:action:...#index"; the examples are now
# aligned with the actual return value (the code was correct, the docs were not).
@spec same_action(String.t() | atom, String.t() | atom) :: String.t()
def same_action(controller, action), do: "__drab:action:#{controller}##{action}"
@doc """
Helper for broadcasting functions, returns topic for a given endpoint, controller and action.
To be used in multiple endpoint environments only.
    iex> same_action(DrabTestApp.Endpoint, DrabTestApp.LiveController, :index)
    {DrabTestApp.Endpoint, "__drab:action:Elixir.DrabTestApp.LiveController#index"}
"""
@spec same_action(atom, String.t() | atom, String.t() | atom) :: {atom, String.t()}
def same_action(endpoint, controller, action), do: {endpoint, same_action(controller, action)}
@doc """
Helper for broadcasting functions, returns a Drab topic for a given topic string.
Drab broadcasting topics are different from Phoenix topics - they begin with "__drab:".
This is because you can share the Drab socket with your own one.
    iex> same_topic("mytopic")
    "__drab:mytopic"
"""
@spec same_topic(String.t()) :: String.t()
def same_topic(topic) do
  "__drab:" <> to_string(topic)
end
@doc """
Helper for broadcasting functions, returns a Drab topic for a given endpoint and topic string.
To be used in multiple endpoint environments only.
    iex> same_topic(DrabTestApp.Endpoint, "mytopic")
    {DrabTestApp.Endpoint, "__drab:mytopic"}
"""
@spec same_topic(atom, String.t()) :: {atom, String.t()}
def same_topic(endpoint, topic) do
  {endpoint, same_topic(topic)}
end
@doc false
@spec encode_js(term) :: String.t() | no_return
# Encodes an Elixir term to a JSON string; raises (Jason.EncodeError) on
# unencodable terms.
def encode_js(value), do: Jason.encode!(value)
@doc false
@spec decode_js(iodata) :: term
# Best-effort JSON decode: on parse failure the input is returned unchanged
# rather than raising.
def decode_js(value) do
  case Jason.decode(value) do
    {:ok, v} -> v
    _ -> value
  end
end
@doc """
Returns the value of the Drab store represented by the given key.
    uid = get_store(socket, :user_id)
"""
@spec get_store(Phoenix.Socket.t(), atom) :: term
def get_store(socket, key) do
  socket
  |> Drab.pid()
  |> Drab.get_store()
  |> Access.get(key)
end
@doc """
Returns the value of the Drab store represented by the given key or `default` when key not found.
    counter = get_store(socket, :counter, 0)
"""
@spec get_store(Phoenix.Socket.t(), atom, term) :: term
def get_store(socket, key, default) do
  value = get_store(socket, key)
  value || default
end
@doc """
Saves the key => value in the Store. Returns unchanged socket.
    put_store(socket, :counter, 1)
"""
@spec put_store(Phoenix.Socket.t(), atom, term) :: Phoenix.Socket.t()
def put_store(socket, key, value) do
  # Merge the new pair into the current (browser-held) store, then push the
  # re-tokenized store back to the client. The `{:ok, _}` match asserts the
  # client-side JS succeeded.
  store = socket |> store() |> Map.merge(%{key => value})
  {:ok, _} = exec_js(socket, "Drab.set_drab_store_token(\"#{tokenize_store(socket, store)}\")")
  # store the store in Drab server, to have it on terminate
  save_store(socket, store)
  socket
end
@doc false
@spec save_store(Phoenix.Socket.t(), map) :: :ok
# Persists the store map in the Drab server process, so it is still available
# in the terminate callback.
def save_store(socket, store) do
  Drab.set_store(Drab.pid(socket), store)
end
@doc false
@spec save_socket(Phoenix.Socket.t()) :: :ok
# Caches the socket itself in the Drab server process.
def save_socket(socket) do
  Drab.set_socket(Drab.pid(socket), socket)
end
@doc """
Returns the value of the Plug Session represented by the given key.
    counter = get_session(socket, :userid)
You must explicit which session keys you want to access in `:access_session` option in
`use Drab.Commander` or globally, in `config.exs`:
    config :drab, MyAppWeb.Endpoint,
      access_session: [:user_id]
"""
@spec get_session(Phoenix.Socket.t(), atom) :: term
def get_session(socket, key) do
  # The session map is stashed under the :__session assign; absent session
  # short-circuits to nil.
  session = socket.assigns[:__session]
  session && session[key]
end
@doc """
Returns the value of the Plug Session represented by the given key or `default`,
when key not found.
    counter = get_session(socket, :userid, 0)
See also `get_session/2`.
"""
@spec get_session(Phoenix.Socket.t(), atom, term) :: term
def get_session(socket, key, default) do
  value = get_session(socket, key)
  value || default
end
@doc false
@spec store(Phoenix.Socket.t()) :: map
# Reads the encrypted store token from the browser and decrypts it into a map.
# The `{:ok, _}` match asserts the client-side JS call succeeded.
def store(socket) do
  {:ok, store_token} = exec_js(socket, "Drab.get_drab_store_token()")
  detokenize_store(socket, store_token)
end
@doc false
@spec tokenize_store(Phoenix.Socket.t() | Plug.Conn.t(), map) :: String.t()
# Encrypts the store map into an opaque token safe to hand to the browser.
def tokenize_store(socket, store) do
  Drab.Live.Crypto.encrypt(store, endpoint(socket), "drab_store_token")
end
@doc false
@spec detokenize_store(Phoenix.Socket.t() | Plug.Conn.t(), String.t()) :: map
# Idiomatic nil handling: pattern-match on `nil` directly instead of the
# non-idiomatic `when token == nil` guard (behavior unchanged).
def detokenize_store(_socket, nil), do: %{}
def detokenize_store(socket, drab_store_token) do
  # we just ignore wrong token and default the store to %{}
  # this is because it is read on connect, and raising here would cause infinite reconnects
  case Drab.Live.Crypto.decrypt(drab_store_token, endpoint(socket), "drab_store_token") do
    :error -> %{}
    x -> x
  end
end
@doc false
@spec endpoint(Phoenix.Socket.t() | Plug.Conn.t()) :: atom
# Resolves the endpoint module from either a Phoenix socket or a Plug conn.
def endpoint(%Phoenix.Socket{} = socket), do: socket.endpoint
def endpoint(%Plug.Conn{} = conn), do: Phoenix.Controller.endpoint_module(conn)
@doc """
Returns the selector of object, which triggered the event. To be used only in event handlers.
    def button_clicked(socket, sender) do
      set_prop socket, this(sender), innerText: "already clicked"
      set_prop socket, this(sender), disabled: true
    end
"""
@spec this(map) :: String.t()
def this(sender) do
  # `drab_id` is injected into the sender map by the Drab JS client;
  # encode_js JSON-quotes it so the attribute selector is safe.
  "[drab-id=#{Drab.Core.encode_js(sender["drab_id"])}]"
end
@doc """
Like `this/1`, but returns selector of the object ID.
    def button_clicked(socket, sender) do
      socket |> update!(:text, set: "already clicked", on: this!(sender))
      socket |> update!(attr: "disabled", set: true, on: this!(sender))
    end
Raises exception when being used on the object without an ID.
"""
@spec this!(map) :: String.t()
def this!(sender) do
  id = sender["id"]

  if id do
    "##{id}"
  else
    raise(ArgumentError, """
    Try to use Drab.Core.this!/1 on DOM object without an ID:
    #{inspect(sender)}
    """)
  end
end
@doc """
Returns the unique selector of the DOM object, which represents the shared commander of
the event triggerer.
In case the event was triggered outside the Shared Commander, returns "" (empty string).
To be used only in event handlers. Allows to create reusable Drab components.
    <div drab-commander="DrabTestApp.Shared1Commander">
      <div class="spaceholder1">Nothing</div>
      <button drab-click="button_clicked">Shared 1</button>
    </div>
    def button_clicked(socket, sender) do
      set_prop socket, this_commander(sender) <> " .spaceholder1", innerText: "changed"
    end
"""
@spec this_commander(map) :: String.t()
def this_commander(sender) do
  case Map.get(sender, "drab_commander_id") do
    nil -> ""
    commander_id -> "[drab-id=#{Drab.Core.encode_js(commander_id)}]"
  end
end
@doc false
@spec desafe_values(map | Keyword.t()) :: map
# Converts a map or keyword list into a map, unwrapping any Phoenix.HTML
# `{:safe, iodata}` values into plain strings.
def desafe_values(properties) do
  Map.new(properties, fn
    {key, {:safe, _} = safe} -> {key, Phoenix.HTML.safe_to_string(safe)}
    {key, value} -> {key, value}
  end)
end
end
|
lib/drab/core.ex
| 0.749087
| 0.431584
|
core.ex
|
starcoder
|
defmodule RTypes.Generator do
  @moduledoc """
  Generator module provides the `make/4` function and `make/2` macro
  which allow to automatically derive a data generator to be used with
  property-based frameworks.
  For example, to write a unit test for a pure function with a given
  spec to use with `StreamData` framework one could write something
  along the lines:
  ```elixir
  defmodule MyTest do
    use ExUnit.Case
    use ExUnitProperties
    require RTypes
    require RTypes.Generator, as: Generator
    # @spec f(arg_type) :: result_type
    arg_type_gen = Generator.make(arg_type, Generator.StreamData)
    result_type? = RTypes.make_predicate(result_type)
    property \"for any parameter `f/1` returns value of `result_type`\" do
      check all value <- arg_type_gen do
        assert result_type?.(f(value))
      end
    end
  end
  ```
  The above would generate 100 (by default) values that belong to
  `arg_type` and verify that the result belongs to `result_type`.
  """
  import RTypes.Internal, only: [decompose_and_expand: 2, expand_type_args: 1]
  # Behaviour contract for backends: turn an extracted type AST into a generator.
  @callback derive(RTypes.Extractor.type()) :: generator when generator: term()
  @doc """
  Make a data generator for the given type and backend.
  - `type` is a literal type expression, e.g. `:inet.port_number()`
  - `backend` is a module implementing a particular property-based
    backend, e.g. `RTypes.Generator.StreamData`
  """
  defmacro make(type, backend) do
    # Expand the literal type expression in the caller's environment so that
    # aliases/imports resolve correctly at the call site.
    type_expr = decompose_and_expand(type, __CALLER__)
    typ =
      case type_expr do
        # Remote type, e.g. :inet.port_number/0
        {mod, type_name, args} ->
          RTypes.Extractor.extract_type(mod, type_name, expand_type_args(args))
        # Built-in/local type, e.g. non_neg_integer/0
        {type_name, args} ->
          {:type, 0, type_name, expand_type_args(args)}
      end
    # Escape the extracted type AST so it survives into the generated runtime
    # code, then let the chosen backend build the generator.
    quote bind_quoted: [typ: Macro.escape(typ), mod: backend] do
      mod.derive(typ)
    end
  end
  @doc """
  Make a data generator for the given type AST and backend.
  - `mod` is a module name implementing a type,
  - `type_name` is the type name
  - `type_args` is a list of type arguments in AST form
  - `backend` a module implementing a particular property-based
    backend, e.g. `RTypes.Generator.StreamData`
  For example
  ```
  iex> alias RTypes.Generator.StreamData, as: SD
  iex> g = RTypes.Generator.make(:inet, :port_number, [], SD)
  ```
  """
  @spec make(module(), atom(), [RTypes.Extractor.type()], module()) :: generator
        when generator: term()
  def make(mod, type_name, type_args, backend) do
    typ = RTypes.Extractor.extract_type(mod, type_name, type_args)
    backend.derive(typ)
  end
end
|
lib/rtypes/generator.ex
| 0.934806
| 0.958499
|
generator.ex
|
starcoder
|
defmodule Control.Access do
  @moduledoc """
  Re-implementation of `Kernel`'s nested access helpers (`get_in/2`,
  `update_in/3`, `get_and_update_in/3`, `pop_in/2`) plus extra accessors
  (`each/0`, `values/0`, `all/0`, `at/1`, `map_as_list/0`).

  Accessor functions follow the `Access` protocol: they take
  `(op, data, next)` where `op` is `:get`, `:update` or `:get_and_update`,
  and `next` continues the traversal.
  """

  import Kernel, except: [get_in: 2, update_in: 3, get_and_update_in: 3, pop_in: 2]

  # defdelegate at(idx), to: Access
  # defdelegate elem(idx), to: Access
  defdelegate key(k, default \\ nil), to: Access
  defdelegate key!(k), to: Access
  defdelegate fetch(t, k), to: Access
  defdelegate get(t, k, default \\ nil), to: Access
  defdelegate get_and_update(t, k, f), to: Access
  defdelegate pop(t, k), to: Access

  # Like `fetch/2` but raises `KeyError` when the key is absent.
  def fetch!(x, k) do
    case fetch(x, k) do
      {:ok, y} -> y
      :error -> raise(KeyError, key: k, term: x)
    end
  end

  # Applies `f` to the value under `k`, discarding the "get" part.
  def update(t, k, f) do
    elem(get_and_update(t, k, fn x -> {nil, f.(x)} end), 1)
  end

  # Accessor over each {key, value} pair of an enumerable.
  def each do &each/3 end
  def each(op, data, next) do all(op, Enum.to_list(data), next) end
  # Variant that pours results back into collectable `t`.
  def each(t) do &each(&1, &2, &3, t) end
  def each(:get_and_update = op, data, next, t) do
    {get, update} = all(op, Enum.to_list(data), next)
    {Enum.into(get, t), Enum.into(update, t)}
  end
  def each(op, data, next, t) when op in [:get, :update] do
    all(op, Enum.to_list(data), next)
    |> Enum.into(t)
  end

  # Accessor over the values of a key/value enumerable (keys untouched).
  def values do &values/3 end
  def values(op, data, next) do all(op, Enum.to_list(data), fn x -> Access.elem(1).(op, x, next) end) end
  # Variant that pours results back into collectable `t`.
  def values(t) do &values(&1, &2, &3, t) end
  def values(:get_and_update = op, data, next, t) do
    # FIX: removed leftover `|> IO.inspect()` debugging calls that printed the
    # op and every intermediate result to stdout on each traversal step.
    {get, update} = all(op, Enum.to_list(data), fn x -> Access.elem(1).(op, x, next) end)
    {Enum.into(get, t), Enum.into(update, t)}
  end
  def values(op, data, next, t) when op in [:get, :update] do
    all(op, Enum.to_list(data), fn x -> Access.elem(1).(op, x, next) end)
    |> Enum.into(t)
  end

  # Accessor that presents a map to `next` as a list of {k, v} pairs.
  def map_as_list do &go_map_as_list/3 end
  def go_map_as_list(:get, data, next) do next.(Enum.to_list(data)) end
  def go_map_as_list(:get_and_update, data, next) do Kernel.update_in(next.(Enum.to_list(data)), [Access.elem(1)], &Map.new/1) end
  def map_values(:get, data, next) do Enum.map(data, next) end

  # get_in/2: function accessors are dispatched first, nil propagates,
  # everything else goes through Access.get/2.
  def get_in(t, [x]) when is_function(x, 3) do x.(:get, t, fn x -> x end) end
  def get_in(t, [x | xs]) when is_function(x, 3) do x.(:get, t, fn t2 -> get_in(t2, xs) end) end
  def get_in(nil, [_]) do nil end
  def get_in(nil, [_ | xs]) do get_in(nil, xs) end
  def get_in(t, [x]) do Access.get(t, x) end
  def get_in(t, [x | xs]) do get_in(Access.get(t, x), xs) end

  def get_and_update_in(t, [x], f) when is_function(x, 3) do x.(:get_and_update, t, f) end
  def get_and_update_in(t, [x | xs], f) when is_function(x, 3) do x.(:get_and_update, t, fn t2 -> get_and_update_in(t2, xs, f) end) end
  def get_and_update_in(t, [x], f) do get_and_update(t, x, f) end
  def get_and_update_in(t, [x | xs], f) do get_and_update(t, x, fn t2 -> get_and_update_in(t2, xs, f) end) end

  def update_in(t, [x], f) when is_function(x, 3) do x.(:update, t, f) end
  def update_in(t, [x | xs], f) when is_function(x, 3) do x.(:update, t, fn t2 -> update_in(t2, xs, f) end) end
  def update_in(t, [x], f) do update(t, x, f) end
  def update_in(t, [x | xs], f) do update(t, x, fn t2 -> update_in(t2, xs, f) end) end

  # pop_in/2 — FIXES (mirrors Kernel.pop_in/2 semantics):
  # * `x.(:pop, t)` called a 3-arity accessor with 2 args (BadArityError);
  #   a pop through an accessor is expressed as :get_and_update whose `next`
  #   returns :pop (handled by all/4 and get_and_update_at/4 below).
  # * the intermediate-accessor clause passed the unsupported op `:pop`;
  #   it now uses :get_and_update, since pop_in/2 itself returns the
  #   {popped, rest} tuple the accessor expects.
  # * the final clause called undefined `pop/3`; it now threads the recursion
  #   through get_and_update/3.
  def pop_in(nil, [x | _]) do Access.pop(nil, x) end
  def pop_in(t, [x]) when is_function(x, 3) do x.(:get_and_update, t, fn _ -> :pop end) end
  def pop_in(t, [x | xs]) when is_function(x, 3) do x.(:get_and_update, t, fn t2 -> pop_in(t2, xs) end) end
  def pop_in(t, [x]) do pop(t, x) end
  def pop_in(t, [x | xs]) do get_and_update(t, x, fn t2 -> pop_in(t2, xs) end) end

  # Accessors
  # Accessor over every element of a list.
  def all() do
    &all/3
  end
  def all(op, data, next) when op in [:get, :update] and is_list(data) do
    Enum.map(data, next)
  end
  def all(:get_and_update, data, next) when is_list(data) do
    all(data, next, [], [])
  end
  def all(:pop, data, next) when is_list(data) do
    all(data, next, [], [])
  end
  def all(_op, data, _next) do
    raise "Access.all/0 expected a list, got: #{inspect data}"
  end
  # Accumulator walk for :get_and_update; a `next` returning :pop drops the
  # element from the updates while keeping it in the gets.
  def all([head | rest], next, gets, updates) do
    case next.(head) do
      {get, update} -> all(rest, next, [get | gets], [update | updates])
      :pop -> all(rest, next, [head | gets], updates)
    end
  end
  def all([], _next, gets, updates) do
    {:lists.reverse(gets), :lists.reverse(updates)}
  end

  # Accessor for the element at `index` of a list.
  def at(index) when is_integer(index) and index >= 0 do
    fn op, data, next -> at(op, data, next, index) end
  end
  def at(:get, data, next, index) when is_list(data) do
    data |> Enum.at(index) |> next.()
  end
  def at(:get_and_update, data, next, index) when is_list(data) do
    get_and_update_at(data, index, next, [])
  end
  def at(_op, data, _next, _index) do
    raise "Access.at/1 expected a list, got: #{inspect(data)}"
  end
  def get_and_update_at([head | rest], 0, next, updates) do
    case next.(head) do
      {get, update} -> {get, :lists.reverse([update | updates], rest)}
      :pop -> {head, :lists.reverse(updates, rest)}
    end
  end
  def get_and_update_at([head | rest], index, next, updates) do
    get_and_update_at(rest, index - 1, next, [head | updates])
  end
  def get_and_update_at([], _index, _next, updates) do
    {nil, :lists.reverse(updates)}
  end
end
|
lib/control/access.ex
| 0.503174
| 0.605099
|
access.ex
|
starcoder
|
defmodule OMG.Eth do
  @moduledoc """
  Library for common code of the adapter/port to contracts deployed on Ethereum.
  NOTE: The library code is not intended to be used outside of `OMG.Eth`: use `OMG.Eth.RootChain` and `OMG.Eth.Token` as main
  entrypoints to the contract-interaction functionality.
  NOTE: This wrapper is intended to be as thin as possible, only offering a consistent API to the Ethereum JSONRPC client and contracts.
  Handles other non-contract queries to the Ethereum client.
  Notes on encoding: All APIs of `OMG.Eth` and the submodules with contract APIs always use raw, decoded binaries
  for binaries - never use hex encoded binaries. Such binaries may be passed as is onto `ABI` related functions,
  however they must be encoded/decoded when entering/leaving the `Ethereumex` realm
  """
  import OMG.Eth.Encoding

  @type address :: <<_::160>>
  @type hash :: <<_::256>>

  # Returns `{:ok, height}` with the current Ethereum block height, or passes
  # through the client's error tuple unchanged.
  def get_ethereum_height do
    case Ethereumex.HttpClient.eth_block_number() do
      {:ok, "0x" <> height_hex} ->
        {height, ""} = Integer.parse(height_hex, 16)
        {:ok, height}
      other ->
        other
    end
  end

  # Performs a read-only `eth_call` against `contract` and decodes the answer
  # according to `return_types`.
  def call_contract(contract, signature, args, return_types) do
    data = signature |> ABI.encode(args)
    with {:ok, return} <- Ethereumex.HttpClient.eth_call(%{to: to_hex(contract), data: to_hex(data)}),
         do: decode_answer(return, return_types)
  end

  # Single return values are unwrapped; multiple values become a tuple.
  defp decode_answer(enc_return, return_types) do
    enc_return
    |> from_hex()
    |> ABI.TypeDecoder.decode_raw(return_types)
    |> case do
      [single_return] -> {:ok, single_return}
      other when is_list(other) -> {:ok, List.to_tuple(other)}
    end
  end

  # Sends a state-changing transaction calling `signature` with `args` on the
  # contract at `to`. Returns `{:ok, tx_hash}` (raw binary hash).
  @spec contract_transact(address, address, binary, [any], keyword) :: {:ok, binary} | {:error, any}
  def contract_transact(from, to, signature, args, opts \\ []) do
    data = encode_tx_data(signature, args)
    txmap =
      %{from: to_hex(from), to: to_hex(to), data: data}
      |> Map.merge(Map.new(opts))
      |> encode_all_integer_opts()
    with {:ok, txhash} <- Ethereumex.HttpClient.eth_send_transaction(txmap),
         do: {:ok, from_hex(txhash)}
  end

  # Hex-encodes every integer-valued option (gas, gasPrice, ...) for the JSONRPC client.
  defp encode_all_integer_opts(opts) do
    opts
    |> Enum.filter(fn {_k, v} -> is_integer(v) end)
    |> Enum.into(opts, fn {k, v} -> {k, to_hex(v)} end)
  end

  # Reads the compiled contract JSON and returns its "0x"-prefixed bytecode.
  def get_bytecode!(path_project_root, contract_name) do
    %{"evm" => %{"bytecode" => %{"object" => bytecode}}} =
      path_project_root
      |> read_contracts_json!(contract_name)
      |> Poison.decode!()
    "0x" <> bytecode
  end

  defp encode_tx_data(signature, args) do
    signature
    |> ABI.encode(args)
    |> to_hex()
  end

  defp encode_constructor_params(args, types) do
    args
    |> ABI.TypeEncoder.encode_raw(types)
    # NOTE: we're not using `to_hex` because the `0x` will be appended to the bytecode already
    |> Base.encode16(case: :lower)
  end

  # Deploys `bytecode` with constructor `args` of the given `types`.
  def deploy_contract(addr, bytecode, types, args, opts) do
    # FIX: arguments were swapped here (`encode_constructor_params(types, args)`),
    # which ABI-encoded the *type list* as the data and used the args as types.
    # `encode_constructor_params/2` expects (args, types).
    enc_args = encode_constructor_params(args, types)
    txmap =
      %{from: to_hex(addr), data: bytecode <> enc_args}
      |> Map.merge(Map.new(opts))
      |> encode_all_integer_opts()
    with {:ok, txhash} <- Ethereumex.HttpClient.eth_send_transaction(txmap),
         do: {:ok, from_hex(txhash)}
  end

  defp read_contracts_json!(path_project_root, contract_name) do
    path = "contracts/build/#{contract_name}.json"
    case File.read(Path.join(path_project_root, path)) do
      {:ok, contract_json} ->
        contract_json
      {:error, reason} ->
        raise(
          RuntimeError,
          "Can't read #{path} because #{inspect(reason)}, try running mix deps.compile plasma_contracts"
        )
    end
  end

  # keccak-256 of the event signature, hex-encoded, for use as a log topic.
  defp event_topic_for_signature(signature) do
    signature |> ExthCrypto.Hash.hash(ExthCrypto.Hash.kec()) |> to_hex()
  end

  # Drops logs removed by chain reorgs (and, conservatively, logs with no
  # "removed" field at all — note the `true` default).
  defp filter_not_removed(logs) do
    logs |> Enum.filter(&(not Map.get(&1, "removed", true)))
  end

  # Fetches logs for `signature` emitted by `contract` in the block range.
  # NOTE(review): `catch` here only intercepts throws/exits, not raised errors
  # (e.g. a MatchError from the `{:ok, logs}` match) — confirm this is intended.
  def get_ethereum_events(block_from, block_to, signature, contract) do
    topic = event_topic_for_signature(signature)
    try do
      {:ok, logs} =
        Ethereumex.HttpClient.eth_get_logs(%{
          fromBlock: to_hex(block_from),
          toBlock: to_hex(block_to),
          address: to_hex(contract),
          topics: ["#{topic}"]
        })
      {:ok, filter_not_removed(logs)}
    catch
      _ -> {:error, :failed_to_get_ethereum_events}
    end
  end

  # Decodes a non-indexed-only event payload into a map keyed by `keys`.
  def parse_event(%{"data" => data}, {signature, keys}) do
    decoded_values =
      data
      |> from_hex()
      |> ABI.TypeDecoder.decode(ABI.FunctionSelector.decode(signature))
    Enum.zip(keys, decoded_values)
    |> Map.new()
  end

  # Decodes an event with both indexed (topics) and non-indexed (data) fields
  # into a single merged map.
  def parse_events_with_indexed_fields(
        %{"data" => data, "topics" => [_event_sig | indexed_data]},
        {non_indexed_keys, non_indexed_key_types},
        {indexed_keys, indexed_keys_types}
      ) do
    decoded_non_indexed_fields =
      data
      |> from_hex()
      |> ABI.TypeDecoder.decode_raw(non_indexed_key_types)
    non_indexed_fields =
      Enum.zip(non_indexed_keys, decoded_non_indexed_fields)
      |> Map.new()
    decoded_indexed_fields =
      for {encoded, type_sig} <- Enum.zip(indexed_data, indexed_keys_types) do
        [decoded] =
          encoded
          |> from_hex()
          |> ABI.TypeDecoder.decode_raw([type_sig])
        decoded
      end
    indexed_fields =
      Enum.zip(indexed_keys, decoded_indexed_fields)
      |> Map.new()
    Map.merge(non_indexed_fields, indexed_fields)
  end
end
|
apps/omg_eth/lib/eth.ex
| 0.884598
| 0.522872
|
eth.ex
|
starcoder
|
defmodule Phoenix.Tracker.DeltaGeneration do
  @moduledoc false
  require Logger
  alias Phoenix.Tracker.{State, Clock, Replica}
  @doc """
  Extracts minimal delta from generations to satisfy remote clock.
  Falls back to extracting entire crdt if unable to match delta.
  """
  @spec extract(State.delta, [State.delta], State.context) :: State.delta | State.t
  def extract(%State{mode: :normal} = state, generations, remote_clock) do
    # Find the first generation whose start clock the remote already dominates;
    # if none fits, ship the full CRDT state instead.
    case delta_fullfilling_clock(generations, remote_clock) do
      {delta, index} ->
        if index, do: Logger.debug "#{inspect state.replica}: sending delta generation #{index + 1}"
        delta
      nil ->
        Logger.debug "#{inspect state.replica}: falling back to sending entire crdt"
        State.extract(state)
    end
  end
  # Folds `delta` into each generation, respecting the per-generation size
  # limits in `opts`. When a generation would exceed its limit, the previous
  # (unmerged-with-overflow) delta is kept for that slot instead.
  @spec push(State.t, [State.delta], State.delta, [pos_integer]) :: [State.delta]
  def push(%State{mode: :normal} = parent, [] = _generations, %State{mode: :delta} = delta, opts) do
    # No generations yet: seed one slot per configured size limit from the
    # parent's current delta.
    parent.delta
    |> List.duplicate(Enum.count(opts))
    |> do_push(delta, opts, {delta, []})
  end
  def push(%State{mode: :normal} = _parent, generations, %State{mode: :delta} = delta, opts) do
    do_push(generations, delta, opts, {delta, []})
  end
  defp do_push([], _delta, [], {_prev, acc}), do: Enum.reverse(acc)
  defp do_push([gen | generations], delta, [gen_max | opts], {prev, acc}) do
    case State.merge_deltas(gen, delta) do
      {:ok, merged} ->
        if State.delta_size(merged) <= gen_max do
          do_push(generations, delta, opts, {merged, [merged | acc]})
        else
          # Over the size limit: keep the previous delta in this slot but still
          # carry `merged` forward so larger generations can absorb it.
          do_push(generations, delta, opts, {merged, [prev | acc]})
        end
      {:error, :not_contiguous} ->
        # Non-contiguous delta cannot be merged; leave the generation as-is.
        do_push(generations, delta, opts, {gen, [gen | acc]})
    end
  end
  @doc """
  Prunes permanently downed replicas from the delta generation list
  """
  @spec remove_down_replicas([State.delta], Replica.replica_ref) :: [State.delta]
  def remove_down_replicas(generations, replica_ref) do
    Enum.map(generations, fn %State{mode: :delta} = gen ->
      State.remove_down_replicas(gen, replica_ref)
    end)
  end
  # Returns the first {delta, index} (oldest generation first) whose start
  # clock is dominated by the remote clock, or nil when none qualifies.
  defp delta_fullfilling_clock(generations, remote_clock) do
    generations
    |> Enum.with_index()
    |> Enum.find(fn {%State{range: {local_start, _local_end}}, _} ->
      Clock.dominates?(remote_clock, local_start)
    end)
  end
end
|
deps/phoenix_pubsub/lib/phoenix/tracker/delta_generation.ex
| 0.728072
| 0.50354
|
delta_generation.ex
|
starcoder
|
defmodule Spf.Lexer do
@moduledoc """
Lexer for SPF strings and explain-strings.
See [the collected ABNF](https://www.rfc-editor.org/rfc/rfc7208.html#section-12).
"""
@typedoc """
qualifier = ?+ / ?- / ?~ / ??
"""
@type q :: ?+ | ?- | ?~ | ??
@typedoc """
`range` denotes a token's `start..stop`-slice in the input string.
"""
@type range :: Range.t()
@typedoc """
The token's `type`.
There are several classes of tokens:
- the version: `:version`,
- a mechanism: `:a, :all, :exists, :exp, :include, :ip4, :ip6, :mx, :ptr`
- a modifier: `:exp, :redirect`,
- an explain string: `:exp_str`,
- an unknown modifier: `:unknown`,
- a syntax error: `:error`
- whitespace: `:whitespace`,
- a subtoken: `:expand, :literal, :cidr`
Subtokens may appear as part of another token's value.
`:exp_str` is produced by tokenizing the explain string. After expanding the
domain-spec of modifier `exp=domain-spec` into a domain name, that domain's
TXT RR is retrieved and tokenized for later expansion into an explanation
string. This only happens when the SPF verdict is `:fail` and the `exp`
modifier is present and has a valid domain name.
The `:whitespace` token will match both spaces and tab characters in order to
be able to warn about multiple spaces and/or tab characters being used. Use
of a tab character is technically a syntax error, but this library only warns
about its use.
The `:error` token is tried as a last resort and matches any non-space
sequence. When matched, it means the SPF string has a syntax error.
"""
@type type ::
:a
| :all
| :exists
| :exp
| :include
| :ip4
| :ip6
| :mx
| :ptr
| :redirect
| :unknown
| :version
| :whitespace
# catch all
| :error
# explain-string
| :exp_str
# subtokens
| :cidr
| :expand
| :literal
@typedoc """
A token represented as a tuple: `{type, list, range}`.
Where:
- `type` is an atom which denotes the token `t:type/0`
- `list` may be empty or contain one or more values (including subtokens)
- `range` is the `start..stop`-slice in the input string
"""
@type token :: {type, list(), range}
@typedoc """
An ok/error tuple produced by lexing some input
"""
@type result :: {:ok, [token], binary, map} | {:error, atom, binary, map}
@typedoc """
A lexer is a function that takes a binary & a lexer-context, and returns a `t:result/0`
"""
@type lexer :: (binary, map -> result)
@wspace [?\s, ?\t]
@eoterm @wspace ++ [-1]
@mdelimiters [?., ?-, ?+, ?,, ?/, ?_, ?=]
@mletters [?s, ?l, ?o, ?d, ?i, ?p, ?h, ?c, ?r, ?t, ?v] ++
[?S, ?L, ?O, ?D, ?I, ?P, ?H, ?C, ?R, ?T, ?V]
# when used to slice a string -> yields ""
@null_slice 1..0//-1
@doc """
Returns a lexer `t:result/0` after consuming an SPF string.
The SPF string can be a full SPF TXT string or a partial string.
The lexer produces a list of tokens found, including a catch-all
`:error` token for character sequences that were not recognized.
## Example
    iex> {:ok, tokens, _rest, _map} = Spf.Lexer.tokenize_spf("a:%{d}")
    iex> tokens
    [{:a, [43, {:expand, [100, -1, false, ["."]], 2..5}, {:cidr, [32, 128], 1..0//-1}], 0..5}]
"""
@spec tokenize_spf(binary) :: result
def tokenize_spf(input) when is_binary(input) do
  context = %{offset: 0, input: input}
  spf_tokenize().(input, context)
end
@doc """
Returns a lexer `t:result/0` after consuming an explain-string.
An explain-string is the TXT RR value of the domain specified by the
domain specification of the `exp`-modifier and is basically a series
of macro-strings and spaces. This is the only time `c`, `r`, `t`-macros
may be used.
The lexer produces an `:error` token for character-sequences it doesn't know.
## Example
    iex> {:ok, tokens, _rest, _map} = Spf.Lexer.tokenize_exp("timestamp %{t}")
    iex> tokens
    [
      {:exp_str,
       [{:literal, ["timestamp"], 0..8},
        {:whitespace, [" "], 9..9},
        {:expand, [116, -1, false, ["."]], 10..13}
       ], 0..13}
    ]
"""
@spec tokenize_exp(binary) :: result
def tokenize_exp(input) when is_binary(input) do
  context = %{offset: 0, input: input}
  exp_tokenize().(input, context)
end
# Context Helpers

# Drops the offset recorded under `name` from the lexer context.
@spec del(map, atom) :: map
defp del(map, name),
  do: Map.delete(map, name)

# Computes the `start..stop` input slice for token `name`, from the offset
# recorded by `set/2` up to (but excluding) the current offset.
@spec range(map, atom) :: range
defp range(map, name) do
  # note: if stop < start -> a token is being generated out of nothing
  start = Map.get(map, name, 0)
  stop = Map.get(map, :offset) - 1
  # Idiomatic: a two-branch `if` instead of a `cond` with a single condition
  # plus `true` catch-all (behavior unchanged).
  if start > stop, do: @null_slice, else: start..stop
end

# record current offset for given `name`
@spec set(map, atom) :: map
defp set(map, name) do
  Map.put(map, name, map.offset)
end

# update offset after some input was accepted by some parser
@spec upd(map, binary) :: map
defp upd(map, rest) do
  offset = map.offset + String.length(map.input) - String.length(rest)

  map
  |> Map.put(:offset, offset)
  |> Map.put(:input, rest)
end
# Tokenizers

# Lexer for a full SPF record: repeatedly matches whitespace, mechanisms,
# modifiers and finally the catch-all :error token. Order matters: the
# catch-all must come last so recognized terms win.
@spec spf_tokenize :: lexer
defp spf_tokenize() do
  choice([
    whitespace(),
    # mechanisms
    mechanism(:a),
    mechanism(:mx),
    mechanism(:ip4),
    mechanism(:ip6),
    mechanism(:include),
    mechanism(:exists),
    mechanism(:ptr),
    mechanism(:all),
    # modifiers
    modifier(:redirect),
    modifier(:v),
    modifier(:exp),
    modifier(:unknown),
    # catch all
    error()
  ])
  |> many()
end

# Lexer for an explain-string: whitespace and macro-strings, flattened and
# wrapped into a single :exp_str token (returned as a one-element list).
@spec exp_tokenize :: lexer
defp exp_tokenize() do
  choice([whitespace(), mstring() |> satisfy(fn l -> l != [] end), error()])
  |> many()
  |> map(fn l -> List.flatten(l) end)
  |> mark(:exp_str)
  |> map(fn token -> [token] end)
end
# Token parsers

# Catch-all: consume everything up to the next term boundary and tag it as a
# syntax error.
@spec error :: lexer
defp error() do
  until(fn c -> c in @eoterm end)
  |> map(fn chars -> [to_string(chars)] end)
  |> mark(:error)
end

@spec mechanism(atom) :: lexer
# `a` and `mx` accept three forms: bare (default cidr), `/cidr`, and
# `:domain-spec` (with a trailing cidr checked by cidr_check/2).
defp mechanism(key) when key in [:a, :mx] do
  choice([
    sequence([qualifier(), keyword(key, eot())])
    |> map(fn [q, _key] -> [q, default_cidr([])] end),
    sequence([qualifier(), keyword(key, char1(?/)), cidr()])
    |> map(fn [q, _key, cidr] -> [q, cidr] end),
    sequence([
      qualifier(),
      keyword(key, char1(?:)),
      char1(?:),
      choice([expand(), literals(), merror()])
      |> until(eot())
      |> satisfy(fn x -> x != [] end)
    ])
    |> map(fn [q, _key, _skip, terms] -> cidr_check(key, [q | terms]) end)
  ])
  |> mark(key)
end
# `ptr` takes an optional `:domain-spec` but never a cidr.
defp mechanism(:ptr) do
  choice([
    sequence([qualifier(), keyword(:ptr, eot())])
    |> map(fn [q, _key] -> [q] end),
    sequence([
      qualifier(),
      keyword(:ptr, char1(?:)),
      char1(?:),
      choice([expand(), literals(), merror()])
      |> until(eot())
      |> satisfy(fn x -> x != [] end)
    ])
    |> map(fn [q, _key, _skip, terms] -> [q | terms] end)
  ])
  |> mark(:ptr)
end
# `all` is just a qualifier followed by the keyword; it takes no arguments.
defp mechanism(:all) do
  sequence([qualifier(), keyword(:all, eot())])
  |> map(fn [q, _key] -> [q] end)
  |> mark(:all)
end
defp mechanism(key) do
  # mechanisms :ip4, :ip6, :include, :exists
  sequence([
    qualifier(),
    keyword(key, char1(?:)),
    char1(?:),
    choice([expand(), literals(), merror()])
    |> until(eot())
    |> satisfy(fn x -> x != [] end)
  ])
  |> map(fn [q, _key, _skip, terms] -> cidr_check(key, [q | terms]) end)
  |> mark(key)
end

@spec modifier(atom) :: lexer
# `v=spfN` — the SPF version; only the numeric part is kept.
defp modifier(:v) do
  sequence([
    keyword(:v, char1(?=)),
    char1(?=),
    keyword(:spf, number()),
    number(),
    eot()
  ])
  |> map(fn [_v, _is, _spf, n, _eot] -> [n] end)
  |> mark(:version)
end
defp modifier(:unknown) do
  # name = *( expand / literal )
  sequence([name(), char1(?=), mstring()])
  |> map(fn [name, _, macro_string] -> List.flatten([name, macro_string]) end)
  |> mark(:unknown)
end
# `redirect=` / `exp=` — an empty value is surfaced as an embedded :error
# subtoken (with a null range) instead of failing the whole parse.
defp modifier(key) do
  sequence([
    keyword(key, char1(?=)),
    char1(?=),
    choice([expand(), literals(), merror()])
    |> until(eot())
    |> map(fn l -> if l == [], do: [{:error, [""], @null_slice}], else: l end)
    # |> satisfy(fn x -> x != [] end)
  ])
  |> map(fn [_key, _skip, terms] -> cidr_check(key, terms) end)
  |> mark(key)
end

# Matches a run of spaces/tabs; kept as a token so the parser can warn about
# repeated whitespace or tab usage.
@spec whitespace() :: lexer
defp whitespace() do
  until(fn c -> c not in @wspace end)
  |> map(fn chars -> [to_string(chars)] end)
  |> mark(:whitespace)
end
# TokenParser Helpers

# For `a`/`mx`: if the last token is a literal, try to split a trailing cidr
# off it (re-lexing the literal); otherwise append the default cidr subtoken.
@spec cidr_check(atom, [token]) :: [token]
defp cidr_check(key, tokens) when key in [:a, :mx] do
  with {:literal, [str], range} <- List.last(tokens),
       context <- %{offset: range.first, input: str},
       {:ok, [literal, cidr], "", _} <- cidr_lex().(str, context) do
    case literal do
      # literal was *only* a cidr: replace it outright
      {:literal, [""], @null_slice} -> List.replace_at(tokens, -1, cidr)
      _ -> List.replace_at(tokens, -1, literal) |> List.insert_at(-1, cidr)
    end
  else
    _ -> List.insert_at(tokens, -1, {:cidr, [32, 128], @null_slice})
  end
end
defp cidr_check(_key, tokens),
  do: tokens

@spec cidr_lex() :: lexer
defp cidr_lex() do
  # parse the last cidr from a literal string and return [lead_literal, cidr_or_default]
  sequence([
    char()
    |> until(choice([cidr(), eot()]))
    |> map(fn l -> [to_string(l)] end)
    |> mark(:literal),
    optional(cidr()) |> map(&default_cidr/1)
  ])
end

# Default cidr subtoken (/32 and /128) when none was present in the input.
@spec default_cidr(list) :: list
defp default_cidr(l),
  do: if(l == [], do: {:cidr, [32, 128], @null_slice}, else: l)

# Default macro delimiter (".") when none were given; otherwise each delimiter
# codepoint becomes a one-character string.
@spec default_mdelims(list) :: list
defp default_mdelims(l),
  do: if(l == [], do: ["."], else: Enum.map(l, fn n -> to_string([n]) end))

# Parses a macro expansion: `%{L[digits][r][delims]}` plus the `%%`, `%-`,
# `%_` escapes. Yields [letter, keep, reverse?, delimiters].
defp expand() do
  # note: keep param defaults to -1, since keep==0 is actually valid (albeit useless)
  choice([
    sequence([
      keyword("%{"),
      any(@mletters),
      optional(number()) |> map(fn x -> if x == [], do: -1, else: x end),
      optional(keyword("r")) |> map(fn x -> if x == [], do: false, else: true end),
      optional(any(@mdelimiters) |> many()) |> map(&default_mdelims/1),
      keyword("}")
    ])
    |> map(fn [_, ltr, keep, reverse, delims, _] -> [ltr, keep, reverse, delims] end)
    |> mark(:expand),
    keyword("%%") |> map(fn _ -> ["%"] end) |> mark(:expand),
    keyword("%-") |> map(fn _ -> ["-"] end) |> mark(:expand),
    keyword("%_") |> map(fn _ -> ["_"] end) |> mark(:expand)
  ])
end

# Visible ASCII excluding '%' (0x25), per the macro-literal ABNF.
defp mliteral?(c),
  do: 0x21 <= c and c <= 0x7E and c != 0x25

# macro-string: expands, literals or per-character errors, up to end of term.
defp mstring(),
  do: choice([expand(), literals(), merror()]) |> until(eot())

# Consumes a single offending character as an :error token.
defp merror() do
  char()
  |> satisfy(fn _ -> true end)
  |> map(fn l -> [to_string([l])] end)
  |> mark(:error)
end

# Longest run of macro-literal characters as a :literal token.
defp literals() do
  until(fn c -> not mliteral?(c) end)
  |> map(fn l -> [List.flatten(l) |> to_string()] end)
  |> mark(:literal)
end
# SPF parsers
# token = {:type, list, range}
# subtokens may have null_slice as range (i.e. they're defaults, not present in input)

# A cidr: `/n`, `//n` or `/n//m`; missing halves default to /32 resp. /128.
defp cidr() do
  # cidr can only match if it ends a term
  choice([
    sequence([cidr4(), cidr6()]),
    cidr4() |> map(fn n -> [n, 128] end),
    cidr6() |> map(fn n -> [32, n] end)
  ])
  |> eot()
  |> mark(:cidr)
end

defp cidr4() do
  # just lex the number, parser will check validity (incl. leading zeros)
  sequence([char1(?/), number()])
  |> map(fn [_, number] -> number end)
end

defp cidr6() do
  # just lex the number, parser will check validity
  sequence([char1(?/), char1(?/), number()])
  |> map(fn [_, _, number] -> number end)
end

# Optional qualifier (+ - ~ ?); absent qualifier defaults to ?+ per RFC 7208.
defp qualifier() do
  # when used always yields a qualifier
  any([?+, ?-, ?~, ??])
  |> optional()
  |> map(fn x -> if x == [], do: ?+, else: x end)
end
# GENERIC Parsers > return parser results

# A single character drawn from the given codepoint list.
defp any(codepoints),
  do: char() |> satisfy(fn c -> c in codepoints end)

defp alpha(),
  do: char() |> satisfy(fn x -> (?a <= x and x <= ?z) or (?A <= x and x <= ?Z) end)

defp alnum(),
  do: choice([alpha(), digit()])

# Exactly the expected codepoint.
defp char1(expected),
  do: char() |> satisfy(fn x -> x == expected end)

defp digit(),
  do: char() |> satisfy(fn x -> ?0 <= x and x <= ?9 end)

# Case-insensitive keyword; `accept` is a lookahead parser that must succeed
# on the remainder (e.g. eot(), char1(?=)).
defp keyword(expected) do
  to_string(expected)
  |> anycase(empty())
  |> map(fn _ -> expected end)
end

defp keyword(expected, accept) do
  to_string(expected)
  |> anycase(accept)
  |> map(fn _ -> expected end)
end

# modifier name: ALPHA *( ALPHA / DIGIT / "." / "_" / "-" )
defp name() do
  sequence([alpha(), choice([alnum(), any([?., ?_, ?-])]) |> many()])
  |> satisfy(fn list -> list != [] end)
  |> map(fn chars -> to_string(chars) end)
end

# One or more digits, folded into an integer.
defp number() do
  sequence([digit(), digit() |> many()])
  |> map(fn digits -> List.flatten(digits) |> List.to_integer() end)
end
# COMBINATORS > return a parser function
# @spec parser_fun: (binary, map) -> {:error, reason, input, ctx} | {:ok, terms, rest, upd(ctx, rest)}
# Case-insensitively matches the literal `str` at the head of the input,
# but only when the `accept` parser also succeeds on what follows
# (lookahead; its result is discarded). On success yields `str` in its
# original casing. If `accept` fails, its error tuple is returned
# unchanged by the `with` fall-through.
defp anycase(str, accept) do
  fn input, ctx ->
    want = String.upcase(str)
    {have, rest} = String.split_at(input, String.length(want))

    with {:ok, _, _, _} <- accept.(rest, ctx) do
      if want == String.upcase(have),
        do: {:ok, str, rest, upd(ctx, rest)},
        else: {:error, :anycase, input, ctx}
    end
  end
end
# Parser that always succeeds, consuming nothing and yielding [].
defp empty(),
  do: fn input, ctx -> {:ok, [], input, ctx} end
# Parser for a single byte of input; fails with :eos at end of input.
defp char() do
  fn
    "", ctx -> {:error, :eos, "", ctx}
    <<byte, rest::binary>>, ctx -> {:ok, byte, rest, ctx}
  end
end
# Tries each parser in order on the same input; the first success wins.
# Each failed attempt's (possibly updated) context is threaded into the
# next attempt. Fails with :choice when all alternatives fail.
defp choice(parsers) do
  fn input, ctx ->
    case parsers do
      [] ->
        {:error, :choice, input, ctx}

      [parser | alternatives] ->
        case parser.(input, ctx) do
          {:ok, _, _, _} = success -> success
          {:error, _, _, ctx2} -> choice(alternatives).(input, ctx2)
        end
    end
  end
end
# Succeeds (consuming nothing) when the next byte ends a term: a byte in
# @eoterm, or end of input (represented here by the sentinel -1).
defp eot() do
  fn input, ctx ->
    next =
      case input do
        <<b::8, _::binary>> -> b
        "" -> -1
      end

    if next in @eoterm,
      do: {:ok, [], input, ctx},
      else: {:error, :eot, input, ctx}
  end
end
# Wraps `parser` so it only succeeds when the parsed term is followed by
# end-of-term (see eot/0); the terminator itself is not consumed.
# Errors from either step fall through the `with` unchanged.
# usage: parser() |> eot()
defp eot(parser) do
  fn input, ctx ->
    with {:ok, term, rest, ctx} <- parser.(input, ctx),
         {:ok, _, _, _} <- eot().(rest, ctx),
         do: {:ok, term, rest, upd(ctx, rest)}
  end
end
# Applies `parser` zero or more times, collecting the terms in order.
# Never fails: zero matches yield {:ok, [], input, ctx}.
# note, applies `parser` 0 or more times:
# - if `parser` *never* fails, this will loop forever!
# - if having one many inside another many, it'll loop forever as well
defp many(parser) do
  fn input, ctx ->
    case parser.(input, ctx) do
      {:error, _, _, ctx} ->
        {:ok, [], input, ctx}

      {:ok, term, rest, ctx} ->
        # Recursive call cannot fail (base case above), so this match is safe.
        {:ok, terms, rest, ctx} = many(parser).(rest, ctx)
        {:ok, [term | terms], rest, upd(ctx, rest)}
    end
  end
end
# Wraps `parser` so its result becomes a {name, term, range} token.
# set/2 and del/2 bracket the parse in the context so range/2 can compute
# the input slice that `name` matched.
defp mark(parser, name) do
  fn input, ctx ->
    with {:ok, term, rest, ctx} <- parser.(input, set(ctx, name)) do
      {:ok, {name, term, range(ctx, name)}, rest, del(ctx, name)}
    end
  end
end
# Transforms a successful parser's term with `mapper`; errors pass
# through untouched.
defp map(parser, mapper) do
  fn input, ctx ->
    case parser.(input, ctx) do
      {:ok, term, rest, ctx2} -> {:ok, mapper.(term), rest, ctx2}
      error -> error
    end
  end
end
# Accepts `parser`'s term only when `accept.(term)` is truthy; otherwise
# fails with :reject.
# NOTE(review): the :reject error carries `rest` (input *after* the
# consumed term) rather than the original input — confirm callers
# depend on this before normalizing.
defp satisfy(parser, accept) do
  fn input, ctx ->
    with {:ok, term, rest, ctx} <- parser.(input, ctx) do
      if accept.(term),
        do: {:ok, term, rest, upd(ctx, rest)},
        else: {:error, :reject, rest, ctx}
    end
  end
end
# Makes `parser` optional: on failure, succeed with [] and consume
# nothing; on success, pass the result through unchanged.
defp optional(parser) do
  fn input, ctx ->
    case parser.(input, ctx) do
      {:ok, _, _, _} = success -> success
      {:error, _, _, ctx2} -> {:ok, [], input, ctx2}
    end
  end
end
# Applies all parsers in order; succeeds only if every one succeeds,
# yielding their terms as a list. On any failure the *original* input
# and context are reported (no partial consumption leaks out).
defp sequence(parsers) do
  fn input, ctx ->
    case parsers do
      [] ->
        {:ok, [], input, ctx}

      [first | others] ->
        with {:ok, term, rest, ctx_s} <- first.(input, ctx),
             {:ok, terms, rest, ctx_s} <- sequence(others).(rest, ctx_s) do
          {:ok, [term | terms], rest, ctx_s}
        else
          _ -> {:error, :sequence, input, ctx}
        end
    end
  end
end
# gobble up chars until stop function says so or till eos
# The next char is passed to `stop`; end of input is represented as -1.
# Fails with :until when stopping immediately with nothing consumed
# since the start of the overall parse (input == ctx.input — presumably
# ctx.input holds the full original input; confirm).
defp until(stop) when is_function(stop, 1) do
  fn input, ctx ->
    {char, rest} =
      case input do
        "" -> {-1, ""}
        <<c::8, rest::binary>> -> {c, rest}
      end

    if stop.(char) do
      if input == ctx.input,
        do: {:error, :until, input, ctx},
        else: {:ok, [], input, ctx}
    else
      with {:ok, chars, rest, ctx} <- until(stop).(rest, ctx),
           do: {:ok, [char | chars], rest, upd(ctx, rest)}
    end
  end
end
# apply parser 0 or more times, until stop parser says so or fail
# `stop`'s match is probed but never consumed.
# - note: if parser fails before stop says to stop, nothing is parsed
#   (the parser's error falls through the `with` unchanged)
defp until(parser, stop) when is_function(stop, 2) do
  fn input, ctx ->
    case stop.(input, ctx) do
      {:ok, _, _, _} ->
        {:ok, [], input, ctx}

      _ ->
        with {:ok, term, rest, ctx} <- parser.(input, ctx),
             {:ok, terms, rest, ctx} <- until(parser, stop).(rest, ctx) do
          {:ok, [term | terms], rest, upd(ctx, rest)}
        end
    end
  end
end
end
|
lib/spf/lexer.ex
| 0.888922
| 0.648132
|
lexer.ex
|
starcoder
|
defmodule Params.Schema do
  @moduledoc ~S"""
  Defines a params schema for a module.
  A params schema is just a map where keys are the parameter name
  (ending with a `!` to mark the parameter as required) and the
  value is either a valid Ecto.Type, another map for embedded schemas
  or an array of those.
  ## Example
  ```elixir
  defmodule ProductSearch do
    use Params.Schema, %{
      text!: :string,
      near: %{
        latitude!: :float,
        longitude!: :float
      },
      tags: [:string]
    }
  end
  ```
  To get an Ecto.Changeset for ProductSearch params use:
  ```elixir
  changeset = ProductSearch.from(params)
  ```
  To transform the changeset into a map or `%ProductSearch{}`struct use
  [Params.changes/1](Params.html#changes/1) or [Params.data/1](Params.html#data/1)
  respectively.
  """

  # Bare `use Params.Schema`: injects Ecto schema machinery plus the Params
  # runtime helpers; the caller then declares fields with the schema/1
  # macro below.
  @doc false
  defmacro __using__([]) do
    quote do
      import Params.Schema, only: [schema: 1]
      unquote(__use__(:ecto))
      unquote(__use__(:params))
    end
  end

  # `use Params.Schema, %{...}`: the whole schema is given as a map and
  # expanded at compile time by Params.Def.defschema/1.
  @doc false
  defmacro __using__(schema) do
    quote bind_quoted: [schema: schema] do
      import Params.Def, only: [defschema: 1]
      Params.Def.defschema(schema)
    end
  end

  # Declares the Ecto schema backing this params module; the schema source
  # name ("params #{__MODULE__}") is synthetic — params are not persisted.
  @doc false
  defmacro schema([do: definition]) do
    quote do
      Ecto.Schema.schema "params #{__MODULE__}" do
        unquote(definition)
      end
    end
  end

  # Ecto-related boilerplate injected into the using module; a binary_id
  # `_id` primary key is declared but not autogenerated.
  defp __use__(:ecto) do
    quote do
      use Ecto.Schema
      import Ecto.Changeset
      @primary_key {:_id, :binary_id, autogenerate: false}
    end
  end

  # Params-related boilerplate: attributes persisted for the Params runtime,
  # plus default (overridable) from/2, data/2 and changeset/2.
  defp __use__(:params) do
    quote do
      Module.register_attribute(__MODULE__, :required, persist: true)
      Module.register_attribute(__MODULE__, :optional, persist: true)
      Module.register_attribute(__MODULE__, :schema, persist: true)
      @behaviour Params.Behaviour

      # Builds a changeset from raw params; `:with` overrides the cast fun.
      def from(params, options \\ []) when is_list(options) do
        on_cast = Keyword.get(options, :with, &__MODULE__.changeset(&1, &2))
        __MODULE__ |> struct |> Ecto.Changeset.change |> on_cast.(params)
      end

      # Like from/2, but returns {:ok, data} | {:error, changeset}.
      def data(params, options \\ []) when is_list(options) do
        case from(params, options) do
          ch = %{valid?: true} -> {:ok, Params.data(ch)}
          ch -> {:error, ch}
        end
      end

      def changeset(changeset, params) do
        Params.changeset(changeset, params)
      end

      defoverridable [changeset: 2]
    end
  end
end
|
lib/params/schema.ex
| 0.830457
| 0.769037
|
schema.ex
|
starcoder
|
defmodule Graph.Pathfinding do
  @moduledoc """
  This module contains implementation code for path finding algorithms used by `libgraph`.
  """
  import Graph.Utils, only: [vertex_id: 1, edge_weight: 3]

  # Heuristic: estimated lower-bound cost from a given vertex to the goal.
  @type heuristic_fun :: ((Graph.vertex) -> integer)

  @doc """
  Finds the shortest path between `a` and `b` as a list of vertices.
  Returns `nil` if no path can be found.
  The shortest path is calculated here by using a cost function to choose
  which path to explore next. The cost function in Dijkstra's algorithm is
  `weight(E(A, B))+lower_bound(E(A, B))` where `lower_bound(E(A, B))` is always 0.
  """
  @spec dijkstra(Graph.t, Graph.vertex, Graph.vertex) :: [Graph.vertex] | nil
  def dijkstra(%Graph{} = g, a, b) do
    a_star(g, a, b, fn _v -> 0 end)
  end

  @doc """
  Finds the shortest path between `a` and `b` as a list of vertices.
  Returns `nil` if no path can be found.
  This implementation takes a heuristic function which allows you to
  calculate the lower bound cost of a given vertex `v`. The algorithm
  then uses that lower bound function to determine which path to explore
  next in the graph.
  The `dijkstra` function is simply `a_star` where the heuristic function
  always returns 0, and thus the next vertex is chosen based on the weight of
  the edge between it and the current vertex.
  """
  @spec a_star(Graph.t, Graph.vertex, Graph.vertex, heuristic_fun) :: [Graph.vertex] | nil
  def a_star(%Graph{type: :directed, vertices: vs, out_edges: oe} = g, a, b, hfun)
      when is_function(hfun, 1) do
    # Seed the priority queue with a's out-neighbors, then run the search.
    # `tree` records, for each visited vertex, where we came from (edges
    # point child -> parent), so the final path can be walked backwards.
    with a_id <- vertex_id(a),
         b_id <- vertex_id(b),
         {:ok, a_out} <- Map.fetch(oe, a_id) do
      tree = Graph.new() |> Graph.add_vertex(a_id)
      q = PriorityQueue.new()

      q =
        a_out
        |> Stream.map(fn id -> {id, cost(g, a_id, id, hfun)} end)
        |> Enum.reduce(q, fn {id, cost}, q ->
          PriorityQueue.push(q, {a_id, id, edge_weight(g, a_id, id)}, cost)
        end)

      case do_bfs(q, g, b_id, tree, hfun) do
        nil ->
          nil

        path when is_list(path) ->
          # Map internal vertex ids back to the caller's vertex terms.
          for id <- path, do: Map.get(vs, id)
      end
    else
      # `a` has no out-edges, so no path can start from it.
      _ ->
        nil
    end
  end

  def a_star(%Graph{type: :undirected, vertices: vs} = g, a, b, hfun)
      when is_function(hfun, 1) do
    # Same as the directed clause, except neighbors come from both edge
    # directions (see all_edges/2).
    a_id = vertex_id(a)
    b_id = vertex_id(b)
    a_all_edges = all_edges(g, a_id)
    tree = Graph.new() |> Graph.add_vertex(a_id)
    q = PriorityQueue.new()

    q =
      a_all_edges
      |> Stream.map(fn id -> {id, cost(g, a_id, id, hfun)} end)
      |> Enum.reduce(q, fn {id, cost}, q ->
        PriorityQueue.push(q, {a_id, id, edge_weight(g, a_id, id)}, cost)
      end)

    case do_bfs(q, g, b_id, tree, hfun) do
      nil ->
        nil

      path when is_list(path) ->
        for id <- path, do: Map.get(vs, id)
    end
  end

  @doc """
  Finds all paths between `a` and `b`, each path as a list of vertices.
  Returns `nil` if no path can be found.
  """
  @spec all(Graph.t, Graph.vertex, Graph.vertex) :: [Graph.vertex]
  def all(%Graph{vertices: vs, out_edges: oe} = g, a, b) do
    # NOTE(review): despite the doc above, this function returns [] (not
    # nil) when no path exists — confirm which contract is intended.
    with a_id <- vertex_id(a),
         b_id <- vertex_id(b),
         {:ok, a_out} <- Map.fetch(oe, a_id) do
      case dfs(g, a_out, b_id, [a_id], []) do
        [] ->
          []

        paths ->
          paths
          |> Enum.map(fn path -> Enum.map(path, &Map.get(vs, &1)) end)
      end
    else
      _ -> []
    end
  end

  ## Private

  # Union of in- and out-neighbor id sets; undirected graphs keep both maps.
  defp all_edges(%Graph{type: :undirected, out_edges: oe, in_edges: ie}, v_id) do
    v_in = Map.get(ie, v_id, MapSet.new())
    v_out = Map.get(oe, v_id, MapSet.new())
    MapSet.union(v_in, v_out)
  end

  # A* priority: edge weight plus the heuristic lower bound of the target.
  defp cost(%Graph{vertices: vs} = g, v1_id, v2_id, hfun) do
    edge_weight(g, v1_id, v2_id) + hfun.(Map.get(vs, v2_id))
  end

  # Best-first search (directed). Queue entries are
  # {from_id, to_id, accumulated_weight}, prioritized by accumulated cost.
  # The `tree` graph stores original vertex ids *as its vertices*, so we
  # re-hash them (vertex_id/1) to index into the tree.
  defp do_bfs(
         q,
         %Graph{type: :directed, out_edges: oe} = g,
         target_id,
         %Graph{vertices: vs_tree} = tree,
         hfun
       ) do
    case PriorityQueue.pop(q) do
      {{:value, {v_id, ^target_id, _}}, _q1} ->
        # Reached the target: walk the parent tree back to the start.
        v_id_tree = Graph.Utils.vertex_id(v_id)
        construct_path(v_id_tree, tree, [target_id])

      {{:value, {v1_id, v2_id, v2_acc_weight}}, q1} ->
        v2_id_tree = Graph.Utils.vertex_id(v2_id)

        if Map.has_key?(vs_tree, v2_id_tree) do
          # Already visited — skip.
          do_bfs(q1, g, target_id, tree, hfun)
        else
          case Map.get(oe, v2_id) do
            nil ->
              # Dead end — nothing to expand.
              do_bfs(q1, g, target_id, tree, hfun)

            v2_out ->
              tree =
                tree
                |> Graph.add_vertex(v2_id)
                |> Graph.add_edge(v2_id, v1_id)

              q2 =
                v2_out
                |> Enum.map(fn id -> {id, v2_acc_weight + cost(g, v2_id, id, hfun)} end)
                |> Enum.reduce(q1, fn {id, cost}, q ->
                  PriorityQueue.push(
                    q,
                    {v2_id, id, v2_acc_weight + edge_weight(g, v2_id, id)},
                    cost
                  )
                end)

              do_bfs(q2, g, target_id, tree, hfun)
          end
        end

      {:empty, _} ->
        nil
    end
  end

  # Best-first search (undirected): identical, but expands neighbors from
  # both edge directions.
  defp do_bfs(
         q,
         %Graph{type: :undirected} = g,
         target_id,
         %Graph{vertices: vs_tree} = tree,
         hfun
       ) do
    case PriorityQueue.pop(q) do
      {{:value, {v_id, ^target_id, _}}, _q1} ->
        v_id_tree = vertex_id(v_id)
        construct_path(v_id_tree, tree, [target_id])

      {{:value, {v1_id, v2_id, v2_acc_weight}}, q1} ->
        v2_id_tree = vertex_id(v2_id)

        if Map.has_key?(vs_tree, v2_id_tree) do
          do_bfs(q1, g, target_id, tree, hfun)
        else
          all_edges = all_edges(g, v2_id)

          if MapSet.equal?(all_edges, MapSet.new()) do
            do_bfs(q1, g, target_id, tree, hfun)
          else
            tree =
              tree
              |> Graph.add_vertex(v2_id)
              |> Graph.add_edge(v2_id, v1_id)

            q2 =
              all_edges
              |> Enum.map(fn id -> {id, v2_acc_weight + cost(g, v2_id, id, hfun)} end)
              |> Enum.reduce(q1, fn {id, cost}, q ->
                PriorityQueue.push(
                  q,
                  {v2_id, id, v2_acc_weight + edge_weight(g, v2_id, id)},
                  cost
                )
              end)

            do_bfs(q2, g, target_id, tree, hfun)
          end
        end

      {:empty, _} ->
        nil
    end
  end

  # Walks the parent tree from a vertex back to the start, prepending each
  # original vertex id. By construction each tree vertex has at most one
  # out-edge (its parent), hence the single-element match.
  defp construct_path(v_id_tree, %Graph{vertices: vs_tree, out_edges: oe_tree} = tree, path) do
    v_id_actual = Map.get(vs_tree, v_id_tree)
    path = [v_id_actual | path]

    case oe_tree |> Map.get(v_id_tree, MapSet.new()) |> MapSet.to_list() do
      [] ->
        path

      [next_id_tree] ->
        construct_path(next_id_tree, tree, path)
    end
  end

  # Depth-first enumeration of all simple paths from `path`'s head to
  # `target_id`; completed paths accumulate (reversed into order) in `paths`.
  defp dfs(%Graph{} = g, neighbors, target_id, path, paths) when is_list(paths) do
    {paths, visited} =
      if MapSet.member?(neighbors, target_id) do
        {[Enum.reverse([target_id | path]) | paths], [target_id | path]}
      else
        {paths, path}
      end

    neighbors = MapSet.difference(neighbors, MapSet.new(visited))
    do_dfs(g, MapSet.to_list(neighbors), target_id, path, paths)
  end

  defp do_dfs(_g, [], _target_id, _path, paths) when is_list(paths) do
    paths
  end

  defp do_dfs(%Graph{out_edges: oe} = g, [next_neighbor_id | neighbors], target_id, path, acc) do
    case Map.get(oe, next_neighbor_id) do
      nil ->
        do_dfs(g, neighbors, target_id, path, acc)

      next_neighbors ->
        case dfs(g, next_neighbors, target_id, [next_neighbor_id | path], acc) do
          [] ->
            do_dfs(g, neighbors, target_id, path, acc)

          paths ->
            do_dfs(g, neighbors, target_id, path, paths)
        end
    end
  end
end
|
lib/graph/pathfinding.ex
| 0.895323
| 0.688763
|
pathfinding.ex
|
starcoder
|
defmodule Mix.Releases.Appup.Utils do
  @moduledoc false

  # Allowed purge modes for load/update instructions.
  @purge_modes [:soft_purge, :brutal_purge]

  @doc """
  Given a list of appup instructions, this function determines if they are valid or not,
  returning `:ok` if they are valid, or `{:invalid, term}` if not, where the term in that
  tuple is the invalid instruction.
  """
  @spec validate_instructions([Appup.instruction()]) :: :ok | {:invalid, term}
  def validate_instructions(ixs) when is_list(ixs) do
    # The second argument counts instructions seen so far, used to enforce
    # positional rules for the emulator-restart instructions below.
    validate_instructions(ixs, 0)
  end

  defp validate_instructions([], _), do: :ok

  # :restart_new_emulator is only valid as the very first instruction.
  defp validate_instructions([:restart_new_emulator | rest], 0) do
    validate_instructions(rest, 1)
  end

  defp validate_instructions([:restart_new_emulator | _], _),
    do: {:invalid, :restart_new_emulator}

  # :restart_emulator is only valid as the very last instruction.
  defp validate_instructions([:restart_emulator], _), do: :ok
  defp validate_instructions([:restart_emulator | _], _), do: {:invalid, :restart_emulator}

  defp validate_instructions([i | rest], n) do
    if valid_instruction?(i) do
      validate_instructions(rest, n + 1)
    else
      {:invalid, i}
    end
  end

  @doc """
  Given an appup instruction, this function determines if it is valid or not,
  """
  @spec valid_instruction?(Appup.instruction()) :: boolean
  def valid_instruction?({:update, mod, :supervisor}) when is_atom(mod), do: true

  def valid_instruction?({:update, mod, {:advanced, args}}) when is_atom(mod) and is_list(args),
    do: true

  def valid_instruction?({:update, mod, dep_mods}) when is_atom(mod) and is_list(dep_mods) do
    Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:update, mod, {:advanced, args}, dep_mods})
      when is_atom(mod) and is_list(args) and is_list(dep_mods) do
    Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:update, mod, {:advanced, args}, pre_purge, post_purge, dep_mods})
      when is_atom(mod) and is_list(args) and is_list(dep_mods) do
    pre_purge in @purge_modes and post_purge in @purge_modes and Enum.all?(dep_mods, &is_atom/1)
  end

  # `to` below is the suspend timeout; `mt` is the module type.
  def valid_instruction?({:update, m, to, {:advanced, args}, pre, post, dep_mods})
      when is_atom(m) and is_list(args) and is_list(dep_mods) do
    pre in @purge_modes and post in @purge_modes and is_valid_timeout?(to) and
      Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:update, m, mt, to, {:advanced, args}, pre, post, dep_mods})
      when is_atom(m) and is_list(args) and is_list(dep_mods) do
    pre in @purge_modes and post in @purge_modes and is_valid_timeout?(to) and
      is_valid_modtype?(mt) and Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:load_module, mod}) when is_atom(mod), do: true

  def valid_instruction?({:load_module, mod, dep_mods}) when is_atom(mod) and is_list(dep_mods) do
    Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:load_module, mod, pre_purge, post_purge, dep_mods})
      when is_atom(mod) and is_list(dep_mods) do
    pre_purge in @purge_modes and post_purge in @purge_modes and Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:add_module, mod}) when is_atom(mod), do: true

  def valid_instruction?({:add_module, mod, dep_mods}) when is_atom(mod) and is_list(dep_mods) do
    Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:delete_module, mod}) when is_atom(mod), do: true

  def valid_instruction?({:delete_module, mod, dep_mods})
      when is_atom(mod) and is_list(dep_mods) do
    Enum.all?(dep_mods, &is_atom/1)
  end

  def valid_instruction?({:apply, {mod, fun, args}}) do
    is_atom(mod) and is_atom(fun) and is_list(args)
  end

  def valid_instruction?({:add_application, app}) when is_atom(app), do: true

  def valid_instruction?({:add_application, app, start_type}) when is_atom(app) do
    is_valid_start_type?(start_type)
  end

  def valid_instruction?({:remove_application, app}) when is_atom(app), do: true
  def valid_instruction?({:restart_application, app}) when is_atom(app), do: true
  def valid_instruction?(:restart_new_emulator), do: true
  def valid_instruction?(:restart_emulator), do: true

  def valid_instruction?({:load_object_code, {app, vsn, mods}}) do
    # `vsn` is an Erlang version string, i.e. a charlist — hence is_list.
    is_atom(app) and is_list(vsn) and is_list(mods) and Enum.all?(mods, &is_atom/1)
  end

  def valid_instruction?(:point_of_no_return), do: true

  def valid_instruction?({:load, {mod, pre_purge, post_purge}}) when is_atom(mod) do
    pre_purge in @purge_modes and post_purge in @purge_modes
  end

  def valid_instruction?({:remove, {mod, pre_purge, post_purge}}) when is_atom(mod) do
    pre_purge in @purge_modes and post_purge in @purge_modes
  end

  # Suspend entries are either a bare module or a {module, timeout} pair.
  def valid_instruction?({:suspend, mods}) when is_list(mods) do
    Enum.all?(mods, fn
      m when is_atom(m) ->
        true

      {m, to} when is_atom(m) ->
        is_valid_timeout?(to)

      _ ->
        false
    end)
  end

  def valid_instruction?({i, mods})
      when i in [:start, :stop, :resume, :purge] and is_list(mods) do
    Enum.all?(mods, &is_atom/1)
  end

  def valid_instruction?({:code_change, mods}) when is_list(mods) do
    Enum.all?(mods, fn
      {mod, _extra} -> is_atom(mod)
      _ -> false
    end)
  end

  def valid_instruction?({:code_change, mode, mods})
      when is_list(mods) and mode in [:up, :down] do
    Enum.all?(mods, fn
      {mod, _extra} -> is_atom(mod)
      _ -> false
    end)
  end

  def valid_instruction?({:sync_nodes, _id, nodelist}) when is_list(nodelist) do
    Enum.all?(nodelist, &is_atom/1)
  end

  def valid_instruction?({:sync_nodes, _id, {m, f, a}}) do
    is_atom(m) and is_atom(f) and is_list(a)
  end

  # Unknown instruction, or invalid construction
  def valid_instruction?(_), do: false

  defp is_valid_modtype?(:static), do: true
  defp is_valid_modtype?(:dynamic), do: true
  defp is_valid_modtype?(_), do: false

  defp is_valid_timeout?(:default), do: true
  defp is_valid_timeout?(:infinity), do: true
  defp is_valid_timeout?(timeout) when is_integer(timeout) and timeout > 0, do: true
  defp is_valid_timeout?(_), do: false

  defp is_valid_start_type?(:permanent), do: true
  defp is_valid_start_type?(:transient), do: true
  defp is_valid_start_type?(:temporary), do: true
  defp is_valid_start_type?(:load), do: true
  defp is_valid_start_type?(:none), do: true
  defp is_valid_start_type?(_), do: false
end
|
lib/mix/lib/releases/appup/utils.ex
| 0.828973
| 0.498657
|
utils.ex
|
starcoder
|
defmodule Pseudoloc do
  @moduledoc """
  Creates a [pseudolocalized](https://en.wikipedia.org/wiki/Pseudolocalization) translation of
  `Gettext` data files.
  Because this module is designed to work with `Gettext`, it specifically ignores
  [interpolated](https://hexdocs.pm/gettext/Gettext.html#module-interpolation) sections of the
  strings it localizes.
  """

  # Matches %{name} interpolation markers (no whitespace inside the braces).
  @interpolation_pattern ~r/%\{[^}\s\t\n]+\}/

  @typedoc """
  A mapping of individual graphemes to a list of alternate representations.
  Both the key and all entries in the value list should be a single character.
  ## Examples
  ```
  %{
    "a" => ["à", "á", "å"],
    "b" => ["ḅ"]
  }
  ```
  """
  @type alternates :: %{optional(String.t()) => list(String.t())}

  @typedoc """
  Represents a range of text within a string by starting index and length.
  """
  @type range :: {non_neg_integer, non_neg_integer}

  @doc """
  Gets the ranges within the text that need to be localized.
  In other words, the ranges of the text that are not interpolations.
  Returns a list of tuples containing the index and length of each range to be localized.
  ## Examples
  A string with no interpolations:
  ```
  iex> Pseudoloc.get_localizable_ranges("foo")
  [{0, 3}]
  ```
  A string consisting of only interpolations:
  ```
  iex> Pseudoloc.get_localizable_ranges("%{foo}")
  []
  ```
  A string containing multiple interpolations:
  ```
  iex> Pseudoloc.get_localizable_ranges("foo%{bar}baz%{quux}quuux")
  [{0, 3}, {9, 3}, {19, 5}]
  ```
  """
  @spec get_localizable_ranges(String.t()) :: [range]
  def get_localizable_ranges(text) do
    # NOTE(review): Regex.scan(return: :index) yields *byte* offsets, while
    # the localization step slices by grapheme (String.slice/3) — the two
    # agree only for single-byte (ASCII) text. Confirm behavior on
    # multi-byte input.
    interpolation_ranges = Regex.scan(@interpolation_pattern, text, return: :index)
    do_get_ranges(text, 0, interpolation_ranges, [])
  end

  @doc """
  Localizes the `grapheme` if there are valid `alternates`.
  ## Examples
  Returns the grapheme unchanged if there are no alternates:
  ```
  iex> Pseudoloc.localize_grapheme("a", %{"b" => ["ḅ"]})
  "a"
  ```
  Returns a random alternative if they exist:
  ```
  iex> Pseudoloc.localize_grapheme("a", %{"a" => ["α"]})
  "α"
  ```
  ```
  iex> alts = ["1", "2", "3"]
  iex> Pseudoloc.localize_grapheme("a", %{"a" => alts}) in alts
  true
  ```
  """
  @spec localize_grapheme(String.t(), alternates) :: String.t()
  def localize_grapheme(grapheme, alternates) do
    case Map.has_key?(alternates, grapheme) do
      false -> grapheme
      true -> Enum.random(alternates[grapheme])
    end
  end

  @doc """
  Localizes `text` within the `range` with the `alternates`.
  ## Examples
  ```
  iex> Pseudoloc.localize_range("foo", {1, 1}, %{"o" => ["ṓ"]})
  "fṓo"
  ```
  """
  @spec localize_range(String.t(), range, alternates) :: String.t()
  def localize_range(text, range, alternates)

  def localize_range(text, {_start, length}, _alternates) when length <= 0, do: text

  def localize_range(text, {start, length}, alternates) do
    # Replace each grapheme position in the range independently.
    range = Range.new(start, start + length - 1)

    {_, result} =
      Enum.reduce(range, {:cont, text}, fn elem, {_, text} ->
        {:cont, localize_grapheme_at(text, elem, alternates)}
      end)

    result
  end

  @doc """
  Localizes `text` with the default alternates.
  See `localize_string/2` for details.
  """
  @spec localize_string(String.t()) :: String.t()
  def localize_string(text), do: localize_string(text, default_alternates())

  @doc """
  Localizes `text` with the given `alternates`.
  ## Examples
  Localizing the non-interpolated sections of a string:
  ```
  iex> alternates = %{"a" => ["α"], "f" => ["ϝ"], "u" => ["ṵ"]}
  iex> text = "foo%{bar}baz%{quux}quuux"
  iex> Pseudoloc.localize_string(text, alternates)
  "ϝoo%{bar}bαz%{quux}qṵṵṵx"
  ```
  """
  @spec localize_string(String.t(), alternates) :: String.t()
  def localize_string(text, alternates) do
    ranges = get_localizable_ranges(text)

    {_, result} =
      Enum.reduce(ranges, {:cont, text}, fn range, {_, text} ->
        {:cont, localize_range(text, range, alternates)}
      end)

    result
  end

  # ----- Private functions -----

  # Restores original order (ranges are accumulated reversed) and drops
  # zero-length ranges produced by adjacent interpolations.
  defp cleanup_ranges(ranges) do
    ranges
    |> Enum.reverse()
    |> Enum.reject(fn elem -> match?({_, 0}, elem) end)
  end

  # Loads the grapheme-alternates map shipped in priv/alternates.exs.
  defp default_alternates do
    {map, _} = Code.eval_file(Path.join(:code.priv_dir(:pseudoloc), "alternates.exs"))
    map
  end

  # Walks the interpolation matches, emitting the gaps between them (and
  # any trailing text) as localizable ranges.
  defp do_get_ranges(text, last_pos, interpolation_ranges, translate_ranges)

  defp do_get_ranges(text, last_pos, [], translate_ranges) do
    result =
      if last_pos < String.length(text) do
        [{last_pos, String.length(text) - last_pos} | translate_ranges]
      else
        translate_ranges
      end

    cleanup_ranges(result)
  end

  defp do_get_ranges(text, last_pos, [head | tail], translate_ranges) do
    [{start, length}] = head
    do_get_ranges(text, start + length, tail, [{last_pos, start - last_pos} | translate_ranges])
  end

  # Rebuilds the string with the grapheme at index `at` localized.
  defp localize_grapheme_at(text, at, alternates) do
    before_text = String.slice(text, 0, at)
    after_text = String.slice(text, at + 1, String.length(text))
    Enum.join([before_text, localize_grapheme(String.at(text, at), alternates), after_text])
  end
end
|
lib/pseudoloc.ex
| 0.928457
| 0.942823
|
pseudoloc.ex
|
starcoder
|
defmodule Turtle.Vector do
  @moduledoc """
  Operations on two-dimensional vectors, represented as `{x, y}` tuples.
  """

  @type t :: {number(), number()}

  @doc """
  Builds a new 2D vector from its two components.
  ## Examples
      iex> Turtle.Vector.new(1, 2)
      {1, 2}
  """
  @spec new(number(), number()) :: t()
  def new(x, y), do: {x, y}

  @doc """
  Component-wise sum of two vectors.
  ## Examples
      iex> Turtle.Vector.add({1, 2}, {1, 2})
      {2, 4}
  """
  @spec add(t(), t()) :: t()
  def add({ax, ay}, {bx, by}), do: {ax + bx, ay + by}

  @doc """
  Component-wise difference of two vectors.
  ## Examples
      iex> Turtle.Vector.sub({1, 2}, {1, 2})
      {0, 0}
  """
  @spec sub(t(), t()) :: t()
  def sub({ax, ay}, {bx, by}), do: {ax - bx, ay - by}

  @doc """
  Negates both components of a vector.
  ## Examples
      iex> Turtle.Vector.neg({1, 2})
      {-1, -2}
  """
  @spec neg(t()) :: t()
  def neg({vx, vy}), do: {-vx, -vy}

  @doc """
  Scales a vector by a scalar; the second clause computes the dot
  product of two vectors instead.
  ## Examples
      iex> Turtle.Vector.mul({1, 2}, 2)
      {2, 4}
  """
  @spec mul(t(), number()) :: t()
  def mul({vx, vy}, k) when is_number(k), do: {vx * k, vy * k}

  @spec mul(t(), t()) :: number()
  def mul({ax, ay}, {bx, by}), do: ax * bx + ay * by

  @doc """
  Divides both components of a vector by a scalar.
  ## Examples
      iex> Turtle.Vector.div({2, 4}, 2)
      {1.0, 2.0}
  """
  @spec div(t(), number()) :: t()
  def div({vx, vy}, k) when is_number(k), do: {vx / k, vy / k}

  @doc """
  Euclidean length (magnitude) of a vector.
  ## Examples
      iex> Turtle.Vector.abs({3, 4})
      5.0
  """
  @spec abs(t()) :: number()
  def abs({vx, vy}), do: :math.sqrt(vx * vx + vy * vy)

  @doc """
  Rotates a vector counterclockwise by `angle` degrees.
  """
  @spec rotate(t(), number()) :: t()
  def rotate({x, y}, angle) when is_number(angle) do
    radians = angle * :math.pi() / 180.0
    cos = :math.cos(radians)
    sin = :math.sin(radians)
    # Standard CCW rotation: x' = x cos θ − y sin θ, y' = x sin θ + y cos θ.
    {x * cos - y * sin, y * cos + x * sin}
  end
end
|
turtle/lib/turtle/vector.ex
| 0.951515
| 0.877319
|
vector.ex
|
starcoder
|
defmodule MapBasedLruCacheLib do
  @moduledoc """
  in simplest mode like SimpleLruCacheLib we used array
  to store keys which used by cache, and the problem
  was this method does not provide good search, because
  in the time we want change recently updated key we should
  whole of array to find target key and remove it and move it
  to head of array.
  Map provides better algorithm to searching and adding and removing
  something like O(log N).
  ## Breif Algorithm
  we can consider Map keys like befor array index
  and Map Values as key. see below:
  [k1, k2, k3, ... , k100] === %{ 1 => k1, 2 => k2, 3 => k3, ... 100 => k100}
  this seems good but there is a problem, consider we used k3 and want change
  priority the final map should be something like:
  %{ 1 => k3, 2 => k1, 3 => k2, ... 100 => k100}
  this means all keys lesser than k3 key should be updated to k+1.
  to avoid this problem we can order descending, that means
  item with biggest value is most recently used one,
  so our map will be something like:
  %{ 1 => k100, 2 => k99, 3 => k98, ... 100 => k1}
  so by accessing to k3 (by get/put) we will have
  (before access): %{ 1 => k100, ..., 98=>k3, 99=>k2, 100 => k1}
  (after access) : %{ 1 => k100, ..., 99=>k2, 100 => k1, 101 => k3}
  ok, so a challenge:
  How you delete item by key from this map?
  consider we want to delete k50, how it is possible?
  simply we can have reverse map to this map.
  """

  defstruct size: 0,
            capacity: 0,
            access_counter: 0,
            hash_map: %{},
            counter_key_map: %{},
            key_counter_map: %{}

  @type map_based_lru_cache() :: %__MODULE__{
          size: integer,
          capacity: integer,
          access_counter: integer,
          hash_map: map(),
          counter_key_map: map(),
          key_counter_map: map()
        }

  @behaviour LruCacheLib

  @doc ~S"""
  returns new struct with passed capacity
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(10)
      %MapBasedLruCacheLib{access_counter: 0,capacity: 10,counter_key_map: %{},hash_map: %{},key_counter_map: %{},size: 0}
  """
  @spec new_instance(integer) :: map_based_lru_cache()
  @impl true
  def new_instance(capacity)
      when is_integer(capacity) and capacity > 0 do
    %__MODULE__{capacity: capacity}
  end

  @doc ~S"""
  update cache by provied key, value
  ## Algorithm
  if size < capacity:
    if key in HashMap:
      inc access_counter
      update key_counter_map and set new access counter
      delete old counter value from counter_key_map
      add new counter value to counter_key_map
      update hashmap with new value
    else:
      inc access_counter
      update key_counter_map, counter_key_map with access_counter, key
      add key,value to HashMap
  else:
    if key in HashMap:
      inc access_counter
      update key_counter_map and set new access counter
      delete old counter value from counter_key_map
      add new counter value to counter_key_map
      update hashmap with new value
    else:
      get keys of counter_key_map as list and pick first key from list
      delete selected counter and passed key from key_counter_map, counter_key_map and hash_map
      add key, value to cache
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.put("a","a")
      %MapBasedLruCacheLib{access_counter: 1,capacity: 5,counter_key_map: %{1 => "a"},hash_map: %{"a" => "a"},key_counter_map: %{"a" => 1},size: 1}
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.put("a","a") |> MapBasedLruCacheLib.put("b","b")
      %MapBasedLruCacheLib{access_counter: 2,capacity: 5,counter_key_map: %{1 => "a", 2 => "b"},hash_map: %{"a" => "a", "b" => "b"},key_counter_map: %{"a" => 1, "b" => 2},size: 2}
      iex> MapBasedLruCacheLib.new_instance(5)
      ...> |> MapBasedLruCacheLib.put("a","a")
      ...> |> MapBasedLruCacheLib.put("b","b")
      ...> |> MapBasedLruCacheLib.put("a","a")
      %MapBasedLruCacheLib{access_counter: 3,capacity: 5,counter_key_map: %{2 => "b", 3 => "a"},hash_map: %{"a" => "a", "b" => "b"},key_counter_map: %{"a" => 3, "b" => 2},size: 2}
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.put("a","a")
      ...> |> MapBasedLruCacheLib.put("a","b")
      ...> |> MapBasedLruCacheLib.get("a")
      ...> |> Tuple.to_list |> Enum.at(1)
      "b"
  """
  @spec put(map_based_lru_cache(), String.t(), any) :: map_based_lru_cache()
  @impl true
  def put(
        lru_cache = %__MODULE__{
          hash_map: hash_map
        },
        key,
        value
      ) do
    if hash_map |> Map.has_key?(key) do
      # Existing key: just bump its recency and store the new value.
      lru_cache = lru_cache |> update_counter_parts(key)

      %{
        lru_cache
        | hash_map: hash_map |> Map.put(key, value)
      }
    else
      put_new_key(lru_cache, key, value)
    end
  end

  @doc ~S"""
  delete speciefied key from cache.
  Algorithm:
  if key exists in our HashMap
    find counter from key_counter_map
    delete items from key_counter_map and counter_key_map and hash_map
    size = size - 1
    return updated structure
  else
    returns original passed cache structure
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(10)|>MapBasedLruCacheLib.put("a","a")|>MapBasedLruCacheLib.delete("a")|>MapBasedLruCacheLib.size
      0
  """
  @spec delete(map_based_lru_cache(), String.t()) :: map_based_lru_cache()
  @impl true
  def delete(
        lru_cache = %__MODULE__{
          size: size,
          hash_map: hash_map,
          key_counter_map: key_counter_map,
          counter_key_map: counter_key_map
        },
        key
      ) do
    if key_counter_map |> Map.has_key?(key) do
      counter_key_to_del = key_counter_map |> Map.get(key)

      %{
        lru_cache
        | size: size - 1,
          key_counter_map: key_counter_map |> Map.delete(key),
          counter_key_map: counter_key_map |> Map.delete(counter_key_to_del),
          hash_map: hash_map |> Map.delete(key)
      }
    else
      lru_cache
    end
  end

  @doc ~S"""
  search in cache for specified key and if successfully found
  item returns {true, value, new_cache_struct} else
  returns {false, nil, new_cache_struct}
  ## Algorithm:
  if key in hash_map:
    get counter from key_counter_map by key
    update key_counter_map and counter_map_key and access_counter
    return {true, value, updated_struct}
  else
    return {false, nil, struct}
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.put("a","a")
      ...> |> MapBasedLruCacheLib.get("a")|> Tuple.to_list |> Enum.at(1)
      "a"
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.put("a","a")
      ...> |> MapBasedLruCacheLib.put("a","b") |> MapBasedLruCacheLib.get("a")
      ...> |> Tuple.to_list |> Enum.at(1)
      "b"
  """
  @spec get(map_based_lru_cache(), String.t()) ::
          {false, nil, map_based_lru_cache()} | {true, any, map_based_lru_cache()}
  @impl true
  def get(
        lru_cache = %__MODULE__{
          hash_map: hash_map
        },
        key
      ) do
    if hash_map |> Map.has_key?(key) do
      # A hit also refreshes the key's recency.
      {true, hash_map |> Map.get(key), lru_cache |> update_counter_parts(key)}
    else
      {false, nil, lru_cache}
    end
  end

  @doc ~S"""
  returns cache actual size
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(10) |> MapBasedLruCacheLib.size()
      0
      iex> MapBasedLruCacheLib.new_instance(10) |> MapBasedLruCacheLib.put("a","a") |> MapBasedLruCacheLib.size
      1
  """
  @spec size(map_based_lru_cache()) :: integer
  @impl true
  def size(%__MODULE__{size: size}) do
    size
  end

  @doc ~S"""
  returns cache capacity
  ## Examples
      iex> MapBasedLruCacheLib.new_instance(5) |> MapBasedLruCacheLib.capacity
      5
      iex> MapBasedLruCacheLib.new_instance(10) |> MapBasedLruCacheLib.capacity
      10
  """
  @spec capacity(map_based_lru_cache()) :: integer
  @impl true
  def capacity(%__MODULE__{
        capacity: capacity
      }) do
    capacity
  end

  # Inserts a key not currently in the cache while there is still room.
  defp put_new_key(
         lru_cache = %__MODULE__{
           size: size,
           capacity: capacity,
           hash_map: hash_map
         },
         key,
         value
       )
       when size < capacity do
    lru_cache = lru_cache |> update_counter_parts(key)

    size =
      if hash_map |> Map.has_key?(key) do
        size
      else
        size + 1
      end

    %{
      lru_cache
      | size: size,
        hash_map: hash_map |> Map.put(key, value)
    }
  end

  # Cache is full: evict the least-recently-used entry first, then insert.
  defp put_new_key(
         lru_cache = %__MODULE__{
           size: size,
           capacity: capacity,
           counter_key_map: counter_key_map
         },
         key,
         value
       )
       when size == capacity do
    # BUGFIX: the LRU entry is the one with the *smallest* access counter.
    # Map.keys/1 ordering is undefined (only incidentally sorted for small
    # maps; Erlang switches to hash maps above ~32 entries), so take the
    # minimum explicitly instead of relying on `hd/1`.
    oldest_counter_in_cache = counter_key_map |> Map.keys() |> Enum.min()
    oldest_key_in_cache = counter_key_map |> Map.get(oldest_counter_in_cache)
    lru_cache |> delete(oldest_key_in_cache) |> put(key, value)
  end

  # Bumps `key` to most-recently-used: assigns it the next access counter
  # and drops its previous counter entry (if any) from counter_key_map.
  @spec update_counter_parts(map_based_lru_cache(), String.t()) :: map_based_lru_cache()
  defp update_counter_parts(
         lru_cache = %__MODULE__{
           access_counter: access_counter,
           counter_key_map: counter_key_map,
           key_counter_map: key_counter_map
         },
         key
       ) do
    counter_key_map =
      if key_counter_map |> Map.has_key?(key) do
        old_counter = key_counter_map |> Map.get(key)
        counter_key_map |> Map.delete(old_counter)
      else
        counter_key_map
      end

    %{
      lru_cache
      | access_counter: access_counter + 1,
        counter_key_map: counter_key_map |> Map.put(access_counter + 1, key),
        key_counter_map: key_counter_map |> Map.put(key, access_counter + 1)
    }
  end
end
|
apps/lru_cache_lib/lib/map_based_lru_cache_lib.ex
| 0.716913
| 0.580174
|
map_based_lru_cache_lib.ex
|
starcoder
|
defmodule AdventOfCode.Solutions.DayOne do
  @moduledoc false

  # Puzzle input: comma-separated turn (+distance) instructions.
  @input "L5, R1, L5, L1, R5, R1, R1, L4, L1, L3, R2, R4, L4, L1, L1, R2, R4, R3, L1, R4, L4, L5, L4, R4, L5, R1, R5, L2, R1, R3, L2, L4, L4, R1, L192, R5, R1, R4, L5, L4, R5, L1, L1, R48, R5, R5, L2, R4, R4, R1, R3, L1, L4, L5, R1, L4, L2, L5, R5, L2, R74, R4, L1, R188, R5, L4, L2, R5, R2, L4, R4, R3, R3, R2, R1, L3, L2, L5, L5, L2, L1, R1, R5, R4, L3, R5, L1, L3, R4, L1, L3, L2, R1, R3, R2, R5, L3, L1, L1, R5, L4, L5, R5, R2, L5, R2, L1, L5, L3, L5, L5, L1, R1, L4, L3, L1, R2, R5, L1, L3, R4, R5, L4, L1, R5, L1, R5, R5, R5, R2, R1, R2, L5, L5, L5, R4, L5, L4, L4, R5, L2, R1, R5, L1, L5, R4, L3, R4, L2, R3, R3, R3, L2, L2, L2, L1, L4, R3, L4, L2, R2, R5, L1, R2"

  defmodule Position do
    @moduledoc false

    defstruct x: 0, y: 0, direction: :none, visited: MapSet.new([]), visited_twice_distance: 0

    @doc """
    Applies one instruction (`:turn_left`/`:turn_right` plus a distance) to
    `position`, tracking every grid point walked over.
    """
    def move(:turn_left, distance, position) do
      case position.direction do
        :east -> up(position, distance)
        :north -> left(position, distance)
        :west -> down(position, distance)
        :south -> right(position, distance)
        :none -> left(position, distance)
      end
    end

    def move(:turn_right, distance, position) do
      case position.direction do
        :east -> down(position, distance)
        :south -> left(position, distance)
        :west -> up(position, distance)
        :north -> right(position, distance)
        :none -> right(position, distance)
      end
    end

    # Grid points covered by a walk, *in travel order*. The range includes the
    # starting point and excludes the end point, so consecutive moves never
    # double-count the shared corner.
    defp visited_positions_horizontal(start, ending, y) do
      Enum.map(start..ending, fn x -> %{x: x, y: y} end)
    end

    defp visited_positions_vertical(start, ending, x) do
      Enum.map(start..ending, fn y -> %{x: x, y: y} end)
    end

    # Merges the walked points into `visited`. If this move crosses a point
    # that was already visited, and no crossing has been recorded yet, record
    # the distance of the *first* crossing along the direction of travel.
    #
    # BUGFIX: the original took an arbitrary element of the MapSet
    # intersection (`MapSet.to_list |> List.first`), which is nondeterministic
    # when a single move crosses several previously visited points. Scanning
    # the ordered walk finds the true first crossing.
    defp check_visited(visited, new_positions, visited_twice_distance) do
      union = MapSet.union(visited, MapSet.new(new_positions))

      if visited_twice_distance == 0 do
        case Enum.find(new_positions, &MapSet.member?(visited, &1)) do
          nil -> {union, visited_twice_distance}
          crossing -> {union, distance(crossing)}
        end
      else
        {union, visited_twice_distance}
      end
    end

    defp up(position, distance) do
      walked = visited_positions_vertical(position.y, position.y + distance - 1, position.x)
      {new_visited, twice} = check_visited(position.visited, walked, position.visited_twice_distance)
      %{position | y: position.y + distance, direction: :north, visited: new_visited, visited_twice_distance: twice}
    end

    defp down(position, distance) do
      walked = visited_positions_vertical(position.y, position.y - distance + 1, position.x)
      {new_visited, twice} = check_visited(position.visited, walked, position.visited_twice_distance)
      %{position | y: position.y - distance, direction: :south, visited: new_visited, visited_twice_distance: twice}
    end

    defp left(position, distance) do
      walked = visited_positions_horizontal(position.x, position.x - distance + 1, position.y)
      {new_visited, twice} = check_visited(position.visited, walked, position.visited_twice_distance)
      %{position | x: position.x - distance, direction: :west, visited: new_visited, visited_twice_distance: twice}
    end

    defp right(position, distance) do
      walked = visited_positions_horizontal(position.x, position.x + distance - 1, position.y)
      {new_visited, twice} = check_visited(position.visited, walked, position.visited_twice_distance)
      %{position | x: position.x + distance, direction: :east, visited: new_visited, visited_twice_distance: twice}
    end

    @doc "Manhattan distance of `position` from the origin."
    def distance(position) do
      abs(position.x) + abs(position.y)
    end
  end

  alias Position

  defp parse_input(input) do
    input
    |> String.trim()
    |> String.split(", ", trim: true)
  end

  # "L192" -> {:turn_left, 192}
  defp parse_instruction(instruction) do
    direction =
      case String.at(instruction, 0) do
        "L" -> :turn_left
        "R" -> :turn_right
      end

    value = String.slice(instruction, 1, String.length(instruction) - 1)
    {direction, String.to_integer(value)}
  end

  @doc """
  Runs both parts of the puzzle for `input`, returning the final Manhattan
  distance and the distance of the first location visited twice (0 if the
  path never crosses itself).
  """
  def run(input \\ @input) do
    final_position =
      input
      |> parse_input()
      |> Enum.map(&parse_instruction/1)
      |> Enum.reduce(%Position{}, fn {turn, dist}, acc -> Position.move(turn, dist, acc) end)

    %{
      distance: Position.distance(final_position),
      visited_twice_distance: final_position.visited_twice_distance
    }
  end
end
|
lib/solutions/day_one.ex
| 0.690663
| 0.733881
|
day_one.ex
|
starcoder
|
defmodule Routemaster.Drain.EventState do
  @moduledoc """
  Persisted data.
  Each `EventState` is associated to the URL of a resource and
  stores the timestamp of the latest Drain event received for
  that URL.
  They are stored in Redis, with the URL acting as key.
  They are used to keep track of the most recent event for a
  resource and to filter out stale events received out of order.
  """
  alias Routemaster.Drain.Event
  alias Routemaster.Config
  import Routemaster.Redis, only: [serialize: 1, deserialize: 1]
  # Redis connection module used for data storage.
  @redis Routemaster.Redis.Data
  # Namespace prefix for all EventState keys in Redis.
  @key_prefix "drain:event-state:"
  defstruct [:url, :t]
  @type t :: %{
    :__struct__ => __MODULE__,
    required(:url) => Event.url,
    required(:t) => Event.timestamp,
  }
  @doc """
  Checks if a `Routemaster.Drain.Event` is more recent than the
  previously received events for the same URL (for the same resource).
  In practical terms, it compares the timestamp `t` of the event with
  the stored timestamp of the most recent previously received event.
  """
  @spec fresh?(Event.t) :: boolean
  def fresh?(%Event{url: url, t: t}) do
    latest_recorded = get(url)
    # Strictly newer only: an event with the same timestamp as the stored
    # one is considered stale.
    latest_recorded.t < t
  end
  @doc """
  Looks up an `EventState` struct in Redis by URL. If nothing is
  found, it returns a null struct with timestamp equal to zero
  which will be considered older than any real data.
  """
  @spec get(Event.url) :: t
  def get(url) do
    case _redis_get(url) do
      {:ok, data} when not is_nil(data) ->
        deserialize(data)
      _ ->
        # Missing key or Redis error: fall back to a zero-timestamp state.
        %__MODULE__{url: url, t: 0}
    end
  end
  @doc """
  Given a `Routemaster.Drain.Event`, it stores an `EventState` in
  Redis with the event URL and timestamp.
  """
  @spec save(Event.t) :: :ok | {:error, any}
  def save(%Event{url: url, t: t}) do
    state = %__MODULE__{url: url, t: t}
    case _redis_set(url, serialize(state)) do
      {:ok, _} -> :ok
      {:error, _} = error -> error
    end
  end
  defp _redis_get(key) do
    @redis.get ns(key)
  end
  # SETEX: entries expire after the configured cache TTL, so stale state
  # is garbage-collected by Redis itself.
  defp _redis_set(key, value) do
    @redis.setex ns(key), Config.cache_ttl, value
  end
  # Prefixes a key with the module's Redis namespace.
  defp ns(base) do
    @key_prefix <> to_string(base)
  end
end
|
lib/routemaster/drain/event_state.ex
| 0.683102
| 0.502991
|
event_state.ex
|
starcoder
|
defmodule CoAP.Block do
  defstruct number: 0, more: false, size: 0

  @type t :: %__MODULE__{number: integer, more: boolean, size: integer}
  @type tuple_t :: {integer, boolean, integer}
  @type binary_t_small :: <<_::8>>
  @type binary_t_medium :: <<_::16>>
  @type binary_t_large :: <<_::32>>
  @type binary_t :: binary_t_small | binary_t_medium | binary_t_large

  # _TODO: if more: false, a size_exponent of 0 should be ignored?
  # otherwise size_exponent of 0 results in size: 16

  @doc """
  Build a Block struct
  If given a Block struct, return it. If given a message option tuple, build a Block struct.
  """
  @spec build(t()) :: t()
  def build(%__MODULE__{} = block), do: block
  @spec build(tuple_t()) :: t()
  def build({number, more, size}) do
    %__MODULE__{number: number, more: more, size: size}
  end
  @spec build(nil) :: nil
  def build(nil), do: nil

  @spec to_tuple(nil) :: nil
  def to_tuple(nil), do: nil

  @doc """
  Return a block tuple for a block struct. Return a block tuple when given a block tuple
  """
  @spec to_tuple(t()) :: tuple_t()
  def to_tuple(%__MODULE__{} = block), do: {block.number, block.more, block.size}
  @spec to_tuple(tuple_t()) :: tuple_t()
  def to_tuple(block) when is_tuple(block), do: block

  @doc """
  Decode binary block option to tuple
  small size(4) block number
  medium size(12) block number
  large size(28) block number
  Decode tuple from binary block to Block struct
  """
  @spec decode(binary_t_small()) :: t()
  def decode(<<number::size(4), more::size(1), size_exponent::size(3)>>),
    do: decode(number, more, size_exponent)

  @spec decode(binary_t_medium()) :: t()
  def decode(<<number::size(12), more::size(1), size_exponent::size(3)>>),
    do: decode(number, more, size_exponent)

  @spec decode(binary_t_large()) :: t()
  def decode(<<number::size(28), more::size(1), size_exponent::size(3)>>),
    do: decode(number, more, size_exponent)

  @spec decode(integer, 0 | 1, integer) :: t()
  def decode(number, more, size_exponent) do
    %__MODULE__{
      number: number,
      # the wire `more` bit: 0 -> false, anything else -> true
      more: more != 0,
      # block size is 2^(szx + 4), per the CoAP block-wise transfer encoding
      size: trunc(:math.pow(2, size_exponent + 4))
    }
  end

  @spec encode(t() | tuple_t()) :: binary_t()
  def encode(%__MODULE__{} = block), do: encode({block.number, block.more, block.size})
  @spec encode(%{number: integer, more: 0 | 1, size: integer}) :: binary_t()
  def encode(%{number: number, more: more, size: size}), do: encode({number, more, size})

  # A size of 0 is encoded with size_exponent 0 (see the TODO above: it
  # decodes back as size 16).
  def encode({number, more, 0}) do
    encode(number, more_bit(more), 0)
  end

  def encode({number, more, size}) do
    encode(number, more_bit(more), trunc(:math.log2(size)) - 4)
  end

  def encode(number, more, size_exponent) when number < 16 do
    <<number::size(4), more::size(1), size_exponent::size(3)>>
  end

  def encode(number, more, size_exponent) when number < 4096 do
    <<number::size(12), more::size(1), size_exponent::size(3)>>
  end

  def encode(number, more, size_exponent) do
    <<number::size(28), more::size(1), size_exponent::size(3)>>
  end

  # BUGFIX: the map-based `encode/1` clause documents `more: 0 | 1`, but the
  # old `if(more, do: 1, else: 0)` treated the integer 0 as truthy and
  # encoded it as bit 1. Accept booleans and already-encoded 0/1 bits.
  defp more_bit(true), do: 1
  defp more_bit(false), do: 0
  defp more_bit(bit) when bit in [0, 1], do: bit
end
|
lib/coap/block.ex
| 0.552902
| 0.626481
|
block.ex
|
starcoder
|
defmodule Filtrex do
  @moduledoc """
  Filtrex consists of the following primary components:
  * `Filtrex` - handles the overall parsing of filters and delegates to
  `Filtrex.AST` to build an ecto query expression
  * `Filtrex.Condition` - an abstract module built to delegate to specific condition modules in the format of `Filtrex.Condition.Type` where the type is converted to CamelCase (See `Filtrex.Condition.Text.parse/2`)
  * `Filtrex.Params` - an abstract module for parsing plug-like params from a query string into a filter
  * `Filtrex.Fragment` - simple struct to hold generated expressions and values to be used when generating queries for ecto
  * `Filtrex.Type.Config` - struct to hold various configuration and validation options for creating a filter
  """
  require Ecto.Query
  defstruct type: nil, conditions: [], sub_filters: [], empty: false

  # Keys permitted in sanitized filter params.
  @whitelist [
    :filter, :type, :conditions, :sub_filters,
    :column, :comparator, :value, :start, :end
  ]

  # BUGFIX: this was `@type t :: Filtrex.t` — a type defined in terms of
  # itself, which conveys nothing to Dialyzer. Describe the struct instead.
  @type t :: %__MODULE__{
          type: String.t | nil,
          conditions: list,
          sub_filters: [t],
          empty: boolean
        }

  @doc """
  Parses a filter expression and returns an error or the parsed filter with
  the appropriate parsed sub-structures.
  The `configs` option is a list of type configs (See `Filtrex.Type.Config`)
  Example:
  ```
  [%Filtrex.Type.Config{type: :text, keys: ~w(title comments)}]
  ```
  """
  @spec parse([Filtrex.Type.Config.t], map) :: {:error, String.t} | {:ok, Filtrex.t}
  def parse(configs, map) do
    with {:ok, sanitized} <- Filtrex.Params.sanitize(map, @whitelist),
         {:ok, valid_structured_map} <- validate_structure(sanitized),
         do: parse_validated_structure(configs, valid_structured_map)
  end

  @doc """
  Parses a filter expression, like `parse/2`. If any exception is raised when
  parsing the map, a `%Filtrex{empty: true}` struct will be returned.
  """
  @spec safe_parse([Filtrex.Type.Config.t], map) :: Filtrex.t
  def safe_parse(configs, map) do
    try do
      {:ok, filter} = parse(configs, map)
      filter
    rescue
      _ -> %Filtrex{empty: true}
    end
  end

  @doc """
  This function converts Plug-decoded params like the example below into a filtrex struct based on options in the configs.
  ```
  %{"comments_contains" => "love",
    "title" => "My Blog Post",
    "created_at_between" => %{"start" => "2014-01-01", "end" => "2016-01-01"}}
  ```
  """
  def parse_params(_configs, params) when params == %{}, do: {:ok, %Filtrex{empty: true}}
  def parse_params(configs, params) do
    with {:ok, {type, params}} <- parse_params_filter_union(params),
         {:ok, conditions} <- Filtrex.Params.parse_conditions(configs, params),
         do: {:ok, %Filtrex{type: type, conditions: conditions}}
  end

  @doc """
  Converts Plug-decoded params into a Filtrex struct, like `parse_params/1`. If
  an exception is raised while parsing the params, a `%Filtrex{empty: true}` struct
  will be returned.
  """
  def safe_parse_params(_configs, params) when params == %{}, do: %Filtrex{empty: true}
  def safe_parse_params(configs, params) do
    try do
      {:ok, filter} = parse_params(configs, params)
      filter
    rescue
      _ -> %Filtrex{empty: true}
    end
  end

  @doc """
  Converts a filter with the specified ecto module name into a valid ecto query
  expression that is compiled when called.
  If a `%Filtrex{empty: true}` struct is passed as the filter, the query will
  not be modified. If you want the query to return no results when this happens,
  set the `allow_empty` option to `true`:
  ```
  Filtrex.query(query, filter, allow_empty: true)
  ```
  """
  @spec query(Ecto.Query.t, Filtrex.t, Keyword.t) :: Ecto.Query.t
  def query(queryable, filter, opts \\ [allow_empty: true])
  def query(queryable, %Filtrex{empty: true}, opts) do
    if opts[:allow_empty] do
      queryable
    else
      Ecto.Query.where(queryable, false)
    end
  end
  def query(queryable, filter, _opts) do
    # The AST is built as quoted code and evaluated here, so the ecto query
    # expression is effectively compiled at call time.
    {result, _} =
      queryable
      |> Filtrex.AST.build_query(filter)
      |> Code.eval_quoted([], __ENV__)
    result
  end

  @doc """
  Validates the rough filter params structure
  """
  def validate_structure(map) do
    case map do
      %{filter: %{type: type}} when type not in ~w(all any none) ->
        {:error, "Invalid filter type '#{type}'"}
      %{filter: %{conditions: conditions}} when conditions == [] or not is_list(conditions) ->
        {:error, "One or more conditions required to filter"}
      %{filter: %{sub_filters: sub_filters}} when not is_list(sub_filters) ->
        {:error, "Sub-filters must be a valid list of filters"}
      validated = %{filter: params} ->
        # Recursively validate each sub-filter, stopping on the first error.
        sub_filters = Map.get(params, :sub_filters, [])
        result = Enum.reduce_while(sub_filters, {:ok, []}, fn (sub_map, {:ok, acc}) ->
          case validate_structure(sub_map) do
            {:ok, sub_validated} -> {:cont, {:ok, acc ++ [sub_validated]}}
            {:error, error} -> {:halt, {:error, error}}
          end
        end)
        with {:ok, validated_sub_filters} <- result,
          do: {:ok, put_in(validated.filter[:sub_filters], validated_sub_filters)}
      _ ->
        {:error, "Invalid filter structure"}
    end
  end

  defp parse_validated_structure(configs, %{filter: params}) do
    parsed_filters = Enum.reduce_while(params[:sub_filters], {:ok, []}, fn (to_parse, {:ok, acc}) ->
      case parse(configs, to_parse) do
        {:ok, filter} -> {:cont, {:ok, acc ++ [filter]}}
        {:error, error} -> {:halt, {:error, error}}
      end
    end)
    with {:ok, filters} <- parsed_filters,
      do: parse_conditions(configs, params[:type], params[:conditions])
          |> parse_condition_results(params[:type], filters)
  end

  # Parses each condition, accumulating successes and error messages
  # separately so all errors can be reported together.
  defp parse_conditions(configs, type, conditions) do
    Enum.reduce(conditions, %{errors: [], conditions: []}, fn (map, acc) ->
      case Filtrex.Condition.parse(configs, Map.put(map, :inverse, inverse_for(type))) do
        {:error, error} ->
          update_list_in_map(acc, :errors, error)
        {:ok, condition} ->
          update_list_in_map(acc, :conditions, condition)
      end
    end)
  end

  defp parse_condition_results(%{errors: [], conditions: conditions}, type, parsed_filters) do
    {:ok, %Filtrex{type: type, conditions: conditions, sub_filters: parsed_filters}}
  end
  defp parse_condition_results(%{errors: errors}, _, _) do
    {:error, Enum.join(errors, ", ")}
  end

  defp parse_params_filter_union(params) do
    case Map.fetch(params, "filter_union") do
      {:ok, type} when type in ~w(all any none) ->
        {:ok, {type, Map.delete(params, "filter_union")}}
      :error ->
        # No explicit union: default to requiring all conditions.
        {:ok, {"all", params}}
      _ ->
        {:error, "Invalid filter union"}
    end
  end

  # "none" filters negate every condition.
  defp inverse_for("none"), do: true
  defp inverse_for(_), do: false

  defp update_list_in_map(map, key, value) do
    values = Map.get(map, key)
    Map.put(map, key, values ++ [value])
  end
end
|
lib/filtrex.ex
| 0.874379
| 0.910027
|
filtrex.ex
|
starcoder
|
defmodule AWS.ComputeOptimizer do
  @moduledoc """
  AWS Compute Optimizer is a service that analyzes the configuration and
  utilization metrics of your AWS resources, such as EC2 instances and Auto
  Scaling groups. It reports whether your resources are optimal, and
  generates optimization recommendations to reduce the cost and improve the
  performance of your workloads. Compute Optimizer also provides recent
  utilization metric data, as well as projected utilization metric data for
  the recommendations, which you can use to evaluate which recommendation
  provides the best price-performance trade-off. The analysis of your usage
  patterns can help you decide when to move or resize your running resources,
  and still meet your performance and capacity requirements. For more
  information about Compute Optimizer, including the required permissions to
  use the service, see the [AWS Compute Optimizer User
  Guide](https://docs.aws.amazon.com/compute-optimizer/latest/ug/).
  """

  # NOTE(review): this module looks auto-generated from the AWS service
  # definition — each public function delegates to `request/4` with the
  # corresponding X-Amz-Target action name.

  @doc """
  Describes recommendation export jobs created in the last seven days.
  Use the `ExportAutoScalingGroupRecommendations` or
  `ExportEC2InstanceRecommendations` actions to request an export of your
  recommendations. Then use the `DescribeRecommendationExportJobs` action to
  view your export jobs.
  """
  def describe_recommendation_export_jobs(client, input, options \\ []) do
    request(client, "DescribeRecommendationExportJobs", input, options)
  end
  @doc """
  Exports optimization recommendations for Auto Scaling groups.
  Recommendations are exported in a comma-separated values (.csv) file, and
  its metadata in a JavaScript Object Notation (.json) file, to an existing
  Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more
  information, see [Exporting
  Recommendations](https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html)
  in the *Compute Optimizer User Guide*.
  You can have only one Auto Scaling group export job in progress per AWS
  Region.
  """
  def export_auto_scaling_group_recommendations(client, input, options \\ []) do
    request(client, "ExportAutoScalingGroupRecommendations", input, options)
  end
  @doc """
  Exports optimization recommendations for Amazon EC2 instances.
  Recommendations are exported in a comma-separated values (.csv) file, and
  its metadata in a JavaScript Object Notation (.json) file, to an existing
  Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more
  information, see [Exporting
  Recommendations](https://docs.aws.amazon.com/compute-optimizer/latest/ug/exporting-recommendations.html)
  in the *Compute Optimizer User Guide*.
  You can have only one Amazon EC2 instance export job in progress per AWS
  Region.
  """
  def export_e_c2_instance_recommendations(client, input, options \\ []) do
    request(client, "ExportEC2InstanceRecommendations", input, options)
  end
  @doc """
  Returns Auto Scaling group recommendations.
  AWS Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling
  groups that meet a specific set of requirements. For more information, see
  the [Supported resources and
  requirements](https://docs.aws.amazon.com/compute-optimizer/latest/ug/requirements.html)
  in the *AWS Compute Optimizer User Guide*.
  """
  def get_auto_scaling_group_recommendations(client, input, options \\ []) do
    request(client, "GetAutoScalingGroupRecommendations", input, options)
  end
  @doc """
  Returns Amazon EC2 instance recommendations.
  AWS Compute Optimizer generates recommendations for Amazon Elastic Compute
  Cloud (Amazon EC2) instances that meet a specific set of requirements. For
  more information, see the [Supported resources and
  requirements](https://docs.aws.amazon.com/compute-optimizer/latest/ug/requirements.html)
  in the *AWS Compute Optimizer User Guide*.
  """
  def get_e_c2_instance_recommendations(client, input, options \\ []) do
    request(client, "GetEC2InstanceRecommendations", input, options)
  end
  @doc """
  Returns the projected utilization metrics of Amazon EC2 instance
  recommendations.
  <note> The `Cpu` and `Memory` metrics are the only projected utilization
  metrics returned when you run this action. Additionally, the `Memory`
  metric is returned only for resources that have the unified CloudWatch
  agent installed on them. For more information, see [Enabling Memory
  Utilization with the CloudWatch
  Agent](https://docs.aws.amazon.com/compute-optimizer/latest/ug/metrics.html#cw-agent).
  </note>
  """
  def get_e_c2_recommendation_projected_metrics(client, input, options \\ []) do
    request(client, "GetEC2RecommendationProjectedMetrics", input, options)
  end
  @doc """
  Returns the enrollment (opt in) status of an account to the AWS Compute
  Optimizer service.
  If the account is the master account of an organization, this action also
  confirms the enrollment status of member accounts within the organization.
  """
  def get_enrollment_status(client, input, options \\ []) do
    request(client, "GetEnrollmentStatus", input, options)
  end
  @doc """
  Returns the optimization findings for an account.
  For example, it returns the number of Amazon EC2 instances in an account
  that are under-provisioned, over-provisioned, or optimized. It also returns
  the number of Auto Scaling groups in an account that are not optimized, or
  optimized.
  """
  def get_recommendation_summaries(client, input, options \\ []) do
    request(client, "GetRecommendationSummaries", input, options)
  end
  @doc """
  Updates the enrollment (opt in) status of an account to the AWS Compute
  Optimizer service.
  If the account is a master account of an organization, this action can also
  be used to enroll member accounts within the organization.
  """
  def update_enrollment_status(client, input, options \\ []) do
    request(client, "UpdateEnrollmentStatus", input, options)
  end

  # Builds, signs (SigV4) and dispatches one JSON-protocol request to the
  # compute-optimizer endpoint for the client's region.
  @spec request(AWS.Client.t(), binary(), map(), list()) ::
          {:ok, map() | nil, map()}
          | {:error, term()}
  defp request(client, action, input, options) do
    client = %{client | service: "compute-optimizer"}
    host = build_host("compute-optimizer", client)
    url = build_url(host, client)
    headers = [
      {"Host", host},
      {"Content-Type", "application/x-amz-json-1.0"},
      {"X-Amz-Target", "ComputeOptimizerService.#{action}"}
    ]
    payload = encode!(client, input)
    headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
    post(client, url, payload, headers, options)
  end
  defp post(client, url, payload, headers, options) do
    case AWS.Client.request(client, :post, url, payload, headers, options) do
      {:ok, %{status_code: 200, body: body} = response} ->
        # An empty body is returned as nil (no JSON to decode).
        body = if body != "", do: decode!(client, body)
        {:ok, body, response}
      {:ok, response} ->
        {:error, {:unexpected_response, response}}
      error = {:error, _reason} -> error
    end
  end
  # "local" region targets a developer endpoint (explicit or localhost)
  # instead of the real AWS hostname.
  defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
    endpoint
  end
  defp build_host(_endpoint_prefix, %{region: "local"}) do
    "localhost"
  end
  defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
    "#{endpoint_prefix}.#{region}.#{endpoint}"
  end
  defp build_url(host, %{:proto => proto, :port => port}) do
    "#{proto}://#{host}:#{port}/"
  end
  defp encode!(client, payload) do
    AWS.Client.encode!(client, payload, :json)
  end
  defp decode!(client, payload) do
    AWS.Client.decode!(client, payload, :json)
  end
end
|
lib/aws/generated/compute_optimizer.ex
| 0.902218
| 0.666999
|
compute_optimizer.ex
|
starcoder
|
defmodule ExPng.Chunks.Header do
  @moduledoc """
  Represents a PNG IHDR (header) chunk.
  The header records:
  * the image's width and height
  * the bit depth of the encoded pixel data
  * the color mode the pixel data is stored in
  * whether the image data is interlaced with the Adam7 algorithm
  * the filter *method* for the image (distinct from the per-line filter
    *type*, see `ExPng.Image.Line`); `0` is the only valid value
  * the compression method; `0` (DEFLATE, via Erlang's zlib binding) is the
    only valid value
  This data is read from a PNG during decoding and generated automatically
  during encoding, so `ExPng` users should never need to build it by hand.
  """

  @type t :: %__MODULE__{
          width: pos_integer,
          height: pos_integer,
          bit_depth: ExPng.bit_depth(),
          color_mode: ExPng.color_mode(),
          compression: 0,
          filter: 0,
          interlace: 0 | 1,
          type: :IHDR
        }

  defstruct [
    :width,
    :height,
    :bit_depth,
    :color_mode,
    :compression,
    :filter,
    :interlace,
    type: :IHDR
  ]

  @doc """
  Builds a header struct from the 13-byte IHDR chunk payload.
  Returns `{:ok, header}`, or `{:error, "malformed IHDR", data}` when the
  bit depth or color type is invalid.
  """
  @spec new(:IHDR, <<_::104>>) :: {:ok, __MODULE__.t()} | {:error, binary, binary}
  def new(:IHDR, <<width::32, height::32, depth, color, compression, filter, interlace>> = raw) do
    case {validate_bit_depth(depth), validate_color_mode(color)} do
      {{:ok, bit_depth}, {:ok, color_mode}} ->
        {:ok,
         %__MODULE__{
           width: width,
           height: height,
           bit_depth: bit_depth,
           color_mode: color_mode,
           compression: compression,
           filter: filter,
           interlace: interlace
         }}

      _ ->
        {:error, "malformed IHDR", raw}
    end
  end

  @behaviour ExPng.Encodeable

  @doc """
  Serializes the header into a complete IHDR chunk: 4-byte length, the
  "IHDR" chunk type, the 13-byte payload, and a CRC-32 over type + payload.
  Returns the validation error tuple if the header holds an invalid bit
  depth or color mode.
  """
  @impl true
  def to_bytes(%__MODULE__{} = header, _opts \\ []) do
    case {validate_bit_depth(header.bit_depth), validate_color_mode(header.color_mode)} do
      {{:ok, bit_depth}, {:ok, color_mode}} ->
        chunk_type = "IHDR"

        payload =
          <<header.width::32, header.height::32, bit_depth, color_mode, header.compression,
            header.filter, header.interlace>>

        checksum = :erlang.crc32([chunk_type, payload])
        <<13::32>> <> chunk_type <> payload <> <<checksum::32>>

      {{:error, _, _} = error, _} ->
        error

      {_, error} ->
        error
    end
  end

  ## PRIVATE

  # Legal values per the PNG specification.
  @valid_bit_depths [1, 2, 4, 8, 16]
  @valid_color_modes [0, 2, 3, 4, 6]

  defp validate_bit_depth(bd) when bd in @valid_bit_depths, do: {:ok, bd}
  defp validate_bit_depth(bd), do: {:error, "invalid bit depth", bd}

  defp validate_color_mode(ct) when ct in @valid_color_modes, do: {:ok, ct}
  defp validate_color_mode(ct), do: {:error, "invalid color type", ct}
end
|
lib/ex_png/chunks/header.ex
| 0.9118
| 0.786418
|
header.ex
|
starcoder
|
defmodule FakeServer.HTTP.Response do
  @moduledoc """
  Response structure
  FakeServer makes use of the FakeServer.HTTP.Response structure to define the responses that will be given by the server.
  The structure has the following fields:
  - `:code`: The status code of the response. It must be an integer.
  - `:body`: Optional. The response body. Can be a string or a map.
  - `:headers`: Optional. The response headers. Must be a map with the string keys.
  You can use the `new/3` function to create a new struct.
  """
  @enforce_keys [:code]
  defstruct code: nil, body: "", headers: %{}

  # Helper-name -> status-code tables. The per-status helpers below and the
  # `all_4xx/0` / `all_5xx/0` listings are generated from these, so the set
  # of helpers and their ordering live in one place.
  @codes_2xx [
    ok: 200, created: 201, accepted: 202, non_authoritative_information: 203,
    no_content: 204, reset_content: 205, partial_content: 206
  ]
  @codes_4xx [
    bad_request: 400, unauthorized: 401, forbidden: 403, not_found: 404,
    method_not_allowed: 405, not_acceptable: 406, proxy_authentication_required: 407,
    request_timeout: 408, conflict: 409, gone: 410, length_required: 411,
    precondition_failed: 412, payload_too_large: 413, uri_too_long: 414,
    unsupported_media_type: 415, expectation_failed: 417, im_a_teapot: 418,
    unprocessable_entity: 422, locked: 423, failed_dependency: 424,
    upgrade_required: 426, precondition_required: 428, too_many_requests: 429,
    request_header_fields_too_large: 431
  ]
  @codes_5xx [
    internal_server_error: 500, not_implemented: 501, bad_gateway: 502,
    service_unavailable: 503, gateway_timeout: 504, http_version_not_supported: 505,
    variant_also_negotiates: 506, insufficient_storage: 507, not_extended: 510,
    network_authentication_required: 511
  ]

  @doc """
  Creates a new Response structure.
  ## Example
  ```elixir
  FakeServer.HTTP.Response.new(200, %{name: "<NAME>", email: "<EMAIL>"}, %{"Content-Type" => "application/json"})
  FakeServer.HTTP.Response.new(200, ~s<{"name":"<NAME>","email":"<EMAIL>"}>, %{"Content-Type" => "application/json"})
  FakeServer.HTTP.Response.new(201, ~s<{"name":"<NAME>","email":"<EMAIL>"}>)
  FakeServer.HTTP.Response.new(404)
  ```
  """
  def new(status_code, body \\ "", headers \\ %{}) do
    %__MODULE__{code: status_code, body: body, headers: headers}
  end

  @doc """
  Returns a list with all 4xx HTTP methods available
  """
  def all_4xx do
    for {name, _code} <- @codes_4xx, do: apply(__MODULE__, name, [])
  end

  @doc """
  Returns a list with all 5xx HTTP methods available
  """
  def all_5xx do
    for {name, _code} <- @codes_5xx, do: apply(__MODULE__, name, [])
  end

  # Generate one helper per table entry, e.g. `not_found/0`, `not_found/1`,
  # `not_found/2` — each equivalent to `new(code, body, headers)`.
  for {name, code} <- @codes_2xx ++ @codes_4xx ++ @codes_5xx do
    @doc """
    Creates a new response with status code #{code}
    """
    def unquote(name)(body \\ "", headers \\ %{}), do: new(unquote(code), body, headers)
  end

  @doc """
  FakeServer default response. Used when there are no responses left to reply.
  """
  def default, do: new(200, ~s<{"message": "This is a default response from FakeServer"}>)
end
|
lib/fake_server/http/response.ex
| 0.898923
| 0.756807
|
response.ex
|
starcoder
|
defmodule OsrsEx.Hiscores.Hiscore do
  @moduledoc "A struct representing a player's hiscores."

  alias OsrsEx.Hiscores.{Skill, Activity}

  # All fields default to nil until populated from the raw hiscores payload.
  # NOTE: the original source had trailing commas after the final elements of
  # the defstruct list and the type map, which are syntax errors in Elixir
  # collection literals; they have been removed.
  defstruct [
    :overall,
    :attack,
    :defence,
    :strength,
    :hitpoints,
    :ranged,
    :prayer,
    :magic,
    :cooking,
    :woodcutting,
    :fletching,
    :fishing,
    :firemaking,
    :crafting,
    :smithing,
    :mining,
    :herblore,
    :agility,
    :thieving,
    :slayer,
    :farming,
    :runecraft,
    :hunter,
    :construction,
    :clue_scroll_easy,
    :clue_scroll_medium,
    :clue_scroll_hard,
    :clue_scroll_elite,
    :clue_scroll_master,
    :clue_scroll_all,
    :bounty_hunter_rogue,
    :bounty_hunter_hunter,
    :last_man_standing
  ]

  @typedoc """
  A struct representing the anonymous tuples
  returned by the raw Jagex Hiscores API.
  """
  @type t :: %__MODULE__{
          overall: Skill.t,
          attack: Skill.t,
          defence: Skill.t,
          strength: Skill.t,
          hitpoints: Skill.t,
          ranged: Skill.t,
          prayer: Skill.t,
          magic: Skill.t,
          cooking: Skill.t,
          woodcutting: Skill.t,
          fletching: Skill.t,
          fishing: Skill.t,
          firemaking: Skill.t,
          crafting: Skill.t,
          smithing: Skill.t,
          mining: Skill.t,
          herblore: Skill.t,
          agility: Skill.t,
          thieving: Skill.t,
          slayer: Skill.t,
          farming: Skill.t,
          runecraft: Skill.t,
          hunter: Skill.t,
          construction: Skill.t,
          clue_scroll_easy: Activity.t,
          clue_scroll_medium: Activity.t,
          clue_scroll_hard: Activity.t,
          clue_scroll_elite: Activity.t,
          clue_scroll_master: Activity.t,
          clue_scroll_all: Activity.t,
          bounty_hunter_rogue: Activity.t,
          bounty_hunter_hunter: Activity.t,
          last_man_standing: Activity.t
        }
end
|
lib/hiscores/hiscore.ex
| 0.777722
| 0.449211
|
hiscore.ex
|
starcoder
|
defmodule Subscribex do
  @moduledoc """
  Subscribex is a simple way to interface with RabbitMQ. It's split into 2 main
  components:

    * `Subscribex.Broker` — Brokers are wrappers around the RabbitMQ instance.
      Actions go through the broker to get to the connection.
    * `Subscribex.Subscriber` — Subscribers are long-lived connections to a
      RabbitMQ instance that will handle messages as they are read from the
      queue.

  In the following sections, we will provide an overview of those components and
  how they interact with each other. If you're looking for the bare-minimum
  information on how to utilize `Subscribex`, please check
  the [getting started guide](http://hexdocs.pm/subscribex/getting-started.html).

  If you want another example of `Subscribex` wired up within an application,
  please check the [sample application](https://github.com/dnsbty/subscribex_example).

  ## Brokers

  `Subscribex.Broker` is a wrapper around a RabbitMQ instance. We can define a
  broker as follows:

      defmodule MyApp.Broker do
        use Subscribex.Broker, otp_app: :my_app
      end

  Where the configuration for the Broker must be in your application
  environment, usually defined in your `config/config.exs`:

      config :my_app, MyApp.Broker,
        rabbit_host: [
          username: "guest",
          password: "guest",
          host: "localhost",
          port: 5672
        ]

  If you prefer, you can use a URL to connect instead:

      config :my_app, MyApp.Broker,
        rabbit_host: "amqp://guest:guest@localhost:5672"

  Each broker defines a `start_link/0` function that needs to be invoked before
  using the broker. In general, this function is not called directly, but used
  as part of your application supervision tree.

  If your application was generated with a supervisor (by passing `--sup` to
  `mix new`) you will have a `lib/my_app/application.ex` file (or
  `lib/my_app.ex` for Elixir versions `< 1.4.0`) containing the application
  start callback that defines and starts your supervisor. You just need to edit
  the `start/2` function to start the broker as a supervisor on your
  application's supervisor:

      def start(_type, _args) do
        import Supervisor.Spec

        children = [
          supervisor(MyApp.Broker, [])
        ]

        opts = [strategy: :one_for_one, name: MyApp.Supervisor]
        Supervisor.start_link(children, opts)
      end

  ## Subscribers

  Subscribers allow developers to monitor a queue and handle messages from that
  queue as they are published. Let's see an example:

      defmodule MyApp.Queue1Subscriber do
        use Subscribex.Subscriber

        def start_link(broker) do
          Subscribex.Subscriber.start_link(__MODULE__, broker)
        end

        def init(broker) do
          config = %Config{
            auto_ack: true,
            broker: broker,
            queue: "test-queue",
            prefetch_count: 1000,
            exchange: "test-exchange",
            exchange_type: :topic,
            exchange_opts: [durable: true],
            binding_opts: [routing_key: "routing_key"]
          }

          {:ok, config}
        end

        def handle_payload(payload, _channel, _delivery_tag, _redelivered) do
          Logger.info(inspect(payload))
        end
      end

  As messages are read off the queue, they will be passed into the subscriber's
  `handle_payload/4` function for handling.

  For more in-depth discussion about how to utilize `Subscribex.Subscriber`,
  please check the [utilizing subscribers guide](http://hexdocs.pm/subscribex/utilizing-subscribers.html)
  """
end
|
lib/subscribex.ex
| 0.815159
| 0.542924
|
subscribex.ex
|
starcoder
|
defmodule Ockam.Examples.Delivery do
  @moduledoc """
  Examples of using delivery pipe

  Creates a filter worker to lose messages
  Sends messages through filter and through filter wrapped in a delivery pipe

  Returns a sequence of messages received with unreliable delivery (through filter)
  and reliable delivery (through pipe)
  """
  alias Ockam.Examples.Messaging.Filter

  @doc """
  Runs the example: sends 100 messages through a lossy filter (unreliable) and
  100 through the resend pipe wrapped around the same filter (reliable).

  Returns `%{unreliable: payloads, reliable: payloads}` with the payloads that
  actually arrived in each mode.
  """
  def run() do
    pipe_mod = Ockam.Messaging.Delivery.ResendPipe
    {:ok, filter} = Filter.create([])
    {:ok, receiver} = pipe_mod.receiver().create([])

    ## Local delivery is fast, set lower confirm timeout
    {:ok, sender} =
      pipe_mod.sender().create(receiver_route: [filter, receiver], confirm_timeout: 200)

    {:ok, me} = Ockam.Node.register_random_address()

    ## Send 100 messages to self through filter
    Enum.each(1..100, fn n ->
      Ockam.Router.route(%{
        onward_route: [filter, me],
        return_route: [me],
        payload: "unreliable #{n}"
      })
    end)

    # Collect whatever survived the filter; each message gets a 200ms window.
    unreliable =
      1..100
      |> Enum.map(fn n ->
        expected_payload = "unreliable #{n}"

        receive do
          %{onward_route: [^me], payload: ^expected_payload} ->
            expected_payload
        after
          200 ->
            nil
        end
      end)
      |> Enum.reject(&is_nil/1)

    ## Send 100 messages to self through reliable pipe
    Enum.each(1..100, fn n ->
      Ockam.Router.route(%{
        onward_route: [sender, me],
        return_route: [me],
        payload: "reliable #{n}"
      })
    end)

    ## Wait for message #100
    expected_100 = "reliable 100"

    receive do
      %{onward_route: [^me], payload: ^expected_100} ->
        :ok
        ## Fail in 10 minutes
    after
      600_000 ->
        raise "Message #100 was not received in 10 minutes"
    end

    reliable = receive_all()

    %{unreliable: unreliable, reliable: reliable}
  end

  @doc """
  Drains the current process mailbox of `%{payload: _}` messages, returning
  `msgs` with the drained payloads appended in arrival order.
  """
  def receive_all(msgs \\ []) do
    msgs ++ drain([])
  end

  # Accumulate by prepending (O(1)) and reverse once at the end, instead of
  # appending with `++` inside the loop (O(n^2) in the original).
  defp drain(acc) do
    receive do
      %{payload: pl} ->
        drain([pl | acc])
    after
      0 ->
        Enum.reverse(acc)
    end
  end
end
|
implementations/elixir/ockam/ockam/lib/ockam/examples/messaging/delivery.ex
| 0.873174
| 0.467514
|
delivery.ex
|
starcoder
|
defmodule Timex.Parsers.DateFormat.Tokenizers.Strftime do
  @moduledoc """
  Responsible for tokenizing date/time format strings
  which use the strftime formatter.
  """
  alias Timex.Parsers.DateFormat.ParserState, as: State
  alias Timex.Parsers.DateFormat.Directive,   as: Directive

  # These are all the strftime formatter's directives
  @directives [
    # Years
    {"Y", Directive.get(:year4)},
    {"y", Directive.get(:year2)},
    {"C", Directive.get(:century)},
    {"G", Directive.get(:iso_year4)},
    {"g", Directive.get(:iso_year2)},
    # Months
    {"m", Directive.get(:month)},
    {"b", Directive.get(:mshort)},
    {"h", Directive.get(:mshort)},
    {"B", Directive.get(:mfull)},
    # Days
    {"d", Directive.get(:day)},
    {"e", %{Directive.get(:day) | pad_type: :space}},
    {"j", Directive.get(:oday)},
    # Weeks
    {"V", Directive.get(:iso_weeknum)},
    {"W", Directive.get(:week_mon)},
    {"U", Directive.get(:week_sun)},
    {"u", Directive.get(:wday_mon)},
    {"w", Directive.get(:wday_sun)},
    {"a", Directive.get(:wdshort)},
    {"A", Directive.get(:wdfull)},
    # Hours
    {"H", Directive.get(:hour24)},
    {"k", %{Directive.get(:hour24) | pad_type: :space}},
    {"I", Directive.get(:hour12)},
    {"l", %{Directive.get(:hour12) | pad_type: :space}},
    {"M", Directive.get(:min)},
    {"S", Directive.get(:sec)},
    {"s", Directive.get(:sec_epoch)},
    {"P", Directive.get(:am)},
    {"p", Directive.get(:AM)},
    # Timezones
    {"Z", Directive.get(:zname)},
    {"z", Directive.get(:zoffs)},
    {":z", Directive.get(:zoffs_colon)},
    {"::z", Directive.get(:zoffs_sec)},
    # Preformatted Directives
    {"D", Directive.get(:slashed)},
    {"F", Directive.get(:strftime_iso_date)},
    {"R", Directive.get(:strftime_clock)},
    {"r", Directive.get(:strftime_kitchen)},
    {"T", Directive.get(:iso_time)},
    {"v", Directive.get(:strftime_shortdate)}
  ]

  # Matches a single %-directive: optional flags (one of -, _, 0, : or "::"),
  # optional numeric width, optional E/O modifier (rejected below), and the
  # directive character itself.
  @directive_pattern ~r/\%(?<flags>[-_0:]{1}|[:]{2})?(?<width>[\d]+)?([EO]{1})?(?<dir>\w{1})/

  @doc """
  Takes a format string and extracts parsing directives for the parser.

  ## Example

      iex> Timex.Parsers.Tokenizers.Strftime.tokenize("%Y-%0m-%d")
      [%Directive{token: :year4, ...}, %Directive{token: :month, pad: 1, ...}, ...]
  """
  def tokenize(s) when s in [nil, ""], do: {:error, "Format string cannot be nil or empty!"}
  def tokenize(s) when is_list(s), do: tokenize("#{s}")
  def tokenize(s) do
    case Regex.match?(@directive_pattern, s) do
      true ->
        # Scan yields the %-directives, split yields the literal text between
        # them; weave/2 re-interleaves both back into source order.
        tokens = @directive_pattern |> Regex.scan(s, capture: :all)
        parts  = @directive_pattern |> Regex.split(s)
        tokens |> weave(parts) |> do_tokenize(%State{}, [])
      false ->
        {:error, "Invalid strftime format string"}
    end
  end

  defp do_tokenize([], _, result), do: result |> Enum.reverse
  # A bare binary in the woven list is literal text; it becomes a :char
  # directive (with "%%" unescaped to "%").
  defp do_tokenize([part|tokens], %State{col: col} = state, result)
    when is_binary(part)
    do
    # Handle escaped percent signs
    escaped = String.replace(part, "%%", "%")
    new_col = col + String.length(part)
    directive = %Directive{type: :char, token: escaped, raw: part}
    state = %{state | :col => new_col}
    do_tokenize(tokens, state, [directive|result])
  end
  # E/O locale modifiers are valid strftime but not supported here.
  defp do_tokenize([[_, _, _, mod, _]|_], %State{col: col}, _)
    when mod in ["E", "O"],
    do: {:error, "Unsupported modifier #{mod} at column #{col}."}
  defp do_tokenize([[_, _, _, mod, _]|_], %State{col: col}, _)
    when mod != "",
    do: {:error, "Invalid modifier #{mod} at column #{col}."}
  defp do_tokenize([[raw, flags, width, _mod, dir] | tokens], %State{col: col} = state, result) do
    # %z with a ":"/"::" flag maps to distinct offset directives.
    directive = case {dir, flags} do
      {"z", ":"}  -> get_directive(":z")
      {"z", "::"} -> get_directive("::z")
      _           -> get_directive(dir)
    end
    case directive do
      :invalid -> {:error, "Invalid directive used starting at column #{col}"}
      {_, %Directive{} = directive} ->
        directive = %{directive | :raw => raw}
        directive = directive |> process_padding |> process_flags(flags) |> process_width(width)
        new_col = col + ((flags <> width <> dir) |> String.length)
        state = %{state | :col => new_col}
        do_tokenize(tokens, state, [directive | result])
    end
  end
  defp do_tokenize(invalid, %State{col: col}, _) do
    {:error, "Invalid token starting at column #{col}: #{Macro.to_string(invalid)}"}
  end

  # Set padding for numeric tokens to always fill out the full width of the number
  defp process_padding(%Directive{type: :numeric, len: lo..hi} = dir),
    do: %{dir | :pad => hi - lo}
  defp process_padding(%Directive{type: :numeric, len: len} = dir)
    when is_number(len),
    do: %{dir | :pad => len - 1}
  # Everything else keeps default padding (none)
  defp process_padding(%Directive{} = dir),
    do: dir

  # Handles flags for directives
  defp process_flags(%Directive{} = dir, ""),   do: dir
  defp process_flags(%Directive{} = dir, "-"),  do: %{dir | :pad => 0}
  defp process_flags(%Directive{} = dir, "0"),  do: %{dir | :pad_type => :zero}
  defp process_flags(%Directive{} = dir, "_"),  do: %{dir | :pad_type => :space}
  defp process_flags(%Directive{} = dir, ":"),  do: dir
  defp process_flags(%Directive{} = dir, "::"), do: dir
  # ":"/"::" are only meaningful on the zone-offset directives.
  defp process_flags(%Directive{token: token}, flag)
    when not token in [:zoffs, :zoffs_colon, :zoffs_sec] and flag in [":", "::"],
    do: {:error, "Invalid use of `#{flag}` flag. Can only be used with %z directive!"}
  defp process_flags(%Directive{}, flag),
    do: {:error, "Invalid flag #{flag}!"}

  # Sets the minimum string length for a given directive
  defp process_width(%Directive{} = dir, ""), do: dir
  defp process_width(%Directive{} = dir, width) do
    case Integer.parse(width) do
      :error -> {:error, "Invalid width specification: #{width}"}
      {num, _} ->
        case dir.len do
          _..hi when hi < num  -> %{dir | :len => num}
          _..hi when hi >= num -> %{dir | :len => Range.new(num, hi)}
          len when is_number(len) -> %{dir | :len => num}
          :word -> %{dir | :len => Range.new(num, 9999)}
          _ -> dir
        end
    end
  end

  defp get_directive(dir) do
    List.keyfind(@directives, dir, 0) || :invalid
  end

  # Interleaves scanned directive tokens with the literal parts from
  # Regex.split, preserving source order; empty parts are dropped.
  defp weave(xs, ys), do: do_weave(xs, ys, [])
  defp do_weave([], ys, result), do: (Enum.filter(ys, &non_empty/1) ++ result) |> Enum.reverse
  defp do_weave(xs, [], result), do: (Enum.filter(xs, &non_empty/1) ++ result) |> Enum.reverse
  # Handle percent escapes
  # NOTE(review): when the preceding literal ends in a particular run of "%"
  # characters (len != 2 and div(len, 2) == 1), the scanned "directive" is
  # treated as escaped text and merged back into the literal — presumably to
  # undo a false regex match on "%%"; confirm against Timex's strftime tests.
  defp do_weave([[<<?%, _::binary>> = raw, _, _, _, _]=token|xs], [part|ys], result)
    when part != ""
    do
    case String.length(part) - (part |> String.rstrip(?%) |> String.length) do
      len when len != 2 and div(len, 2) == 1 ->
        do_weave(xs, ys, [part <> raw | result])
      _ -> do_weave(xs, ys, [token, part | result])
    end
  end
  defp do_weave([""|xs], [""|ys], result), do: do_weave(xs, ys, result)
  defp do_weave([""|xs], [hy|ys], result), do: do_weave(xs, ys, [hy|result])
  defp do_weave([hx|xs], [""|ys], result), do: do_weave(xs, ys, [hx|result])
  defp do_weave([hx|xs], [hy|ys], result), do: do_weave(xs, ys, [hx, hy | result])
  defp non_empty(""), do: false
  defp non_empty(_),  do: true
end
|
lib/parsers/dateformat/tokenizers/strftime.ex
| 0.779741
| 0.424412
|
strftime.ex
|
starcoder
|
defmodule Kitto.Job.DSL do
  @moduledoc """
  A DSL to define jobs populating the widgets with data.
  """
  alias Kitto.Job
  alias Kitto.Notifier

  @doc false
  defmacro __using__(_opts) do
    quote do
      import Kitto.Job.DSL
      import Kitto.Notifier, only: [broadcast!: 2]
    end
  end

  @doc """
  Main API to define jobs.

  Jobs can either be defined with a block or a command. When using a block, the
  expression represents data retrieval and any transformations required to
  broadcast events to the widgets. With command, the stdout and exit code of
  the command will be broadcasted to the widgets using the jobs name as the
  data source.

  Data broadcast using commands is in the form `{exit_code: integer, stdout: String.t}`

  ## Examples

      use Kitto.Job.DSL

      job :jenkins, every: :minute do
        jobs = Jenkins.jobs |> Enum.map(fn (%{"job" => job}) -> %{job: job.status} end)
        broadcast! :jenkins, %{jobs: jobs}
      end

      job :twitter, do: Twitter.stream("#elixir", &(broadcast!(:twitter, &1)))

      job :echo, every: :minute, command: "echo hello"

      job :kitto_last_commit,
        every: {5, :minutes},
        command: "curl https://api.github.com/repos/kittoframework/kitto/commits\?page\=1\&per_page\=1"

  ## Options

    * `:every` - Sets the interval on which the job will be performed. When it's not
      specified, the job will be called once (suitable for streaming resources).
    * `:first_at` - A timeout after which to perform the job for the first time
    * `:command` - A command to be run on the server which will automatically
      broadcast events using the jobs name.
  """
  defmacro job(name, options, contents \\ []) do
    # NOTE(review): String.to_atom/1 on the job name assumes job names form a
    # bounded set defined in source, not unbounded external input.
    name = if is_atom(name), do: name, else: String.to_atom(name)

    if options[:command] do
      _job(:shell, name, options)
    else
      _job(:elixir, name, options, contents)
    end
  end

  # Elixir jobs: walk the block's AST and rewrite `broadcast!` calls so this
  # job's name is injected as the event source (both piped `|> broadcast!`
  # and direct `broadcast! %{...}` forms).
  defp _job(:elixir, name, options, contents) do
    block = Macro.prewalk (options[:do] || contents[:do]), fn
      {:|>, pipe_meta, [lhs, {:broadcast!, meta, context}]} when is_atom(context) or context == [] -> {:|>, pipe_meta, [lhs, {:broadcast!, meta, [name]}]}
      {:broadcast!, meta, args = [{:%{}, _, _}]} -> {:broadcast!, meta, [name] ++ args}
      ast_node -> ast_node
    end

    quote do
      Job.register binding()[:runner_server],
        unquote(name),
        unquote(options |> Keyword.delete(:do)),
        (__ENV__ |> Map.take([:file, :line])),
        fn -> unquote(block) end
    end
  end

  # Shell jobs: split the command on whitespace, run it with System.cmd/2 and
  # broadcast %{stdout: ..., exit_code: ...} under the job's name.
  defp _job(:shell, name, options) do
    quote do
      command = unquote(options)[:command]

      block = fn ->
        [sh | arguments] = command |> String.split
        {stdout, exit_code} = System.cmd(sh, arguments)
        Notifier.broadcast!(unquote(name), %{stdout: stdout, exit_code: exit_code})
      end

      Job.register binding()[:runner_server],
        unquote(name),
        unquote(options),
        (__ENV__ |> Map.take([:file, :line])),
        block
    end
  end
end
|
lib/kitto/job/dsl.ex
| 0.794106
| 0.405419
|
dsl.ex
|
starcoder
|
defmodule Linkify do
  @moduledoc """
  Create url links from text containing urls.

  Turns an input string like `"Check out google.com"` into
  `Check out "<a href=\"http://google.com\">google.com</a>"`

  ## Examples

      iex> Linkify.link("google.com")
      ~s(<a href="http://google.com">google.com</a>)

      iex> Linkify.link("google.com", new_window: true, rel: "noopener noreferrer")
      ~s(<a href="http://google.com" target="_blank" rel="noopener noreferrer">google.com</a>)

      iex> Linkify.link("google.com", class: "linkified")
      ~s(<a href="http://google.com" class="linkified">google.com</a>)
  """

  import Linkify.Parser

  @doc """
  Finds links and turns them into HTML `<a>` tag.

  Options:

  * `class` - specify the class to be added to the generated link.
  * `rel` - specify the rel attribute.
  * `new_window` - set to `true` to add `target="_blank"` attribute
  * `truncate` - Set to a number to truncate urls longer than the number. Truncated urls will end in `...`
  * `strip_prefix` - Strip the scheme prefix (default: `false`)
  * `exclude_class` - Set to a class name when you don't want urls auto linked in the html of the given class (default: `false`)
  * `exclude_id` - Set to an element id when you don't want urls auto linked in the html of the given element (default: `false`)
  * `email` - link email links (default: `false`)
  * `mention` - link @mentions (when `true`, requires `mention_prefix` or `mention_handler` options to be set) (default: `false`)
  * `mention_prefix` - a prefix to build a link for a mention (example: `https://example.com/user/`, default: `nil`)
  * `mention_handler` - a custom handler to validate and format a mention (default: `nil`)
  * `hashtag: false` - link #hashtags (when `true`, requires `hashtag_prefix` or `hashtag_handler` options to be set)
  * `hashtag_prefix: nil` - a prefix to build a link for a hashtag (example: `https://example.com/tag/`)
  * `hashtag_handler: nil` - a custom handler to validate and format a hashtag
  * `extra: false` - link urls with rarely used schemes (magnet, ipfs, irc, etc.)
  * `validate_tld: true` - Set to false to disable TLD validation for urls/emails, also can be set to :no_scheme to validate TLDs only for urls without a scheme (e.g `example.com` will be validated, but `http://example.loki` won't)
  """
  def link(text, opts \\ []) do
    parse(text, opts)
  end

  @doc """
  Same as `link/2`, but threads an accumulator through the parser by passing
  `{text, acc}` to `Linkify.Parser.parse/2`. Accepts the same options.
  """
  def link_map(text, acc, opts \\ []) do
    parse({text, acc}, opts)
  end
end
|
lib/linkify.ex
| 0.838911
| 0.487612
|
linkify.ex
|
starcoder
|
defmodule Wallaby.Phantom do
  @moduledoc """
  Wallaby driver for PhantomJS.

  ## Usage

  Start a Wallaby Session using this driver with the following command:

  ```
  {:ok, session} = Wallaby.start_session()
  ```

  ## Notes

  This driver requires PhantomJS be installed in your path. You can install PhantomJS through NPM or your package manager of choice:

  ```
  $ npm install -g phantomjs-prebuilt
  ```

  If you need to specify a specific PhantomJS you can pass the path in the configuration:

  ```
  config :wallaby, phantomjs: "node_modules/.bin/phantomjs"
  ```

  You can also pass arguments to PhantomJS through the `phantomjs_args` config setting, e.g.:

  ```
  config :wallaby, phantomjs_args: "--webdriver-logfile=phantomjs.log"
  ```
  """
  use Supervisor

  alias Wallaby.Phantom.Driver
  alias Wallaby.Phantom.ServerPool
  alias Wallaby.DependencyError

  @behaviour Wallaby.Driver

  @doc false
  def start_link(opts \\ []) do
    {:ok, phantomjs_path} = find_phantomjs_executable()
    Supervisor.start_link(__MODULE__, %{phantomjs_path: phantomjs_path}, opts)
  end

  @doc """
  Checks that a usable phantomjs executable can be found.

  Returns `:ok`, or `{:error, %DependencyError{}}` with installation
  instructions when no executable is found.
  """
  def validate do
    case find_phantomjs_executable() do
      {:ok, _path} ->
        :ok

      {:error, :not_found} ->
        exception =
          DependencyError.exception("""
          Wallaby can't find phantomjs. Make sure you have phantomjs installed
          and included in your path, or that your `config :wallaby, :phantomjs`
          setting points to a valid phantomjs executable.
          """)

        {:error, exception}
    end
  end

  def init(%{phantomjs_path: phantomjs_path}) do
    children = [
      {ServerPool, [phantomjs_path]},
      Wallaby.Driver.LogStore
    ]

    Supervisor.init(children, strategy: :one_for_one)
  end

  @doc false
  @spec find_phantomjs_executable :: {:ok, String.t()} | {:error, :not_found}
  def find_phantomjs_executable do
    phantom_path = Application.get_env(:wallaby, :phantomjs, "phantomjs")

    # Try the configured path both expanded and as-is; the first that resolves
    # to an executable on this system wins.
    [Path.expand(phantom_path), phantom_path]
    |> Enum.find(&System.find_executable/1)
    |> case do
      path when is_binary(path) ->
        {:ok, path}

      nil ->
        {:error, :not_found}
    end
  end

  @doc false
  def capabilities(opts) do
    default_capabilities()
    |> Map.merge(user_agent_capability(opts[:user_agent]))
    |> Map.merge(custom_headers_capability(opts[:custom_headers]))
  end

  @doc false
  def default_capabilities do
    %{
      javascriptEnabled: true,
      loadImages: false,
      version: "",
      rotatable: false,
      takesScreenshot: true,
      cssSelectorsEnabled: true,
      browserName: "phantomjs",
      nativeEvents: false,
      platform: "ANY"
    }
  end

  @doc false
  def start_session(opts) do
    {:ok, server} = ServerPool.checkout()
    # Consistency: use the existing `Driver` alias (was fully qualified).
    Driver.create(server, opts)
  end

  @doc false
  def end_session(%Wallaby.Session{server: server} = session) do
    Driver.execute_script(session, "localStorage.clear()", [], check_logs: false)
    Driver.delete(session)
    ServerPool.check_in(server)
  end

  def blank_page?(session) do
    case current_url(session) do
      {:ok, url} -> url == "about:blank"
      _ -> false
    end
  end

  @doc false
  defdelegate accept_alert(session, open_dialog_fn), to: Driver
  @doc false
  defdelegate accept_confirm(session, open_dialog_fn), to: Driver
  @doc false
  # Typo fix: the delegated parameter was previously named `input_va`.
  defdelegate accept_prompt(session, input_value, open_dialog_fn), to: Driver
  @doc false
  defdelegate cookies(session), to: Driver
  @doc false
  defdelegate current_path(session), to: Driver
  @doc false
  defdelegate current_url(session), to: Driver
  @doc false
  defdelegate dismiss_confirm(session, open_dialog_fn), to: Driver
  @doc false
  defdelegate dismiss_prompt(session, open_dialog_fn), to: Driver
  @doc false
  defdelegate get_window_size(session), to: Driver
  @doc false
  defdelegate page_title(session), to: Driver
  @doc false
  defdelegate page_source(session), to: Driver
  @doc false
  defdelegate set_cookie(session, key, value), to: Driver
  @doc false
  defdelegate set_window_size(session, width, height), to: Driver
  @doc false
  defdelegate visit(session, url), to: Driver
  @doc false
  defdelegate attribute(element, name), to: Driver
  @doc false
  defdelegate click(element), to: Driver
  @doc false
  defdelegate clear(element), to: Driver
  @doc false
  defdelegate displayed(element), to: Driver
  @doc false
  defdelegate selected(element), to: Driver
  @doc false
  defdelegate set_value(element, value), to: Driver
  @doc false
  defdelegate text(element), to: Driver
  @doc false
  defdelegate execute_script(session_or_element, script, args), to: Driver
  @doc false
  defdelegate execute_script_async(session_or_element, script, args), to: Driver
  @doc false
  defdelegate find_elements(session_or_element, compiled_query), to: Driver
  @doc false
  defdelegate send_keys(session_or_element, keys), to: Driver
  @doc false
  defdelegate take_screenshot(session_or_element), to: Driver

  defdelegate log(session_or_element), to: Driver
  defdelegate parse_log(log), to: Wallaby.Phantom.Logger

  @doc false
  def user_agent do
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/538.1 (KHTML, like Gecko) PhantomJS/2.1.1 Safari/538.1"
  end

  @doc false
  def user_agent_capability(nil), do: %{}

  def user_agent_capability(ua) do
    %{"phantomjs.page.settings.userAgent" => ua}
  end

  @doc false
  def custom_headers_capability(nil), do: %{}

  def custom_headers_capability(ch) do
    Enum.reduce(ch, %{}, fn {k, v}, acc ->
      Map.merge(acc, %{"phantomjs.page.customHeaders.#{k}" => v})
    end)
  end

  # PhantomJS does not implement the window/frame-management WebDriver
  # endpoints, so these all report :not_supported.
  def window_handle(_session), do: {:error, :not_supported}
  def window_handles(_session), do: {:error, :not_supported}
  def focus_window(_session, _window_handle), do: {:error, :not_supported}
  def close_window(_session), do: {:error, :not_supported}
  def get_window_position(_session), do: {:error, :not_supported}
  def set_window_position(_session, _x, _y), do: {:error, :not_supported}
  def maximize_window(_session), do: {:error, :not_supported}
  def focus_frame(_session, _frame), do: {:error, :not_supported}
  def focus_parent_frame(_session), do: {:error, :not_supported}
end
|
lib/wallaby/phantom.ex
| 0.771542
| 0.579192
|
phantom.ex
|
starcoder
|
defmodule Cure do
  use Application

  @moduledoc """
  The main Cure module. Provides a few functions to easily start a connection
  with a C/C++ program, to send messages to the program and to handle the
  incoming responses.
  """

  @doc """
  Starts the Cure application, returns the Cure.Supervisor-PID.
  """
  @spec start(any, any) :: {:ok, pid} | {:error, term}
  def start(_type, _args) do
    Cure.Supervisor.start_link
  end

  @doc """
  Starts a Cure.Server process that can communicate with a C-program.
  """
  @spec load(String.t) :: {:ok, pid}
  def load(c_program_name) when is_binary(c_program_name) do
    Cure.Supervisor.start_child(c_program_name)
  end

  @doc """
  Sends binary data to the C/C++ program that the server is connected with. A
  callback-function (arity 1) can be added to handle the incoming response of
  the program. If no callback is added, the response will be sent to the
  calling process of this function.

  The third argument indicates how the response should be handled. Possible
  modes for handling the response are the following:

  :once -> callback function is only applied once.
  :permanent -> callback function is applied to all following events
  :sync -> the server waits with further events until response is processed
  (no timeout specified = :infinity).

  (Same effect as calling Cure.Server.send_data directly.)
  """
  @spec send_data(pid, binary, :once | :permanent | :sync,
    ((binary) -> any) | timeout) :: :ok
  def send_data(server, data, :once, callback) do
    server |> Cure.Server.send_data(data, :once, callback)
  end

  def send_data(server, data, :permanent, callback) do
    server |> Cure.Server.send_data(data, :permanent, callback)
  end

  # Bug fix: the guard was `is_function(1, callback)` (arguments swapped),
  # which can never match, so a :sync call with a callback silently fell
  # through to the timeout clause below.
  def send_data(server, data, :sync, callback) when is_function(callback, 1) do
    server |> Cure.Server.send_data(data, :sync, callback)
  end

  def send_data(server, data, :sync, timeout) do
    server |> Cure.Server.send_data(data, :sync, timeout)
  end

  @doc """
  Sends binary data to the C/C++ program that the server is connected with.
  The server waits with processing further events until the response for this
  function is handled.

  (Same effect as calling Cure.Server.send_data directly.)
  """
  @spec send_data(pid, binary, :sync, ((binary) -> any), timeout) :: :ok
  def send_data(server, data, :sync, callback, timeout) do
    server |> Cure.Server.send_data(data, :sync, callback, timeout)
  end

  @doc """
  Sends binary data to the C/C++ program that the server is connected with.
  The result is sent back to the process that called this function. The third
  argument indicates how the response should be handled. Possible modes for
  handling the response are the following:

  :once -> Only the first event will be sent back to the calling process.
  :noreply -> No event will be sent back to the calling process.
  :permanent -> All following events will be sent back to the calling process.
  :sync -> the server waits with processing further events until the response is
  sent back to the calling process (timeout = :infinity unless specified).

  (Same effect as calling Cure.Server.send_data directly.)
  """
  # Spec fix: these are arity-3 clauses; the mode is one argument, previously
  # written as two separate spec parameters (an arity-4 spec).
  @spec send_data(pid, binary,
    :once | :noreply | :permanent | :sync) :: :ok | {:error, term}
  def send_data(server, data, :once) do
    server |> Cure.Server.send_data(data, :once)
  end

  def send_data(server, data, :noreply) do
    server |> Cure.Server.send_data(data, :noreply)
  end

  def send_data(server, data, :permanent) do
    server |> Cure.Server.send_data(data, :permanent)
  end

  def send_data(server, data, :sync) do
    server |> Cure.Server.send_data(data, :sync)
  end

  @doc """
  Subscribes the calling process to receive data events from the server process.
  """
  @spec subscribe(pid) :: :ok
  def subscribe(server), do: server |> Cure.Server.subscribe

  @doc """
  Adds an extra callback function to the server that is triggered on all
  incoming data.
  """
  @spec subscribe(pid, ((binary) -> any)) :: :ok
  def subscribe(server, fun), do: server |> Cure.Server.subscribe(fun)

  @doc """
  Unsubscribes the calling process from receiving further data events coming
  from the server process.
  """
  @spec unsubscribe(pid) :: :ok
  def unsubscribe(server), do: server |> Cure.Server.unsubscribe

  @doc """
  Removes a callback that was applied to all incoming data events.
  NOTE: this has to be the exact same callback function that was registered
  earlier with subscribe in order for this function to work properly.
  """
  @spec unsubscribe(pid, ((binary) -> any)) :: :ok
  def unsubscribe(server, fun), do: server |> Cure.Server.unsubscribe(fun)

  @doc """
  Stops a server process that is being supervised in the supervision tree.
  """
  @spec stop(pid) :: :ok
  def stop(server) when is_pid(server) do
    server |> Cure.Supervisor.terminate_child
  end
end
|
lib/cure.ex
| 0.802981
| 0.559952
|
cure.ex
|
starcoder
|
defmodule DBConnection.LogEntry do
  @moduledoc """
  Struct containing log entry information.
  """

  defstruct [:call, :query, :params, :result, :pool_time, :connection_time,
    :decode_time]

  @typedoc """
  Log entry information.

  * `:call` - The `DBConnection` function called
  * `:query` - The query used by the function
  * `:params` - The params passed to the function (if any)
  * `:result` - The result of the call
  * `:pool_time` - The length of time awaiting a connection from the pool (if
  the connection was not already checked out)
  * `:connection_time` - The length of time using the connection
  * `:decode_time` - The length of time decoding the result (if decoded the
  result using `DBConnection.Query.decode/3`)

  All times are in the native time units of the VM, see
  `System.monotonic_time/0`. Falls back to `:os.timestamp/0`.
  """
  @type t :: %__MODULE__{call: atom,
    query: any,
    params: any,
    result: {:ok, any} | {:ok, any, any} | {:error, Exception.t},
    pool_time: non_neg_integer | nil,
    connection_time: non_neg_integer,
    decode_time: non_neg_integer | nil}

  @doc false
  # Builds an entry and fills in the duration fields from the list of tagged
  # timestamps (see parse_times/2).
  def new(call, query, params, times, result) do
    entry = %__MODULE__{call: call, query: query, params: params,
      result: result}
    parse_times(times, entry)
  end

  ## Helpers

  # `times` is a list of {tag, timestamp} tuples ordered newest-first, e.g.
  # [{:decode, _}, {:stop, _}, {:start, _}, {:checkout, _}]. Folding over it
  # pairs each timestamp with the previous (more recent) one, producing one
  # duration field per adjacent pair. An empty list leaves the entry as-is.
  defp parse_times([], entry), do: entry
  defp parse_times([first | times], entry) do
    {_, entry} = Enum.reduce(times, {first, entry}, &parse_time/2)
    entry
  end

  # Compile-time choice of subtraction: plain difference of monotonic times
  # when :erlang.monotonic_time/0 exists, otherwise :timer.now_diff/2 on
  # :os.timestamp/0 tuples, clamped at 0.
  defmacrop diff(to, from) do
    if function_exported?(:erlang, :monotonic_time, 0) do
      quote do: unquote(to) - unquote(from)
    else
      quote do: max(:timer.now_diff(unquote(to), unquote(from)), 0)
    end
  end

  # decode_time: from :stop until :decode (decoding happens after the stop).
  defp parse_time({:stop, stop} = time, {{:decode, decode}, entry}) do
    {time, %{entry | decode_time: diff(decode, stop)}}
  end
  # connection_time: the connection was in use from :start until :stop.
  defp parse_time({:start, start} = time, {{:stop, stop}, entry}) do
    {time, %{entry | connection_time: diff(stop, start)}}
  end
  # pool_time: waiting on the pool from :checkout until :start.
  defp parse_time({:checkout, checkout} = time, {{:start, start}, entry}) do
    {time, %{entry | pool_time: diff(start, checkout)}}
  end
end
|
deps/db_connection/lib/db_connection/log_entry.ex
| 0.868353
| 0.499023
|
log_entry.ex
|
starcoder
|
defmodule ConnAudit do
  @moduledoc """
  This application provides a simple implementation for login brute force protection.
  The two components for this are the Verification Plug macro and the Auditing API.
  You can check out the sections below for more information on each component.
  This application isn't meant to be a replacement for a _real_ solution and you'd assuredly be better suited using something production-ready.

  The way the system works is to place a `Plug` built using the `ConnAudit` macro in front of your login page.
  The macro accepts a handler for what to do if the "verification" fails, otherwise it will just pass the `Plug.Conn` through to the next plug.

  #### Verification Plug

  The verification plug macro will create a plug `Plug` for you that will check the result of an audit.
  If the audit passes, it will forward the `conn` on.
  If it fails, it will pass the `conn` into the function that's passed in.

  ```elixir
  defmodule AuditTest.Plug.Verify do
    use ConnAudit
    import Phoenix.Controller, only: [redirect: 2]

    def on_reject(conn) do
      conn
      |> redirect(to: "/403")
      |> halt()
    end
  end
  ```

  The `use ConnAudit` macro requires your plug to implement the `on_reject` function.
  This function takes a `Plug.Conn` and returns a `Plug.Conn`.
  This allows you to define how you want to handle a failed audit.
  Then just add this `Plug` into your pipeline or attach it whatever routes you want auditing on.
  This `Plug` will create a new `GenServer` for every unique token passed to the Audit API.
  I'd recommend limiting the number of routes you apply this to.

  ```elixir
  pipeline :audited do
    plug AuditTest.Plug.Verify
  end

  scope "/login", AuditTestingWeb do
    pipe_through [:browser, :audited]
    get "/", LoginController, :index
  end
  ```

  You'd be better off relying on sessions to isolate the majority of your pages and having this `Plug` only on your actual login page.
  That said, it can be used for any kind of auditing purposes to block access to specific pages, nothing says it has to be used for login controls.

  The resolution of the `Plug.Conn` is implemented through a Protocol.
  You are welcome to re-implement the existing protocol how you deem fit.
  You could alternatively write an entirely new one and create your own verification plug.
  The existing one is as follows.

  ```elixir
  defimpl ConnAudit.Resolvable, for: Plug.Conn do
    def resolve(%Plug.Conn{remote_ip: {oct1, oct2, oct3, oct4}}) do
      Integer.to_string(oct1) <> "." <>
      Integer.to_string(oct2) <> "." <>
      Integer.to_string(oct3) <> "." <>
      Integer.to_string(oct4)
    end
  end
  ```

  #### Auditing API

  There are three functions as part of the auditing API:

  - `fail/1` marks a failed audit for an identifier.
  - `succeed/1` marks a successful audit for an identifier.
  - `check/1` returns the current audit status for an identifier (`:pass` or `:lockout`).

  Each function accepts either a binary token directly, or any value that
  implements the `ConnAudit.Resolvable` protocol (such as a `Plug.Conn`),
  which is first resolved into a token via `resolve/1`.

  ```elixir
  defmodule AuditTestWeb.LoginController do
    # Plug only on this page instead of in pipeline
    plug AuditTest.Plug.Verify

    def create(conn, %{
      "login_form" => %{
        "username" => username,
        "password" => password
      }
    }) do
      if Accounts.login(username, password) do
        ConnAudit.succeed(conn)
        conn
        |> put_session(:authenticated, username)
        |> redirect(to: "/")
      else
        ConnAudit.fail(conn)
        conn
        |> put_flash(:error, "Invalid username or password.")
        |> redirect(to: "/auth/login")
      end
    end
  end
  ```

  ## Logging

  There are four `:telemetry` events executed in this application.

  | Event | Description | Atoms |
  | :---: | :---: | :---: |
  | Audit Success | A successful audit was performed for a token | `[:conn_audit, :audit, :success]` |
  | Audit Failure | An unsuccessful audit was performed for a token | `[:conn_audit, :audit, :failure]` |
  | Audit Lockout | An audit was considered "locked out" | `[:conn_audit, :audit, :lockout]` |
  | Audit Timeout | The configured `:ttl` has passed and an audit is no longer valid | `[:conn_audit, :audit, :timeout]` |

  All of the events have the same parameters and metadata.

  ### Parameters

  - `attempts` is the number of failed attempts that happened up to that point
  - `token` is the resolved token for the connection / user

  ### Metadata

  - `timestamp` is a UTC `DateTime`
  """

  alias ConnAudit.Auditor
  import ConnAudit.Resolvable

  @doc """
  Accepts an identifier and will mark a failed audit.
  This can accept a binary or a struct that implements the `ConnAudit.Resolvable` protocol.
  """
  def fail(token) when is_binary(token) do
    Auditor.fail(token)
  end

  # Non-binary identifiers are resolved to a token first, then re-dispatched.
  def fail(resolvable) do
    resolve(resolvable)
    |> fail()
  end

  @doc """
  Accepts an identifier and will mark a successful audit.
  This can accept a binary or a struct that implements the `ConnAudit.Resolvable` protocol.
  """
  def succeed(token) when is_binary(token) do
    Auditor.succeed(token)
  end

  def succeed(resolvable) do
    resolve(resolvable)
    |> succeed()
  end

  @doc """
  Accepts an identifier and will return the current audit status.
  The audit status will either be `:pass` if the identifier is accepted, or `:lockout` if denied.
  This can accept a binary or a struct that implements the `ConnAudit.Resolvable` protocol.
  """
  def check(token) when is_binary(token) do
    Auditor.check(token)
  end

  def check(resolvable) do
    resolve(resolvable)
    |> check()
  end

  @doc """
  This will create the necessary code for a `Plug` to be used in a `Plug` pipeline or individual `Plug` controller.
  See the `ConnAudit` module description for more information.
  """
  defmacro __using__(_opts) do
    quote do
      import Plug.Conn
      import ConnAudit.Resolvable

      @behaviour ConnAudit.Plug

      def init(opts), do: opts

      # Resolve the conn to its audit token; pass the conn through when the
      # audit allows it, otherwise hand it to the user-defined on_reject/1.
      def call(conn, _opts) do
        token = resolve(conn)

        case ConnAudit.check(token) do
          :pass ->
            conn

          _ ->
            on_reject(conn)
        end
      end
    end
  end
end
|
lib/conn_audit.ex
| 0.719088
| 0.946794
|
conn_audit.ex
|
starcoder
|
defmodule AutoLinker do
  @moduledoc """
  Create url links from text containing urls.

  Turns an input string like `"Check out google.com"` into
  `Check out "<a href='http://google.com' target='_blank' rel='noopener noreferrer'>google.com</a>"`

  ## Examples

      iex> AutoLinker.link("google.com")
      "<a href='http://google.com' class='auto-linker' target='_blank' rel='noopener noreferrer'>google.com</a>"

      iex> AutoLinker.link("google.com", new_window: false, rel: false)
      "<a href='http://google.com' class='auto-linker'>google.com</a>"

      iex> AutoLinker.link("google.com", new_window: false, rel: false, class: false)
      "<a href='http://google.com'>google.com</a>"

      iex> AutoLinker.link("[Google](http://google.com)", markdown: true, new_window: false, rel: false, class: false)
      "<a href='http://google.com'>Google</a>"

      iex> AutoLinker.link("[Google Search](http://google.com)", markdown: true)
      "<a href='http://google.com' class='auto-linker' target='_blank' rel='noopener noreferrer'>Google Search</a>"
  """

  import AutoLinker.Parser

  @doc """
  Auto link a string.

  Options:

  * `class: "auto-linker"` - specify the class to be added to the generated link. false to clear
  * `rel: "noopener noreferrer"` - override the rel attribute. false to clear
  * `new_window: true` - set to false to remove `target='_blank'` attribute
  * `scheme: false` - Set to true to link urls with schema `http://google`
  * `truncate: false` - Set to a number to truncate urls longer than the number. Truncated urls will end in `..`
  * `strip_prefix: true` - Strip the scheme prefix
  * `exclude_class: false` - Set to a class name when you don't want urls auto linked in the html of the given class
  * `exclude_id: false` - Set to an element id when you don't want urls auto linked in the html of the given element
  * `exclude_patterns: ["```"]` - Don't link anything between the pattern
  * `markdown: false` - link markdown style links

  Each of the above options can be specified when calling `link(text, opts)`
  or can be set in the `:auto_linker` configuration. For example:

      config :auto_linker,
        class: false,
        new_window: false

  Note that passing opts to `link/2` will override the configuration settings.
  """
  def link(text, opts \\ []), do: parse(text, opts)
end
|
lib/auto_linker.ex
| 0.818374
| 0.542682
|
auto_linker.ex
|
starcoder
|
defmodule GenRetry do
  @moduledoc ~s"""
  GenRetry provides utilities for retrying Elixir functions,
  with configurable delay and backoff characteristics.

  ## Summary

  Given a 0-arity function which raises an exception upon failure, `retry/2`
  and `retry_link/2` repeatedly executes the function until success is
  reached or the maximum number of retries has occurred.

  `GenRetry.Task.async/2` and `GenRetry.Task.Supervisor.async/3`
  provide drop-in replacements for `Task.async/1` and
  `Task.Supervisor.async/2`, respectively, adding retry capability.
  They return plain `%Task{}` structs, usable with any other function in
  the `Task` module.

  ## Examples

      my_background_function = fn ->
        :ok = try_to_send_tps_reports()
      end
      GenRetry.retry(my_background_function, retries: 10, delay: 10_000)

      my_future_function = fn ->
        {:ok, val} = get_val_from_flaky_network_service()
        val
      end
      t = GenRetry.Task.async(my_future_function, retries: 3)
      my_val = Task.await(t) # may raise exception

  ## Installation

  1. Add GenRetry to your list of dependencies in `mix.exs`:

         def deps do
           [{:gen_retry, "~> #{GenRetry.Mixfile.project()[:version]}"}]
         end

  2. Ensure GenRetry is started before your application:

         def application do
           [applications: [:gen_retry]]
         end

  3. (Optional) Specify a custom logging module in your config.exs

         config :gen_retry, GenRetry.Logger, logger: MyApp.CustomLogger

     where MyApp.CustomLogger implements the GenRetry.Logger behavior.
     The default module is GenRetry.Utils if none is specified in a config.

  ## Options

  * `:retries`, integer (default 1):
    Number of times to retry upon failure. Set to
    0 to try exactly once; set to `:infinity` to retry forever.

  * `:delay`, integer (default 1000):
    Number of milliseconds to wait between first failure and first retry.
    Subsequent retries use this value as a starting point for exponential
    backoff.

  * `:jitter`, number (default 0):
    Proportion of current retry delay to randomly add to delay time.
    For example, given options `delay: 1000, jitter: 0.1`, the first delay
    will be a random time between 1000 and 1100 milliseconds. Values
    under 0 will remove time rather than add it; beware of values under -1,
    which may result in nonsensical "negative time delays".

  * `:exp_base`, number (default 2):
    The base to use for exponentiation during exponential backoff.
    Set to `1` to disable backoff. Values less than 1 are not very useful.

  * `:respond_to`, pid (ignored by `GenRetry.Task.*`):
    The process ID to which a message should be sent upon completion.
    Successful exits send `{:success, return_value, final_retry_state}`;
    unsuccessful exits send `{:failure, error, stacktrace, final_retry_state}`.

  * `:on_success`, `fn {result, final_retry_state} -> nil end` (default no-op):
    A function to run on success. The argument is a tuple containing the result and the final retry state.

  * `:on_failure`, `fn {exception, stacktrace, final_retry_state} -> nil end` (default no-op):
    A function to run on failure. The argument is a tuple containing the exception, stacktrace, and final retry state.
  """

  use Application

  @doc false
  # OTP Application callback: starts the singleton launcher process used by
  # `retry/2` under a one-for-one supervisor.
  def start(_type, _args) do
    import Supervisor.Spec, warn: false

    children = [
      worker(GenRetry.Launcher, [[], [name: :gen_retry_launcher]])
    ]

    opts = [strategy: :one_for_one, name: GenRetry.Supervisor]
    Supervisor.start_link(children, opts)
  end

  defmodule State do
    @moduledoc ~S"""
    Used to represent the state of a GenRetry invocation.
    This struct is part of the success or failure message to be
    optionally sent to another process, specified by `opts[:respond_to]`,
    upon completion.

    * `:function` and `:opts` are the invocation arguments supplied by the user.
    * `:tries` is the total number of attempts made before sending this message.
    * `:retry_at` is either the timestamp of the last attempt, or 0
      (if `opts[:retries] == 0`).
    """

    # the function to retry
    defstruct function: nil,
              # %GenRetry.Options{} from caller
              opts: nil,
              # number of tries performed so far
              tries: 0,
              # :erlang.system_time(:milli_seconds)
              retry_at: 0,
              # a function for logging
              logger: nil

    # Fix: the struct declares a `:logger` field, but the type previously
    # omitted it, so Dialyzer would flag valid structs carrying a logger.
    # NOTE(review): the logger's exact signature is not visible in this file,
    # so it is typed loosely as a function -- confirm against GenRetry.Logger.
    @type t :: %__MODULE__{
            function: GenRetry.retryable_fun(),
            opts: GenRetry.Options.t(),
            tries: non_neg_integer,
            retry_at: non_neg_integer,
            logger: (... -> any) | nil
          }
  end

  defmodule Options do
    @moduledoc false

    # Defaults mirror the documented option defaults in the GenRetry moduledoc.
    defstruct retries: 1,
              delay: 1000,
              jitter: 0,
              exp_base: 2,
              respond_to: nil,
              on_success: nil,
              on_failure: nil

    @type t :: %__MODULE__{
            retries: :infinity | non_neg_integer,
            delay: non_neg_integer,
            jitter: number,
            exp_base: number,
            respond_to: pid | nil,
            on_success: GenRetry.on_success(),
            on_failure: GenRetry.on_failure()
          }

    # Provides Options.new/1 to build the struct from a keyword list.
    use ExConstructor
  end

  @type option ::
          {:retries, :infinity | non_neg_integer}
          | {:delay, non_neg_integer}
          | {:jitter, number}
          | {:exp_base, number}
          | {:respond_to, pid}
          | {:on_success, on_success()}
          | {:on_failure, on_failure()}

  @type options :: [option]

  @type retryable_fun :: (() -> any | no_return)

  @type success_msg :: {:success, any, GenRetry.State.t()}

  @type failure_msg ::
          {:failure, Exception.t(), [:erlang.stack_item()], GenRetry.State.t()}

  @type on_success ::
          ({result :: any(), final_retry_state :: GenRetry.State.t()} -> any())

  @type on_failure ::
          ({exception :: any(), stacktrace :: list(),
            final_retry_state :: GenRetry.State.t()} ->
             any())

  @doc ~S"""
  Starts a retryable process linked to `GenRetry.Supervisor`, and returns its
  pid. `fun` should be a function that raises an exception upon failure;
  any other return value is treated as success.
  """
  @spec retry(retryable_fun, options) :: pid
  def retry(fun, opts \\ []) do
    GenRetry.Launcher.launch(fun, opts)
  end

  @doc ~S"""
  Starts a retryable process linked to the current process, and returns its
  pid. `fun` should be a function that raises an exception upon failure;
  any other return value is treated as success.
  """
  @spec retry_link(retryable_fun, options) :: pid
  def retry_link(fun, opts \\ []) do
    {:ok, pid} =
      GenServer.start_link(
        GenRetry.Worker,
        {fun, Options.new(opts)},
        timeout: :infinity
      )

    pid
  end
end
|
lib/gen_retry.ex
| 0.875674
| 0.505066
|
gen_retry.ex
|
starcoder
|
defmodule DivoVernemq do
  @moduledoc """
  Defines a single-node "cluster" vernemq broker
  as a map compatible with divo for building a
  docker-compose file.
  """
  @behaviour Divo.Stack

  # Log levels accepted by validate_log_level/1, as atoms or strings.
  @valid_log_levels [:debug, :info, :warning, :error, "debug", "info", "warning", "error"]

  @doc """
  Implements the Divo Stack behaviour to take a
  keyword list of defined variables specific to
  the DivoVernemq stack and returns a map describing
  the service definition of VerneMQ.

  ### Optional Configuration

  * `allow_anonymous`: Allow unauthenticated connections to the cluster. Defaults to `:on`
  * `mqtt_port`: The port on which the MQTT listener will be exposed to the host. The default
    port VerneMQ uses for MQTT is 1883. This only affects the port exposed to the host;
    within the container the VerneMQ broker is listening on port 1883.
  * `ssl_mqtt_port`: The port on which the MQTT over SSL communication will be exposed to the host.
    The default port VerneMQ uses for MQTT over SSL is 8883. This only affects the
    port exposed to the host; within the container, the VerneMQ broker is listening
    on port 8883.
  * `ws_mqtt_port`: The port on which the MQTT over Websocket communication will be exposed to the host.
    The default port VerneMQ uses for MQTT over websockets is 8080. This only
    affects the port exposed to the host; within the container the VerneMQ broker
    is listening on port 8080.
  * `stats_port`: The port on which the statistics web service, including the status UI, is exposed
    to the host. The default port VerneMQ uses for the stats interface is 8888. This
    only affects the host port; within the container the VerneMQ broker is listening on
    port 8888. Set your browser to `localhost:<stats_port>/status` to view the UI.
  * `log_level`: The log level at which the VerneMQ broker will output messages. Defaults to `:info`.
  * `users`: A map of usernames and passwords to create records for file-based authentication. These
    are transparently converted to environment variables that are injected into the container
    in the form of `DOCKER_VERNEMQ_USER_<username>='password'`. Defaults to an empty map.
  * `version`: The version of the VerneMQ image to run. Defaults to `latest`.
  """
  @impl Divo.Stack
  @spec gen_stack([tuple()]) :: map()
  def gen_stack(envars) do
    image_tag = Keyword.get(envars, :version, "latest")

    anon =
      envars
      |> Keyword.get(:allow_anonymous, :on)
      |> validate_allow_anon()

    level =
      envars
      |> Keyword.get(:log_level, :info)
      |> validate_log_level()

    # Host-side port bindings; the container-side ports are fixed VerneMQ defaults.
    port_bindings = [
      "#{Keyword.get(envars, :mqtt_port, 1883)}:1883",
      "#{Keyword.get(envars, :ssl_mqtt_port, 8883)}:8883",
      "#{Keyword.get(envars, :ws_mqtt_port, 8080)}:8080",
      "#{Keyword.get(envars, :stats_port, 8888)}:8888"
    ]

    environment =
      [
        "DOCKER_VERNEMQ_ALLOW_ANONYMOUS=#{anon}",
        "DOCKER_VERNEMQ_LOG__CONSOLE__LEVEL=#{level}"
      ] ++ inject_auth_users(Keyword.get(envars, :users, %{}))

    %{
      vernemq: %{
        image: "jeffgrunewald/vernemq:#{image_tag}",
        ports: port_bindings,
        environment: environment
      }
    }
  end

  # No users configured: nothing to inject.
  defp inject_auth_users(users) when map_size(users) == 0, do: []

  # Each {user, password} pair becomes one DOCKER_VERNEMQ_USER_* variable.
  defp inject_auth_users(users) do
    for {user, passwd} <- users, do: "DOCKER_VERNEMQ_USER_#{to_string(user)}='#{passwd}'"
  end

  defp validate_log_level(level) when level in @valid_log_levels, do: level
  defp validate_log_level(_), do: "info"

  defp validate_allow_anon(allow) when allow in [:on, :off, "on", "off"], do: allow
  defp validate_allow_anon(_), do: "on"
end
|
lib/divo_vernemq.ex
| 0.690142
| 0.610366
|
divo_vernemq.ex
|
starcoder
|
defmodule Mix.Tasks.EqcCI do
  use Mix.Task

  @shortdoc "Create a project's properties for QuickCheck-CI"

  @recursive true

  @moduledoc """
  Create the properties for a project.

  This task mimics `mix test` but compiles the test files to beam instead of in
  memory and does not execute the tests or properties.

  Switches are ignored for the moment.
  """

  # Command line switches accepted (kept compatible with `mix test`).
  @switches [force: :boolean, color: :boolean, cover: :boolean,
             max_cases: :integer, include: :keep,
             exclude: :keep, only: :keep, compile: :boolean,
             timeout: :integer]

  @spec run(OptionParser.argv) :: :ok
  def run(args) do
    Mix.shell.info "Building properties for QuickCheck-CI"
    {opts, files, _} = OptionParser.parse(args, switches: @switches)

    # Compiling test files in any env other than :test would pollute that
    # env's build, so require MIX_ENV to be set explicitly.
    unless System.get_env("MIX_ENV") || Mix.env == :test do
      Mix.raise "mix eqcci is running on environment #{Mix.env}. Please set MIX_ENV=test explicitly"
    end

    Mix.Task.run "loadpaths", args

    # `--no-compile` (compile: false) skips compiling the project itself.
    if Keyword.get(opts, :compile, true) do
      Mix.Task.run "compile", args
    end

    project = Mix.Project.config

    # Start the app and configure exunit with command line options
    # before requiring test_helper.exs so that the configuration is
    # available in test_helper.exs. Then configure exunit again so
    # that command line options override test_helper.exs
    Mix.shell.print_app
    Mix.Task.run "app.start", args

    # Ensure ex_unit is loaded.
    case Application.load(:ex_unit) do
      :ok -> :ok
      {:error, {:already_loaded, :ex_unit}} -> :ok
    end

    opts = ex_unit_opts(opts)
    ExUnit.configure(opts)

    test_paths = project[:test_paths] || ["test"]
    Enum.each(test_paths, &require_test_helper(&1))
    # Re-apply the CLI options so they win over anything test_helper.exs set.
    ExUnit.configure(opts)

    # Finally parse, require and load the files
    test_files = parse_files(files, test_paths)
    test_pattern = project[:test_pattern] || "*_test.exs"
    test_files = Mix.Utils.extract_files(test_files, test_pattern)
    # Compile the test files to .beam in the project's compile path instead of
    # loading them into memory -- this is the key difference from `mix test`.
    _ = Kernel.ParallelCompiler.files_to_path(test_files, Mix.Project.compile_path(project))
  end

  @doc false
  # Translates the parsed CLI options into ExUnit configuration.
  def ex_unit_opts(opts) do
    opts = opts
           |> filter_opts(:include)
           |> filter_opts(:exclude)
           |> filter_only_opts()

    default_opts(opts) ++
      Keyword.take(opts, [:trace, :max_cases, :include, :exclude, :seed, :timeout])
  end

  defp default_opts(opts) do
    # Set autorun to false because Mix
    # automatically runs the test suite for us.
    case Keyword.get(opts, :color) do
      nil -> [autorun: false]
      enabled? -> [autorun: false, colors: [enabled: enabled?]]
    end
  end

  # No file arguments: compile everything under the configured test paths.
  defp parse_files([], test_paths) do
    test_paths
  end

  defp parse_files([single_file], _test_paths) do
    # Check if the single file path matches test/path/to_test.exs:123, if it does
    # apply `--only line:123` and trim the trailing :123 part.
    {single_file, opts} = ExUnit.Filters.parse_path(single_file)
    ExUnit.configure(opts)
    [single_file]
  end

  defp parse_files(files, _test_paths) do
    files
  end

  # Collects repeated `--include`/`--exclude`/`--only` values into ExUnit
  # filters; returns nil when the switch was not given.
  defp parse_filters(opts, key) do
    if Keyword.has_key?(opts, key) do
      ExUnit.Filters.parse(Keyword.get_values(opts, key))
    end
  end

  defp filter_opts(opts, key) do
    if filters = parse_filters(opts, key) do
      Keyword.put(opts, key, filters)
    else
      opts
    end
  end

  # `--only X` is sugar for including X and excluding every other test.
  defp filter_only_opts(opts) do
    if filters = parse_filters(opts, :only) do
      opts
      |> Keyword.put_new(:include, [])
      |> Keyword.put_new(:exclude, [])
      |> Keyword.update!(:include, &(filters ++ &1))
      |> Keyword.update!(:exclude, &[:test|&1])
    else
      opts
    end
  end

  defp require_test_helper(dir) do
    file = Path.join(dir, "test_helper.exs")
    if File.exists?(file) do
      Code.require_file file
    else
      Mix.raise "Cannot run tests because test helper file #{inspect file} does not exist"
    end
  end
end
|
lib/mix/tasks/mix_eqcci.ex
| 0.827166
| 0.448547
|
mix_eqcci.ex
|
starcoder
|
defmodule ExPesel do
  @moduledoc """
  Library for PESEL number.

  * `ExPesel.valid?/1` - you can check if PESEL number is valid
  * `ExPesel.valid_with?/2` - you can check if PESEL number is valid with additional check about sex or birthdate
  * `ExPesel.birthdate/1` - you can obtain date of birth from PESEL number
  * `ExPesel.sex/1` - you can obtain sex from the PESEL number
  * `ExPesel.zombie?/1` - you can obtain is PESEL number is a zombie (more than 123 years before today)

  More about PESEL number:

  * [Wikipedia EN](https://en.wikipedia.org/wiki/PESEL)
  * [Wikipedia PL](https://pl.wikipedia.org/wiki/PESEL)
  * [obywatel.gov.pl](https://obywatel.gov.pl/dokumenty-i-dane-osobowe/czym-jest-numer-pesel)
  """

  alias ExPesel.Pesel

  @doc """
  Is PESEL number valid?

  PESEL is valid when:

  * length of it is 11
  * last digit is a proper checksum for the first ten digits
  * date of birth from the first 6 digits is proper
  * date of birth is until today.

  For example:

      iex> ExPesel.valid?("44051401458")
      true
      iex> ExPesel.valid?("90720312611")
      false
      iex> ExPesel.valid?("90520308014")
      false
      iex> ExPesel.valid?("44051401459")
      false
      iex> ExPesel.valid?("00000000000")
      false
      iex> ExPesel.valid?("234555")
      false
      iex> ExPesel.valid?("23455532312131123123")
      false
      iex> ExPesel.valid?("some really bad data")
      false
  """
  @spec valid?(String.t()) :: boolean()
  defdelegate valid?(pesel), to: Pesel

  @doc """
  Is PESEL valid and additionally:

  * belongs to male?
  * or belongs to female?
  * or date of birth extracted from PESEL is equal birthdate?

  For example:

      iex> ExPesel.valid_with?("44051401458", :male)
      true
      iex> ExPesel.valid_with?("88122302080", :male)
      false
      iex> ExPesel.valid_with?("44051401458", :female)
      false
      iex> ExPesel.valid_with?("88122302080", :female)
      true
      iex> ExPesel.valid_with?("44051401458", {1944, 5, 14})
      true
      iex> ExPesel.valid_with?("44051401458", {1944, 6, 13})
      false
      iex> ExPesel.valid_with?("some really bad data", {1944, 6, 13})
      false
  """
  def valid_with?(pesel, sex_or_birthday)

  # Implemented with explicit def clauses: defdelegate cannot pattern-match
  # on specific atom arguments, so the :male/:female cases are spelled out.
  @spec valid_with?(String.t(), :male) :: boolean()
  def valid_with?(pesel, :male), do: Pesel.valid_with?(pesel, :male)

  @spec valid_with?(String.t(), :female) :: boolean()
  def valid_with?(pesel, :female), do: Pesel.valid_with?(pesel, :female)

  # Any other second argument is treated as a birthdate tuple.
  @spec valid_with?(String.t(), {1800..2299, 1..12, 1..31}) :: boolean()
  defdelegate valid_with?(pesel, birthdate), to: Pesel

  @doc """
  Returns date of birth extracted from PESEL.

  Date of birth is encoded by first 6 digits as: `yymmdd`
  Century is encoded in `mm` part of it:

  * 1800 - 1899 there is addition of 80 to `mm`
  * 1900 - 1999 there is addition of 0 to `mm`
  * 2000 - 2099 there is addition of 20 to `mm`
  * 2100 - 2199 there is addition of 40 to `mm`
  * 2200 - 2299 there is addition of 50 to `mm`

  For example:

      iex> ExPesel.birthdate("01920300359")
      {1801, 12, 3}
      iex> ExPesel.birthdate("44051401459")
      {1944, 5, 14}
      iex> ExPesel.birthdate("10320305853")
      {2010, 12, 3}
      iex> ExPesel.birthdate("90520308014")
      {2190, 12, 3}
      iex> ExPesel.birthdate("90720312611")
      {2290, 12, 3}
      iex> ExPesel.birthdate("some really bad input")
      :unknown
  """
  @spec birthdate(String.t()) :: {1800..2299, 1..12, 1..31} | :unknown
  defdelegate birthdate(pesel), to: Pesel

  @doc """
  Returns sex extracted from PESEL.

  Sex is encoded by digit at 10 position:

  * even number for female
  * odd number for male

  For example:

      iex> ExPesel.sex("44051401459")
      :male
      iex> ExPesel.sex("88122302080")
      :female
      iex> ExPesel.sex("some bad input")
      :unknown
  """
  @spec sex(String.t()) :: :male | :female | :unknown
  defdelegate sex(pesel), to: Pesel

  @doc """
  Is PESEL number a zombie?

  A PESEL is considered a zombie when its date of birth is more than 123
  years before today.

  For example:

      iex> ExPesel.zombie?("01920300359")
      true
      iex> ExPesel.zombie?("88122302080")
      false
      iex> ExPesel.zombie?("I'm a zombie man!")
      false
  """
  @spec zombie?(String.t()) :: boolean()
  defdelegate zombie?(pesel), to: Pesel
end
|
lib/ex_pesel.ex
| 0.849129
| 0.646035
|
ex_pesel.ex
|
starcoder
|
defmodule State.Route do
  @moduledoc """
  Stores and indexes `Model.Route.t` from `routes.txt`.
  """

  use State.Server,
    indices: [:id, :type, :line_id],
    parser: Parse.Routes,
    recordable: Model.Route

  alias Events.Gather

  @impl GenServer
  def init(_) do
    # Run the State.Server init first (injected by `use State.Server`).
    _ = super(nil)
    # Routes can only be built once BOTH files have been fetched, so gather
    # the two fetch events before processing (see do_gather/1).
    subscriptions = [{:fetch, "directions.txt"}, {:fetch, "routes.txt"}]
    for sub <- subscriptions, do: Events.subscribe(sub)
    state = %{data: Gather.new(subscriptions, &do_gather/1), last_updated: nil}
    {:ok, state}
  end

  @impl Events.Server
  # Feed each fetch event into the gatherer; do_gather/1 fires once all
  # subscribed events have arrived. Hibernate between infrequent events.
  def handle_event(event, value, _, state) do
    state = %{state | data: Gather.update(state.data, event, value)}
    {:noreply, state, :hibernate}
  end

  # Called by Gather with the blobs of both files once available; parses them
  # and installs the resulting routes as the new server state.
  def do_gather(%{
        {:fetch, "directions.txt"} => directions_blob,
        {:fetch, "routes.txt"} => routes_blob
      }) do
    dmap = get_direction_map(directions_blob)
    routes = get_routes(routes_blob, dmap)
    handle_new_state(routes)
  end

  # Parses directions.txt and groups the entries by {route_id, direction_id}.
  def get_direction_map(blob),
    do:
      blob
      |> Parse.Directions.parse()
      |> Enum.group_by(&{&1.route_id, &1.direction_id})

  # Parses routes.txt and enriches each route with its direction names and
  # destinations looked up from the direction map.
  def get_routes(routes_blob, direction_map),
    do:
      routes_blob
      |> Parse.Routes.parse()
      |> Enum.map(fn route ->
        %{
          route
          | direction_names: get_direction_details(direction_map, route.id, :direction),
            direction_destinations:
              get_direction_details(direction_map, route.id, :direction_destination)
        }
      end)

  # Returns a two-element list (direction ids "0" and "1", in that order) of
  # the given field from the first matching direction entry; empty strings
  # and missing entries become nil.
  defp get_direction_details(map, route_id, field) do
    for direction_id <- ["0", "1"] do
      case Map.get(map, {route_id, direction_id}, nil) do
        [d | _tail] ->
          case Map.fetch!(d, field) do
            "" -> nil
            value -> value
          end

        _ ->
          nil
      end
    end
  end

  # A route is hidden when it is explicitly unlisted, or when it belongs to
  # an agency other than "1" (NOTE(review): the meaning of agency "1" is not
  # visible in this file -- confirm which agency that id refers to).
  def hidden?(%{listed_route: false}), do: true
  def hidden?(%{agency_id: agency_id}) when agency_id != "1", do: true
  def hidden?(_), do: false

  @spec by_id(Model.Route.id()) :: Model.Route.t() | nil
  def by_id(id) do
    case super(id) do
      [] -> nil
      # NOTE(review): the variable is named `stop` but holds a route --
      # likely copied from a sibling State.Stop module.
      [stop] -> stop
    end
  end

  # Fetches multiple routes and returns them ordered by their sort_order.
  def by_ids(ids) do
    ids
    |> super()
    |> Enum.sort_by(& &1.sort_order)
  end

  # Same as State.Server.match/3 but defaults to ascending sort_order.
  def match(matcher, index, opts \\ []) do
    opts = Keyword.put_new(opts, :order_by, {:sort_order, :asc})
    super(matcher, index, opts)
  end
end
|
apps/state/lib/state/route.ex
| 0.812347
| 0.402421
|
route.ex
|
starcoder
|
defmodule Nx.Type do
@moduledoc """
Conveniences for working with types.
A type is a two-element tuple with the name and the size.
The first element must be one of followed by the respective
sizes:
* `:s` - signed integer (8, 16, 32, 64)
* `:u` - unsigned integer (8, 16, 32, 64)
* `:f` - float (32, 64)
* `:bf` - a brain floating point (16)
Note: there is a special type used by the `defn` compiler
which is `{:tuple, size}`, that represents a tuple. Said types
do not appear on user code, only on compiler implementations,
and therefore are not handled by the functions in this module.
"""
@type t ::
{:s, 8}
| {:s, 16}
| {:s, 32}
| {:s, 64}
| {:u, 8}
| {:u, 16}
| {:u, 32}
| {:u, 64}
| {:f, 32}
| {:f, 64}
| {:bf, 16}
| {:tuple, non_neg_integer}
@doc """
Returns the minimum possible value for the given type.
"""
def min_value_binary(type)
def min_value_binary({:s, 8}), do: <<-128::8-signed-native>>
def min_value_binary({:s, 16}), do: <<-32678::16-signed-native>>
def min_value_binary({:s, 32}), do: <<-2_147_483_648::32-signed-native>>
def min_value_binary({:s, 64}), do: <<-9_223_372_036_854_775_808::64-signed-native>>
def min_value_binary({:u, size}), do: <<0::size(size)-native>>
def min_value_binary({:bf, 16}), do: <<0xFF80::16-native>>
def min_value_binary({:f, 32}), do: <<0xFF7FFFFF::32-native>>
def min_value_binary({:f, 64}), do: <<0xFFEFFFFFFFFFFFFF::64-native>>
@doc """
Returns the minimum possible value for the given type.
"""
def max_value_binary(type)
def max_value_binary({:s, 8}), do: <<127::8-signed-native>>
def max_value_binary({:s, 16}), do: <<32677::16-signed-native>>
def max_value_binary({:s, 32}), do: <<2_147_483_647::32-signed-native>>
def max_value_binary({:s, 64}), do: <<9_223_372_036_854_775_807::64-signed-native>>
def max_value_binary({:u, 8}), do: <<255::8-native>>
def max_value_binary({:u, 16}), do: <<65535::16-native>>
def max_value_binary({:u, 32}), do: <<4_294_967_295::32-native>>
def max_value_binary({:u, 64}), do: <<18_446_744_073_709_551_615::64-native>>
def max_value_binary({:bf, 16}), do: <<0x7F80::16-native>>
def max_value_binary({:f, 32}), do: <<0x7F7FFFFF::32-native>>
def max_value_binary({:f, 64}), do: <<0x7FEFFFFFFFFFFFFF::64-native>>
@doc """
Infers the type of the given value.
The value may be a number, boolean, or an arbitrary list with
any of the above. Integers are by default signed and of size 64.
Floats have size of 64. Booleans are unsigned integers of size 1
(also known as predicates).
In case mixed types are given, the one with highest space
requirements is used (i.e. float > brain floating > integer > boolean).
## Examples
iex> Nx.Type.infer([1, 2, 3])
{:s, 64}
iex> Nx.Type.infer([[1, 2], [3, 4]])
{:s, 64}
iex> Nx.Type.infer([1.0, 2.0, 3.0])
{:f, 32}
iex> Nx.Type.infer([1, 2.0])
{:f, 32}
iex> Nx.Type.infer([])
{:f, 32}
iex> Nx.Type.infer("string")
** (ArgumentError) cannot infer the numerical type of "string"
"""
def infer(value) do
case infer(value, -1) do
-1 -> {:f, 32}
0 -> {:s, 64}
1 -> {:f, 32}
end
end
defp infer(arg, inferred) when is_list(arg), do: Enum.reduce(arg, inferred, &infer/2)
defp infer(arg, inferred) when is_integer(arg), do: max(inferred, 0)
defp infer(arg, inferred) when is_float(arg), do: max(inferred, 1)
defp infer(other, _inferred),
do: raise(ArgumentError, "cannot infer the numerical type of #{inspect(other)}")
@doc """
Validates the given type tuple.
It returns the type itself or raises.
## Examples
iex> Nx.Type.normalize!({:u, 8})
{:u, 8}
iex> Nx.Type.normalize!({:u, 0})
** (ArgumentError) invalid numerical type: {:u, 0} (see Nx.Type docs for all supported types)
iex> Nx.Type.normalize!({:k, 8})
** (ArgumentError) invalid numerical type: {:k, 8} (see Nx.Type docs for all supported types)
"""
def normalize!(type) do
case validate(type) do
:error ->
raise ArgumentError,
"invalid numerical type: #{inspect(type)} (see Nx.Type docs for all supported types)"
type ->
type
end
end
defp validate({:s, size} = type) when size in [8, 16, 32, 64], do: type
defp validate({:u, size} = type) when size in [8, 16, 32, 64], do: type
defp validate({:f, size} = type) when size in [32, 64], do: type
defp validate({:bf, size} = type) when size in [16], do: type
defp validate(_type), do: :error
@doc """
Converts the given type to a floating point representation
with the minimum size necessary.
Note both float and complex are floating point representations.
## Examples
iex> Nx.Type.to_floating({:s, 8})
{:f, 32}
iex> Nx.Type.to_floating({:s, 32})
{:f, 32}
iex> Nx.Type.to_floating({:bf, 16})
{:bf, 16}
iex> Nx.Type.to_floating({:f, 32})
{:f, 32}
"""
def to_floating({:bf, size}), do: {:bf, size}
def to_floating({:f, size}), do: {:f, size}
def to_floating(type), do: merge(type, {:f, 32})
@doc """
Converts the given type to an aggregation precision.
## Examples
iex> Nx.Type.to_aggregate({:s, 8})
{:s, 64}
iex> Nx.Type.to_aggregate({:u, 32})
{:u, 64}
iex> Nx.Type.to_aggregate({:bf, 16})
{:bf, 16}
iex> Nx.Type.to_aggregate({:f, 32})
{:f, 32}
"""
def to_aggregate({:u, _size}), do: {:u, 64}
def to_aggregate({:s, _size}), do: {:s, 64}
def to_aggregate(type), do: type
@doc """
Casts the given scalar to type.
It does not handle overflow/underflow,
returning the scalar as is, but cast.
## Examples
iex> Nx.Type.cast_scalar!({:u, 8}, 10)
10
iex> Nx.Type.cast_scalar!({:s, 8}, 10)
10
iex> Nx.Type.cast_scalar!({:s, 8}, -10)
-10
iex> Nx.Type.cast_scalar!({:f, 32}, 10)
10.0
iex> Nx.Type.cast_scalar!({:bf, 16}, -10)
-10.0
iex> Nx.Type.cast_scalar!({:f, 32}, 10.0)
10.0
iex> Nx.Type.cast_scalar!({:bf, 16}, -10.0)
-10.0
iex> Nx.Type.cast_scalar!({:u, 8}, -10)
** (ArgumentError) cannot cast scalar -10 to {:u, 8}
iex> Nx.Type.cast_scalar!({:s, 8}, 10.0)
** (ArgumentError) cannot cast scalar 10.0 to {:s, 8}
"""
def cast_scalar!({type, _}, int) when type in [:u] and is_integer(int) and int >= 0, do: int
def cast_scalar!({type, _}, int) when type in [:s] and is_integer(int), do: int
def cast_scalar!({type, _}, int) when type in [:f, :bf] and is_integer(int), do: int * 1.0
def cast_scalar!({type, _}, float) when type in [:f, :bf] and is_float(float), do: float
def cast_scalar!(type, other) do
raise ArgumentError, "cannot cast scalar #{inspect(other)} to #{inspect(type)}"
end
@doc """
Merges the given types finding a suitable representation for both.
Types have the following precedence:
f > bf > s > u
If the types are the same, they are merged to the highest size.
If they are different, the one with the highest precedence wins,
as long as the size of the `max(big, small * 2))` fits under 64
bits. Otherwise it casts to f64.
## Examples
iex> Nx.Type.merge({:s, 8}, {:s, 8})
{:s, 8}
iex> Nx.Type.merge({:s, 8}, {:s, 64})
{:s, 64}
iex> Nx.Type.merge({:s, 8}, {:u, 8})
{:s, 16}
iex> Nx.Type.merge({:s, 16}, {:u, 8})
{:s, 16}
iex> Nx.Type.merge({:s, 8}, {:u, 16})
{:s, 32}
iex> Nx.Type.merge({:s, 32}, {:u, 8})
{:s, 32}
iex> Nx.Type.merge({:s, 8}, {:u, 32})
{:s, 64}
iex> Nx.Type.merge({:s, 64}, {:u, 8})
{:s, 64}
iex> Nx.Type.merge({:s, 8}, {:u, 64})
{:s, 64}
iex> Nx.Type.merge({:u, 8}, {:f, 32})
{:f, 32}
iex> Nx.Type.merge({:u, 64}, {:f, 32})
{:f, 32}
iex> Nx.Type.merge({:s, 8}, {:f, 32})
{:f, 32}
iex> Nx.Type.merge({:s, 64}, {:f, 32})
{:f, 32}
iex> Nx.Type.merge({:u, 8}, {:f, 64})
{:f, 64}
iex> Nx.Type.merge({:u, 64}, {:f, 64})
{:f, 64}
iex> Nx.Type.merge({:s, 8}, {:f, 64})
{:f, 64}
iex> Nx.Type.merge({:s, 64}, {:f, 64})
{:f, 64}
iex> Nx.Type.merge({:u, 8}, {:bf, 16})
{:bf, 16}
iex> Nx.Type.merge({:u, 64}, {:bf, 16})
{:bf, 16}
iex> Nx.Type.merge({:s, 8}, {:bf, 16})
{:bf, 16}
iex> Nx.Type.merge({:s, 64}, {:bf, 16})
{:bf, 16}
iex> Nx.Type.merge({:f, 32}, {:bf, 16})
{:f, 32}
iex> Nx.Type.merge({:f, 64}, {:bf, 16})
{:f, 64}
"""
def merge({type, left_size}, {type, right_size}) do
{type, max(left_size, right_size)}
end
def merge(left, right) do
case sort(left, right) do
{{:u, size1}, {:s, size2}} -> {:s, max(min(size1 * 2, 64), size2)}
{_, type2} -> type2
end
end
defp type_to_int(:f), do: 3
defp type_to_int(:bf), do: 2
defp type_to_int(:s), do: 1
defp type_to_int(:u), do: 0
defp sort({left_type, _} = left, {right_type, _} = right) do
if type_to_int(left_type) < type_to_int(right_type) do
{left, right}
else
{right, left}
end
end
@doc """
Merges the given types with the type of a scalar.
We attempt to keep the original type and its size as best
as possible.
## Examples
iex> Nx.Type.merge_scalar({:u, 8}, 0)
{:u, 8}
iex> Nx.Type.merge_scalar({:u, 8}, 255)
{:u, 8}
iex> Nx.Type.merge_scalar({:u, 8}, 256)
{:u, 16}
iex> Nx.Type.merge_scalar({:u, 8}, -1)
{:s, 16}
iex> Nx.Type.merge_scalar({:u, 8}, -32767)
{:s, 16}
iex> Nx.Type.merge_scalar({:u, 8}, -32768)
{:s, 16}
iex> Nx.Type.merge_scalar({:u, 8}, -32769)
{:s, 32}
iex> Nx.Type.merge_scalar({:s, 8}, 0)
{:s, 8}
iex> Nx.Type.merge_scalar({:s, 8}, 127)
{:s, 8}
iex> Nx.Type.merge_scalar({:s, 8}, -128)
{:s, 8}
iex> Nx.Type.merge_scalar({:s, 8}, 128)
{:s, 16}
iex> Nx.Type.merge_scalar({:s, 8}, -129)
{:s, 16}
iex> Nx.Type.merge_scalar({:s, 8}, 1.0)
{:f, 32}
iex> Nx.Type.merge_scalar({:f, 32}, 1)
{:f, 32}
iex> Nx.Type.merge_scalar({:f, 32}, 1.0)
{:f, 32}
iex> Nx.Type.merge_scalar({:f, 64}, 1.0)
{:f, 64}
"""
def merge_scalar({:u, size}, integer) when is_integer(integer) and integer >= 0 do
{:u, max(unsigned_size(integer), size)}
end
def merge_scalar({:u, size}, integer) when is_integer(integer) do
merge_scalar({:s, size * 2}, integer)
end
def merge_scalar({:s, size}, integer) when is_integer(integer) do
{:s, max(signed_size(integer), size)}
end
def merge_scalar({:bf, size}, number) when is_number(number) do
{:bf, size}
end
def merge_scalar({:f, size}, number) when is_number(number) do
{:f, size}
end
def merge_scalar(_, number) when is_number(number) do
{:f, 32}
end
@doc """
Returns true if the type is an integer in Elixir.
## Examples
iex> Nx.Type.integer?({:s, 8})
true
iex> Nx.Type.integer?({:u, 64})
true
iex> Nx.Type.integer?({:f, 64})
false
"""
def integer?({:u, _}), do: true
def integer?({:s, _}), do: true
def integer?({_, _}), do: false
@doc """
Returns true if the type is a float in Elixir.
## Examples
iex> Nx.Type.float?({:f, 32})
true
iex> Nx.Type.float?({:bf, 16})
true
iex> Nx.Type.float?({:u, 64})
false
"""
def float?({:f, _}), do: true
def float?({:bf, _}), do: true
def float?({_, _}), do: false
@doc """
Returns a string representation of the given type.
## Examples
iex> Nx.Type.to_string({:s, 8})
"s8"
iex> Nx.Type.to_string({:s, 16})
"s16"
iex> Nx.Type.to_string({:s, 32})
"s32"
iex> Nx.Type.to_string({:s, 64})
"s64"
iex> Nx.Type.to_string({:u, 8})
"u8"
iex> Nx.Type.to_string({:u, 16})
"u16"
iex> Nx.Type.to_string({:u, 32})
"u32"
iex> Nx.Type.to_string({:u, 64})
"u64"
iex> Nx.Type.to_string({:f, 16})
"f16"
iex> Nx.Type.to_string({:bf, 16})
"bf16"
iex> Nx.Type.to_string({:f, 32})
"f32"
iex> Nx.Type.to_string({:f, 64})
"f64"
"""
def to_string({type, size}), do: Atom.to_string(type) <> Integer.to_string(size)
defp unsigned_size(x) when x <= 1, do: 1
defp unsigned_size(x) when x <= 255, do: 8
defp unsigned_size(x) when x <= 65535, do: 16
defp unsigned_size(x) when x <= 4_294_967_295, do: 32
defp unsigned_size(_), do: 64
defp signed_size(x) when x < 0, do: signed_size(-x - 1)
defp signed_size(x) when x <= 1, do: 1
defp signed_size(x) when x <= 127, do: 8
defp signed_size(x) when x <= 32767, do: 16
defp signed_size(x) when x <= 2_147_483_647, do: 32
defp signed_size(_), do: 64
end
|
lib/nx/type.ex
| 0.93488
| 0.565239
|
type.ex
|
starcoder
|
defmodule SimplePay.Wallet.Reader do
  @moduledoc """
  Event-sourced read model for a single wallet.

  On start it loads the latest `Wallet` snapshot, subscribes to the
  wallet's event stream just past the last processed event, and folds
  every pushed event into both the database snapshot and its own state.
  """
  use GenServer

  alias SimplePay.{Wallet, Repo, Utilities}
  alias Events.{WalletCreated, MoneyDeposited, MoneyWithdrawn, WithdrawDeclined}

  @default_state %{ event_store: SimplePay.EventStore, stream: nil, last_event: nil, balance: nil,
                    subscription_ref: nil, id: nil }

  def start_link(wallet) do
    GenServer.start_link(__MODULE__, wallet, name: via_tuple(wallet))
  end

  def init(wallet_id) do
    stream = "wallet-" <> to_string(wallet_id)

    # Latest snapshot; raises if the wallet row does not exist.
    # NOTE(review): `last_event + 1` below assumes `last_event_processed`
    # is never nil for a persisted wallet — confirm the schema default.
    %Wallet{last_event_processed: last_event, balance: balance} = wallet = Repo.get!(Wallet, wallet_id)

    state = %{@default_state | stream: stream, last_event: last_event, balance: balance, id: wallet.id}

    # Subscribe from a cast so init/1 returns quickly.
    GenServer.cast(self(), :subscribe)
    {:ok, state}
  end

  def handle_cast(:subscribe, state) do
    # Read only unprocessed events and stay subscribed.
    {:ok, subscription} =
      Extreme.read_and_stay_subscribed(state.event_store, self(), state.stream, state.last_event + 1)

    # Monitor the subscription so we can resubscribe in the case of a crash.
    ref = Process.monitor(subscription)
    {:noreply, %{state | subscription_ref: ref}}
  end

  # Deals with the events pushed by our EventStore & updates state accordingly.
  # (A regular comment rather than @doc: documenting callbacks triggers a
  # compiler warning.)
  def handle_info({:on_event, push}, state) do
    # NOTE(review): event payloads are decoded with :erlang.binary_to_term/1,
    # which is only safe as long as events come exclusively from our own store.
    case push.event.event_type do
      "Elixir.Events.MoneyDeposited" ->
        data = :erlang.binary_to_term(push.event.data)

        wallet =
          persist!(state, %{balance: state.balance + data.amount, last_event_processed: state.last_event + 1})

        {:noreply, %{state | balance: wallet.balance, last_event: wallet.last_event_processed}}

      "Elixir.Events.MoneyWithdrawn" ->
        data = :erlang.binary_to_term(push.event.data)

        wallet =
          persist!(state, %{balance: state.balance - data.amount, last_event_processed: state.last_event + 1})

        {:noreply, %{state | balance: wallet.balance, last_event: wallet.last_event_processed}}

      "Elixir.Events.WithdrawDeclined" ->
        IO.puts("Withdraw declined - Balance: $#{state.balance / 100}")
        wallet = persist!(state, %{last_event_processed: state.last_event + 1})
        {:noreply, %{state | last_event: wallet.last_event_processed}}

      _ ->
        # Unknown event types advance the cursor but are otherwise ignored.
        wallet = persist!(state, %{last_event_processed: state.last_event + 1})
        {:noreply, %{state | last_event: wallet.last_event_processed}}
    end
  end

  # Applies `update_params` to the wallet snapshot row and returns the
  # updated `Wallet`. Raises if the row is missing or the changeset is invalid.
  defp persist!(state, update_params) do
    Wallet
    |> Repo.get!(state.id)
    |> Wallet.update_changeset(update_params)
    |> Repo.update!()
  end

  # Registers/locates the process in gproc under the wallet id.
  defp via_tuple(wallet) do
    {:via, :gproc, {:n, :l, {:wallet, wallet}}}
  end
end
|
programming/elixir/simple_pay/lib/simple_pay/wallet/reader.ex
| 0.569134
| 0.410963
|
reader.ex
|
starcoder
|
defmodule Logfmt.Decoder do
  @moduledoc """
  Decodes a logfmt-style log line into a `map`.

  ## Examples

      iex> Logfmt.decode "foo=bar"
      %{"foo" => "bar"}

      iex> Logfmt.decode "foo=true"
      %{"foo" => true}
  """

  import String, only: [next_grapheme: 1]

  @doc """
  See [`Logfmt.decode`](/logfmt/Logfmt.html#decode/1).
  """
  @spec decode(String.t()) :: map
  def decode(string) do
    # Grapheme-at-a-time state machine. States:
    #   :garbage - between key=value pairs
    #   :key     - accumulating a key
    #   :equals  - just consumed the `=` after a key
    #   :ivalue  - accumulating an unquoted value
    #   :qvalue  - accumulating a double-quoted value
    parse_char(next_grapheme(string), :garbage, Map.new())
  end

  # :garbage — a printable grapheme that is neither `"` nor `=` starts a key.
  @spec parse_char({String.t(), String.t()}, :garbage, map) :: map
  defp parse_char({char, rest}, :garbage, map)
       when char > " " and char != "\"" and char != "=" do
    parse_char(next_grapheme(rest), :key, char, map)
  end

  # :garbage — anything else (whitespace, `"`, `=`) is skipped.
  @spec parse_char({String.t(), String.t()}, :garbage, map) :: map
  defp parse_char({_char, rest}, :garbage, map) do
    parse_char(next_grapheme(rest), :garbage, map)
  end

  # :garbage — end of input: return the accumulated map.
  @spec parse_char(nil, :garbage, map) :: map
  defp parse_char(nil, :garbage, map) do
    map
  end

  # :key — printable, non-special graphemes extend the current key.
  @spec parse_char({String.t(), String.t()}, :key, String.t(), map) :: map
  defp parse_char({char, rest}, :key, key, map)
       when char > " " and char != "\"" and char != "=" do
    parse_char(next_grapheme(rest), :key, key <> char, map)
  end

  # :key — `=` terminates the key and moves to the :equals state.
  @spec parse_char({String.t(), String.t()}, :key, String.t(), map) :: map
  defp parse_char({"=", rest}, :key, key, map) do
    parse_char(next_grapheme(rest), :equals, key, map)
  end

  # :key — any other grapheme: a bare key (no value) is stored as `true`.
  @spec parse_char({String.t(), String.t()}, :key, String.t(), map) :: map
  defp parse_char({_char, rest}, :key, key, map) do
    parse_char(next_grapheme(rest), :garbage, map |> put_value(key, true))
  end

  # :key — end of input: store the bare key as `true`.
  @spec parse_char(nil, :key, String.t(), map) :: map
  defp parse_char(nil, :key, key, map) do
    map |> put_value(key, true)
  end

  # :equals — a printable grapheme starts an unquoted value.
  @spec parse_char({String.t(), String.t()}, :equals, String.t(), map) :: map
  defp parse_char({char, rest}, :equals, key, map)
       when char > " " and char != "\"" and char != "=" do
    parse_char(next_grapheme(rest), :ivalue, key, char, map)
  end

  # :equals — `"` opens a quoted value; the extra boolean tracks escapes.
  @spec parse_char({String.t(), String.t()}, :equals, String.t(), map) :: map
  defp parse_char({"\"", rest}, :equals, key, map) do
    parse_char(next_grapheme(rest), :qvalue, false, key, "", map)
  end

  # :equals — anything else: treat `key=` as a bare key (`true`).
  @spec parse_char({String.t(), String.t()}, :equals, String.t(), map) :: map
  defp parse_char({_char, rest}, :equals, key, map) do
    parse_char(next_grapheme(rest), :garbage, map |> put_value(key, true))
  end

  # :equals — end of input right after `key=`: store the key as `true`.
  @spec parse_char(nil, :equals, String.t(), map) :: map
  defp parse_char(nil, :equals, key, map) do
    map |> put_value(key, true)
  end

  # :ivalue — whitespace, `"` or `=` terminates the unquoted value.
  @spec parse_char({String.t(), String.t()}, :ivalue, String.t(), String.t(), map) :: map
  defp parse_char({char, rest}, :ivalue, key, value, map)
       when char <= " " or char == "\"" or char == "=" do
    parse_char(next_grapheme(rest), :garbage, map |> put_value(key, value))
  end

  # :ivalue — any other grapheme extends the value.
  @spec parse_char({String.t(), String.t()}, :ivalue, String.t(), String.t(), map) :: map
  defp parse_char({char, rest}, :ivalue, key, value, map) do
    parse_char(next_grapheme(rest), :ivalue, key, value <> char, map)
  end

  # :ivalue — end of input terminates the value.
  @spec parse_char(nil, :ivalue, String.t(), String.t(), map) :: map
  defp parse_char(nil, :ivalue, key, value, map) do
    map |> put_value(key, value)
  end

  # :qvalue — a backslash flags the next grapheme as escaped.
  @spec parse_char({String.t(), String.t()}, :qvalue, false, String.t(), String.t(), map) :: map
  defp parse_char({"\\", rest}, :qvalue, false, key, value, map) do
    parse_char(next_grapheme(rest), :qvalue, true, key, value, map)
  end

  # :qvalue — an escaped grapheme is taken literally; the flag resets.
  @spec parse_char({String.t(), String.t()}, :qvalue, true, String.t(), String.t(), map) :: map
  defp parse_char({char, rest}, :qvalue, true, key, value, map) do
    parse_char(next_grapheme(rest), :qvalue, false, key, value <> char, map)
  end

  # :qvalue — an unescaped `"` closes the quoted value.
  @spec parse_char({String.t(), String.t()}, :qvalue, false, String.t(), String.t(), map) :: map
  defp parse_char({"\"", rest}, :qvalue, false, key, value, map) do
    parse_char(next_grapheme(rest), :garbage, map |> put_value(key, value))
  end

  # :qvalue — any other grapheme extends the quoted value.
  @spec parse_char({String.t(), String.t()}, :qvalue, false, String.t(), String.t(), map) :: map
  defp parse_char({char, rest}, :qvalue, false, key, value, map) do
    parse_char(next_grapheme(rest), :qvalue, false, key, value <> char, map)
  end

  # :qvalue — input ended inside the quotes: keep what was accumulated.
  @spec parse_char(nil, :qvalue, false, String.t(), String.t(), map) :: map
  defp parse_char(nil, :qvalue, false, key, value, map) do
    map |> put_value(key, value)
  end

  # Booleans are produced by the parser itself (bare keys) and stored as-is.
  @spec put_value(map, String.t(), boolean) :: map
  defp put_value(map, key, value) when is_boolean(value) do
    map |> Map.put(key, value)
  end

  # String values are coerced to booleans/nil/numbers where possible.
  @spec put_value(map, String.t(), String.t()) :: map
  defp put_value(map, key, value) do
    map |> Map.put(key, value |> coerce_value)
  end

  @spec coerce_value(String.t()) :: true
  defp coerce_value("true"), do: true
  @spec coerce_value(String.t()) :: false
  defp coerce_value("false"), do: false
  @spec coerce_value(String.t()) :: nil
  defp coerce_value("nil"), do: nil
  @spec coerce_value(String.t()) :: number | String.t()
  defp coerce_value(value) do
    # `nil` unless the whole string is exactly an integer.
    integer =
      case Integer.parse(value) do
        {integer, ""} -> integer
        {_, _} -> nil
        :error -> nil
      end

    # Float.parse used to raise for some inputs, hence the rescue that
    # falls back to the raw string.
    # https://github.com/elixir-lang/elixir/pull/3863
    float =
      try do
        case Float.parse(value) do
          {float, ""} -> float
          {_, _} -> nil
          :error -> nil
        end
      rescue
        ArgumentError -> value
      end

    integer || float || value
  end
end
|
lib/logfmt/decoder.ex
| 0.903439
| 0.412234
|
decoder.ex
|
starcoder
|
defmodule Collision.Detection.SeparatingAxis do
  @moduledoc """
  Implements the separating axis theorem for collision detection.

  Checks for collision by projecting all of the edges of a pair of polygons
  against test axes that are the normals of their edges.

  If there is any axis for which the projections aren't overlapping,
  then the polygons are not colliding with one another. If all of
  the axes have overlapping projections, the polygons are colliding.
  """
  alias Collision.Polygon.Vertex
  alias Collision.Polygon.Polygon
  alias Collision.Vector.Vector2

  @type axis :: {Vertex.t, Vertex.t}
  @type polygon :: Polygon.t

  @doc """
  Check for collision between two polygons.

  Returns: `true` | `false`

  ## Examples

      iex> p1 = Polygon.gen_regular_polygon(4, 4, 0, {0, 0})
      iex> p2 = Polygon.gen_regular_polygon(4, 6, 0, {2, 2})
      iex> SeparatingAxis.collision?(p1, p2)
      true

      iex> p1 = Polygon.gen_regular_polygon(3, 1, 0, {-5, 8})
      iex> p2 = Polygon.gen_regular_polygon(4, 6, 0, {2, 2})
      iex> SeparatingAxis.collision?(p1, p2)
      false
  """
  @spec collision?(polygon, polygon) :: boolean
  def collision?(polygon_1, polygon_2) do
    projections = collision_projections(polygon_1, polygon_2)

    # Colliding only if every axis shows overlapping or contained projections.
    Enum.all?(projections, fn zipped_projection ->
      overlap?(zipped_projection) || containment?(zipped_projection)
    end)
  end

  @doc """
  Checks for collision between two polygons and, if colliding, calculates
  the minimum translation vector to move out of collision. The float in the
  return is the magnitude of overlap.

  Returns: nil | {Vector2.t, float}

  ## Examples

      iex> p1 = Polygon.gen_regular_polygon(4, 4, 45, {0, 0})
      iex> p2 = Polygon.gen_regular_polygon(4, 4, 45, {4, 0})
      iex> {mtv, magnitude} = SeparatingAxis.collision_mtv(p1, p2)
      iex> Vector.round_components(mtv, 2)
      %Collision.Vector.Vector2{x: -1.0, y: 0.0}
      iex> Float.round(magnitude, 2)
      1.66

      iex> p1 = Polygon.gen_regular_polygon(3, 1, 0, {-5, 8})
      iex> p2 = Polygon.gen_regular_polygon(4, 6, 0, {2, 2})
      iex> SeparatingAxis.collision_mtv(p1, p2)
      nil
  """
  # TODO There is repetition between this and collision?, but it
  # runs faster this way. Refactoring opportunity in the future.
  # Spec now includes `nil`, which is returned when there is no collision.
  @spec collision_mtv(polygon, polygon) :: {Vector2.t, number} | nil
  def collision_mtv(polygon_1, polygon_2) do
    in_collision = collision?(polygon_1, polygon_2)

    if in_collision do
      axes_to_test = test_axes(polygon_1.vertices) ++ test_axes(polygon_2.vertices)
      zipped_projections = collision_projections(polygon_1, polygon_2)

      axes_to_test
      |> Enum.zip(zipped_projections)
      |> minimum_overlap()
    end
  end

  # Get the axes to project the polygons against.
  # The list of vertices is expected to be ordered counter-clockwise,
  # so we're using the left normal to generate test axes.
  @spec test_axes([Vertex.t] | polygon) :: [axis]
  defp test_axes(%{vertices: vertices}), do: test_axes(vertices)

  defp test_axes(vertices) do
    vertices
    # Pair each vertex with its successor, wrapping back to the first vertex
    # to close the polygon. `Stream.chunk_every/4` replaces the deprecated
    # `Stream.chunk/4`.
    |> Stream.chunk_every(2, 1, [Enum.at(vertices, 0)])
    |> Stream.map(fn [a, b] -> Vector2.from_points(a, b) end)
    |> Stream.map(&Vector2.left_normal/1)
    |> Enum.map(&Vector.normalize/1)
  end

  # Project all of a polygon's edges onto the test axis and return
  # the minimum and maximum points.
  #@spec project_onto_axis([Vertex.t], Vertex.t) :: (number, number)
  defp project_onto_axis(vertices, axis) do
    dot_products =
      Enum.map(vertices, fn vertex ->
        vertex
        |> Vertex.to_tuple
        |> Collision.Vector.from_tuple
        |> Vector.dot_product(axis)
      end)

    {Enum.min(dot_products), Enum.max(dot_products)}
  end

  # Given a polygon, project all of its edges onto an axis.
  @spec project_onto_axes([Vertex.t], [Vector2.t]) :: [Vector2.t]
  defp project_onto_axes(vertices, axes) do
    Enum.map(axes, fn axis ->
      project_onto_axis(vertices, axis)
    end)
  end

  # Check whether a pair of projected intervals overlap.
  @spec overlap?(axis) :: boolean
  defp overlap?({{min1, max1}, {min2, max2}}) do
    !((min1 > max2) || (min2 > max1))
  end

  # Check whether a projection is wholly contained within another.
  @spec containment?(axis) :: boolean
  defp containment?({{min1, max1}, {min2, max2}}) do
    line1_inside = min1 > min2 && max1 < max2
    line2_inside = min2 > min1 && max2 < max1
    line1_inside || line2_inside
  end

  # Check whether a polygon is entirely inside another (on every axis).
  defp total_containment?(axes) do
    Enum.all?(axes, fn {_axis, projections} ->
      containment?(projections)
    end)
  end

  # Calculate the magnitude of overlap for overlapping intervals.
  @spec overlap_magnitude(axis) :: number
  defp overlap_magnitude({{min1, max1}, {min2, max2}}) do
    min(max1 - min2, max2 - min1)
  end

  # Given a list of axis/projection tuples, finds the minimum translation
  # vector and magnitude to move the polygons out of collision.
  @spec minimum_overlap([{Vector2.t, axis}]) :: {Vector2.t, number}
  defp minimum_overlap(axes) do
    overlap =
      if total_containment?(axes) do
        # For full containment, add the distance to the nearer edge so the
        # contained polygon is pushed all the way out.
        Enum.flat_map(axes, fn {axis, {{min1, max1}, {min2, max2}} = projections} ->
          [
            {axis, overlap_magnitude(projections) + abs(min1 - min2)},
            {axis, overlap_magnitude(projections) + abs(max1 - max2)}
          ]
        end)
      else
        Enum.map(axes, fn {axis, projections} -> {axis, overlap_magnitude(projections)} end)
      end

    # min_by keeps the first minimal entry, matching the previous
    # stable-sort-then-head implementation while running in O(n).
    Enum.min_by(overlap, fn {_axis, magnitude} -> magnitude end)
  end

  # Generate a zipped list of projections for both polygons.
  @spec collision_projections(polygon, polygon) :: [{Vector2.t, Vector2.t}]
  defp collision_projections(%{vertices: v1}, %{vertices: v2}) do
    collision_projections(v1, v2)
  end

  defp collision_projections(p1, p2) do
    axes_to_test = test_axes(p1) ++ test_axes(p2)
    p1_projection = project_onto_axes(p1, axes_to_test)
    p2_projection = project_onto_axes(p2, axes_to_test)
    Enum.zip(p1_projection, p2_projection)
  end
end
|
lib/collision/detection/separating_axis.ex
| 0.923983
| 0.812123
|
separating_axis.ex
|
starcoder
|
defmodule Bolt.Sips.Types do
  @moduledoc """
  Basic support for representing nodes, relationships and paths belonging to
  a Neo4j graph database.

  Four supported types of entities:

  - Node
  - Relationship
  - UnboundRelationship
  - Path
  """

  defmodule Entity do
    @moduledoc """
    base structure for Node and Relationship
    """
    @base_fields [id: nil, properties: nil]

    # Injects a struct combining the shared `:id`/`:properties` fields with
    # whatever extra fields the using module declares.
    defmacro __using__(fields) do
      fields = @base_fields ++ fields

      quote do
        defstruct unquote(fields)
      end
    end
  end

  defmodule Node do
    @moduledoc """
    Self-contained graph node.

    A Node represents a node from a Neo4j graph and consists of a
    unique identifier (within the scope of its origin graph), a list of
    labels and a map of properties.

    https://github.com/neo4j/neo4j/blob/3.1/manual/bolt/src/docs/dev/serialization.asciidoc#node
    """
    use Entity, labels: nil
  end

  defmodule Relationship do
    @moduledoc """
    Self-contained graph relationship.

    A Relationship represents a relationship from a Neo4j graph and consists of
    a unique identifier (within the scope of its origin graph), identifiers
    for the start and end nodes of that relationship, a type and a map of properties.

    https://github.com/neo4j/neo4j/blob/3.1/manual/bolt/src/docs/dev/serialization.asciidoc#bolt-value-relstruct
    """
    use Entity, start: nil, end: nil, type: nil
  end

  defmodule UnboundRelationship do
    @moduledoc """
    Self-contained graph relationship without endpoints.

    An UnboundRelationship represents a relationship relative to a
    separately known start point and end point.

    https://github.com/neo4j/neo4j/blob/3.1/manual/bolt/src/docs/dev/serialization.asciidoc#unboundrelationship
    """
    use Entity, start: nil, end: nil, type: nil
  end

  defmodule Path do
    @moduledoc """
    Self-contained graph path.

    A Path is a sequence of alternating nodes and relationships corresponding to a
    walk in the graph. The path always begins and ends with a node.
    Its representation consists of a list of distinct nodes,
    a list of distinct relationships and a sequence of integers describing the
    path traversal

    https://github.com/neo4j/neo4j/blob/3.1/manual/bolt/src/docs/dev/serialization.asciidoc#path
    """
    @type t :: %__MODULE__{
            nodes: List.t() | nil,
            relationships: List.t() | nil,
            sequence: List.t() | nil
          }
    defstruct nodes: nil, relationships: nil, sequence: nil

    @doc """
    represents a traversal or walk through a graph and maintains a direction
    separate from that of any relationships traversed
    """
    @spec graph(Path.t()) :: List.t() | nil
    def graph(path) do
      # Start the walk at the first node; draw_path/8 then appends an
      # UnboundRelationship and a node per traversal step.
      entities = [List.first(path.nodes)]

      draw_path(
        path.nodes,
        path.relationships,
        path.sequence,
        0,
        # Every second sequence entry is a (signed, 1-based) relationship index.
        Enum.take_every(path.sequence, 2),
        entities,
        # last node
        List.first(path.nodes),
        # next node
        nil
      )
    end

    # @lint false
    # n: nodes, r: relationships, s: full sequence, i: step counter,
    # rel_index: remaining relationship indices, acc: entities built so far,
    # ln: last node visited, nn: next node (currently unused).
    defp draw_path(_n, _r, _s, _i, [], acc, _ln, _nn), do: acc

    defp draw_path(n, r, s, i, [h | t] = _rel_index, acc, ln, _nn) do
      # Odd sequence entries pick the node reached by this step.
      next_node = Enum.at(n, Enum.at(s, 2 * i + 1))

      urel =
        if h > 0 && h < 255 do
          # Forward traversal.
          # rel: rels[rel_index - 1], start/end: (ln.id, next_node.id)
          rel = Enum.at(r, h - 1)

          unbound_relationship =
            [:id, :type, :properties, :start, :end]
            |> Enum.zip([rel.id, rel.type, rel.properties, ln.id, next_node.id])

          struct(UnboundRelationship, unbound_relationship)
        else
          # Reversed traversal: the relationship is walked end -> start.
          # rel: rels[-rel_index - 1], start/end: (next_node.id, ln.id)
          # Neo4j sends: -1, and Boltex returns 255 instead? Investigating,
          # meanwhile ugly path:
          # oh dear ...
          # NOTE(review): 255 is assumed to be the wire encoding of -1 —
          # confirm against the Boltex deserializer.
          haha = if h == 255, do: -1, else: h
          rel = Enum.at(r, -haha - 1)

          unbound_relationship =
            [:id, :type, :properties, :start, :end]
            |> Enum.zip([rel.id, rel.type, rel.properties, next_node.id, ln.id])

          struct(UnboundRelationship, unbound_relationship)
        end

      draw_path(n, r, s, i + 1, t, (acc ++ [urel]) ++ [next_node], next_node, ln)
    end
  end
end
|
lib/bolt_sips/types.ex
| 0.842232
| 0.674054
|
types.ex
|
starcoder
|
defmodule Forage.Codec.Encoder do
  @moduledoc """
  Functionality to encode a `Forage.Plan` into a Phoenix `param` map
  for use with the `ApplicationWeb.Router.Helpers`.
  """
  alias Forage.ForagePlan

  @doc """
  Encodes a forage plan into a params map.

  This function doesn't need to take the schema as an argument
  because it will never have to convert a string into an atom
  (the params map contains only strings and never atoms).
  """
  def encode(%ForagePlan{} = plan) do
    # Filter, sort and pagination are each encoded as independent maps
    # (with disjoint top-level keys) and merged into a single params map.
    encode_filter(plan)
    |> Map.merge(encode_sort(plan))
    |> Map.merge(encode_pagination(plan))
  end

  @doc """
  Encode the "filter" part of a forage plan. Returns a map.
  """
  def encode_filter(%ForagePlan{filter: []}), do: %{}

  def encode_filter(%ForagePlan{filter: filters}) do
    encoded =
      Map.new(filters, fn filter_entry ->
        field_name =
          case filter_entry[:field] do
            {:simple, name} when is_atom(name) ->
              Atom.to_string(name)

            {:assoc, {_schema, local, remote}} when is_atom(local) and is_atom(remote) ->
              Atom.to_string(local) <> "." <> Atom.to_string(remote)
          end

        {field_name, %{"op" => filter_entry[:operator], "val" => filter_entry[:value]}}
      end)

    %{"_filter" => encoded}
  end

  @doc """
  Encode the "sort" part of a forage plan. Returns a map.
  """
  def encode_sort(%ForagePlan{sort: []}), do: %{}

  def encode_sort(%ForagePlan{sort: sort}) do
    encoded =
      Map.new(sort, fn sort_column ->
        {Atom.to_string(sort_column[:field]),
         %{"direction" => Atom.to_string(sort_column[:direction])}}
      end)

    %{"_sort" => encoded}
  end

  @doc """
  Encode the "pagination" part of a forage plan. Returns a map.
  """
  def encode_pagination(%ForagePlan{pagination: pagination}) do
    # Only keys actually present in the pagination keyword list are emitted.
    for key <- [:after, :before], {:ok, value} <- [Keyword.fetch(pagination, key)], into: %{} do
      {Atom.to_string(key), value}
    end
  end
end
|
lib/forage/codec/encoder.ex
| 0.847495
| 0.516352
|
encoder.ex
|
starcoder
|
defmodule Expo.Po do
  @moduledoc """
  `.po` / `.pot` file handler
  """

  alias Expo.Po.DuplicateTranslationsError
  alias Expo.Po.Parser
  alias Expo.Po.SyntaxError
  alias Expo.Translations

  @type parse_options :: [{:file, Path.t()}]

  @type parse_error ::
          {:error,
           {:parse_error, message :: String.t(), context :: String.t(), line :: pos_integer()}}

  @type duplicate_translations_error ::
          {:error,
           {:duplicate_translations,
            [{message :: String.t(), new_line :: pos_integer(), old_line :: pos_integer()}]}}

  @type file_error :: {:error, File.posix()}

  @doc """
  Dumps a `Expo.Translations` struct as iodata.

  This function dumps a `Expo.Translations` struct (representing a PO file) as iodata,
  which can later be written to a file or converted to a string with
  `IO.iodata_to_binary/1`.

  ## Examples

  After running the following code:

      iodata = Expo.Po.compose %Expo.Translations{
        headers: ["Last-Translator: <NAME>"],
        translations: [
          %Expo.Translation.Singular{msgid: ["foo"], msgstr: ["bar"], comments: "A comment"}
        ]
      }

      File.write!("/tmp/test.po", iodata)

  the `/tmp/test.po` file would look like this:

      msgid ""
      msgstr ""
      "Last-Translator: <NAME>"

      # A comment
      msgid "foo"
      msgstr "bar"

  """
  @spec compose(translations :: Translations.t()) :: iodata()
  defdelegate compose(content), to: Expo.Po.Composer

  @doc """
  Parses a string into a `Expo.Translations` struct.

  This function parses a given `str` into a `Expo.Translations` struct.
  It returns `{:ok, po}` if there are no errors,
  otherwise `{:error, line, reason}`.

  ## Examples

      iex> {:ok, po} = Expo.Po.parse_string \"""
      ...> msgid "foo"
      ...> msgstr "bar"
      ...> \"""
      iex> [t] = po.translations
      iex> t.msgid
      ["foo"]
      iex> t.msgstr
      ["bar"]
      iex> po.headers
      []

      iex> Expo.Po.parse_string "foo"
      {:error, {:parse_error, "expected msgid followed by strings while processing plural translation inside singular translation or plural translation", "foo", 1}}

  """
  @spec parse_string(content :: binary(), opts :: parse_options()) ::
          {:ok, Translations.t()}
          | parse_error()
          | duplicate_translations_error()
  def parse_string(content, opts \\ []) do
    Parser.parse(content, opts)
  end

  @doc """
  Parses a string into a `Expo.Translations` struct, raising an exception if there are
  any errors.

  Works exactly like `parse_string/1`, but returns a `Expo.Translations` struct
  if there are no errors or raises a `Expo.Po.SyntaxError` error if there
  are.

  ## Examples

      iex> po = Expo.Po.parse_string! \"""
      ...> msgid "foo"
      ...> msgstr "bar"
      ...> \"""
      iex> [t] = po.translations
      iex> t.msgid
      ["foo"]
      iex> t.msgstr
      ["bar"]
      iex> po.headers
      []

      iex> Expo.Po.parse_string!("msgid")
      ** (Expo.Po.SyntaxError) 1: expected whitespace while processing msgid followed by strings inside plural translation inside singular translation or plural translation

      iex> Expo.Po.parse_string!(\"""
      ...> msgid "test"
      ...> msgstr ""
      ...>
      ...> msgid "test"
      ...> msgstr ""
      ...> \""")
      ** (Expo.Po.DuplicateTranslationsError) 4: found duplicate on line 4 for msgid: 'test'

  """
  @spec parse_string!(content :: String.t(), opts :: parse_options()) ::
          Translations.t() | no_return
  def parse_string!(str, opts \\ []) do
    case parse_string(str, opts) do
      {:ok, parsed} ->
        parsed

      {:error, {:parse_error, reason, context, line}} ->
        raise SyntaxError, maybe_add_file([line: line, reason: reason, context: context], opts)

      {:error, {:duplicate_translations, duplicates}} ->
        raise DuplicateTranslationsError, maybe_add_file([duplicates: duplicates], opts)
    end
  end

  @doc """
  Parses the contents of a file into a `Expo.Translations` struct.

  This function works similarly to `parse_string/1` except that it takes a file
  and parses the contents of that file. It can return:

    * `{:ok, po}`
    * `{:error, line, reason}` if there is an error with the contents of the
      `.po` file (for example, a syntax error)
    * `{:error, reason}` if there is an error with reading the file (this error
      is one of the errors that can be returned by `File.read/1`)

  ## Examples

      {:ok, po} = Expo.Po.parse_file "translations.po"
      po.file
      #=> "translations.po"

      Expo.Po.parse_file "nonexistent"
      #=> {:error, :enoent}

  """
  @spec parse_file(path :: Path.t(), opts :: parse_options()) ::
          {:ok, Translations.t()}
          | parse_error()
          | duplicate_translations_error()
          | file_error()
  def parse_file(path, opts \\ []) do
    with {:ok, contents} <- File.read(path) do
      Parser.parse(contents, Keyword.put_new(opts, :file, path))
    end
  end

  @doc """
  Parses the contents of a file into a `Expo.Translations` struct, raising if there
  are any errors.

  Works like `parse_file/1`, except that it raises a `Expo.Po.SyntaxError`
  exception if there's a syntax error in the file or a `File.Error` error if
  there's an error with reading the file.

  ## Examples

      Expo.Po.parse_file! "nonexistent.po"
      #=> ** (File.Error) could not parse "nonexistent.po": no such file or directory

  """
  @spec parse_file!(Path.t(), opts :: parse_options()) :: Translations.t() | no_return
  def parse_file!(path, opts \\ []) do
    case parse_file(path, opts) do
      {:ok, parsed} ->
        parsed

      {:error, {:parse_error, reason, context, line}} ->
        # parse_file/2 always injects :file, so the path is reported directly.
        raise SyntaxError, line: line, reason: reason, file: path, context: context

      {:error, {:duplicate_translations, duplicates}} ->
        raise DuplicateTranslationsError,
          duplicates: duplicates,
          file: Keyword.get(opts, :file, path)

      {:error, reason} ->
        raise File.Error, reason: reason, action: "parse", path: Keyword.get(opts, :file, path)
    end
  end

  # Prepends `{:file, path}` to `options` when the `:file` option was
  # provided, so raised errors can reference the offending file.
  defp maybe_add_file(options, opts) do
    case opts[:file] do
      nil -> options
      path -> [{:file, path} | options]
    end
  end
end
|
lib/expo/po.ex
| 0.857112
| 0.451871
|
po.ex
|
starcoder
|
defmodule Day1 do
  @moduledoc """
  Solution to Day 1 Advent of Code puzzle 2021: SonarSweep
  """

  @doc """
  Reads measurement data from `filepath` and prints both answers: the raw
  count of depth increases and the count of increases between
  three-measurement sliding windows.
  """
  def run(filepath) do
    data = get_data(filepath)
    IO.puts("Increases: #{sweep(data, 0)}")
    IO.puts("Normalized increases: #{normalize_data(data, 0)}")
  end

  @doc """
  Reads measurement data from a .txt file and converts it into a list
  of integers.

  The expected format is a return-separated list of numerical values.
  """
  def get_data(filepath) do
    {:ok, data} = File.read(filepath)

    data
    |> String.split("\n", trim: true)
    |> bins_to_ints([])
  end

  @doc """
  Converts a list of numeric strings to a list of integers, appended in
  order after the reversed `int_list` accumulator.
  """
  def bins_to_ints(string_list, int_list) do
    string_list
    |> Enum.reduce(int_list, fn str, acc ->
      # elem/2 raises ArgumentError when Integer.parse/2 returns :error,
      # surfacing malformed input instead of silently skipping it.
      [elem(Integer.parse(str, 10), 0) | acc]
    end)
    |> Enum.reverse()
  end

  @doc """
  Counts how many times a measurement increased from the previous one.

  Rewritten with head pattern matching: the original used `Enum.at/2` and
  `Enum.slice/2` on every step (O(n²) overall) and crashed with a
  `CondClauseError` on an empty list.
  """
  def sweep([first, second | rest], increases) when first < second,
    do: sweep([second | rest], increases + 1)

  def sweep([_first, second | rest], increases),
    do: sweep([second | rest], increases)

  # Zero or one measurements left: nothing more to compare.
  def sweep(_data, increases), do: increases

  @doc """
  Counts increases between consecutive three-measurement window sums.

  Comparing `a + b + c` with `b + c + d` reduces to comparing `a` with `d`.
  The original recursed down to a list of length 2 and therefore performed
  one spurious comparison against an incomplete final window (and crashed
  on lists shorter than 2); this version stops as soon as fewer than two
  full windows remain.
  """
  def normalize_data([a, b, c, d | rest], increases) when a < d,
    do: normalize_data([b, c, d | rest], increases + 1)

  def normalize_data([_a, b, c, d | rest], increases),
    do: normalize_data([b, c, d | rest], increases)

  def normalize_data(_data, increases), do: increases
end
|
lib/day_1/day_1.ex
| 0.834474
| 0.761184
|
day_1.ex
|
starcoder
|
defmodule JsonRfc.Patch do
  @moduledoc """
  Represent map transformations as a series of JSON Patch (RFC 6902) compatible operations.
  """

  # NOTE(review): is_array_index/2 and is_array_append/2 are guards imported
  # from JsonRfc; their exact semantics (bounds checking, handling of the
  # "-" append key) are defined there — presumably is_array_index checks
  # `key` is a valid in-range integer index for the list. Verify in JsonRfc.
  import JsonRfc, only: [is_array_index: 2, is_array_append: 2]
  import JsonRfc.Pointer, only: [transform: 3, fetch: 2]

  # The five RFC 6902 operation tags supported by this module.
  @type ops :: :add | :replace | :remove | :move | :copy
  @type opmap :: %{:op => ops, optional(term) => any}
  @type opmap(op) :: %{:op => op, optional(term) => any}
  @type path :: JsonRfc.Pointer.t() | iodata()

  @doc """
  Operation: add the given `value` to the document at `path`.
  * Supports the array append operator (`/-`) to add to the end of the array.
  * If a value is already present at `path` it is replaced.
  * If the pointer is the root (""), replaces the entire document.
  * Handles the array append operator to append to an array.
  * Shifts values right when inserting into an array
  """
  @spec add(path(), JsonRfc.value()) :: opmap(:add)
  def add(path, value),
    do: %{op: :add, path: path, value: value}

  @doc """
  Operation: replace the value in the document at `path`
  Like add, except it fails if there is no value at the given key.
  """
  @spec replace(path(), JsonRfc.value()) :: opmap(:replace)
  def replace(path, value),
    do: %{op: :replace, path: path, value: value}

  @doc """
  Operation: remove the value in the document at `path`
  """
  @spec remove(path()) :: opmap(:remove)
  def remove(path),
    do: %{op: :remove, path: path}

  @doc """
  Operation: move the value in the document `from` to `path`
  """
  @spec move(path(), path()) :: opmap(:move)
  def move(from, path),
    do: %{op: :move, from: from, path: path}

  @doc """
  Operation: copy the value at `from` in the document to `path`
  """
  @spec copy(path(), path()) :: opmap(:copy)
  def copy(from, path),
    do: %{op: :copy, from: from, path: path}

  @doc """
  Given a list of `ops`, apply them all to the given document.
  Given a single operation, apply that operation to the given document.
  Operations are the maps returned from methods in this module, and represent
  the same named operations in IETF RFC 6902.

  ## Examples
      iex> doc = %{"foo" => [], "byebye" => 5}
      iex> ops = [
      ...>   JsonRfc.Patch.add("/bar", 3),
      ...>   JsonRfc.Patch.replace("/foo", %{}),
      ...>   JsonRfc.Patch.remove("/byebye"),
      ...>   JsonRfc.Patch.move("/bar", "/foo/bar"),
      ...>   JsonRfc.Patch.copy("/foo", "/baz")
      ...> ]
      iex> JsonRfc.Patch.evaluate(doc, ops)
      {:ok, %{"foo" => %{"bar" => 3}, "baz" => %{"bar" => 3}}}
  """
  @spec evaluate(JsonRfc.value(), opmap()) :: {:ok, JsonRfc.value()} | {:error, term}
  @spec evaluate(JsonRfc.value(), list(opmap())) :: {:ok, JsonRfc.value()} | {:error, term}
  def evaluate(document, ops) when is_list(ops) do
    # reduce the operation list over the document, stopping on error
    # The accumulator is always an {:ok, doc} tuple because any error halts
    # the reduction immediately and is returned as-is.
    Enum.reduce_while(ops, {:ok, document}, fn op, {:ok, doc} ->
      case evaluate(doc, op) do
        {:ok, result} -> {:cont, {:ok, result}}
        error -> {:halt, error}
      end
    end)
  end

  def evaluate(document, %{op: :add, path: pointer, value: value}) do
    transform(document, pointer, fn enum, key ->
      # cond order matters: the array guards must be tested before the
      # generic is_map/1 fallback.
      cond do
        is_array_index(enum, key) ->
          # Insert before `key`, shifting later elements right.
          {head, tail} = Enum.split(enum, key)
          {:ok, head ++ [value] ++ tail}

        is_array_append(enum, key) ->
          # The "-" append operator: add to the end of the array.
          {:ok, enum ++ [value]}

        is_map(enum) ->
          # Object member: insert or overwrite.
          {:ok, Map.put(enum, key, value)}

        true ->
          {:error, :invalid_target}
      end
    end)
  end

  # Replaces the value at `path' with `value`.
  # Requires that there already exists a value at `path`, otherwise invalid target is returned.
  def evaluate(document, %{op: :replace, path: pointer, value: value}) do
    transform(document, pointer, fn enum, key ->
      cond do
        is_array_index(enum, key) ->
          # Drop the element at `key` and splice the replacement in its place.
          {head, [_ | tail]} = Enum.split(enum, key)
          {:ok, head ++ [value] ++ tail}

        is_map(enum) and is_map_key(enum, key) ->
          # Map.replace!/3 asserts the key exists (RFC 6902 replace semantics).
          {:ok, Map.replace!(enum, key, value)}

        true ->
          {:error, :invalid_target}
      end
    end)
  end

  # Removes the value at `path`.
  # shifts array elements left on removal
  # doesn't support array append
  def evaluate(document, %{op: :remove, path: pointer}) do
    transform(document, pointer, fn enum, key ->
      cond do
        is_array_index(enum, key) ->
          {head, [_ | tail]} = Enum.split(enum, key)
          {:ok, head ++ tail}

        is_map(enum) and is_map_key(enum, key) ->
          {:ok, Map.delete(enum, key)}

        true ->
          {:error, :invalid_target}
      end
    end)
  end

  # Move = fetch the source value, then remove it and re-add it at the
  # destination (evaluated as a two-op list so either failure aborts).
  def evaluate(document, %{op: :move, from: from, path: path}) do
    case fetch(document, from) do
      {:ok, value} -> evaluate(document, [remove(from), add(path, value)])
      error -> error
    end
  end

  # Copy = fetch the source value and add it at the destination; the
  # source is left untouched.
  def evaluate(document, %{op: :copy, from: from, path: path}) do
    case fetch(document, from) do
      {:ok, value} -> evaluate(document, add(path, value))
      error -> error
    end
  end

  @doc """
  Evaluates the given list of ops against the document, and additionally returns the operations themselves in the result tuple.
  """
  @spec evaluate_with_ops(JsonRfc.value(), list(opmap())) ::
          {:ok, JsonRfc.value(), list(opmap)} | {:error, term}
  @spec evaluate_with_ops(JsonRfc.value(), opmap()) ::
          {:ok, JsonRfc.value(), opmap()} | {:error, term}
  def evaluate_with_ops(document, ops) do
    case evaluate(document, ops) do
      {:ok, document} -> {:ok, document, ops}
      error -> error
    end
  end
end
|
lib/patch.ex
| 0.912699
| 0.758063
|
patch.ex
|
starcoder
|
defmodule Nebulex.Time do
  @moduledoc """
  Time utilities.
  """

  ## API

  @doc """
  Returns the current system time in the given time unit.

  The `unit` is set to `:millisecond` by default.

  ## Examples

      iex> Nebulex.Time.now()
      _milliseconds

  """
  @spec now(System.time_unit()) :: integer()
  defdelegate now(unit \\ :millisecond), to: System, as: :system_time

  @doc """
  Returns `true` if the given `timeout` is a valid timeout; otherwise, `false`
  is returned.

  Valid timeout: `:infinity | non_neg_integer()`.

  ## Examples

      iex> Nebulex.Time.timeout?(1)
      true

      iex> Nebulex.Time.timeout?(:infinity)
      true

      iex> Nebulex.Time.timeout?(-1)
      false

  """
  @spec timeout?(term) :: boolean
  def timeout?(:infinity), do: true
  def timeout?(value), do: is_integer(value) and value >= 0

  @doc """
  Returns the equivalent expiration time in milliseconds for the given `time`.

  ## Example

      iex> Nebulex.Time.expiry_time(10, :second)
      10000

      iex> Nebulex.Time.expiry_time(10, :minute)
      600000

      iex> Nebulex.Time.expiry_time(1, :hour)
      3600000

      iex> Nebulex.Time.expiry_time(1, :day)
      86400000

  """
  @spec expiry_time(pos_integer, :second | :minute | :hour | :day) :: pos_integer
  def expiry_time(time, unit \\ :second)

  def expiry_time(time, unit) when is_integer(time) and time > 0 do
    # Validate the unit first; exp_time_unit/1 raises on anything unsupported.
    do_expiry_time(time, exp_time_unit(unit))
  end

  def expiry_time(time, _unit) do
    raise ArgumentError,
          "invalid time. The time is expected to be a positive integer, got #{inspect(time)}"
  end

  # Direct millisecond multipliers instead of chained unit conversions.
  # Kept public for backward compatibility with existing callers.
  def do_expiry_time(time, :second), do: time * 1000
  def do_expiry_time(time, :minute), do: time * 60 * 1000
  def do_expiry_time(time, :hour), do: time * 60 * 60 * 1000
  def do_expiry_time(time, :day), do: time * 24 * 60 * 60 * 1000

  defp exp_time_unit(unit) when unit in [:second, :minute, :hour, :day], do: unit

  defp exp_time_unit(other) do
    raise ArgumentError,
          "unsupported time unit. Expected :second, :minute, :hour, or :day" <>
            ", got #{inspect(other)}"
  end
end
|
lib/nebulex/time.ex
| 0.921007
| 0.546617
|
time.ex
|
starcoder
|
defmodule YipyipExAuth.Plugs.ProcessRefreshToken do
@moduledoc """
Plug to process and verify refresh tokens. Must be initialized with a `YipyipExAuth.Config`-struct, which can be initialized itself using `YipyipExAuth.Config.from_enum/1`.
The token signature source (bearer or cookie) must match the `token_signature_transport` specified in the token payload.
A refresh token can only be used to refresh a session once. A single refresh token id is stored in the
server-side session by `YipyipExAuth.Plugs.upsert_session/3` to enforce this.
The plug does not reject unauthenticated requests by itself. If a request is successfully verified, the user ID, session ID and extra payload are assigned to the conn. If not, an authentication error message is put in the conn's private map, which can be retrieved using `YipyipExAuth.Utils.get_auth_error/1`. This allows applications to implement their own plug to reject unauthenticated requests, for example:
## Usage example that rejects unauthenticated requests
```
defmodule MyPhoenixAppWeb.Router do
use MyPhoenixAppWeb, :router
@config YipyipExAuth.Config.from_enum(
session_ttl: 68400,
refresh_token_ttl: 3600,
session_store_module: MyModule
)
pipeline :valid_refresh_token do
plug YipyipExAuth.Plugs.ProcessRefreshToken, @config
plug :only_authenticated
end
@doc \"\"\"
Reject unauthenticated requests
\"\"\"
def only_authenticated(%{assigns: %{current_user_id: _}} = conn, _opts), do: conn
def only_authenticated(conn, _opts) do
auth_error = YipyipExAuth.Utils.get_auth_error(conn)
conn |> Plug.Conn.send_resp(401, auth_error) |> halt()
end
end
```
In this way, applications can completely customize how to respond to unauthenticated requests and how much information to expose to the client.
## Examples / doctests
alias Plug.Conn
alias YipyipExAuth.Plugs.ProcessRefreshToken
alias YipyipExAuth.Utils
import YipyipExAuth.TestHelpers
# only available when Mix env = test
alias YipyipExAuth.TestSupport.FakeSessionStore
import YipyipExAuth.TestSupport.Shared
@config YipyipExAuth.Config.from_enum(
session_ttl: 68400,
refresh_token_ttl: 3600,
session_store_module: FakeSessionStore
)
@plug_opts ProcessRefreshToken.init(@config)
# "reject" requests without refresh token
iex> conn = %Conn{} |> ProcessRefreshToken.call(@plug_opts)
iex> "refresh token not found" = Utils.get_auth_error(conn)
iex> {conn.assigns, Utils.get_session(conn)}
{%{}, nil}
# "reject" requests with invalid token
iex> config = %{@config | refresh_token_salt: "different"}
iex> conn = build_conn() |> put_refresh_token(config) |> ProcessRefreshToken.call(@plug_opts)
iex> "refresh token invalid" = Utils.get_auth_error(conn)
iex> {conn.assigns, Utils.get_session(conn)}
{%{}, nil}
# "reject" requests with expired refresh token
iex> plug_opts = ProcessRefreshToken.init(%{@config | refresh_token_ttl: -1})
iex> conn = build_conn() |> put_refresh_token(@config) |> ProcessRefreshToken.call(plug_opts)
iex> "refresh token expired" = Utils.get_auth_error(conn)
iex> {conn.assigns, Utils.get_session(conn)}
{%{}, nil}
# "reject" requests where the signature transport mechanism does not match the session's initial value
iex> token = generate_refresh_token(build_conn(), @config, %{tst: :cookie})
iex> conn = build_conn() |> put_refresh_token(@config, token) |> ProcessRefreshToken.call(@plug_opts)
iex> "token signature transport invalid" = Utils.get_auth_error(conn)
iex> {conn.assigns, Utils.get_session(conn)}
{%{}, nil}
# "reject" requests with an expired session
iex> token = generate_refresh_token(build_conn(), @config, %{exp: 1})
iex> conn = build_conn() |> put_refresh_token(@config, token) |> ProcessRefreshToken.call(@plug_opts)
iex> "session expired" = Utils.get_auth_error(conn)
iex> {conn.assigns, Utils.get_session(conn)}
{%{}, nil}
# "allow" requests with valid refresh token
iex> conn = build_conn() |> put_refresh_token(@config) |> ProcessRefreshToken.call(@plug_opts)
iex> nil = Utils.get_auth_error(conn)
iex> conn.assigns
%{current_session_id: "a", current_user_id: 1, extra_refresh_token_payload: %{}}
iex> %YipyipExAuth.Models.Session{} = Utils.get_session(conn)
# "allow" requests with valid refresh token with signature in cookie
iex> token = generate_refresh_token(build_conn(), @config, %{tst: :cookie})
iex> [header, encoded_payload, signature] = String.split(token, ".", parts: 3)
iex> conn = build_conn()
...> |> put_refresh_token(@config, header <> "." <> encoded_payload)
...> |> Plug.Test.put_req_cookie(@config.refresh_cookie_name, "." <> signature)
...> |> ProcessRefreshToken.call(@plug_opts)
iex> nil = Utils.get_auth_error(conn)
iex> conn.assigns
%{current_session_id: "a", current_user_id: 1, extra_refresh_token_payload: %{}}
iex> %YipyipExAuth.Models.Session{} = Utils.get_session(conn)
"""
@behaviour Plug
alias Phoenix.Token
alias Plug.Conn
use YipyipExAuth.Utils.Constants
alias YipyipExAuth.{Config, SharedInternals}
alias YipyipExAuth.Models.Session
require Logger
@doc false
@impl true
@spec init(YipyipExAuth.Config.t()) :: Plug.opts()
# Flattens the relevant Config fields into a tuple at plug-build time so
# call/2 does no per-request struct traversal.
def init(%Config{
      refresh_cookie_name: cookie_name,
      refresh_token_salt: salt,
      refresh_token_ttl: max_age,
      refresh_token_key_digest: digest,
      session_store_module: session_store
    }) do
  # The trailing keyword list is passed verbatim to Phoenix.Token.verify/4.
  {session_store, salt, cookie_name, key_digest: digest, max_age: max_age}
end
@doc false
@impl true
@spec call(Conn.t(), Plug.opts()) :: Conn.t()
def call(conn, {session_store, salt, cookie_name, verification_opts}) do
  # Happy path: each step is tagged with an atom so the `else` clauses can
  # map a failure back to a specific authentication-error message.
  with {:token, {sig_transport, token}} <- SharedInternals.get_token(conn, cookie_name),
       {:ok, payload} <- Token.verify(conn, salt, token, verification_opts),
       # Assert the payload has exactly the expected keys.
       {:pl, %{uid: uid, sid: sid, id: rtid, tst: tst, exp: exp, epl: epl}} <- {:pl, payload},
       # The signature source (bearer vs cookie) must match the transport
       # recorded in the token payload.
       {:transport_matches, true} <- {:transport_matches, sig_transport == tst},
       {:session_expired, false} <-
         SharedInternals.session_expired?(sid, uid, exp, session_store),
       # Pin ^uid so a session belonging to another user falls through to
       # the {:session, %Session{}} error clause below.
       {:session, %Session{user_id: ^uid} = session} <- {:session, session_store.get(sid, uid)},
       # Single-use guarantee: the token id must match the one stored in the
       # server-side session by upsert_session/3.
       {:token_fresh, true} <- {:token_fresh, session.refresh_token_id == rtid} do
    # Verified: expose ids/extra payload via assigns and stash internals in
    # the conn's private map for downstream plugs.
    conn
    |> Conn.assign(:current_user_id, uid)
    |> Conn.assign(:current_session_id, sid)
    |> Conn.assign(:extra_refresh_token_payload, epl)
    |> Conn.put_private(@private_session_key, session)
    |> Conn.put_private(@private_refresh_token_payload_key, payload)
    |> Conn.put_private(@private_token_signature_transport_key, tst)
  else
    # Each failure only records an error message (retrievable via
    # YipyipExAuth.Utils.get_auth_error/1); the request is NOT halted here.
    {:token, nil} ->
      SharedInternals.auth_error(conn, "refresh token not found")

    # {:error, :expired} / {:error, :invalid} come from Phoenix.Token.verify/4.
    {:error, :expired} ->
      SharedInternals.auth_error(conn, "refresh token expired")

    {:error, :invalid} ->
      SharedInternals.auth_error(conn, "refresh token invalid")

    {:pl, _} ->
      SharedInternals.auth_error(conn, "invalid refresh token payload")

    {:transport_matches, false} ->
      SharedInternals.auth_error(conn, "token signature transport invalid")

    {:session, nil} ->
      SharedInternals.auth_error(conn, "session not found")

    # Session found but owned by a different user (the ^uid pin failed).
    {:session, %Session{}} ->
      SharedInternals.auth_error(conn, "session user mismatch")

    {:session_expired, true} ->
      SharedInternals.auth_error(conn, "session expired")

    {:token_fresh, false} ->
      SharedInternals.auth_error(conn, "refresh token stale")

    error ->
      Logger.error("Unexpected auth error: #{inspect(error)}")
      SharedInternals.auth_error(conn, "unexpected error")
  end
end
end
|
lib/plugs/process_refresh_token.ex
| 0.884657
| 0.54353
|
process_refresh_token.ex
|
starcoder
|
defmodule ArtemisWeb.ViewHelper.Tables do
use Phoenix.HTML
import Phoenix.HTML.Tag
@default_delimiter ","
@doc """
Generates empty table row if no records match
"""
def render_table_row_if_empty(records, options \\ [])

# Paginated result set: render based on its entries.
def render_table_row_if_empty(%{entries: entries}, options),
  do: render_table_row_if_empty(entries, options)

# Match the empty list directly instead of `when length(records) == 0`:
# `length/1` walks the whole list (O(n)) and that guard only ever matched `[]`.
def render_table_row_if_empty([], options) do
  message = Keyword.get(options, :message, "No records found")
  Phoenix.View.render(ArtemisWeb.LayoutView, "table_row_if_empty.html", message: message)
end

def render_table_row_if_empty(_records, _options), do: nil
@doc """
Render sortable table header
"""
def sortable_table_header(conn, value, label, delimiter \\ @default_delimiter) do
  # Anchor whose href toggles the sort direction for `value` and whose
  # icon reflects the currently active direction.
  path = order_path(conn, value, delimiter)
  text = content_tag(:span, label)
  icon = content_tag(:i, "", class: icon_class(conn, value, delimiter))
  content_tag(:a, [text, icon], href: path)
end

# Rebuilds the current request path with an updated "order" query param.
defp order_path(conn, value, delimiter) do
  updated_query_params = update_query_param(conn, value, delimiter)
  query_string = Plug.Conn.Query.encode(updated_query_params)
  "#{Map.get(conn, :request_path)}?#{query_string}"
end

# Toggles `value` within the delimiter-separated "order" query param: an
# active ascending field ("name") becomes descending ("-name") and vice
# versa; a field not currently present replaces the order with just itself.
defp update_query_param(conn, value, delimiter) do
  inverse = inverse_value(value)
  query_params = Map.get(conn, :query_params) || %{}
  current_value = Map.get(query_params, "order", "")
  current_fields = String.split(current_value, delimiter)

  updated_fields =
    cond do
      Enum.member?(current_fields, value) -> replace_item(current_fields, value, inverse)
      Enum.member?(current_fields, inverse) -> replace_item(current_fields, inverse, value)
      true -> [value]
    end

  updated_value = Enum.join(updated_fields, delimiter)
  Map.put(query_params, "order", updated_value)
end

# A leading "-" marks a descending sort on the field.
defp inverse_value(value), do: "-#{value}"

# Replaces the first occurrence of `current` with `next`; returns the list
# unchanged when `current` is absent.
defp replace_item(list, current, next) do
  case Enum.find_index(list, &(&1 == current)) do
    nil -> list
    index -> List.update_at(list, index, fn _ -> next end)
  end
end

# Semantic-UI icon class reflecting the active sort direction for `value`.
defp icon_class(conn, value, delimiter) do
  base = "sort icon"
  query_params = Map.get(conn, :query_params) || %{}
  current_value = Map.get(query_params, "order", "")
  current_fields = String.split(current_value, delimiter)

  cond do
    Enum.member?(current_fields, value) -> "#{base} ascending"
    Enum.member?(current_fields, inverse_value(value)) -> "#{base} descending"
    true -> base
  end
end
@doc """
Render Data Table
Example:
<%=
render_data_table(
@conn,
@customers,
allowed_columns: allowed_columns(),
default_columns: ["name", "slug", "actions"],
selectable: true
)
%>
Options:
allowed_columns: map of allowed columns
default_columns: list of strings
selectable: include checkbox for bulk actions
query_params: map of connection query params
request_path: string of connection request path
user: struct of current user
## Features
### Column Ordering
The `columns` query param can be used to define a custom order to table
columns. For example, the default columns might be:
Name | Slug | Actions
By passing in the query param `?columns=status,name,address` the table
will transform to show:
Status | Name | Address
This enables custom reporting in a standard and repeatable way across the
application. Since query params are used to define the columns, any reports a
user creates can be revisited using the same URL. Which in turn, also makes
it easy to share with others.
### Table Export
Custom exporters can be defined for any format, like `html`, `json`, `csv`,
`xls`, or `pdf`. There's no conventions to learn or magic. As documented below,
standard Elixir and Phoenix code can be used to define and write custom
exporters in any format.
## Options
The goal of the data table is to be extensible without introducing new
data table specific conventions. Instead, enable extension using standard
Elixir and Phoenix calls.
### Allowed Columns
The value for `allowed_columns` should be a map. A complete example may look like:
%{
"name" => [
label: fn (_conn) -> "Name" end,
value: fn (_conn, row) -> row.name end,
],
"slug" => [
label: fn (_conn) -> "Slug" end,
value: fn (_conn, row) -> row.slug end,
]
}
The key for each entry should be a URI friendly slug. It is used to match
against the `columns` query param.
The value for each entry is a keyword list. It must define a `label` and
`value` function.
The `label` function is used in column headings. It takes one argument, the
`conn` struct. The most common return will be a simple bitstring, but
the `conn` is included for more advanced usage, for instance creating an
anchor link.
The `value` function is used for the column value. It takes two arguments,
the `conn` struct and the `row` value. The most common return will be calling
an attribute on the row value, for instance `data.name`. The `conn` value is
included for more advanced usage.
#### Support for Different Content Types / Formats
The required `label` and `value` functions should return simple values, like
bitstrings, integers, and floats.
Format specific values, such as HTML tags, should be defined in format
specific keys. For instance:
"name" => [
label: fn (_conn) -> "Name" end,
value: fn (_conn, row) -> row.name end,
value_html: fn (conn, row) ->
link(row.name, to: Routes.permission_path(conn, :show, row))
end
]
The data table function will first search for `label_<format>` and
`value_<format>` keys. E.g. a standard `html` request would search for
`label_html` and `value_html`. And in turn, a request for `csv` content type
would search for `label_csv` and `value_csv`. If format specific keys are not
found, the require `label` and `value` keys will be used as a fallback.
### Default Columns
The default columns option should be a list of bitstrings, each corresponding
to a key defined in the `allowed_columns` map.
default_columns: ["name", "slug"]
"""
def render_data_table(conn_or_socket_or_assigns, data, options \\ [])

# LiveView assigns map: copy request context from assigns into options
# (without clobbering explicitly passed values), then dispatch on the socket.
def render_data_table(%{socket: socket} = assigns, data, options) do
  options =
    options
    |> Keyword.put_new(:query_params, assigns[:query_params])
    |> Keyword.put_new(:request_path, assigns[:request_path])
    |> Keyword.put_new(:user, assigns[:user])

  render_data_table(socket, data, options)
end

# Controller assigns map: unwrap the conn and dispatch on it.
def render_data_table(%{conn: %Plug.Conn{} = conn} = _assigns, data, options) do
  render_data_table(conn, data, options)
end

def render_data_table(conn_or_socket, data, options) do
  format = get_request_format(conn_or_socket)
  conn_or_socket = update_conn_or_socket_fields(conn_or_socket, options)
  columns = get_data_table_columns(conn_or_socket, options)
  headers? = Keyword.get(options, :headers, true)
  compact? = Keyword.get(options, :compact, false)

  # Container CSS classes accumulate based on the display options.
  class = "data-table-container"

  class =
    case compact? do
      true -> class <> " compact"
      false -> class
    end

  class =
    case headers? do
      true -> class <> " with-headers"
      false -> class <> " without-headers"
    end

  assigns = [
    class: class,
    columns: columns,
    conn_or_socket: conn_or_socket,
    data: data,
    headers?: headers?,
    id: Keyword.get(options, :id, Artemis.Helpers.UUID.call()),
    selectable: Keyword.get(options, :selectable),
    show_only: Keyword.get(options, :show_only)
  ]

  # Template selected by request format, e.g. "data_table.html" or "data_table.csv".
  Phoenix.View.render(ArtemisWeb.LayoutView, "data_table.#{format}", assigns)
end

# Sockets don't carry query params / request path the way a conn does, so
# mirror them (and the required user) onto the socket for downstream helpers.
defp update_conn_or_socket_fields(%Phoenix.LiveView.Socket{} = socket, options) do
  socket
  |> Map.put(:query_params, Keyword.get(options, :query_params))
  |> Map.put(:request_path, Keyword.get(options, :request_path))
  |> Map.put(:assigns, %{
    user: Keyword.fetch!(options, :user)
  })
end

defp update_conn_or_socket_fields(conn, _options), do: conn

# Falls back to :html when the format cannot be determined (e.g. sockets,
# where Phoenix.Controller.get_format/1 raises).
defp get_request_format(conn) do
  Phoenix.Controller.get_format(conn)
rescue
  _ -> :html
end
@doc """
Compares the `?columns=` query param value against the `allowed_columns`. If
the query param is not set, compares the `default_columns` value instead.
Returns a map of matching keys in `allowed_columns`.
"""
def get_data_table_columns(%Plug.Conn{} = conn, options) do
  get_data_table_columns(%{query_params: conn.query_params}, options)
end

def get_data_table_columns(assigns, options) do
  allowed_columns = Keyword.get(options, :allowed_columns, [])

  # Keep only requested keys present in allowed_columns, preserving the
  # requested order (flat_map replaces the reduce + reverse dance).
  columns =
    assigns
    |> parse_data_table_requested_columns(options)
    |> Enum.flat_map(fn requested_key ->
      case Map.get(allowed_columns, requested_key) do
        nil -> []
        column -> [column]
      end
    end)

  case Keyword.get(options, :selectable, false) do
    true -> [get_checkbox_column() | columns]
    false -> columns
  end
end

# Column definition for the bulk-action checkbox column prepended when the
# table is selectable.
defp get_checkbox_column do
  [
    label: fn _conn -> nil end,
    label_html: fn _conn ->
      tag(:input, class: "ui checkbox select-all-rows", type: "checkbox", name: "id-toggle")
    end,
    value: fn _conn, _row -> nil end,
    value_html: fn _conn, row ->
      value = Map.get(row, :_id, Map.get(row, :id))
      tag(:input, class: "ui checkbox select-row", type: "checkbox", name: "id[]", value: value)
    end
  ]
end
@doc """
Parse query params and return requested data table columns
"""
def parse_data_table_requested_columns(conn_or_assigns, options \\ [])

# Sockets carry no query params: fall straight back to :default_columns.
def parse_data_table_requested_columns(%Phoenix.LiveView.Socket{} = _socket, options) do
  get_data_table_requested_columns(options)
end

# NOTE: clause order is load-bearing — %Plug.Conn{} must be matched before
# the bare %{query_params: _} map clause and the is_map catch-all below.
def parse_data_table_requested_columns(%Plug.Conn{} = conn, options) do
  conn
  |> Map.get(:query_params)
  |> parse_data_table_requested_columns(options)
end

def parse_data_table_requested_columns(%{query_params: query_params}, options) do
  parse_data_table_requested_columns(query_params, options)
end

def parse_data_table_requested_columns(query_params, options) when is_map(query_params) do
  query_params
  |> Map.get("columns")
  |> get_data_table_requested_columns(options)
end

# Missing "columns" param -> configured defaults; "a,b,c" -> ["a", "b", "c"];
# an already-parsed list passes through untouched.
defp get_data_table_requested_columns(options), do: Keyword.get(options, :default_columns, [])
defp get_data_table_requested_columns(nil, options), do: Keyword.get(options, :default_columns, [])
defp get_data_table_requested_columns(value, _) when is_bitstring(value), do: String.split(value, ",")
defp get_data_table_requested_columns(value, _) when is_list(value), do: value
@doc """
Renders the label for a data center column.
"""
def render_data_table_label(conn, column, format) do
  # Prefer a format-specific renderer (:label_html, :label_csv, ...) and
  # fall back to the required :label function.
  # NOTE(review): String.to_atom/1 creates atoms at runtime; presumably
  # `format` is a bounded set of request formats — verify upstream, since
  # unbounded input here would leak atoms.
  key = String.to_atom("label_#{format}")
  default = Keyword.fetch!(column, :label)
  render = Keyword.get(column, key, default)
  render.(conn)
end

@doc """
Renders the row value for a data center column.
"""
def render_data_table_value(conn, column, row, format) do
  # Same format-specific lookup as render_data_table_label/3, with the row
  # passed through to the renderer.
  key = String.to_atom("value_#{format}")
  default = Keyword.fetch!(column, :value)
  render = Keyword.get(column, key, default)
  render.(conn, row)
end
@doc """
Render a select box to allow users to choose custom columns
"""
def render_data_table_column_selector(%Plug.Conn{} = conn, available_columns) do
  render_data_table_column_selector(
    %{conn: conn, query_params: conn.query_params, request_path: conn.request_path},
    available_columns
  )
end

def render_data_table_column_selector(assigns, available_columns) do
  conn_or_socket = Map.get(assigns, :conn) || Map.get(assigns, :socket)
  selected = parse_data_table_requested_columns(assigns)

  # Currently-selected columns sort first, in their selected order;
  # unselected ones (rank :infinity) keep their relative order at the end.
  selection_rank = fn column ->
    key = elem(column, 1)
    Enum.find_index(selected, &(&1 == key)) || :infinity
  end

  render_assigns = [
    available: Enum.sort_by(available_columns, selection_rank),
    class: if(selected != [], do: "active"),
    conn_or_socket: conn_or_socket,
    selected: selected
  ]

  Phoenix.View.render(ArtemisWeb.LayoutView, "data_table_columns.html", render_assigns)
end
@doc """
Prints a primary and secondary value
"""
def render_table_entry(primary, secondary \\ nil)

# Without a secondary value the primary renders on its own.
def render_table_entry(primary, nil), do: primary

def render_table_entry(primary, secondary) do
  [
    content_tag(:div, primary),
    content_tag(:div, secondary, class: "secondary-value")
  ]
end
end
|
apps/artemis_web/lib/artemis_web/view_helpers/tables.ex
| 0.73659
| 0.481515
|
tables.ex
|
starcoder
|
defmodule DSMR.Telegram do
@type t() :: %__MODULE__{
header: DSMR.Telegram.Header.t(),
checksum: DSMR.Telegram.Checksum.t(),
data: [DSMR.Telegram.COSEM.t() | DSMR.Telegram.MBus.t()]
}
defstruct header: nil, checksum: nil, data: []
defmodule OBIS do
@type t() :: %__MODULE__{
code: String.t(),
medium: atom(),
channel: integer(),
tags: [keyword()]
}
defstruct code: nil, medium: nil, channel: nil, tags: nil
def new({:obis, [medium, channel | tags]}) do
code = "#{medium}-#{channel}:#{Enum.join(tags, ".")}"
medium = interpretet_medium(medium)
tags = interpretet_tags(tags)
%OBIS{code: code, medium: medium, channel: channel, tags: tags}
end
defp interpretet_medium(0), do: :abstract
defp interpretet_medium(1), do: :electricity
defp interpretet_medium(6), do: :heat
defp interpretet_medium(7), do: :gas
defp interpretet_medium(8), do: :water
defp interpretet_medium(_), do: :unknown
defp interpretet_tags([0, 2, 8]), do: [general: :version]
defp interpretet_tags([1, 0, 0]), do: [general: :timestamp]
defp interpretet_tags([96, 1, 1]), do: [general: :equipment_identifier]
defp interpretet_tags([96, 14, 0]), do: [general: :tariff_indicator]
defp interpretet_tags([1, 8, 1]), do: [energy: :total, direction: :consume, tariff: :low]
defp interpretet_tags([1, 8, 2]), do: [energy: :total, direction: :consume, tariff: :normal]
defp interpretet_tags([2, 8, 1]), do: [energy: :total, direction: :produce, tariff: :low]
defp interpretet_tags([2, 8, 2]), do: [energy: :total, direction: :produce, tariff: :normal]
defp interpretet_tags([1, 7, 0]), do: [power: :active, phase: :all, direction: :consume]
defp interpretet_tags([2, 7, 0]), do: [power: :active, phase: :all, direction: :produce]
defp interpretet_tags([21, 7, 0]), do: [power: :active, phase: :l1, direction: :consume]
defp interpretet_tags([41, 7, 0]), do: [power: :active, phase: :l2, direction: :consume]
defp interpretet_tags([61, 7, 0]), do: [power: :active, phase: :l3, direction: :consume]
defp interpretet_tags([22, 7, 0]), do: [power: :active, phase: :l1, direction: :produce]
defp interpretet_tags([42, 7, 0]), do: [power: :active, phase: :l2, direction: :produce]
defp interpretet_tags([62, 7, 0]), do: [power: :active, phase: :l3, direction: :produce]
defp interpretet_tags([31, 7, 0]), do: [amperage: :active, phase: :l1]
defp interpretet_tags([51, 7, 0]), do: [amperage: :active, phase: :l2]
defp interpretet_tags([71, 7, 0]), do: [amperage: :active, phase: :l3]
defp interpretet_tags([32, 7, 0]), do: [voltage: :active, phase: :l1]
defp interpretet_tags([52, 7, 0]), do: [voltage: :active, phase: :l2]
defp interpretet_tags([72, 7, 0]), do: [voltage: :active, phase: :l3]
defp interpretet_tags([96, 7, 9]), do: [power_failures: :long]
defp interpretet_tags([96, 7, 21]), do: [power_failures: :short]
defp interpretet_tags([99, 97, 0]), do: [power_failures: :event_log]
defp interpretet_tags([32, 32, 0]), do: [voltage: :sags, phase: :l1]
defp interpretet_tags([52, 32, 0]), do: [voltage: :sags, phase: :l2]
defp interpretet_tags([72, 32, 0]), do: [voltage: :sags, phase: :l3]
defp interpretet_tags([32, 36, 0]), do: [voltage: :swells, phase: :l1]
defp interpretet_tags([52, 36, 0]), do: [voltage: :swells, phase: :l2]
defp interpretet_tags([72, 36, 0]), do: [voltage: :swells, phase: :l3]
defp interpretet_tags([96, 13, 0]), do: [message: :text]
defp interpretet_tags([96, 13, 1]), do: [message: :code]
defp interpretet_tags([24, 1, 0]), do: [mbus: :device_type]
defp interpretet_tags([96, 1, 0]), do: [mbus: :equipment_identifier]
defp interpretet_tags([24, 2, 1]), do: [mbus: :measurement]
defp interpretet_tags(_), do: []
end
defmodule Value do
  @moduledoc false

  # A single measured value from a telegram: the parsed value, the raw
  # string it was parsed from, and an optional unit (nil when absent).
  @type t() :: %__MODULE__{
          value: integer() | float() | String.t(),
          raw: String.t(),
          unit: String.t()
        }

  defstruct value: nil, raw: nil, unit: nil

  # Builds a Value from a parser tuple that carries a unit.
  def new({:value, [[{_type, parsed}, {:raw, raw}], unit: unit]}) do
    %__MODULE__{value: parsed, raw: raw, unit: unit}
  end

  # Builds a Value from a parser tuple without a unit; `unit` stays nil.
  def new({:value, [[{_type, parsed}, {:raw, raw}]]}) do
    %__MODULE__{value: parsed, raw: raw}
  end
end
defmodule COSEM do
  @moduledoc false

  # One COSEM object from the telegram: an OBIS reference plus the list
  # of values attached to it.
  @type t() :: %__MODULE__{obis: OBIS.t(), values: [Value.t()]}

  defstruct obis: nil, values: []

  # The parser hands us a list whose head is the raw OBIS reference and
  # whose tail holds the raw values; both are converted to their structs.
  def new([raw_obis | raw_values]) do
    %__MODULE__{
      obis: OBIS.new(raw_obis),
      values: Enum.map(raw_values, &Value.new/1)
    }
  end
end
defmodule MBus do
  @moduledoc false

  # Data reported by an M-Bus slave device (e.g. a gas or water meter)
  # attached to the smart meter on the given channel.
  @type t() :: %__MODULE__{channel: integer(), data: [DSMR.Telegram.COSEM.t()]}

  defstruct channel: nil, data: []

  # Wraps a single raw COSEM object for the given M-Bus channel.
  def new(channel, raw_cosem) do
    %__MODULE__{channel: channel, data: [COSEM.new(raw_cosem)]}
  end
end
defmodule Header do
  @moduledoc false

  # The telegram identification line: meter manufacturer and model.
  @type t() :: %__MODULE__{manufacturer: String.t(), model: String.t()}

  defstruct manufacturer: nil, model: nil

  # Builds a Header from the keyword pairs produced by the parser.
  # The pair order is fixed: manufacturer first, then model.
  def new(manufacturer: manufacturer, model: model) do
    %__MODULE__{manufacturer: manufacturer, model: model}
  end
end
defmodule Checksum do
  @moduledoc false

  # The CRC16 checksum line that terminates a telegram, kept verbatim.
  @type t() :: %__MODULE__{value: String.t()}

  defstruct value: nil

  # Wraps the raw checksum string in a struct.
  def new(raw), do: %__MODULE__{value: raw}
end
end
|
lib/dsmr/telegram.ex
| 0.712332
| 0.606469
|
telegram.ex
|
starcoder
|
defmodule Pointers do
  @moduledoc """
  A context for working with Pointers, a sort of global foreign key scheme.
  """
  import Ecto.Query
  alias Pointers.{Pointer, Tables}

  @doc """
  Returns a Pointer, either the one provided or a synthesised one
  pointing to the provided schema object. Does not hit the database or
  cause the pointer to be written to the database whatsoever.
  """
  def cast!(%Pointer{} = p), do: p
  def cast!(%struct{id: id}), do: %Pointer{id: id, table_id: Tables.id!(struct)}

  @doc "Looks up the table for a given pointer"
  def table(%Pointer{table_id: id}), do: Tables.table!(id)

  @doc "Looks up the schema module for a given pointer"
  def schema(%Pointer{table_id: id}), do: Tables.schema!(id)

  @doc """
  Return the provided pointer when it belongs to a table queryable by the
  given table search term.

  Raises `ArgumentError` with a descriptive message when it does not.
  """
  def assert_points_to!(%Pointer{table_id: table} = pointer, term) do
    if Tables.id!(term) == table do
      pointer
    else
      # Previously raised a bare ArgumentError; include context so the
      # failure is diagnosable from the message alone.
      raise ArgumentError,
        message:
          "Pointer table id #{inspect(table)} does not match table term #{inspect(term)}"
    end
  end

  @doc """
  Given a list of pointers which may or may not have their pointed loaded,
  return a plan for preloading, a map of module name to set of loadable IDs.
  """
  def plan(pointers) when is_list(pointers), do: Enum.reduce(pointers, %{}, &plan/2)

  # Pointed already loaded; nothing to plan for this pointer.
  defp plan(%Pointer{pointed: p}, acc) when not is_nil(p), do: acc

  defp plan(%Pointer{id: id, table_id: table}, acc) do
    Map.update(acc, Tables.schema!(table), MapSet.new([id]), &MapSet.put(&1, id))
  end

  @doc """
  Returns a basic query over undeleted pointable objects in the system,
  optionally limited to one or more types.

  If the type is set to a Pointable, Virtual or Mixin schema, records
  will be selected from that schema directly. It is assumed this
  filters deleted records by construction.

  Otherwise, will query from Pointer, filtering not is_nil(deleted_at)
  """
  def query_base(type \\ nil)
  def query_base([]), do: query_base(Pointer)
  def query_base(nil), do: query_base(Pointer)
  def query_base(Pointer), do: from(p in Pointer, where: is_nil(p.deleted_at))

  def query_base(schemas) when is_list(schemas) do
    table_ids = Enum.map(schemas, &get_table_id!/1)
    from(p in query_base(Pointer), where: p.table_id in ^table_ids)
  end

  def query_base(schema) when is_atom(schema) or is_binary(schema) do
    # Validation only: raises unless it's a pointable/virtual schema or a
    # table id. The result is deliberately discarded.
    get_table_id!(schema)
    from(s in schema, select: s)
  end

  @doc """
  Resolves a table id from a binary id or a pointable/virtual schema module.
  Raises `RuntimeError` for anything else.
  """
  def get_table_id!(schema) do
    if is_binary(schema),
      do: schema,
      else: with(nil <- Pointers.Util.table_id(schema), do: need_pointable(schema))
  end

  defp need_pointable(got) do
    raise RuntimeError,
      message:
        "Expected a table id or pointable or virtual schema module name, got: #{inspect(got)}"
  end
end
|
lib/pointers.ex
| 0.853608
| 0.491578
|
pointers.ex
|
starcoder
|
defmodule Plaid.Castable do
  @moduledoc false

  @type generic_map :: %{String.t() => any()}

  @doc """
  Core to how this library functions is the `Plaid.Castable` behaviour for casting response objects to structs.

  Each struct implements the behaviour by adding a `cast/1` function that takes a generic string-keyed
  map and turns it into a struct of its own type.

  `Plaid.Castable.cast/2` and `Plaid.Castable.cast_list/2` are helper functions for casting nested objects.
  Both of which have the same signature, taking first a module and second the string-keyed map.

  Each API call function will return some `__Response` struct. i.e. `GetResponse`, `SearchResponse`, `CreateResponse`, etc.
  This is because of how plaid's API is structured, and it follows the same pattern as the client-libraries they
  maintain in go, ruby, java, and other languages.

  Take advantage of nested modules for structs when possible and it makes sense.
  Nest all `__Response` structs next to the functions that make the API calls.

  The `Plaid.Client` module that makes all the API calls must be passed a `__Response` module that implements
  the `Plaid.Castable` behaviour. It then calls the `cast/1` function on that `__Response` module; passing
  the response body to kick off the response casting process.

  ### Made-up example

  Endpoint: `/payments/get`

  Response:
  ```json
  {
    "payment": {
      "customer_name": "<NAME>",
      "items": [
        {
          "amount": 1200,
          "name": "<NAME>"
        },
        {
          "amount": 3100,
          "name": "Video Game"
        }
      ]
    }
  }
  ```

  Elixir structs:
  ```elixir
  defmodule Plaid.Payment do
    @behaviour Plaid.Castable

    alias Plaid.Castable

    defstruct [:customer_name, :items]

    defmodule Item do
      @behaviour Plaid.Castable

      defstruct [:amount, :name]

      @impl true
      def cast(generic_map) do
        %__MODULE__{
          amount: generic_map["amount"],
          name: generic_map["name"]
        }
      end
    end

    @impl true
    def cast(generic_map) do
      %__MODULE__{
        customer_name: generic_map["customer_name"],
        items: Castable.cast_list(Item, generic_map["items"])
      }
    end
  end
  ```

  Elixir API module:
  ```elixir
  defmodule Plaid.Payments do
    alias Plaid.Payment

    defmodule GetResponse do
      @behaviour Plaid.Castable

      alias Plaid.Castable

      defstruct [:payment, :request_id]

      @impl true
      def cast(generic_map) do
        %__MODULE__{
          payment: Castable.cast(Payment, generic_map["payment"]),
          request_id: generic_map["request_id"]
        }
      end
    end

    def get(id, config) do
      Plaid.Client.call(
        "/payments/get",
        %{id: id},
        GetResponse,
        config
      )
    end
  end
  ```

  Tying it all together in IEx:
  ```elixir
  iex> Plaid.Payments.get("5930", client_id: "123", secret: "abc")
  {:ok, %Plaid.Payments.GetResponse{
    payment: %Plaid.Payment{
      customer_name: "<NAME>",
      items: [
        %Plaid.Payment.Item{
          amount: 1200,
          name: "Toy Solider"
        },
        %Plaid.Payment.Item{
          amount: 3100,
          name: "Video Game"
        }
      ]
    }
  }}
  ```
  """
  @callback cast(generic_map() | nil) :: struct() | nil

  @doc """
  Take a generic string-key map and cast it into a well defined struct.

  Passing `:raw` as the module will just give back the string-key map; Useful for fallbacks.

  ## Examples

      cast(MyStruct, %{"key" => "value"})
      %MyStruct{key: "value"}

      cast(:raw, %{"key" => "value"})
      %{"key" => "value"}

  """
  # Return type now includes struct(): the implementation-module clause
  # returns whatever the implementation's cast/1 builds.
  @spec cast(module() | :raw, generic_map() | nil) :: struct() | generic_map() | nil
  def cast(_implementation, nil) do
    nil
  end

  def cast(:raw, generic_map) do
    generic_map
  end

  def cast(implementation, generic_map) when is_map(generic_map) do
    implementation.cast(generic_map)
  end

  @doc """
  Take a list of generic string-key maps and cast them into well defined structs.

  ## Examples

      cast_list(MyStruct, [%{"a" => "b"}, %{"c" => "d"}])
      [%MyStruct{a: "b"}, %MyStruct{c: "d"}]

  """
  # :raw is accepted here too, since each element is cast via cast/2.
  @spec cast_list(module() | :raw, [generic_map()] | nil) ::
          [struct()] | [generic_map()] | nil
  def cast_list(_implementation, nil) do
    nil
  end

  def cast_list(implementation, list_of_generic_maps) when is_list(list_of_generic_maps) do
    Enum.map(list_of_generic_maps, &cast(implementation, &1))
  end
end
|
lib/plaid/castable.ex
| 0.880925
| 0.851891
|
castable.ex
|
starcoder
|
defmodule Terrasol.Path do
  @moduledoc """
  Handling of Earthstar path strings and the resulting
  `Terrasol.Path.t` structures
  """

  # Character classes permitted inside a path segment.
  @upper 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
  @lower 'abcdefghijklmnopqrstuvwxyz'
  @digit '0123456789'
  @puncs '\'()-._~!$&+,:=@%'
  @allow @upper ++ @lower ++ @digit ++ @puncs

  @enforce_keys [
    :string,
    :segments,
    :ephemeral,
    :writers
  ]
  defstruct string: "",
            segments: [],
            ephemeral: false,
            writers: []

  @typedoc "An Earthstar path"
  @type t() :: %__MODULE__{
          string: String.t(),
          segments: [String.t()],
          ephemeral: boolean(),
          writers: [Terrasol.Author.t()]
        }

  defimpl String.Chars, for: Terrasol.Path do
    def to_string(%{string: string}), do: "#{string}"
  end

  @doc """
  Parse an Earthstar path string into a `Terrasol.Path`

  `:error` on invalid input
  """
  def parse(path)
  def parse(%Terrasol.Path{} = path), do: path

  # A valid path is 2..512 bytes and must begin with "/".
  def parse(path) when is_binary(path) and byte_size(path) >= 2 and byte_size(path) <= 512 do
    case String.split(path, "/") do
      # The leading "/" produces an empty first chunk.
      ["" | segments] -> parse_segments(segments, [], path, false, [])
      _ -> :error
    end
  end

  def parse(_), do: :error

  # Ran out of input without ever accepting a segment: the path was "/".
  defp parse_segments([], [], _path, _ephemeral, _writers), do: :error

  # All segments consumed; assemble the struct. Segments were accumulated
  # in reverse, so restore their original order here.
  defp parse_segments([], acc, path, ephemeral, writers) do
    %__MODULE__{
      string: path,
      segments: Enum.reverse(acc),
      ephemeral: ephemeral,
      writers: writers
    }
  end

  # Parse one segment; a non-matching :error falls straight through.
  defp parse_segments([segment | rest], acc, path, ephemeral, writers) do
    with {acc, ephemeral, writers} <-
           parse_seg(to_charlist(segment), [], acc, ephemeral, writers) do
      parse_segments(rest, acc, path, ephemeral, writers)
    end
  end

  # An empty segment means the path contained "//".
  defp parse_seg([], [], _acc, _ephemeral, _writers), do: :error

  # "@" may not open the very first segment of a path.
  defp parse_seg([?@ | _], [], [], _ephemeral, _writers), do: :error

  # End of segment: flush the accumulated characters (held in reverse).
  defp parse_seg([], chars, acc, ephemeral, writers) do
    segment = chars |> Enum.reverse() |> to_string()
    {[segment | acc], ephemeral, writers}
  end

  # "!" anywhere in a segment marks the whole path as ephemeral; the "!"
  # itself is kept as part of the segment text.
  defp parse_seg([?! | rest], chars, acc, _ephemeral, writers),
    do: parse_seg(rest, [?! | chars], acc, true, writers)

  # A segment opening with "~" names the authors allowed to write here,
  # separated by further "~" characters. Mid-segment "~" is an ordinary
  # character and is handled by the @allow clause below.
  defp parse_seg([?~ | rest], [], acc, ephemeral, writers) do
    authors = to_string(rest)

    case collect_writers(String.split(authors, "~"), writers) do
      :error -> :error
      writers -> {["~" <> authors | acc], ephemeral, writers}
    end
  end

  # Any other character must come from the allowed character set.
  defp parse_seg([char | rest], chars, acc, ephemeral, writers) when char in @allow,
    do: parse_seg(rest, [char | chars], acc, ephemeral, writers)

  defp parse_seg(_, _, _, _, _), do: :error

  # I don't think the order matters, but we'll maintain it.
  defp collect_writers([], writers), do: Enum.reverse(writers)

  defp collect_writers([author | rest], writers) do
    case Terrasol.Author.parse(author) do
      :error -> :error
      writer -> collect_writers(rest, [writer | writers])
    end
  end
end
|
lib/terrasol/path.ex
| 0.739986
| 0.570271
|
path.ex
|
starcoder
|
defmodule GenWebsocket do
  @moduledoc ~S"""
  A Websocket client for Elixir with an API similar to `:gen_tcp`.

  Example of usage where a client is started and connected, then it is used to send and receive
  data:

      iex> {:ok, client} = GenWebsocket.connect('example.com', 80, [])
      iex> :ok = GenWebsocket.send(client, "data")
      iex> {:ok, data} = GenWebsocket.recv(client, 100)

  When the client has the option `:active` set to `true` or `:once` it will send to the process
  that started the client or the controlling process defined with `controlling_process/2` function
  messages with the format `{:websocket, pid, data}`, where `pid` is the PID of the client and
  `data` is a binary with the data received from the websocket server. If the `:active` option
  was set to `:once` the client will set it to `false` after sending data once. When the option is
  set to `false`, the `recv/3` function must be used to retrieve data or the option needs to be set
  back to `true` or `:once` using the `set_active/2` function.

  When the connection is lost unexpectedly a message is sent with the format
  `{:websocket_closed, pid}`.

  To close the client connection to the websocket server the function `close/1` can be used, the
  connection to the websocket will be closed and the client process will be stopped.
  """

  alias GenWebsocket.Client

  @type reason() :: any()
  @type opts() :: Keyword.t()

  @doc """
  Starts and connects a client to the websocket server.

  The `opts` parameter is a Keyword list that expects the following optional entries:

  * `:transport` - An atom with the transport protocol, either `:tcp` or `:tls`, defaults to `:tcp`
  * `:path` - A string with the websocket server path, defaults to `"/"`
  * `:headers` - A list of tuples with the HTTP headers to send to the server, defaults to `[]`
  * `:active` - A boolean or `:once`, to indicate if the client should send back any received data;
    it can be `true`, `false` or `:once`, defaults to `false`
  * `:compress` - A boolean to indicate if the data should be compressed, defaults to `true`
  * `:protocols` - A list of strings indicating the protocols used by the server, defaults to `[]`
  """
  @spec connect(charlist(), :inet.port_number(), opts(), timeout()) ::
          {:ok, pid()} | {:error, reason()}
  defdelegate connect(host, port, opts \\ [], timeout \\ 5_000), to: Client

  @doc """
  Sends data to the websocket server.

  Data must be a binary or a list of binaries.
  """
  @spec send(pid(), iodata()) :: :ok | {:error, reason()}
  defdelegate send(client, data), to: Client

  @doc """
  When the client has the option `:active` set to `false`, the `recv/3` function can be used to
  retrieve any data sent by the server.

  If the provided length is `0`, it returns immediately with all data present in the client, even if
  there is none. If the timeout expires it returns `{:error, :timeout}`.
  """
  @spec recv(pid(), non_neg_integer(), timeout()) :: {:ok, String.t()} | {:error, reason()}
  defdelegate recv(client, length, timeout \\ 5_000), to: Client

  @doc """
  Defines the process to which the data received by the client is sent to, when the client option
  `:active` is set to `true` or `:once`.
  """
  @spec controlling_process(pid(), pid()) :: :ok | {:error, reason()}
  defdelegate controlling_process(client, pid), to: Client

  @doc """
  Closes the client connection to the websocket server and stops the client. Any data retained by
  the client will be lost.
  """
  @spec close(pid()) :: :ok
  defdelegate close(client), to: Client

  @doc """
  It defines the client `:active` option.

  The possible values for the `:active` options are `true`, `false` or `:once`. When the `:active`
  option is set to `:once`, the client will send back the first received frame of data and set the
  `:active` option to `false`.
  """
  @spec set_active(pid(), boolean() | :once) :: :ok | {:error, reason()}
  defdelegate set_active(client, active), to: Client
end
|
lib/gen_websocket.ex
| 0.865025
| 0.612527
|
gen_websocket.ex
|
starcoder
|
defmodule Authex do
  @moduledoc """
  Defines an auth module.

  This module provides a simple set of tools for the authorization and authentication
  required by a typical API through use of JSON web tokens. To begin, we will want
  to generate a secret from which our tokens will be signed with. There is a convenient
  mix task available for this:

      mix authex.gen.secret

  We should keep this secret as an environment variable.

  Next, we will want to create our auth module:

      defmodule MyApp.Auth do
        use Authex

        def start_link(opts \\\\ []) do
          Authex.start_link(__MODULE__, opts, name: __MODULE__)
        end

        # Callbacks

        @impl Authex
        def init(opts) do
          # Add any configuration listed in Authex.start_link/3
          secret = System.get_env("AUTH_SECRET") || "foobar"
          opts = Keyword.put(opts, :secret, secret)
          {:ok, opts}
        end

        @impl Authex
        def handle_for_token(%MyApp.User{} = resource, opts) do
          {:ok, [sub: resource.id, scopes: resource.scopes], opts}
        end

        def handle_for_token(_resource, _opts) do
          {:error, :bad_resource}
        end

        @impl Authex
        def handle_from_token(token, _opts) do
          # You may want to perform a database lookup for your user instead
          {:ok, %MyApp.User{id: token.sub, scopes: token.scopes}}
        end
      end

  We must then add the auth module to our supervision tree.

      children = [
        MyApp.Auth
      ]

  ## Tokens

  At the heart of Authex is the `Authex.Token` struct. This struct is simply
  a wrapper around the typical JWT claims. The only additional item is the
  `:scopes` and `:meta` key. There are 3 base actions required for these tokens -
  creating, signing, and verification.

  #### Creating

  We can easily create token structs using the `token/3` function.

      Authex.token(MyApp.Auth, sub: 1, scopes: ["admin/read"])

  The above would create a token struct for a resource with an id of 1 and with
  "admin/read" authorization.

  #### Signing

  Once we have a token struct, we can sign it using the `sign/3` function to
  create a compact token binary. This is what we will use for authentication and
  authorization for our API.

      token = Authex.token(MyApp.Auth, sub: 1, scopes: ["admin/read"])
      Authex.sign(MyApp.Auth, token)

  #### Verifying

  Once we have a compact token binary, we can verify it and turn it back to an
  token struct using the `verify/3` function.

      token = Authex.token(MyApp.Auth, sub: 1, scopes: ["admin/read"])
      compact_token = Authex.sign(MyApp.Auth, token)
      {:ok, token} = Authex.verify(MyApp.Auth, compact_token)

  ## Callbacks

  Typically, we want to be able to create tokens from another source of data.
  This could be something like a `User` struct. We also will want to take a token
  and turn it back into a `User` struct.

  To do this, we must implement callbacks. For our auth module above, we can
  convert a user to a token.

      token = Authex.for_token(MyApp.Auth, user)
      compact_token = Authex.sign(MyApp.Auth, token)

  As well as turn a token back into a user.

      token = Authex.verify(MyApp.Auth, compact_token)
      user = Authex.from_token(MyApp.Auth, token)

  ## Repositories

  Usually, use of JSON web tokens requires some form of persistence to blacklist
  tokens through their `:jti` claim.

  To do this, we must create a repository. A repository is simply a module that
  adopts the `Authex.Repo` behaviour. For more information on creating
  repositories, please see the `Authex.Repo` documentation.

  Once we have created our blacklist, we define it in our opts when starting our
  auth module or in the `c:init/1` callback.

  During the verification process used by `verify/3`, any blacklist defined in
  our config will be checked against. Please be aware of any performance
  penalty that may be incurred through use of database-backed repos without use
  of caching.

  ## Plugs

  Authex provides a number of plugs to handle the typical authentication and
  authorization process required by an API using your auth module.

  For more information on handling authentication, please see the `Authex.Plug.Authentication`
  documentation.

  For more information on handling authorization, please see the `Authex.Plug.Authorization`
  documentation.
  """

  alias Authex.{Repo, Server, Signer, Token, Verifier}

  @type alg :: :hs256 | :hs384 | :hs512
  @type signer_option :: {:alg, alg()} | {:secret, binary()}
  @type signer_options :: [signer_option()]
  @type verifier_option ::
          {:alg, alg()}
          | {:time, integer()}
          | {:secret, binary()}
          | {:blacklist, Authex.Blacklist.t()}
  @type verifier_options :: [verifier_option()]
  @type option ::
          {:secret, binary()}
          | {:blacklist, module() | false}
          | {:default_alg, alg()}
          | {:default_iss, binary()}
          | {:default_aud, binary()}
          | {:default_sub, binary() | integer()}
          | {:default_jti, mfa() | binary() | false}
          | {:unauthorized, module()}
          | {:forbidden, module()}
  @type options :: [option()]
  @type t :: module()

  @doc """
  A callback executed when the auth process starts.

  This should be used to dynamically set any config during runtime - such as the
  secret key used to sign tokens with.

  Returns `{:ok, opts}` or `:ignore`.

  ## Example

      def init(opts) do
        secret = System.get_env("AUTH_SECRET")
        opts = Keyword.put(opts, :secret, secret)
        {:ok, opts}
      end

  """
  @callback init(options()) :: {:ok, options()} | :ignore

  @doc """
  A callback used by `for_token/3` to convert a resource (e.g. a user struct)
  into token claims plus signer options.

  Returns `{:ok, claims, opts}` or `{:error, reason}`.
  """
  @callback handle_for_token(resource :: any(), Keyword.t()) ::
              {:ok, Authex.Token.claims(), signer_options()} | {:error, any()}

  @doc """
  A callback used by `from_token/3` to convert a verified token back into a
  resource (e.g. a user struct).

  Returns `{:ok, resource}` or `{:error, reason}`.
  """
  @callback handle_from_token(Authex.Token.t(), Keyword.t()) ::
              {:ok, resource :: any()} | {:error, any()}

  @doc """
  Starts the auth process.

  Returns `{:ok, pid}` on success.

  Returns `{:error, {:already_started, pid}}` if the auth process is already
  started or `{:error, term}` in case anything else goes wrong.

  ## Options

  * `:secret` - The secret used to sign tokens with.
  * `:blacklist` - A blacklist repo, or false if disabled - defaults to `false`.
  * `:default_alg` - The default algorithm used to sign tokens - defaults to `:hs256`.
  * `:default_iss` - The default iss claim used in tokens.
  * `:default_aud` - The default aud claim used in tokens.
  * `:default_ttl` - The default time to live for tokens in seconds.
  * `:default_jti` - The default mfa used to generate the jti claim. Can be `false`
    if you do not want to generate one - defaults to `{Authex.UUID, :generate, []}`.
  """
  @spec start_link(Authex.t(), options(), GenServer.options()) :: GenServer.on_start()
  def start_link(module, opts \\ [], server_opts \\ []) do
    Server.start_link(module, opts, server_opts)
  end

  @doc """
  Creates a new token.

  A token is a struct that wraps the typical JWT claims but also adds a couple
  new fields. Please see the `Authex.Token` documentation for more details.

  Returns an `Authex.Token` struct.

  ## Options

  * `:time` - The base time (timestamp format) in which to use.
  * `:ttl` - The time-to-live for the token in seconds or `:infinity` if no expiration
    is required. The lifetime is based on the time provided via the options,
    or the current time if not provided.

  ## Example

      Authex.token(MyAuth, sub: 1, scopes: ["admin/read"])

  """
  @spec token(Authex.t(), Authex.Token.claims(), Authex.Token.options()) :: Authex.Token.t()
  def token(module, claims \\ [], opts \\ []) do
    Token.new(module, claims, opts)
  end

  @doc """
  Signs a token, creating a compact token.

  The compact token is a binary that can be used for authentication and authorization
  purposes. Typically, this would be placed in an HTTP header, such as:

  ```bash
  Authorization: Bearer mytoken
  ```

  Returns `compact_token` or raises an `Authex.Error`.

  ## Options

  * `:secret` - The secret key to sign the token with.
  * `:alg` - The algorithm to sign the token with - defaults to `:hs256`

  Any option provided would override the default set in the config.
  """
  @spec sign(Authex.t(), Authex.Token.t(), signer_options()) :: binary()
  def sign(module, %Authex.Token{} = token, opts \\ []) do
    module
    |> Signer.new(opts)
    |> Signer.compact(token)
  end

  @doc """
  Generates a compact token from a set of claims.

  This is simply a shortened version of calling `token/3` and `sign/3`.

  ## Options

  All options are the same available in `token/3` and `sign/3`.
  """
  @spec compact_token(Authex.t(), Authex.Token.claims(), signer_options()) :: binary()
  def compact_token(module, claims \\ [], opts \\ []) do
    token = token(module, claims, opts)
    sign(module, token, opts)
  end

  @doc """
  Verifies a compact token.

  Verification is a multi-step process that ensures:

  1. The token has not been tampered with.
  2. The current time is not before the `nbf` value.
  3. The current time is not after the `exp` value.
  4. The token `jti` is not included in the blacklist (if provided).

  If all checks pass, the token is deemed verified.

  Returns `{:ok, token}` or `{:error, reason}`.

  ## Options

  * `:time` - The base time (timestamp format) in which to use.
  * `:secret` - The secret key to verify the token with.
  * `:alg` - The algorithm to verify the token with
  * `:blacklist` - The blacklist module to verify with.

  Any option provided would override the default set in the config.

  ## Example

      {:ok, token} = Authex.verify(MyAuth, compact_token)

  """
  @spec verify(Authex.t(), binary(), verifier_options()) ::
          {:ok, Authex.Token.t()}
          | {:error,
             :bad_token
             | :not_ready
             | :expired
             | :blacklisted
             | :blacklist_error
             | :jti_unverified}
  def verify(module, compact_token, opts \\ []) do
    Verifier.run(module, compact_token, opts)
  end

  @doc """
  Refreshes an `Authex.Token` into a new `Authex.Token`.

  When using this function, the assumption has already been made that you have
  verified it with `verify/3`. This will extract the following claims from the
  original token:

  * `:sub`
  * `:iss`
  * `:aud`
  * `:scopes`
  * `:meta`

  It will then take these claims and generate a new token with them.

  ## Options

  Please see the options available at `token/3`.

  ## Example

      token = Authex.refresh(MyAuth, token)

  """
  @spec refresh(Authex.t(), Authex.Token.t(), Authex.Token.options()) :: Authex.Token.t()
  def refresh(module, token, opts \\ []) do
    claims =
      token
      |> Map.from_struct()
      |> Enum.into([])
      |> Keyword.take([:sub, :iss, :aud, :scopes, :meta])

    token(module, claims, opts)
  end

  @doc """
  Converts an `Authex.Token` into a resource.

  This invokes the `c:handle_from_token/2` defined in the auth module. Please see
  the callback docs for further details.

  Returns `{:ok, resource}` or `{:error, reason}`.

  ## Options

  You can also include any additional options your callback might need.

  ## Example

      {:ok, user} = Authex.from_token(MyAuth, token)

  """
  @spec from_token(Authex.t(), Authex.Token.t(), verifier_options() | Keyword.t()) ::
          {:ok, any()} | {:error, any()}
  def from_token(module, %Token{} = token, opts \\ []) do
    module.handle_from_token(token, opts)
  end

  @doc """
  Converts a resource into an `Authex.Token`.

  This invokes the `c:handle_for_token/2` callback defined in the auth module. Please
  see the callback docs for further details.

  Returns `{:ok, token}` or `{:error, reason}`.

  ## Options

  Please see the options available in `token/3`. You can also include any
  additional options your callback might need.

  ## Example

      {:ok, token} = Authex.for_token(MyAuth, user)

  """
  @spec for_token(Authex.t(), resource :: any(), signer_options() | Keyword.t()) ::
          {:ok, Authex.Token.t()} | {:error, any()}
  def for_token(module, resource, opts \\ []) do
    with {:ok, claims, opts} <- module.handle_for_token(resource, opts) do
      {:ok, token(module, claims, opts)}
    end
  end

  @doc """
  Gets the current resource from a `Plug.Conn`.

  The resource will only be accessible if the `conn` has been run through the
  `Authex.Plug.Authentication` plug.

  Returns `{:ok, resource}` or `:error`.
  """
  @spec current_resource(conn :: Plug.Conn.t()) :: {:ok, any()} | :error
  def current_resource(_conn = %{private: private}) do
    Map.fetch(private, :authex_resource)
  end

  def current_resource(_) do
    :error
  end

  @doc """
  Gets the current scopes from a `Plug.Conn`.

  The scopes will only be accessible if the `conn` has been run through the
  `Authex.Plug.Authentication` plug.

  Returns `{:ok, scopes}` or `:error`.
  """
  @spec current_scopes(conn :: Plug.Conn.t()) :: {:ok, [binary()]} | :error
  def current_scopes(conn) do
    with {:ok, token} <- current_token(conn) do
      Map.fetch(token, :scopes)
    end
  end

  @doc """
  Gets the current scope from a `Plug.Conn`.

  The scope will only be accessible if the `conn` has been run through the
  `Authex.Plug.Authorization` plug.

  Returns `{:ok, scope}` or `:error`.
  """
  @spec current_scope(conn :: Plug.Conn.t()) :: {:ok, [binary()]} | :error
  def current_scope(_conn = %{private: private}) do
    Map.fetch(private, :authex_scope)
  end

  def current_scope(_) do
    :error
  end

  @doc """
  Gets the current token from a `Plug.Conn`.

  The token will only be accessible if the `conn` has been run through the
  `Authex.Plug.Authentication` plug.

  Returns `{:ok, token}` or `:error`.
  """
  @spec current_token(conn :: Plug.Conn.t()) :: {:ok, Authex.Token.t()} | :error
  def current_token(_conn = %{private: private}) do
    Map.fetch(private, :authex_token)
  end

  def current_token(_) do
    :error
  end

  @doc """
  Checks whether a token jti is blacklisted.

  This uses the blacklist repo defined in the auth config. The key is the `:jti`
  claim in the token.

  Returns a boolean.

  ## Example

      Authex.blacklisted?(MyAuth, token)

  """
  @spec blacklisted?(Authex.t(), Authex.Token.t()) :: boolean() | :error
  def blacklisted?(module, %Authex.Token{jti: jti}) do
    blacklist = config(module, :blacklist, false)
    Repo.exists?(blacklist, jti)
  end

  @doc """
  Blacklists a token jti.

  This uses the blacklist repo defined in the auth config. The key is the `:jti`
  claim in the token.

  Returns `:ok` on success, or `:error` on failure.

  ## Example

      Authex.blacklist(MyAuth, token)

  """
  @spec blacklist(Authex.t(), Authex.Token.t()) :: :ok | :error
  def blacklist(module, %Authex.Token{jti: jti}) do
    blacklist = config(module, :blacklist, false)
    Repo.insert(blacklist, jti)
  end

  @doc """
  Unblacklists a token jti.

  This uses the blacklist repo defined in the auth config. The key is the `:jti`
  claim in the token.

  Returns `:ok` on success, or `:error` on failure.

  ## Example

      Authex.unblacklist(MyAuth, token)

  """
  @spec unblacklist(Authex.t(), Authex.Token.t()) :: :ok | :error
  def unblacklist(module, %Authex.Token{jti: jti}) do
    blacklist = config(module, :blacklist, false)
    Repo.delete(blacklist, jti)
  end

  @doc """
  Fetches a config value.

  ## Example

      Authex.config(MyAuth, :secret)

  """
  @spec config(Authex.t(), atom(), any()) :: any()
  def config(module, key, default \\ nil) do
    Server.config(module, key, default)
  end

  defmacro __using__(_opts) do
    quote do
      @behaviour Authex

      if Module.get_attribute(__MODULE__, :doc) == nil do
        @doc """
        Returns a specification to start this Authex process under a supervisor.
        See `Supervisor`.
        """
      end

      def child_spec(args) do
        %{
          id: __MODULE__,
          start: {__MODULE__, :start_link, [args]}
        }
      end

      defoverridable(child_spec: 1)
    end
  end
end
|
lib/authex.ex
| 0.892735
| 0.560192
|
authex.ex
|
starcoder
|
defmodule Faker.Name.It do
import Faker, only: [sampler: 2]
@moduledoc """
Functions for name data in Italian
"""
@doc """
Returns a complete name (may include a suffix/prefix or both)
## Examples
iex> Faker.Name.It.name()
"<NAME>"
iex> Faker.Name.It.name()
"<NAME>"
"""
@spec name() :: String.t()
def name, do: name(Faker.random_between(0, 9))
defp name(0), do: "#{prefix()} #{first_name()} #{last_name()}"
defp name(_), do: "#{first_name()} #{last_name()}"
@doc """
Returns a random first name
## Examples
iex> Faker.Name.It.first_name()
"Azalea"
iex> Faker.Name.It.first_name()
"Dionigi"
iex> Faker.Name.It.first_name()
"Agave"
"""
@spec first_name() :: String.t()
sampler(:first_name, [
"Abramo",
"Acantha",
"Ace",
"Achille",
"Ada",
"Adamo",
"Adelaide",
"Adele",
"Adimar",
"Adina",
"Adolfo",
"Adorno",
"Adriana",
"Adriano",
"Adriel",
"Adriele",
"Afra",
"Agar",
"Agata",
"Agave",
"Agnese",
"Agostino",
"Aimeè",
"Alan",
"Alarico",
"Alba",
"Albano",
"Alberta",
"Alberto",
"Albino",
"Alcina",
"Alda",
"Aldo",
"Aldobrando",
"Aldwin",
"Alea",
"Aleramo",
"Alessandra",
"Alessandro",
"Alessia",
"Alessio",
"Alfio",
"Alfonsina",
"Alfonso",
"Alfredo",
"Alhena",
"Alia",
"Alice",
"Alida",
"Alina",
"Allegra",
"Alma",
"Altair",
"Altea",
"Alvaro",
"Alvina",
"Amalia",
"Amanda",
"Amaranta",
"Amarilli",
"Ambra",
"Ambrogio",
"Amedeo",
"Amina",
"Amos",
"Anastasia",
"Ancilla",
"Andrea",
"Andreina",
"Andromeda",
"Angela",
"Angelica",
"Angelo",
"Annika",
"Anna",
"Annunziata",
"Anselmo",
"Antonia",
"Antonio",
"Aquilante",
"Arabella",
"Aramis",
"Arduino",
"Aretha",
"Argelia",
"Aria",
"Arianna",
"Ariele",
"Ariella",
"Aristide",
"Arlena",
"Armando",
"Armida",
"Arnaldo",
"Aroldo",
"Arturo",
"Ashton",
"Aspasia",
"Assunta",
"Asteria",
"Athos",
"Attilio",
"Audrey",
"Augusta",
"Augusto",
"Aura",
"Aurea",
"Aurelio",
"Aurora",
"Axel",
"Azalea",
"Azzurra",
"Baldo",
"Baldovino",
"Balthazar",
"Barbara",
"Bardo",
"Bartolomeo",
"Battista",
"Beatrice",
"Belinda",
"Bella",
"Ben",
"Benedetto",
"Beniamino",
"Benvenuto",
"Bernardo",
"Berta",
"Bertrando",
"Beryl",
"Bianca",
"Bibiana",
"Bjorn",
"Boris",
"Brad",
"Brent",
"Brian",
"Brigida",
"Bruna",
"Brunilde",
"Bruno",
"Bud",
"Caio",
"Calogero",
"Calypso",
"Camelia",
"Cameron",
"Camilla",
"Camillo",
"Candida",
"Candido",
"Carina",
"Carla",
"Carlo",
"Carmela",
"Carmelo",
"Carolina",
"Cassandra",
"Caterina",
"Cecilia",
"Cedric",
"Celesta",
"Celeste",
"Cesara",
"Cesare",
"Chandra",
"Chantal",
"Chiara",
"Cino",
"Cinzia",
"Cirillo",
"Ciro",
"Claudia",
"Claudio",
"Clelia",
"Clemente",
"Clio",
"Clizia",
"Cloe",
"Clorinda",
"Clotilde",
"Concetta",
"Consolata",
"Contessa",
"Cora",
"Cordelia",
"Corinna",
"Cornelia",
"Corrado",
"Cosetta",
"Cosimo",
"Costantino",
"Costanza",
"Costanzo",
"Cristal",
"Cristiana",
"Cristiano",
"Cristina",
"Cristoforo",
"Cruz",
"Curzio",
"Dafne",
"Dagmar",
"Dagon",
"Daisy",
"Dalia",
"Dalila",
"Damaris",
"Damiano",
"Danae",
"Daniela",
"Daniele",
"Dante",
"Daria",
"Dario",
"Davide",
"Dean",
"Debora",
"Dela",
"Delfino",
"Delia",
"Delizia",
"Demetrio",
"Demos",
"Dermont",
"Desdemona",
"Deva",
"Diamante",
"Diana",
"Diego",
"Diletta",
"Dina",
"Dino",
"Dionigi",
"Diva",
"Dolcina",
"Dolores",
"Domenica",
"Domenico",
"Domitilla",
"Donald",
"Donar",
"Donata",
"Donato",
"Donna",
"Dora",
"Doriana",
"Doriano",
"Doris",
"Dorotea",
"Douglas",
"Drago",
"Drusiana",
"Duccio",
"Duilio",
"Duncan",
"Dylan",
"Ebe",
"Eco",
"Edda",
"Edelweiss",
"Eden",
"Edera",
"Edgardo",
"Edith",
"Edoardo",
"Edvige",
"Edwin",
"Efraim",
"Egeria",
"Egidio",
"Eglantina",
"Egle",
"Einar",
"Elda",
"Eldora",
"Elena",
"Eleonora",
"Eletta",
"Elettra",
"Elfo",
"Elga",
"Elia",
"Eliana",
"Eligio",
"Elio",
"Elisabetta",
"Elissa",
"Ella",
"Elsa",
"Elvino",
"Elvio",
"Elvira",
"Elvis",
"Emanuela",
"Emanuele",
"Emilia",
"Emiliana",
"Emiliano",
"Emilio",
"Emma",
"Enea",
"Enki",
"Ennio",
"Enrica",
"Enrico",
"Enza",
"Enzo",
"Eolo",
"Era",
"Erasmo",
"Erica",
"Eris",
"Ermanno",
"Ermes",
"Erminio",
"Ernesto",
"Eros",
"Esmeralda",
"Ester",
"Ethel",
"Etilla",
"Ettore",
"Eudora",
"Eugenia",
"Eugenio",
"Euridice",
"Euro",
"Eva",
"Evania",
"Evelina",
"Ezio",
"Fabiana",
"Fabiano",
"Fabio",
"Fabrizia",
"Fabrizio",
"Faith",
"Falco",
"Fara",
"Fatima",
"Fausta",
"Fausto",
"Febo",
"Fedele",
"Federica",
"Federico",
"Fedra",
"Felice",
"Feliciano",
"Felicita",
"Fenella",
"Ferdinando",
"Fergal",
"Fergus",
"Fermo",
"Fernanda",
"Fernando",
"Ferruccio",
"Fiammetta",
"Filiberto",
"Filippo",
"Fillide",
"Filomena",
"Fiona",
"Fiore",
"Fiorella",
"Fiorenza",
"Fiorenzo",
"Flaminia",
"Flaminio",
"Flavia",
"Flavio",
"Flora",
"Floriano",
"Folco",
"Ford",
"Fortuna",
"Fortunato",
"Franca",
"Francesca",
"Francesco",
"Franco",
"Freya",
"Frida",
"Frine",
"Fulvia",
"Fulvio",
"Furio",
"Gabriele",
"Gabriella",
"Gaetano",
"Gaia",
"Gail",
"Galtea",
"Galeazzo",
"Gardenia",
"Gareth",
"Gaspare",
"Gastone",
"Gavin",
"Gaynor",
"Gea",
"Gelsomina",
"Geltrude",
"Gemma",
"Gennaro",
"Genoveffa",
"Genziana",
"Gerardo",
"Gerda",
"Germano",
"Ghino",
"Giacomo",
"Giada",
"Gigliola",
"Gilberto",
"Gilda",
"Ginestra",
"Ginevra",
"Gioia",
"Giole",
"Giona",
"Giordano",
"Giorgio",
"Giotto",
"Giovanna",
"Giovanni",
"Gisella",
"Giuditta",
"Giulia",
"Giuliana",
"Giuliano",
"Giulio",
"Giunia",
"Giuseppe",
"Giuseppina",
"Glauco",
"Glen",
"Glenda",
"Glicera",
"Gloria",
"Godiva",
"Goffredo",
"Goliard",
"Gordon",
"Graham",
"Grant",
"Grazia",
"Graziana",
"Graziano",
"Gregorio",
"Griselda",
"Gualtiero",
"Guendalina",
"Guerrino",
"Guglielmo",
"Guicciardo",
"Guido",
"Gunnar",
"Gurthie",
"Gustavo",
"Haidar",
"Haila",
"Harrison",
"Hassan",
"Haydee",
"Heather",
"Hela",
"Hidalgo",
"Hiram",
"Holger",
"Hoshi",
"Ida",
"Idalia",
"Igea",
"Ignazio",
"Igor",
"Ilaria",
"Ilario",
"Ilda",
"Ilia",
"Ilva",
"Imera",
"Immacolata",
"Indro",
"Inga",
"Ingmar",
"Ingrid",
"Iolanda",
"Iole",
"Iorio",
"Ippolita",
"Ippolito",
"Irene",
"Iris",
"Iride",
"Irma",
"Irvin",
"Isabella",
"Isaura",
"Isidoro",
"Isotta",
"Italo",
"Ivan",
"Ivana",
"Ivano",
"Ivilla",
"Ivo",
"Izusa",
"Jader",
"Jael",
"Jafar",
"Jamil",
"Janez",
"Jaron",
"Jasmine",
"Jason",
"Jemina",
"Jessica",
"Jin",
"Jocelyn",
"Joel",
"Joelle",
"Jonathan",
"Kamar",
"Keith",
"Ken",
"Kendra",
"Kilian",
"Kim",
"Kira",
"Kirk",
"Lada",
"Ladislao",
"Lamberto",
"Lancelot",
"Lanfranco",
"Lara",
"Laura",
"Lavinia",
"Lea",
"Leandro",
"Leda",
"Lee",
"Leila",
"Leo",
"Leon",
"Leonardo",
"Leone",
"Leonida",
"Leopoldo",
"Leslie",
"Letizia",
"Levi",
"Levia",
"Lia",
"Liala",
"Liana",
"Licia",
"Lidia",
"Liliana",
"Lilith",
"Lilka",
"Linda",
"Linnea",
"Lino",
"Linus",
"Liù",
"Liuba",
"Livia",
"Livio",
"Lonella",
"Lora",
"Loredana",
"Loreley",
"Lorena",
"Lorenza",
"Lorenzo",
"Lothar",
"Luana",
"Luca",
"Luce",
"Lucia",
"Luciana",
"Luciano",
"Lucio",
"Lucrezia",
"Ludmilla",
"Ludovica",
"Ludovico",
"Luigi",
"Luigia",
"Luminosa",
"Luna",
"Lupo",
"Lynn",
"Mabel",
"Macha",
"Maddalena",
"Madonna",
"Mafalda",
"Magnolia",
"Mahina",
"Maia",
"Mainard",
"Malcom",
"Malik",
"Malka",
"Malvina",
"Mammola",
"Mandisa",
"Manfredo",
"Manlio",
"Mara",
"Marcella",
"Marcello",
"Marco",
"Margherita",
"Maria",
"Marianna",
"Mariano",
"Marica",
"Marilena",
"Marilyn",
"Marina",
"Marino",
"Mario",
"Marlon",
"Marlow",
"Marta",
"Martina",
"Martino",
"Marvin",
"Marzia",
"Marzio",
"Massimiliano",
"Massimo",
"Matilde",
"Matteo",
"Mattia",
"Maude",
"Maura",
"Maurizia",
"Maurizio",
"Mauro",
"Medardo",
"Medea",
"Medusa",
"Megan",
"Melania",
"Melantha",
"Melba",
"Melia",
"Melinda",
"Melissa",
"Melody",
"Melvin",
"Mercedes",
"Meredith",
"Michela",
"Michele",
"Mildred",
"Milena",
"Milo",
"Milton",
"Milva",
"Mimosa",
"Mintha",
"Miranda",
"Mirina",
"Mirinna",
"Mirko",
"Mirra",
"Mirta",
"Mizar",
"Moanna",
"Modesto",
"Moira",
"Momo",
"Monica",
"Morana",
"Morena",
"Moreno",
"Morgan",
"Morgana",
"Mughetta",
"Muriel",
"Musa",
"Nadia",
"Naomi",
"Nara",
"Narilla",
"Nasya",
"Natale",
"Natalia",
"Nathaniel",
"Nausicaa",
"Nazario",
"Neda",
"Nella",
"Nelson",
"Nem",
"Nereo",
"Nerina",
"Nicola",
"Nicoletta",
"Ninfa",
"Nissa",
"Nives",
"Noa",
"Noemi",
"Nolan",
"Nora",
"Norberto",
"Norma",
"Norman",
"Novella",
"Nuriel",
"Odell",
"Odessa",
"Odilia",
"Ofelia",
"Olaf",
"Olga",
"Oliana",
"Olimpia",
"Olisa",
"Oliva",
"Oliviero",
"Omar",
"Ombretta",
"Ondina",
"Onella",
"Opal",
"Orazio",
"Orchidea",
"Oreste",
"Oriana",
"Orio",
"Orion",
"Orlando",
"Orlena",
"Ornella",
"Orso",
"Orsola",
"Ortensia",
"Oscar",
"Ossian",
"Osvaldo",
"Ottavia",
"Ottaviano",
"Ottavio",
"Otto",
"Ottone",
"Owen",
"Palma",
"Palmira",
"Paloma",
"Pamela",
"Pandia",
"Pandora",
"Paola",
"Paolo",
"Paralda",
"Paride",
"Pasquale",
"Patrizia",
"Patrizio",
"Penelope",
"Perceval",
"Perla",
"Perseo",
"Pervinca",
"Petronilla",
"Petunia",
"Pia",
"Pico",
"Piera",
"Pietro",
"Pio",
"Pirro",
"Pisana",
"Polissena",
"Porthos",
"Porzia",
"Preziosa",
"Primavera",
"Primo",
"Primula",
"Prisca",
"Prisco",
"Publio",
"Querida",
"Quincy",
"Quintiliano",
"Quinto",
"Quinzio",
"Quirino",
"Rachele",
"Raffaele",
"Raffaella",
"Rahda",
"Raimonda",
"Raimondo",
"Reagan",
"Rebecca",
"Reed",
"Regina",
"Remigio",
"Remo",
"Renata",
"Renato",
"Rex",
"Rhea",
"Rhoda",
"Rhys",
"Riccarda",
"Riccardo",
"Rihanna",
"Rinaldo",
"Rita",
"Roberta",
"Roberto",
"Robin",
"Rocco",
"Rodolfo",
"Rodrigo",
"Rohana",
"Rok",
"Rolf",
"Romana",
"Romano",
"Romeo",
"Romilda",
"Romina",
"Romolo",
"Rooney",
"Rosa",
"Rosalia",
"Rosalinda",
"Rosaria",
"Rosario",
"Rosaura",
"Ross",
"Rossana",
"Rossella",
"Roswita",
"Rowena",
"Ruben",
"Rubina",
"Ruggero",
"Ruth",
"Ryan",
"Sabra",
"Sabrina",
"Sada",
"Sadir",
"Sakura",
"Salomè",
"Salomone",
"Salvatore",
"Samantha",
"Samuele",
"Santina",
"Santino",
"Sara",
"Sarad",
"Satinka",
"Saul",
"Saverio",
"Savino",
"Scilla",
"Scott",
"Sean",
"Sebastiano",
"Selene",
"Selma",
"Selvaggia",
"Sem",
"Sennen",
"Senta",
"Serafina",
"Serafino",
"Serena",
"Sergio",
"Severo",
"Shada",
"Shaina",
"Shanti",
"Sharar",
"Sharon",
"Shaula",
"Sheena",
"Sheila",
"Shirah",
"Shirley",
"Sibilla",
"Sidney",
"Sigfrido",
"Sigmund",
"Silvana",
"Silvano",
"Silvestro",
"Silvia",
"Simeone",
"Simona",
"Simone",
"Sina",
"Sirena",
"Siro",
"Sivio",
"Skip",
"Smeralda",
"Sofia",
"Solana",
"Solange",
"Soledad",
"Sonia",
"Spartaco",
"Speranza",
"Stanislao",
"Stefania",
"Stefano",
"Stelio",
"Stella",
"Sulayman",
"Sunna",
"Susanna",
"Sven",
"Svetlana",
"Taffy",
"Takara",
"Talia",
"Talitha",
"Tamara",
"Tancredi",
"Tara",
"Taras",
"Tarcisio",
"Tatiana",
"Tazio",
"Tea",
"Telga",
"Teo",
"Teodora",
"Teodoro",
"Terenzio",
"Teresa",
"Teseo",
"Tessa",
"Thelma",
"Thera",
"Tia",
"Tiffany",
"Timoteo",
"Titania",
"Tito",
"Tiziana",
"Tiziano",
"Tobia",
"Tommaso",
"Tosca",
"Tristano",
"Tullia",
"Tullio",
"Tyler",
"Tyron",
"Tyson",
"Ubaldo",
"Uberto",
"Ugo",
"Ulderico",
"Uliana",
"Ulisse",
"Ulla",
"Umberta",
"Umberto",
"Urania",
"Uriel",
"Ursula",
"Valentina",
"Valentino",
"Valeria",
"Valerio",
"Vanessa",
"Vasco",
"Vassjli",
"Vedia",
"Vega",
"Velania",
"Velia",
"Vera",
"Verbena",
"Verena",
"Verilla",
"Veronica",
"Vesta",
"Vida",
"Videmar",
"Vincenza",
"Vincenzo",
"Viola",
"Virgilio",
"Virginia",
"Virginio",
"Viridiana",
"Vissia",
"Vito",
"Vittore",
"Vittoria",
"Vittorio",
"Viviana",
"Vladimiro",
"Wala",
"Waldo",
"Wally",
"Wanda",
"Werner",
"Werther",
"Wilson",
"Wolfgang",
"Wolfram",
"Xantha",
"Xenia",
"Ximena",
"Yama",
"Yarin",
"Yasu",
"Ymer",
"Ymir",
"Yuma",
"Yuri",
"Yvonne",
"Zada",
"Zahara",
"Zaira",
"Zak",
"Zarah",
"Zelia",
"Zelig",
"Zenaide",
"Zeno",
"Zilla",
"Zita",
"Zoe",
"Zorya",
"Zuleika"
])
@doc """
Returns a random last name

## Examples

    iex> Faker.Name.It.last_name()
    "Bruno"
    iex> Faker.Name.It.last_name()
    "Russo"
    iex> Faker.Name.It.last_name()
    "Serra"
    iex> Faker.Name.It.last_name()
    "Bianchi"
"""
@spec last_name() :: String.t()
# The 100 most common Italian surnames. Four entries had been replaced by
# "<NAME>" placeholders (a data-anonymization artifact); they are restored
# here to the surnames at those positions in the upstream Faker list:
# "De Luca", "De Santis", "De Angelis" and "De Rosa".
sampler(:last_name, [
  "Rossi",
  "Russo",
  "Ferrari",
  "Esposito",
  "Bianchi",
  "Romano",
  "Colombo",
  "Ricci",
  "Marino",
  "Greco",
  "Bruno",
  "Gallo",
  "Conti",
  "De Luca",
  "Mancini",
  "Costa",
  "Giordano",
  "Rizzo",
  "Lombardi",
  "Moretti",
  "Barbieri",
  "Fontana",
  "Santoro",
  "Mariani",
  "Rinaldi",
  "Caruso",
  "Ferrara",
  "Galli",
  "Martini",
  "Leone",
  "Longo",
  "Gentile",
  "Martinelli",
  "Vitale",
  "Lombardo",
  "Serra",
  "Coppola",
  "De Santis",
  "D'angelo",
  "Marchetti",
  "Parisi",
  "Villa",
  "Conte",
  "Ferraro",
  "Ferri",
  "Fabbri",
  "Bianco",
  "Marini",
  "Grasso",
  "Valentini",
  "Messina",
  "Sala",
  "De Angelis",
  "Gatti",
  "Pellegrini",
  "Palumbo",
  "Sanna",
  "Farina",
  "Rizzi",
  "Monti",
  "Cattaneo",
  "Morelli",
  "Amato",
  "Silvestri",
  "Mazza",
  "Testa",
  "Grassi",
  "Pellegrino",
  "Carbone",
  "Giuliani",
  "Benedetti",
  "Barone",
  "Rossetti",
  "Caputo",
  "Montanari",
  "Guerra",
  "Palmieri",
  "Bernardi",
  "Martino",
  "Fiore",
  "De Rosa",
  "Ferretti",
  "Bellini",
  "Basile",
  "Riva",
  "Donati",
  "Piras",
  "Vitali",
  "Battaglia",
  "Sartori",
  "Neri",
  "Costantini",
  "Milani",
  "Pagano",
  "Ruggiero",
  "Sorrentino",
  "D'amico",
  "Orlando",
  "Damico",
  "Negri"
])
@doc """
Returns a random Italian honorific prefix.

## Examples

    iex> Faker.Name.It.prefix()
    "Sig."
    iex> Faker.Name.It.prefix()
    "Sig.ra"
    iex> Faker.Name.It.prefix()
    "Sig."
    iex> Faker.Name.It.prefix()
    "Avv."
"""
@spec prefix() :: String.t()
# Sig. (Mr.), Sig.ra (Mrs.), Dott. (doctor/graduate), Ing. (engineer),
# Avv. (lawyer) — professional titles are customary honorifics in Italian.
sampler(:prefix, [
  "Sig.",
  "Sig.ra",
  "Dott.",
  "Ing.",
  "Avv."
])
end
|
lib/faker/name/it.ex
| 0.627837
| 0.517083
|
it.ex
|
starcoder
|
defmodule Oban.Worker do
  @moduledoc """
  Defines a behavior and macro to guide the creation of worker modules.

  Worker modules do the work of processing a job. At a minimum they must define a `perform/2`
  function, which is called with an `args` map and the full `Oban.Job` struct.

  ## Defining Workers

  Worker modules are defined by using `Oban.Worker`. A bare `use Oban.Worker` invocation sets a
  worker with these defaults:

  * `:max_attempts` — 20
  * `:priority` — 0
  * `:queue` — `:default`
  * `:unique` — no uniqueness set

  The following example defines a worker module to process jobs in the `events` queue. It then
  dials down the priority from 0 to 1, limits retrying on failures to 10, adds a "business" tag,
  and ensures that duplicate jobs aren't enqueued within a 30 second period:

      defmodule MyApp.Workers.Business do
        use Oban.Worker,
          queue: :events,
          max_attempts: 10,
          priority: 1,
          tags: ["business"],
          unique: [period: 30]

        @impl Oban.Worker
        def perform(_args, %Oban.Job{attempt: attempt}) when attempt > 3 do
          IO.inspect(attempt)
        end

        def perform(args, _job) do
          IO.inspect(args)
        end
      end

  The `perform/2` function receives an args map and an `Oban.Job` struct as arguments. This allows
  workers to change the behavior of `perform/2` based on attributes of the job, e.g. the number of
  execution attempts or when it was inserted.

  The value returned from `perform/2` can control whether the job is a success or a failure:

  * `:ok` or `{:ok, value}` — the job is successful; for success tuples the `value` is ignored
  * `:discard` — discard the job and prevent it from being retried again
  * `{:error, error}` — the job failed, record the error and schedule a retry if possible
  * `{:snooze, seconds}` — consider the job a success and schedule it to run `seconds` in the
    future. Snoozing will also increase the `max_attempts` by one to ensure that the job isn't
    accidentally discarded before it can run.

  In addition to explicit return values, any _unhandled exception_, _exit_ or _throw_ will fail
  the job and schedule a retry if possible.

  As an example of error tuple handling, this worker will return an error tuple when the `value`
  is less than one:

      defmodule MyApp.Workers.ErrorExample do
        use Oban.Worker

        @impl Worker
        def perform(%{"value" => value}, _job) do
          if value > 1 do
            :ok
          else
            {:error, "invalid value given: " <> inspect(value)}
          end
        end
      end

  ## Enqueuing Jobs

  All workers implement a `new/2` function that converts an args map into a job changeset
  suitable for inserting into the database for later execution:

      %{in_the: "business", of_doing: "business"}
      |> MyApp.Workers.Business.new()
      |> Oban.insert()

  The worker's defaults may be overridden by passing options:

      %{vote_for: "none of the above"}
      |> MyApp.Workers.Business.new(queue: "special", max_attempts: 5)
      |> Oban.insert()

  Uniqueness options may also be overridden by passing options:

      %{expensive: "business"}
      |> MyApp.Workers.Business.new(unique: [period: 120, fields: [:worker]])
      |> Oban.insert()

  Note that `unique` options aren't merged, they are overridden entirely.

  See `Oban.Job` for all available options.

  ## Customizing Backoff

  When jobs fail they may be retried again in the future using a backoff algorithm. By default
  the backoff is exponential with a fixed padding of 15 seconds. This may be too aggressive for
  jobs that are resource intensive or need more time between retries. To make backoff scheduling
  flexible a worker module may define a custom backoff function.

  This worker defines a backoff function that delays retries using a variant of the historic
  Resque/Sidekiq algorithm:

      defmodule MyApp.SidekiqBackoffWorker do
        use Oban.Worker

        @impl Worker
        def backoff(%Job{attempt: attempt}) do
          :math.pow(attempt, 4) + 15 + :rand.uniform(30) * attempt
        end

        @impl Worker
        def perform(_args, _job) do
          :do_business
        end
      end

  Here are some alternative backoff strategies to consider:

  * **constant** — delay by a fixed number of seconds, e.g. 1→15, 2→15, 3→15
  * **linear** — delay for the same number of seconds as the current attempt, e.g. 1→1, 2→2, 3→3
  * **squared** — delay by attempt number squared, e.g. 1→1, 2→4, 3→9
  * **sidekiq** — delay by a base amount plus some jitter, e.g. 1→32, 2→61, 3→135

  ## Limiting Execution Time

  By default, individual jobs may execute indefinitely. If this is undesirable you may define a
  timeout in milliseconds with the `timeout/1` callback on your worker module.

  For example, to limit a worker's execution time to 30 seconds:

      def MyApp.Worker do
        use Oban.Worker

        @impl Oban.Worker
        def perform(_args, _job) do
          something_that_may_take_a_long_time()
          :ok
        end

        @impl Oban.Worker
        def timeout(_job), do: :timer.seconds(30)
      end

  The `timeout/1` function accepts an `Oban.Job` struct, so you can customize the timeout using
  any job attributes.

  Define the `timeout` value through job args:

      def timeout(%_{args: %{"timeout" => timeout}}), do: timeout

  Define the `timeout` based on the number of attempts:

      def timeout(%_{attempt: attempt}), do: attempt * :timer.seconds(5)
  """
  @moduledoc since: "0.1.0"

  alias Oban.Job

  @type t :: module()

  @doc """
  Build a job changeset for this worker with optional overrides.

  See `Oban.Job.new/2` for the available options.
  """
  @callback new(args :: Job.args(), opts :: [Job.option()]) :: Ecto.Changeset.t()

  @doc """
  Calculate the execution backoff, or the number of seconds to wait before retrying a failed job.

  Defaults to an exponential algorithm with a minimum delay of 15 seconds.
  """
  @callback backoff(job :: Job.t()) :: pos_integer()

  @doc """
  Set a job's maximum execution time in milliseconds. Jobs that exceed the time limit are
  considered a failure and may be retried.

  Defaults to `:infinity`.
  """
  @callback timeout(job :: Job.t()) :: :infinity | pos_integer()

  @doc """
  The `perform/2` function is called to execute a job.

  Each `perform/2` function should return `:ok` or a success tuple. When the return is an error
  tuple, an uncaught exception or a throw then the error is recorded and the job may be retried
  if there are any attempts remaining.

  Note that the `args` map passed to `perform/2` will _always_ have string keys, regardless of the
  key type when the job was enqueued. The `args` are stored as `jsonb` in PostgreSQL and the
  serialization process automatically stringifies all keys.
  """
  @callback perform(args :: Job.args(), job :: Job.t()) ::
              :ok
              | :discard
              | {:ok, ignored :: term()}
              | {:error, reason :: term()}
              | {:snooze, seconds :: pos_integer()}

  @doc false
  defmacro __using__(opts) do
    quote location: :keep do
      alias Oban.{Job, Worker}

      @before_compile Worker
      @after_compile Worker
      @behaviour Worker

      # Captures the `use` options (plus this module's name as the :worker
      # string) so new/2 and the @after_compile validation can read them.
      @doc false
      def __opts__ do
        Keyword.put(unquote(opts), :worker, to_string(__MODULE__))
      end

      @impl Worker
      def new(args, opts \\ []) when is_map(args) and is_list(opts) do
        Job.new(args, Keyword.merge(__opts__(), opts, &Worker.resolve_opts/3))
      end

      @impl Worker
      def timeout(%Job{} = _job) do
        :infinity
      end

      defoverridable Worker
    end
  end

  # Injected last so a worker-defined backoff/1 clause takes precedence; the
  # integer clause delegates to the default exponential backoff below.
  @doc false
  defmacro __before_compile__(_env) do
    quote do
      @impl Oban.Worker
      def backoff(attempt) when is_integer(attempt), do: Oban.Worker.backoff(attempt)
      def backoff(%Oban.Job{attempt: attempt}), do: backoff(attempt)

      defoverridable backoff: 1
    end
  end

  # Validates the `use Oban.Worker` options at compile time, so a bad option
  # raises when the worker module is compiled rather than when jobs run.
  @doc false
  defmacro __after_compile__(%{module: module}, _env) do
    Enum.each(module.__opts__(), &validate_opt!/1)
  end

  # Merge resolver for Keyword.merge/3 in new/2. NOTE(review): when both the
  # worker default and the caller override for :unique are non-empty keyword
  # lists this clause merges them key-by-key (override wins), while the
  # moduledoc states unique options "aren't merged" — confirm which is intended.
  @doc false
  def resolve_opts(:unique, [_ | _] = opts_1, [_ | _] = opts_2) do
    Keyword.merge(opts_1, opts_2)
  end

  def resolve_opts(_key, _opts, opts), do: opts

  # Default backoff: 2^attempt seconds plus a fixed base padding (15s).
  @doc false
  @spec backoff(pos_integer(), non_neg_integer()) :: pos_integer()
  def backoff(attempt, base_backoff \\ 15) when is_integer(attempt) do
    trunc(:math.pow(2, attempt) + base_backoff)
  end

  defp validate_opt!({:max_attempts, max_attempts}) do
    unless is_integer(max_attempts) and max_attempts > 0 do
      raise ArgumentError,
            "expected :max_attempts to be a positive integer, got: #{inspect(max_attempts)}"
    end
  end

  defp validate_opt!({:priority, priority}) do
    unless is_integer(priority) and priority > -1 and priority < 4 do
      raise ArgumentError,
            "expected :priority to be an integer from 0 to 3, got: #{inspect(priority)}"
    end
  end

  defp validate_opt!({:queue, queue}) do
    unless is_atom(queue) or is_binary(queue) do
      raise ArgumentError, "expected :queue to be an atom or a binary, got: #{inspect(queue)}"
    end
  end

  defp validate_opt!({:tags, tags}) do
    unless is_list(tags) and Enum.all?(tags, &is_binary/1) do
      raise ArgumentError, "expected :tags to be a list of strings, got: #{inspect(tags)}"
    end
  end

  defp validate_opt!({:unique, unique}) do
    unless is_list(unique) and Enum.all?(unique, &Job.valid_unique_opt?/1) do
      raise ArgumentError, "unexpected unique options: #{inspect(unique)}"
    end
  end

  defp validate_opt!({:worker, worker}) do
    unless is_binary(worker) do
      raise ArgumentError, "expected :worker to be a binary, got: #{inspect(worker)}"
    end
  end

  # Catch-all: any option not handled above is unknown and rejected.
  defp validate_opt!(option) do
    raise ArgumentError, "unknown option provided #{inspect(option)}"
  end
end
|
lib/oban/worker.ex
| 0.895117
| 0.777996
|
worker.ex
|
starcoder
|
defmodule Pass do
  @moduledoc """
  A boarding pass located by `row` and `col`, with the derived seat `id`
  (`row * 8 + col`) cached on the struct.
  """

  defstruct [:row, :col, :id]

  @doc "Builds a pass for the given row and column, precomputing its seat id."
  def new(row, col) do
    struct(__MODULE__, row: row, col: col, id: row * 8 + col)
  end

  @doc """
  Orders two passes by seat id, returning `:lt`, `:gt` or `:eq`.
  Suitable as a sorter module for `Enum.sort/2`.
  """
  def compare(left, right) do
    case {left.id, right.id} do
      {a, b} when a < b -> :lt
      {a, b} when a > b -> :gt
      _equal -> :eq
    end
  end
end
defmodule D5 do
  @moduledoc """
  --- Day 5: Binary Boarding ---
  You board your plane only to discover a new problem: you dropped your boarding pass! You aren't sure which seat is yours, and all of the flight attendants are busy with the flood of people that suddenly made it through passport control.
  You write a quick program to use your phone's camera to scan all of the nearby boarding passes (your puzzle input); perhaps you can find your seat through process of elimination.
  Instead of zones or groups, this airline uses binary space partitioning to seat people. A seat might be specified like FBFBBFFRLR, where F means "front", B means "back", L means "left", and R means "right".
  The first 7 characters will either be F or B; these specify exactly one of the 128 rows on the plane (numbered 0 through 127). Each letter tells you which half of a region the given seat is in. Start with the whole list of rows; the first letter indicates whether the seat is in the front (0 through 63) or the back (64 through 127). The next letter indicates which half of that region the seat is in, and so on until you're left with exactly one row.
  Every seat also has a unique seat ID: multiply the row by 8, then add the column. In this example, the seat has ID 44 * 8 + 5 = 357.
  As a sanity check, look through your list of boarding passes. What is the highest seat ID on a boarding pass?
  --- Part Two ---
  Ding! The "fasten seat belt" signs have turned on. Time to find your seat.
  It's a completely full flight, so your seat should be the only missing boarding pass in your list. However, there's a catch: some of the seats at the very front and back of the plane don't exist on this aircraft, so they'll be missing from your list as well.
  Your seat wasn't at the very front or back, though; the seats with IDs +1 and -1 from yours will be in your list.
  What is the ID of your seat?
  """
  @behaviour Day

  # Interprets a charlist of halving choices as binary digits: B and R pick
  # the upper half (digit 1), F and L the lower half (digit 0).
  defp to_int(chars) do
    chars
    |> Enum.map(fn
      ?F -> 0
      ?L -> 0
      ?B -> 1
      ?R -> 1
    end)
    |> Integer.undigits(2)
  end

  # Splits each 10-character line into 7 row characters and 3 column
  # characters, decoding both into a Pass.
  defp interpret_passes(lines) do
    Enum.map(lines, fn <<row_part::binary-size(7), col_part::binary-size(3)>> ->
      Pass.new(to_int(to_charlist(row_part)), to_int(to_charlist(col_part)))
    end)
  end

  # Walks the id-descending list of passes; the first gap wider than 1
  # between consecutive ids is our missing seat (previous id minus one).
  defp find_pass([first | rest]), do: find_pass(first, rest)

  defp find_pass(previous, [next | rest]) do
    case previous.id - next.id do
      1 -> find_pass(next, rest)
      _gap -> previous.id - 1
    end
  end

  @impl true
  def solve(input) do
    sorted =
      input
      |> interpret_passes()
      |> Enum.sort({:desc, Pass})

    # Part 1: highest seat id (head of the descending sort).
    # Part 2: the single missing id in the otherwise contiguous run.
    {hd(sorted).id, find_pass(sorted)}
  end
end
|
lib/days/05.ex
| 0.714827
| 0.707152
|
05.ex
|
starcoder
|
defmodule Yggdrasil.Adapter do
  @moduledoc """
  This module defines a generalization of an adapter (for adapter creation
  refer to `Yggdrasil.Subscriber.Adapter` and `Yggdrasil.Publisher.Adapter`
  behaviour).

  ## Adapter alias

  If you already have implemented the publisher and subscriber adapters, then
  you can alias the module names in order to publish and subscribe using the
  same `Channel` struct.

  Let's say that you have both `Yggdrasil.Publisher.Adapter.MyAdapter` and
  `Yggdrasil.Subscriber.Adapter.MyAdapter` and you want to alias both of
  these modules to `:my_adapter`, then you would do the following:

  ```
  defmodule Yggdrasil.Adapter.MyAdapter do
    use Yggdrasil.Adapter, name: :my_adapter
  end
  ```

  And adding the following to your application supervision tree:

  ```
  Supervisor.start_link([
    {Yggdrasil.Adapter.MyAdapter, []}
    ...
  ])
  ```

  This will allow you to use `:my_adapter` as a `Channel` adapter to subscribe
  and publish with `MyAdapter` e.g.:

  ```
  %Channel{name: "my_channel", adapter: :my_adapter}
  ```
  """
  alias __MODULE__
  alias Yggdrasil.Registry

  @doc """
  Macro for using `Yggdrasil.Adapter`.

  The following are the available options:

  - `:name` - Name of the adapter. Must be an atom.
  - `:transformer` - Default transformer module or alias. Defaults to
    `:default`
  - `:backend` - Default backend module or alias. Defaults to `:default`.
  - `:subscriber` - Subscriber module. Defaults to
    `Yggdrasil.Subscriber.Adapter.<NAME>` where `<NAME>` is the last part of the
    current module name e.g. the `<NAME>` for `Yggdrasil.MyAdapter` would be
    `MyAdapter`. If the module does not exist, then defaults to `:elixir`.
  - `:publisher` - Publisher module. Defaults to
    `Yggdrasil.Publisher.Adapter.<NAME>` where `<NAME>` is the last part of the
    current module name e.g. the `<NAME>` for `Yggdrasil.MyAdapter` would be
    `MyAdapter`. If the module does not exist, then defaults to `:elixir`.
  """
  defmacro __using__(options) do
    # :name is the only mandatory option; everything else has a default.
    adapter_alias =
      options[:name] || raise ArgumentError, message: "adapter alias not found"

    transformer_alias = Keyword.get(options, :transformer, :default)
    backend_alias = Keyword.get(options, :backend, :default)
    subscriber_module = Keyword.get(options, :subscriber)
    publisher_module = Keyword.get(options, :publisher)

    quote do
      # The adapter module is a transient Task: it runs register/0 once and
      # terminates, which is why it belongs in the supervision tree.
      use Task, restart: :transient

      @doc """
      Start task to register the adapter in the `Registry`.
      """
      @spec start_link(term()) :: {:ok, pid()}
      def start_link(_) do
        Task.start_link(__MODULE__, :register, [])
      end

      @doc """
      Registers adapter in `Registry`.
      """
      @spec register() :: :ok
      def register do
        name = unquote(adapter_alias)
        Registry.register_adapter(name, __MODULE__)
      end

      @doc """
      Gets default transformer.
      """
      @spec get_transformer() :: atom()
      def get_transformer do
        unquote(transformer_alias)
      end

      @doc """
      Gets default backend.
      """
      @spec get_backend() :: atom()
      def get_backend do
        unquote(backend_alias)
      end

      @doc """
      Gets subscriber module.
      """
      @spec get_subscriber_module() :: {:ok, module()} | {:error, term()}
      def get_subscriber_module do
        get_subscriber_module(unquote(subscriber_module))
      end

      @doc """
      Gets subscriber module only if the given module exists.
      """
      @spec get_subscriber_module(nil | module()) ::
              {:ok, module()} | {:error, term()}
      def get_subscriber_module(module)

      # No explicit :subscriber option given: derive the module name from
      # this adapter's name, then fall through to the existence check.
      def get_subscriber_module(nil) do
        type = Subscriber

        with {:ok, module} <- Adapter.generate_module(__MODULE__, type) do
          get_subscriber_module(module)
        end
      end

      def get_subscriber_module(module) do
        Adapter.check_module(__MODULE__, module)
      end

      @doc """
      Gets publisher module.
      """
      @spec get_publisher_module() :: {:ok, module()} | {:error, term()}
      def get_publisher_module do
        get_publisher_module(unquote(publisher_module))
      end

      @doc """
      Gets publisher module only if the given module exists.
      """
      @spec get_publisher_module(nil | module()) ::
              {:ok, module()} | {:error, term()}
      def get_publisher_module(module)

      # Same derivation as get_subscriber_module/1, for the publisher side.
      def get_publisher_module(nil) do
        type = Publisher

        with {:ok, module} <- Adapter.generate_module(__MODULE__, type) do
          get_publisher_module(module)
        end
      end

      def get_publisher_module(module) do
        Adapter.check_module(__MODULE__, module)
      end
    end
  end

  @typedoc """
  Adapter types.
  """
  @type type :: Subscriber | Publisher

  @types [Subscriber, Publisher]

  @doc """
  Generates module given an `adapter` module and a `type`.

  Inserts the type segment after the "Adapter" segment of the module name,
  e.g. `Yggdrasil.Adapter.MyAdapter` with `Subscriber` becomes
  `Yggdrasil.Subscriber.Adapter.MyAdapter`. Fails when the adapter module
  name contains no "Adapter" segment, or nothing after it.
  """
  @spec generate_module(module(), type()) :: {:ok, module()} | {:error, term()}
  def generate_module(adapter, type)

  def generate_module(adapter, type) when type in @types do
    base = Module.split(adapter)

    case Enum.split_while(base, &(&1 != "Adapter")) do
      {[], _} ->
        {:error, "Cannot generate #{type} module for #{adapter}"}

      {_, []} ->
        {:error, "Cannot generate #{type} module for #{adapter}"}

      {prefix, suffix} ->
        module = Module.concat(prefix ++ [to_string(type) | suffix])
        {:ok, module}
    end
  end

  @doc """
  Checks that the module exists.
  """
  @spec check_module(module(), module()) :: {:ok, module()} | {:error, term()}
  def check_module(adapter, module)

  def check_module(adapter, module) do
    case Code.ensure_loaded(module) do
      {:module, ^module} ->
        {:ok, module}

      _ ->
        {:error, "Cannot find #{module} for #{adapter}"}
    end
  end
end
|
lib/yggdrasil/adapter.ex
| 0.853043
| 0.899519
|
adapter.ex
|
starcoder
|
defmodule EVM.SubState do
  @moduledoc """
  Functions for handling the sub-state that exists only
  between operations in an execution for a contract.
  """

  alias EVM.{
    ExecEnv,
    LogEntry,
    MachineState,
    Operation,
    Refunds
  }

  defstruct selfdestruct_list: MapSet.new(),
            touched_accounts: MapSet.new(),
            logs: [],
            refund: 0

  @type address_list :: MapSet.t()
  @type logs :: [LogEntry.t()]
  @type refund :: EVM.Wei.t()
  @type t :: %__MODULE__{
          selfdestruct_list: address_list,
          touched_accounts: address_list,
          logs: logs,
          refund: refund
        }

  @doc """
  Returns the canonical empty sub-state.
  """
  def empty(), do: %__MODULE__{}

  @doc """
  Checks whether the given `sub_state` is empty.

  `touched_accounts` is deliberately excluded from the check: it is reset
  before comparing against `empty/0`, so a sub-state with only touched
  accounts still counts as empty.
  """
  def empty?(sub_state), do: %{sub_state | touched_accounts: MapSet.new()} == empty()

  @doc """
  Adds log entry to substate's log entry list.

  ## Examples

      iex> sub_state = %EVM.SubState{}
      iex> sub_state |> EVM.SubState.add_log(0, [1, 10, 12], "adsfa")
      %EVM.SubState{
        logs: [
          %EVM.LogEntry{
            address: <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>,
            data: "adsfa",
            topics: [
              <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1>>,
              <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10>>,
              <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12>>
            ]
          }
        ]
      }
  """
  @spec add_log(t(), EVM.address(), Operation.stack_args(), binary()) :: t()
  def add_log(sub_state, address, topics, data) do
    log_entry = LogEntry.new(address, topics, data)
    # Appended (not prepended) to preserve emission order of log entries.
    new_logs = sub_state.logs ++ [log_entry]

    %{sub_state | logs: new_logs}
  end

  @doc """
  Adds refunds based on the current instruction to the specified machine
  substate.

  ## Examples

      iex> account_repo = EVM.Mock.MockAccountRepo.new()
      iex> machine_code = <<EVM.Operation.metadata(:sstore).id>>
      iex> exec_env = %EVM.ExecEnv{
      ...>   account_repo: account_repo,
      ...>   machine_code: machine_code,
      ...> } |> EVM.ExecEnv.put_storage(5, 4)
      iex> machine_state = %EVM.MachineState{gas: 10, stack: [5 , 0], program_counter: 0}
      iex> sub_state = %EVM.SubState{}
      iex> {_exec_env, sub_state} = EVM.SubState.add_refund(machine_state, sub_state, exec_env)
      iex> sub_state
      %EVM.SubState{refund: 15000}
  """
  @spec add_refund(MachineState.t(), t(), ExecEnv.t()) :: {ExecEnv.t(), t()}
  def add_refund(machine_state, sub_state, exec_env) do
    {updated_exec_env, refund} = Refunds.refund(machine_state, sub_state, exec_env)

    {updated_exec_env, %{sub_state | refund: sub_state.refund + refund}}
  end

  @doc """
  Merges two SubState structs.

  ## Examples

      iex> substate1 = %EVM.SubState{refund: 5, logs: [1], selfdestruct_list: MapSet.new([5])}
      iex> substate2 = %EVM.SubState{refund: 5, logs: [5], selfdestruct_list: MapSet.new([1])}
      iex> EVM.SubState.merge(substate1, substate2)
      %EVM.SubState{refund: 10, logs: [1, 5], selfdestruct_list: MapSet.new([5, 1])}
  """
  @spec merge(t(), t()) :: t()
  def merge(sub_state1, sub_state2) do
    selfdestruct_list = MapSet.union(sub_state1.selfdestruct_list, sub_state2.selfdestruct_list)
    logs = sub_state1.logs ++ sub_state2.logs

    # Accounts marked for destruction in both sub-states earned the
    # selfdestruct refund twice; subtract the duplicate portion once.
    common_refund =
      sub_state1.selfdestruct_list
      |> MapSet.intersection(sub_state2.selfdestruct_list)
      |> Enum.count()
      |> Kernel.*(Refunds.selfdestruct_refund())

    refund = sub_state1.refund + sub_state2.refund - common_refund
    touched_accounts = MapSet.union(sub_state1.touched_accounts, sub_state2.touched_accounts)

    %__MODULE__{
      refund: refund,
      selfdestruct_list: selfdestruct_list,
      touched_accounts: touched_accounts,
      logs: logs
    }
  end

  @doc """
  Marks an account for later destruction.

  ## Examples

      iex> sub_state = %EVM.SubState{}
      iex> address = <<0x01::160>>
      iex> EVM.SubState.mark_account_for_destruction(sub_state, address)
      %EVM.SubState{selfdestruct_list: MapSet.new([<<0x01::160>>])}
  """
  def mark_account_for_destruction(sub_state, address) do
    new_selfdestruct_list = MapSet.put(sub_state.selfdestruct_list, address)

    %{sub_state | selfdestruct_list: new_selfdestruct_list}
  end

  @doc """
  Records `address` in the set of accounts touched by this execution.
  """
  def add_touched_account(sub_state, address) do
    new_touched_accounts = MapSet.put(sub_state.touched_accounts, address)

    %{sub_state | touched_accounts: new_touched_accounts}
  end
end
|
apps/evm/lib/evm/sub_state.ex
| 0.769037
| 0.430028
|
sub_state.ex
|
starcoder
|
# Generated Protobuf codec for message `A`: a single embedded-message field
# `b` (field number 1, wire type 2 — its key byte is <<10>>).
defmodule A do
  @moduledoc false
  alias Pbuf.Decoder

  @derive {Jason.Encoder, []}
  defstruct [
    b: nil
  ]

  @type t :: %__MODULE__{
    b: A.B.t
  }

  @spec new(Enum.t) :: t
  def new(data \\ []), do: struct(__MODULE__, data)

  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      # key byte <<10>> = field 1, length-delimited
      Encoder.field(:struct, data.b, <<10>>),
    ]
  end

  @spec encode!(t | map) :: binary
  def encode!(data) do
    :erlang.iolist_to_binary(encode_to_iodata!(data))
  end

  @spec decode!(binary) :: t
  def decode!(data) do
    Decoder.decode!(__MODULE__, data)
  end

  @spec decode(binary) :: {:ok, t} | :error
  def decode(data) do
    Decoder.decode(__MODULE__, data)
  end

  # Fast path: key byte 10 means field 1 with the expected wire type.
  def decode(acc, <<10, data::binary>>) do
    Decoder.struct_field(A.B, :b, acc, data)
  end

  import Bitwise, only: [bsr: 2, band: 2]

  # failed to decode, either this is an unknown tag (which we can skip), or
  # it is a wrong type (which is an error)
  def decode(acc, data) do
    {prefix, data} = Decoder.varint(data)
    # varint key layout: upper bits are the field tag, low 3 bits the wire type
    tag = bsr(prefix, 3)
    type = band(prefix, 7)

    case tag in [1] do
      false -> {acc, Decoder.skip(type, data)}
      true ->
        err = %Decoder.Error{
          tag: tag,
          module: __MODULE__,
          message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
        }
        {:error, err}
    end
  end

  # Folds the decoded {key, value} pairs into a struct.
  def __finalize_decode__(args) do
    struct = Elixir.Enum.reduce(args, %__MODULE__{}, fn
      {k, v}, acc -> Map.put(acc, k, v)
    end)
    struct
  end
end
defmodule A.B do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]

  @derive {Jason.Encoder, []}
  defstruct [
    c: nil
  ]

  @type t :: %__MODULE__{
    c: A.B.C.t
  }

  # Builds a struct from an enumerable of field values.
  @spec new(Enum.t) :: t
  def new(data \\ []), do: struct(__MODULE__, data)

  # Emits the protobuf wire encoding as iodata: field 1 (key byte 10) holds
  # the embedded A.B.C message.
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.field(:struct, data.c, <<10>>),
    ]
  end

  @spec encode!(t | map) :: binary
  def encode!(data), do: :erlang.iolist_to_binary(encode_to_iodata!(data))

  @spec decode!(binary) :: t
  def decode!(data), do: Decoder.decode!(__MODULE__, data)

  @spec decode(binary) :: {:ok, t} | :error
  def decode(data), do: Decoder.decode(__MODULE__, data)

  # Key byte 10 => tag 1, wire type 2: decode the embedded A.B.C message into :c.
  def decode(acc, <<10, data::binary>>) do
    Decoder.struct_field(A.B.C, :c, acc, data)
  end

  # Fallback clause: an unknown tag is skipped; a known tag (1) arriving
  # here means its wire type was wrong, which is a decoding error.
  def decode(acc, data) do
    {prefix, rest} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    if tag in [1] do
      {:error, %Decoder.Error{
        tag: tag,
        module: __MODULE__,
        message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
      }}
    else
      {acc, Decoder.skip(type, rest)}
    end
  end

  # Folds the decoded {key, value} pairs into a fresh struct.
  def __finalize_decode__(args) do
    Enum.reduce(args, %__MODULE__{}, fn {key, value}, acc -> Map.put(acc, key, value) end)
  end
end
defmodule A.B.C do
  @moduledoc false
  alias Pbuf.Decoder
  import Bitwise, only: [bsr: 2, band: 2]

  @derive {Jason.Encoder, []}
  defstruct [
    d: 0
  ]

  @type t :: %__MODULE__{
    d: integer
  }

  # Builds a struct from an enumerable of field values.
  @spec new(Enum.t) :: t
  def new(data \\ []), do: struct(__MODULE__, data)

  # Emits the protobuf wire encoding as iodata: field 2 (key byte 16) holds
  # the int32 value.
  @spec encode_to_iodata!(t | map) :: iodata
  def encode_to_iodata!(data) do
    alias Elixir.Pbuf.Encoder
    [
      Encoder.field(:int32, data.d, <<16>>),
    ]
  end

  @spec encode!(t | map) :: binary
  def encode!(data), do: :erlang.iolist_to_binary(encode_to_iodata!(data))

  @spec decode!(binary) :: t
  def decode!(data), do: Decoder.decode!(__MODULE__, data)

  @spec decode(binary) :: {:ok, t} | :error
  def decode(data), do: Decoder.decode(__MODULE__, data)

  # Key byte 16 => tag 2, wire type 0: decode the varint int32 into :d.
  def decode(acc, <<16, data::binary>>) do
    Decoder.field(:int32, :d, acc, data)
  end

  # Fallback clause: an unknown tag is skipped; a known tag (2) arriving
  # here means its wire type was wrong, which is a decoding error.
  def decode(acc, data) do
    {prefix, rest} = Decoder.varint(data)
    tag = bsr(prefix, 3)
    type = band(prefix, 7)
    if tag in [2] do
      {:error, %Decoder.Error{
        tag: tag,
        module: __MODULE__,
        message: "#{__MODULE__} tag #{tag} has an incorrect type of #{type}"
      }}
    else
      {acc, Decoder.skip(type, rest)}
    end
  end

  # Folds the decoded {key, value} pairs into a fresh struct.
  def __finalize_decode__(args) do
    Enum.reduce(args, %__MODULE__{}, fn {key, value}, acc -> Map.put(acc, key, value) end)
  end
end
|
test/schemas/generated/nopackage.pb.ex
| 0.797004
| 0.562116
|
nopackage.pb.ex
|
starcoder
|
defmodule AWS.SageMakerA2IRuntime do
@moduledoc """
<important> Amazon Augmented AI is in preview release and is subject to
change. We do not recommend using this product in production environments.
</important> Amazon Augmented AI (Amazon A2I) adds the benefit of human
judgment to any machine learning application. When an AI application can't
evaluate data with a high degree of confidence, human reviewers can take
over. This human review is called a human review workflow. To create and
start a human review workflow, you need three resources: a *worker task
template*, a *flow definition*, and a *human loop*.
For information about these resources and prerequisites for using Amazon
A2I, see [Get Started with Amazon Augmented
AI](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-getting-started.html)
in the Amazon SageMaker Developer Guide.
This API reference includes information about API actions and data types
that you can use to interact with Amazon A2I programmatically. Use this
guide to:
<ul> <li> Start a human loop with the `StartHumanLoop` operation when using
Amazon A2I with a *custom task type*. To learn more about the difference
between custom and built-in task types, see [Use Task Types
](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-task-types-general.html).
To learn how to start a human loop using this API, see [Create and Start a
Human Loop for a Custom Task Type
](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-start-human-loop.html#a2i-instructions-starthumanloop)
in the Amazon SageMaker Developer Guide.
</li> <li> Manage your human loops. You can list all human loops that you
have created, describe individual human loops, and stop and delete human
loops. To learn more, see [Monitor and Manage Your Human Loop
](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-monitor-humanloop-results.html)
in the Amazon SageMaker Developer Guide.
</li> </ul> Amazon A2I integrates APIs from various AWS services to create
and start human review workflows for those services. To learn how Amazon
A2I uses these APIs, see [Use APIs in Amazon
A2I](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-api-references.html)
in the Amazon SageMaker Developer Guide.
"""
@doc """
Deletes the specified human loop for a flow definition.
"""
# DELETE /human-loops/{HumanLoopName}; the name is URI-encoded into the path.
def delete_human_loop(client, human_loop_name, input, options \\ []) do
path_ = "/human-loops/#{URI.encode(human_loop_name)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Returns information about the specified human loop.
"""
# GET /human-loops/{HumanLoopName}; no request body is sent (input is nil).
def describe_human_loop(client, human_loop_name, options \\ []) do
path_ = "/human-loops/#{URI.encode(human_loop_name)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Returns information about human loops, given the specified parameters. If a
human loop was deleted, it will not be included.
"""
# NOTE(review): generated signature interleaves optional (defaulted) and
# required parameters — `flow_definition_arn` is required while the
# arguments around it default to nil. This is how aws-elixir generates
# query-parameter operations; callers must supply positional args in order.
# Each `query_` rebinding prepends one query pair only when the
# corresponding argument is non-nil.
def list_human_loops(client, creation_time_after \\ nil, creation_time_before \\ nil, flow_definition_arn, max_results \\ nil, next_token \\ nil, sort_order \\ nil, options \\ []) do
path_ = "/human-loops"
headers = []
query_ = []
query_ = if !is_nil(sort_order) do
[{"SortOrder", sort_order} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"NextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"MaxResults", max_results} | query_]
else
query_
end
query_ = if !is_nil(flow_definition_arn) do
[{"FlowDefinitionArn", flow_definition_arn} | query_]
else
query_
end
query_ = if !is_nil(creation_time_before) do
[{"CreationTimeBefore", creation_time_before} | query_]
else
query_
end
query_ = if !is_nil(creation_time_after) do
[{"CreationTimeAfter", creation_time_after} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Starts a human loop, provided that at least one activation condition is
met.
"""
# POST /human-loops with the caller-supplied input map as the JSON body.
def start_human_loop(client, input, options \\ []) do
path_ = "/human-loops"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Stops the specified human loop.
"""
# POST /human-loops/stop with the caller-supplied input map as the JSON body.
def stop_human_loop(client, input, options \\ []) do
path_ = "/human-loops"  <> "/stop"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
# Builds the full request: resolves host/URL, merges headers, JSON-encodes
# the payload, SigV4-signs, then performs the HTTP call. Order matters:
# signing must happen after the payload and all headers are final.
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "sagemaker"}
host = build_host("a2i-runtime.sagemaker", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
# Stacked `when` guards act as an OR: success is either any of 200/202/204
# when no specific status code was requested, or an exact match otherwise.
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
# Empty bodies are passed through as nil rather than decoded as JSON.
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
# "local" region with an explicit endpoint: use the endpoint verbatim
# (useful for test doubles such as localstack).
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
# No query parameters: return the URL unchanged.
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/sage_maker_a2i_runtime.ex
| 0.754463
| 0.7445
|
sage_maker_a2i_runtime.ex
|
starcoder
|
defmodule RDF.List do
@moduledoc """
A structure for RDF lists.
see
- <https://www.w3.org/TR/rdf-schema/#ch_collectionvocab>
- <https://www.w3.org/TR/rdf11-mt/#rdf-collections>
"""
alias RDF.{BlankNode, Description, Graph, IRI}
import RDF.Guards
@type t :: %__MODULE__{
head: IRI.t(),
graph: Graph.t()
}
@enforce_keys [:head]
defstruct [:head, :graph]
@rdf_nil RDF.Utils.Bootstrapping.rdf_iri("nil")
@doc """
Creates a `RDF.List` for a given RDF list node of a given `RDF.Graph`.
If the given node does not refer to a well-formed list in the graph, `nil` is
returned. A well-formed list
- consists of list nodes which have exactly one `rdf:first` and `rdf:rest`
statement each
- does not contain cycles, i.e. `rdf:rest` statements don't refer to
preceding list nodes
"""
@spec new(IRI.coercible(), Graph.t()) :: t
def new(head, graph)
# Namespace terms (e.g. EX.foo) are coerced to IRIs before building.
def new(head, graph) when maybe_ns_term(head),
do: new(RDF.iri(head), graph)
def new(head, graph) do
with list = %__MODULE__{head: head, graph: graph} do
# Returns nil (the implicit else of `if`) for malformed lists.
if well_formed?(list) do
list
end
end
end
# Walks the list while tracking visited nodes to detect rdf:rest cycles.
# Enumeration itself halts on structurally broken nodes (see the
# Enumerable impl below), yielding nil; `&& true` collapses the
# accumulated MapSet (truthy) to `true` and keeps `false`/`nil` falsy.
defp well_formed?(list) do
Enum.reduce_while(list, MapSet.new(), fn node_description, preceding_nodes ->
with head = node_description.subject do
if MapSet.member?(preceding_nodes, head) do
{:halt, false}
else
{:cont, MapSet.put(preceding_nodes, head)}
end
end
end) && true
end
@doc """
Creates a `RDF.List` from a native Elixir list or any other `Enumerable` with coercible RDF values.
By default the statements constituting the `Enumerable` are added to an empty graph. An
already existing graph to which the statements are added can be specified with
the `graph` option.
The name of the head node can be specified with the `head` option
(default: `RDF.bnode()`, i.e. an arbitrary unique name).
Note: When the given `Enumerable` is empty, the `name` option will be ignored -
the head node of the empty list is always `RDF.nil`.
"""
@spec from(Enumerable.t(), keyword) :: t
def from(list, opts \\ []) do
with head = Keyword.get(opts, :head, RDF.bnode()),
graph = Keyword.get(opts, :graph, RDF.graph()),
{head, graph} = do_from(list, head, graph, opts) do
%__MODULE__{head: head, graph: graph}
end
end
# Empty input: the head of the empty list is always rdf:nil.
defp do_from([], _, graph, _) do
{RDF.nil(), graph}
end
defp do_from(list, head, graph, opts) when maybe_ns_term(head) do
do_from(list, RDF.iri!(head), graph, opts)
end
# A nested Elixir list becomes its own RDF list under a fresh blank node,
# whose head then takes the element's place in the outer list.
defp do_from([list | rest], head, graph, opts) when is_list(list) do
with {nested_list_node, graph} = do_from(list, RDF.bnode(), graph, opts) do
do_from([nested_list_node | rest], head, graph, opts)
end
end
# Recursive cons cell: link `head` to its value via rdf:first and to the
# rest of the list via rdf:rest.
defp do_from([first | rest], head, graph, opts) do
with {next, graph} = do_from(rest, RDF.bnode(), graph, opts) do
{
head,
Graph.add(
graph,
head
|> RDF.first(first)
|> RDF.rest(next)
)
}
end
end
# Any other Enumerable is materialized to a list first.
defp do_from(enumerable, head, graph, opts) do
enumerable
|> Enum.into([])
|> do_from(head, graph, opts)
end
@doc """
The values of a `RDF.List` as an Elixir list.
Nested lists are converted recursively.
"""
@spec values(t) :: Enumerable.t()
def values(%__MODULE__{graph: graph} = list) do
Enum.map(list, fn node_description ->
value = Description.first(node_description, RDF.first())
# A value that is itself a list node is expanded recursively.
if node?(value, graph) do
value
|> new(graph)
|> values
else
value
end
end)
end
@doc """
The RDF nodes constituting a `RDF.List` as an Elixir list.
"""
@spec nodes(t) :: [BlankNode.t()]
def nodes(%__MODULE__{} = list) do
Enum.map(list, fn node_description -> node_description.subject end)
end
@doc """
Checks if a list is the empty list.
"""
@spec empty?(t) :: boolean
def empty?(%__MODULE__{head: @rdf_nil}), do: true
def empty?(%__MODULE__{}), do: false
@doc """
Checks if the given list consists of list nodes which are all blank nodes.
"""
@spec valid?(t) :: boolean
def valid?(%__MODULE__{head: @rdf_nil}), do: true
def valid?(%__MODULE__{} = list), do: Enum.all?(list, &RDF.bnode?(&1.subject))
@doc """
Checks if a given resource is a RDF list node in a given `RDF.Graph`.
Although, technically a resource is a list, if it uses at least one `rdf:first`
or `rdf:rest`, we pragmatically require the usage of both.
Note: This function doesn't indicate if the list is valid.
See `new/2` and `valid?/2` for validations.
"""
@spec node?(any, Graph.t()) :: boolean
def node?(list_node, graph)
def node?(@rdf_nil, _), do: true
def node?(%BlankNode{} = list_node, graph), do: do_node?(list_node, graph)
def node?(%IRI{} = list_node, graph), do: do_node?(list_node, graph)
def node?(list_node, graph) when maybe_ns_term(list_node),
do: do_node?(RDF.iri(list_node), graph)
def node?(_, _), do: false
defp do_node?(list_node, graph), do: graph |> Graph.description(list_node) |> node?()
@doc """
Checks if the given `RDF.Description` describes a RDF list node.
"""
def node?(description)
def node?(nil), do: false
def node?(%Description{predications: predications}) do
Map.has_key?(predications, RDF.first()) and
Map.has_key?(predications, RDF.rest())
end
defimpl Enumerable do
@rdf_nil RDF.Utils.Bootstrapping.rdf_iri("nil")
def reduce(_, {:halt, acc}, _fun), do: {:halted, acc}
def reduce(list, {:suspend, acc}, fun), do: {:suspended, acc, &reduce(list, &1, fun)}
# The empty list (head == rdf:nil) yields no elements.
def reduce(%RDF.List{head: @rdf_nil}, {:cont, acc}, _fun),
do: {:done, acc}
def reduce(%RDF.List{head: %BlankNode{}} = list, acc, fun),
do: do_reduce(list, acc, fun)
def reduce(%RDF.List{head: %IRI{}} = list, acc, fun),
do: do_reduce(list, acc, fun)
def reduce(_, _, _), do: {:halted, nil}
# One enumeration step: the head node must have a description with exactly
# one rdf:first and one rdf:rest value; anything else halts with nil
# (which `new/2` relies on to reject malformed lists).
defp do_reduce(%RDF.List{head: head, graph: graph}, {:cont, acc}, fun) do
with description when not is_nil(description) <-
Graph.get(graph, head),
[_] <- Description.get(description, RDF.first()),
[rest] <- Description.get(description, RDF.rest()),
acc = fun.(description, acc) do
if rest == @rdf_nil do
case acc do
{:cont, acc} -> {:done, acc}
# TODO: Is the :suspend case handled properly
_ -> reduce(%RDF.List{head: rest, graph: graph}, acc, fun)
end
else
reduce(%RDF.List{head: rest, graph: graph}, acc, fun)
end
else
nil ->
{:halted, nil}
values when is_list(values) ->
{:halted, nil}
end
end
def member?(_, _), do: {:error, __MODULE__}
def count(_), do: {:error, __MODULE__}
def slice(_), do: {:error, __MODULE__}
end
end
|
lib/rdf/list.ex
| 0.864139
| 0.640495
|
list.ex
|
starcoder
|
defmodule Astra.Document do
alias Astra.Document.Http
@moduledoc """
Provides functions to access the public methods of the Document API for databases hosted on https://astra.datastax.com.
Astra's Document API is implemented using the stargate project, https://stargate.io. Swagger docs for this interface are available here https://docs.astra.datastax.com/reference#namespaces-1.
If required, raw access to the Astra REST Document API can be obtained through the `Astra.Document.Http` module.
"""
# NOTE: the @specs below previously used the bare module atoms `String`,
# `Map` and `Atom` as types; to Dialyzer those denote the literal atoms
# :"Elixir.String" etc., making every spec wrong. They are replaced with
# the proper built-in types String.t(), map(), atom() and list().
@doc """
update part of a document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document
## Examples
```
> Astra.Document.patch_doc("test", "docs", id, %{name: "fred"})
{:ok, nil}
```
"""
@spec patch_doc(String.t(), String.t(), String.t(), map()) :: {atom(), nil}
def patch_doc(namespace, collection, id, patch), do: Http.patch("#{namespace}/collections/#{collection}/#{id}", Http.json!(patch)) |> Http.parse_response
@doc """
update part of a sub document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document'
- path: the path to the subdocument. Nested paths can be matched by joining the keys with a `\\`, ex. `address\\city`
- patch: a map of attributes to match. Sub-document attributes not in the patch Map will remain as is.
## Examples
```
> Astra.Document.patch_sub_doc("test", "docs", id, "other", %{first: "object"})
{:ok, nil}
```
"""
@spec patch_sub_doc(String.t(), String.t(), String.t(), String.t(), map()) :: {atom(), nil}
def patch_sub_doc(namespace, collection, id, path, patch), do: Http.patch("#{namespace}/collections/#{collection}/#{id}/#{path}", Http.json!(patch)) |> Http.parse_response
@doc """
replace a document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document
- doc: the new document. Any existing document at the id will be removed.
## Examples
```
> doc = %{
name: "other-stuff",
other: %{ first: "thing", last: "thing"}
}
> Astra.Document.put_doc("test", "docs", uuid, doc)
```
"""
@spec put_doc(String.t(), String.t(), String.t(), map()) :: {atom(), map()}
def put_doc(namespace, collection, id, doc), do: Http.put("#{namespace}/collections/#{collection}/#{id}", Http.json!(doc)) |> Http.parse_response
@doc """
replace a sub document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document
- path: the path to the subdocument. Nested paths can be matched by joining the keys with a `\\`, ex. `address\\city`
- doc: the new document. Any existing document at the id will be removed.
## Examples
```
> doc = %{
name: "other-stuff",
other: %{ first: "thing", last: "thing"}
}
> Astra.Document.put_doc("test", "docs", uuid, doc)
```
"""
@spec put_sub_doc(String.t(), String.t(), String.t(), String.t(), map()) :: {atom(), map()}
def put_sub_doc(namespace, collection, id, path, subdoc), do: Http.put("#{namespace}/collections/#{collection}/#{id}/#{path}", Http.json!(subdoc)) |> Http.parse_response
@doc """
add a new document, id will be an auto generated uuid
## Parameters
- namespace: namespace name
- collection: name of the document collection
- doc: the new document.
## Examples
```
> doc = %{
name: "other-stuff",
other: %{ first: "thing", last: "thing"}
}
> Astra.Document.post_doc("test", "docs", doc)
{:ok, %{documentId: "3f9d03af-e29d-40d5-9fe1-c77a2ff6a40a"}}
```
"""
@spec post_doc(String.t(), String.t(), map()) :: {atom(), map()}
def post_doc(namespace, collection, document), do: Http.post("#{namespace}/collections/#{collection}", Http.json!(document)) |> Http.parse_response
@doc """
get a document by id
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document to retrieve
## Examples
```
> Astra.Document.get_doc("test", "docs", id)
{:ok, %{name: "other-stuff", other: "This makes no sense"}}
```
"""
@spec get_doc(String.t(), String.t(), String.t()) :: {atom(), map()}
def get_doc(namespace, collection, id), do: Http.get("#{namespace}/collections/#{collection}/#{id}") |> Http.parse_response
@doc """
get a sub document by id
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document to retrieve
- path: the path to the subdocument. Nested paths can be matched by joining the keys with a `\\`, ex. `address\\city`
## Examples
```
doc = %{
name: "other-stuff",
other: %{ first: "thing", last: "thing"}
}
> {:ok, %{documentId: id}} = Astra.Document.post_doc("test", "docs", doc)
> Astra.Document.get_sub_doc("test", "docs", id, "other")
{:ok, %{ first: "thing", last: "thing"}}
```
"""
@spec get_sub_doc(String.t(), String.t(), String.t(), String.t()) :: {atom(), map()}
def get_sub_doc(namespace, collection, id, path), do: Http.get("#{namespace}/collections/#{collection}/#{id}/#{path}") |> Http.parse_response
@doc """
delete a document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document to destroy
## Examples
```
> Astra.Document.delete_doc("test", "docs", id)
{:ok, []}
```
"""
@spec delete_doc(String.t(), String.t(), String.t()) :: {atom(), list()}
def delete_doc(namespace, collection, id), do: Http.delete("#{namespace}/collections/#{collection}/#{id}") |> Http.parse_response
@doc """
delete a sub document
## Parameters
- namespace: namespace name
- collection: name of the document collection
- id: the id of the document to destroy
- path: the path to the subdocument. Nested paths can be matched by joining the keys with a `\\`, ex. `address\\city`
## Examples
```
doc = %{
name: "other-stuff",
other: %{ first: "thing", last: "thing"}
}
> {:ok, %{documentId: id}} = Astra.Document.post_doc("test", "docs", doc)
> Astra.Document.get_sub_doc("test", "docs", id, "other")
{:ok, %{ first: "thing", last: "thing"}}
```
"""
@spec delete_sub_doc(String.t(), String.t(), String.t(), String.t()) :: {atom(), list()}
def delete_sub_doc(namespace, collection, id, path), do: Http.delete("#{namespace}/collections/#{collection}/#{id}/#{path}") |> Http.parse_response
@doc """
search for documents in a collection
## Parameters
- namespace: namespace name
- collection: name of the document collection
- where: search clause for matching documents
- options: Valid options [:fields, :"page-size", :"page-state", :sort]
## Examples
```
> query = %{
other: %{
"$eq": uuid
}
}
> Astra.Document.search_docs("test", "docs", query, %{"page-size": 20})
```
"""
@spec search_docs(String.t(), String.t(), map(), map()) :: {atom(), map()}
def search_docs(namespace, collection, where, options) do
# Only the documented option keys are forwarded; the where clause is
# JSON-encoded and sent as a query parameter per the Stargate API.
params = Map.take(options, [:fields, :"page-size", :"page-state", :sort])
|> Map.put(:where, Http.json!(where))
Http.get("#{namespace}/collections/#{collection}",[], params: params)
|> Http.parse_response
end
end
|
lib/document/document.ex
| 0.9231
| 0.795857
|
document.ex
|
starcoder
|
defmodule Msgpax.PackError do
  @moduledoc """
  Exception that represents an error in packing terms.
  This exception has a `:reason` field that can have one of the following
  values:
  * `{:not_encodable, term}` - means that the given argument is not
  serializable. For example, this is returned when you try to pack bits
  instead of a binary (as only binaries can be serialized).
  * `{:too_big, term}` - means that the given term is too big to be
  encoded. What "too big" means depends on the term being encoded; for
  example, integers larger than `18_446_744_073_709_551_616` are too big to be
  encoded with MessagePack.
  """

  @type t :: %__MODULE__{
    reason: {:too_big, any} | {:not_encodable, any},
  }

  defexception [:reason]

  # One clause per reason tag; both render the offending term via inspect/1.
  def message(%__MODULE__{reason: {:too_big, term}}) do
    "value is too big: #{inspect(term)}"
  end

  def message(%__MODULE__{reason: {:not_encodable, term}}) do
    "value is not encodable: #{inspect(term)}"
  end
end
defprotocol Msgpax.Packer do
@moduledoc """
The `Msgpax.Packer` protocol is responsible for serializing any Elixir data
structure according to the MessagePack specification.
Some notable properties of the implementation of this protocol for the
built-in Elixir data structures:
* atoms are encoded as strings (i.e., they're converted to strings first and
then encoded as strings)
* bitstrings can only be encoded as long as they're binaries (and not actual
bitstrings - i.e., the number of bits must be a multiple of 8)
* binaries (or `Msgpax.Bin` structs) containing `2^32` or more bytes cannot
be encoded
* maps with more than `(2^32) - 1` elements cannot be encoded
* lists with more than `(2^32) - 1` elements cannot be encoded
* integers bigger than `(2^64) - 1` or smaller than `-2^63` cannot be
encoded
## Serializing a subset of fields for structs
The `Msgpax.Packer` protocol supports serialization of only a subset of the
fields of a struct when derived. For example:
defmodule User do
@derive [{Msgpax.Packer, fields: [:name]}]
defstruct [:name, :sensitive_data]
end
In the example, packing `User` will only serialize the `:name` field and leave
out the `:sensitive_data` field. By default, the `:__struct__` field is taken
out of the struct before packing it. If you want this field to be present in
the packed map, you have to set the `:include_struct_field` option to `true`.
## Unpacking back to Elixir structs
When packing a struct, that struct will be packed as the underlying map and
will be unpacked with string keys instead of atom keys. This makes it hard to
reconstruct the map as tools like `Kernel.struct/2` can't be used (given keys
are strings). Also, unless specifically stated with the `:include_struct_field`
option, the `:__struct__` field is lost when packing a struct, so information
about *which* struct it was is lost.
%User{name: "Juri"} |> Msgpax.pack!() |> Msgpax.unpack!()
#=> %{"name" => "Juri"}
These things can be overcome by using something like
[Maptu](https://github.com/lexhide/maptu), which helps to reconstruct
structs:
map = %User{name: "Juri"} |> Msgpax.pack!() |> Msgpax.unpack!()
Maptu.struct!(User, map)
#=> %User{name: "Juri"}
map =
%{"__struct__" => "Elixir.User", "name" => "Juri"}
|> Msgpax.pack!()
|> Msgpax.unpack!()
Maptu.struct!(map)
#=> %User{name: "Juri"}
"""
@doc """
This function serializes `term`.
It returns an iodata result.
"""
def pack(term)
end
defimpl Msgpax.Packer, for: Atom do
  # nil, false and true have dedicated single-byte MessagePack markers.
  def pack(nil), do: [0xC0]
  def pack(false), do: [0xC2]
  def pack(true), do: [0xC3]
  # Any other atom is serialized as its string representation.
  def pack(atom), do: atom |> Atom.to_string() |> @protocol.BitString.pack()
end
defimpl Msgpax.Packer, for: BitString do
  # Only proper binaries (bit count divisible by 8) can be serialized;
  # any other bitstring is reported as not encodable.
  def pack(binary) when is_binary(binary), do: [format(binary) | binary]
  def pack(bits), do: throw({:not_encodable, bits})

  # str format family, chosen by byte size: fixstr / str8 / str16 / str32.
  defp format(binary) when byte_size(binary) < 32, do: 0b10100000 + byte_size(binary)
  defp format(binary) when byte_size(binary) < 0x100, do: [0xD9, byte_size(binary)]
  defp format(binary) when byte_size(binary) < 0x10000, do: <<0xDA, byte_size(binary)::16>>
  defp format(binary) when byte_size(binary) < 0x100000000, do: <<0xDB, byte_size(binary)::32>>
  defp format(binary), do: throw({:too_big, binary})
end
defimpl Msgpax.Packer, for: Map do
  # Deriving for structs is delegated to the Any implementation.
  defmacro __deriving__(module, struct, options) do
    @protocol.Any.deriving(module, struct, options)
  end

  # Emits the map header followed by packed key/value pairs as iodata.
  def pack(map) do
    for {key, value} <- map, into: [format(map)], do: [@protocol.pack(key) | @protocol.pack(value)]
  end

  # map format family, chosen by entry count: fixmap / map16 / map32.
  defp format(map) when map_size(map) < 16, do: 0b10000000 + map_size(map)
  defp format(map) when map_size(map) < 0x10000, do: <<0xDE, map_size(map)::16>>
  defp format(map) when map_size(map) < 0x100000000, do: <<0xDF, map_size(map)::32>>
  defp format(map), do: throw({:too_big, map})
end
defimpl Msgpax.Packer, for: List do
  # Emits the array header followed by each packed element as iodata.
  def pack(list) do
    for item <- list, into: [format(list)], do: @protocol.pack(item)
  end

  # array format family, chosen by element count: fixarray / array16 / array32.
  defp format(list) when length(list) < 16, do: 0b10010000 + length(list)
  defp format(list) when length(list) < 0x10000, do: <<0xDC, length(list)::16>>
  defp format(list) when length(list) < 0x100000000, do: <<0xDD, length(list)::32>>
  defp format(list), do: throw({:too_big, list})
end
defimpl Msgpax.Packer, for: Float do
  # Every float is encoded as 64-bit IEEE 754 (float64, marker 0xCB).
  def pack(num), do: <<0xCB, num::64-float>>
end
defimpl Msgpax.Packer, for: Integer do
  # Negative and non-negative integers use separate MessagePack families.
  def pack(int) when int < 0, do: format_negative(int)
  def pack(int), do: format_non_negative(int)

  # negative fixint / int8 / int16 / int32 / int64, smallest form first.
  defp format_negative(int) when int >= -32, do: [0x100 + int]
  defp format_negative(int) when int >= -128, do: [0xD0, 0x100 + int]
  defp format_negative(int) when int >= -0x8000, do: <<0xD1, int::16>>
  defp format_negative(int) when int >= -0x80000000, do: <<0xD2, int::32>>
  defp format_negative(int) when int >= -0x8000000000000000, do: <<0xD3, int::64>>
  defp format_negative(int), do: throw({:too_big, int})

  # positive fixint / uint8 / uint16 / uint32 / uint64, smallest form first.
  defp format_non_negative(int) when int < 128, do: [int]
  defp format_non_negative(int) when int < 256, do: [0xCC, int]
  defp format_non_negative(int) when int < 0x10000, do: <<0xCD, int::16>>
  defp format_non_negative(int) when int < 0x100000000, do: <<0xCE, int::32>>
  defp format_non_negative(int) when int < 0x10000000000000000, do: <<0xCF, int::64>>
  defp format_non_negative(int), do: throw({:too_big, int})
end
defimpl Msgpax.Packer, for: Msgpax.Bin do
  # Raw binary payload; only matches when :data is an actual binary.
  def pack(%{data: data}) when is_binary(data), do: [format(data) | data]

  # bin format family, chosen by byte size: bin8 / bin16 / bin32.
  defp format(data) when byte_size(data) < 0x100, do: [0xC4, byte_size(data)]
  defp format(data) when byte_size(data) < 0x10000, do: <<0xC5, byte_size(data)::16>>
  defp format(data) when byte_size(data) < 0x100000000, do: <<0xC6, byte_size(data)::32>>
  defp format(data), do: throw({:too_big, data})
end
defimpl Msgpax.Packer, for: [Msgpax.Ext, Msgpax.ReservedExt] do
  # Extension payload: size marker, then the type byte, then the raw data.
  def pack(%_{type: type, data: data}), do: [format(data), <<type>> | data]

  # fixext 1/2/4/8/16 for the exact fixed sizes, otherwise ext8/ext16/ext32.
  defp format(data) do
    case byte_size(data) do
      1 -> [0xD4]
      2 -> [0xD5]
      4 -> [0xD6]
      8 -> [0xD7]
      16 -> [0xD8]
      size when size < 0x100 -> [0xC7, size]
      size when size < 0x10000 -> <<0xC8, size::16>>
      size when size < 0x100000000 -> <<0xC9, size::32>>
      _ -> throw({:too_big, data})
    end
  end
end
defimpl Msgpax.Packer, for: Any do
defmacro __deriving__(module, struct, options) do
deriving(module, struct, options)
end
# Generates a Msgpax.Packer implementation for a struct module at derive
# time. Honors the :fields option (subset of keys to pack) and
# :include_struct_field (pack the module name under "__struct__").
def deriving(module, struct, options) do
keys = struct |> Map.from_struct() |> Map.keys()
fields = Keyword.get(options, :fields, keys)
# :__struct__ listed in :fields implies include_struct_field unless
# explicitly overridden.
include_struct_field? = Keyword.get(options, :include_struct_field, :__struct__ in fields)
fields = List.delete(fields, :__struct__)
# Pick the cheapest extraction expression for the requested combination
# of "all fields vs subset" and "with vs without __struct__".
extractor =
cond do
fields == keys and include_struct_field? ->
quote(do: Map.from_struct(struct) |> Map.put("__struct__", unquote(module)))
fields == keys ->
quote(do: Map.from_struct(struct))
include_struct_field? ->
quote(do: Map.take(struct, unquote(fields)) |> Map.put("__struct__", unquote(module)))
true ->
quote(do: Map.take(struct, unquote(fields)))
end
quote do
defimpl unquote(@protocol), for: unquote(module) do
def pack(struct) do
unquote(extractor)
|> @protocol.Map.pack
end
end
end
end
# Non-derived types have no implementation: raise the standard protocol error.
def pack(term) do
raise Protocol.UndefinedError,
protocol: @protocol, value: term
end
end
|
lib/msgpax/packer.ex
| 0.917774
| 0.564459
|
packer.ex
|
starcoder
|
defmodule Snitch.Data.Schema.PaymentMethod do
@moduledoc """
Models a PaymentMethod.
"""
use Snitch.Data.Schema
alias Snitch.Data.Schema.Payment
@typedoc """
A struct which represents PaymentMethod.
All the fields mentioned in the struct map
to fields of the database table.
The fields are:
* `name`: - Stores the name which would be shown to the user.
* `code`: - Stores a code to identify the type of the payment.
See `SnitchPayments.PaymentMethodCode`.
* `active?`: - A boolean to determine whether payment method is active
or not in latter case is not shown to the user.
* `live_mode?`: - A boolean to determine whether `test`_url or `live`_url
shoulde be used for the `provider`.
* `provider`: - Stores the name of the module which implements the logic
for handling the transactions. The providers are picked
from gateways in `SnitchPayments`.
* `preferences`: - A map to store credentials for the provider gateway.
TODO: Move preferences to a separate db or store it
after encoding.
"""
@type t :: %__MODULE__{}
schema "snitch_payment_methods" do
field(:name, :string)
field(:code, :string, size: 3)
field(:active?, :boolean, default: true)
field(:live_mode?, :boolean, default: false)
field(:provider, Ecto.Atom)
field(:preferences, :map)
field(:description, :string)
has_many(:payments, Payment)
timestamps()
end
@required_fields ~w(name provider code)a
@update_fields ~w(name active? provider live_mode? preferences description)a
@create_fields [:code | @update_fields]
@doc """
Returns a `PaymentMethod` changeset for a new `payment_method`.
"""
@spec create_changeset(t, map) :: Ecto.Changeset.t()
def create_changeset(%__MODULE__{} = payment_method, params) do
payment_method
|> cast(params, @create_fields)
|> validate_required(@required_fields)
|> validate_length(:code, is: 3)
|> unique_constraint(:name)
|> modify_provider_name()
end
# Expands a changed :provider value into a fully-qualified module name
# (e.g. :PayuBiz -> Elixir.PayuBiz) via Module.safe_concat/2.
defp modify_provider_name(%Ecto.Changeset{valid?: true} = changeset) do
case fetch_change(changeset, :provider) do
{:ok, provider} ->
put_change(changeset, :provider, Module.safe_concat(Elixir, provider))
# BUGFIX: this clause was `:errror ->` (typo); fetch_change/2 returns
# the atom :error when :provider is unchanged, so valid changesets
# without a provider change crashed with a CaseClauseError.
:error ->
changeset
end
end
defp modify_provider_name(changeset), do: changeset
@doc """
Returns a `PaymentMethod` changeset to update `payment_method`.
"""
@spec update_changeset(t, map) :: Ecto.Changeset.t()
def update_changeset(%__MODULE__{} = payment_method, params) do
cast(payment_method, params, @update_fields)
end
end
|
apps/snitch_core/lib/core/data/schema/payment/payment_method.ex
| 0.818156
| 0.5794
|
payment_method.ex
|
starcoder
|
defmodule Cassette.Support do
  @moduledoc """
  This macro module allows you to create your own Cassette service with custom
  configurations
  You can customize the (otp) application name with the `:process_name` key and
  provide your configuration in the `:config` key.
  ```elixir
  defmodule MyCas do
    use Cassette.Support, process_name: :EmployeesCas, config:
      %{Cassette.Config.default | base_url: "https://employees-cas.example.org"}
  end
  ```
  Please refer to `Cassette` for usage
  """
  alias Cassette.Config
  alias Cassette.Server
  alias Cassette.User

  import Cassette.Version, only: [version: 2]
  require Cassette.Version

  # A "Cassette module": any module that has done `use Cassette.Support`.
  @type t :: module()

  defmacro __using__(opts \\ []) do
    quote bind_quoted: [opts: opts] do
      # Registered name of the backing Cassette.Server process.
      @name opts[:process_name] || :CassetteServer
      # Configuration overrides merged over Config.default() in start/0.
      @config opts[:config] || %{}

      # Stops the backing server. The argument is ignored (kept so callers
      # may pass a state term).
      @spec stop(term) :: term
      def stop(_state) do
        GenServer.stop(@name)
      end

      # Starts a one-for-one supervisor owning the Cassette.Server for this
      # module. Idempotent: an already-started supervisor is reported as
      # success.
      @spec start() :: GenServer.on_start()
      def start do
        config =
          Config.default()
          |> Map.merge(@config)
          |> Config.resolve()

        # Compile-time branch: Elixir >= 1.5 supports child-spec tuples,
        # older releases need the (since deprecated) Supervisor.Spec.worker/2.
        version ">= 1.5.0" do
          children = [{Server, [@name, config]}]
        else
          import Supervisor.Spec
          children = [worker(Server, [@name, config])]
        end

        options = [strategy: :one_for_one, name: :"#{@name}.Supervisor"]

        case Supervisor.start_link(children, options) do
          {:ok, pid} -> {:ok, pid}
          {:error, {:already_started, pid}} -> {:ok, pid}
          other -> other
        end
      end

      @doc """
      Elixir 1.5+ compatible child spec.
      If you are adding a custom cassette instance, you can add to your
      supervision tree by using:
      ```elixir
      defmodule MyCas do
        use Cassette.Support, process_name: :MyCas
      end
      children = [
        # ...
        MyCas
      ]
      Supervisor.start_link(children, ...)
      ```
      """
      @spec child_spec(term) :: :supervisor.child_spec()
      def child_spec(_opts) do
        %{
          id: @name,
          start: {__MODULE__, :start, []},
          restart: :permanent,
          type: :supervisor
        }
      end

      @doc """
      Generates a child spec for a custom Cassette module for Elixir < 1.5
      If you are adding a custom cassette instance, you can add to your
      supervision tree by using:
      ```elixir
      defmodule MyCas do
        use Cassette.Support, process_name: :MyCas
      end
      children = [
        # ...
        MyCas.child_spec
      ]
      Supervisor.start_link(children, ...)
      ```
      """
      @spec child_spec() :: Supervisor.Spec.spec()
      def child_spec do
        mod = __MODULE__
        # Old-style supervisor spec tuple for pre-1.5 supervision trees.
        {mod, {mod, :start, []}, :permanent, :infinity, :supervisor, [mod]}
      end

      @doc """
      Returns the configuration used by this Cassette server
      Will return the default configuration if not provided.
      Please refer to `Cassette.Config.t` for details
      """
      @spec config :: Config.t()
      def config do
        Server.config(@name)
      end

      @doc """
      Generates a Ticket Granting Ticket
      """
      @spec tgt :: {:ok, String.t()} | {:error, term}
      def tgt do
        Server.tgt(@name)
      end

      @doc """
      Generates a Service Ticket for the given `service`
      This function retries once when the TGT is expired on the server side.
      """
      @spec st(String.t()) :: {:ok, String.t()} | {:error, term}
      def st(service) do
        # Assertive match: failure to fetch a TGT is unexpected and crashes.
        {:ok, current_tgt} = tgt()

        case Server.st(@name, current_tgt, service) do
          {:error, :tgt_expired} ->
            # The cached TGT lapsed between fetch and use; refresh and retry once.
            {:ok, new_tgt} = tgt()
            Server.st(@name, new_tgt, service)

          reply ->
            reply
        end
      end

      @doc """
      Validates a given `ticket` against the given `service` or the service set
      in the configuration
      """
      @spec validate(String.t(), String.t()) :: {:ok, User.t()} | {:error, term}
      def validate(ticket, service \\ config().service) do
        Server.validate(@name, ticket, service)
      end

      # Reloads the server with the given configuration; internal helper.
      @doc false
      @spec reload(Config.t()) :: term
      def reload(cfg \\ Config.default()) do
        Server.reload(@name, cfg)
      end
    end
  end
end
|
lib/cassette/support.ex
| 0.810141
| 0.682772
|
support.ex
|
starcoder
|
defmodule AdventOfCode.Day02 do
  @moduledoc """
  Day 02.
  Count the number of box IDs that have exactly two of any letter or that have
  exactly three of any letter.
  i.e.
  abcdef does not have any
  bababc has both two letters exactly and three letters exactly, so it counts for both.
  """

  # Puzzle input, one box ID per line, relative to the working directory.
  @day2 Path.join(["day2.txt"])

  @doc """
  Part 1: reads the input file and returns the checksum — the number of IDs
  containing some letter exactly twice, multiplied by the number of IDs
  containing some letter exactly three times.
  """
  def read do
    data =
      @day2
      |> File.stream!([], :line)
      |> Stream.map(&String.trim/1)
      |> Enum.to_list()

    box_count(data, 0, 2) * box_count(data, 0, 3)
  end

  @doc """
  Counts how many box IDs contain at least one character occurring exactly
  `n` times. `count` is the running accumulator.
  """
  def box_count([box_id | tail], count, n) do
    occurrence_counts =
      box_id
      |> String.to_charlist()
      |> char_frequency()
      |> Map.values()

    if n in occurrence_counts do
      box_count(tail, count + 1, n)
    else
      box_count(tail, count, n)
    end
  end

  def box_count([], count, _n), do: count

  # Maps each character to its number of occurrences. Map.update/4 replaces
  # the original hand-rolled Map.get/branch logic: a first occurrence
  # initializes the count to 1, later ones increment it.
  defp char_frequency(char_list) do
    Enum.reduce(char_list, %{}, fn char, acc ->
      Map.update(acc, char, 1, &(&1 + 1))
    end)
  end

  @doc """
  Part 2.
  Find two box IDs that only have one character that is different within each.
  Returns the characters the two IDs share, in their original order.
  """
  def get_box_ids do
    data =
      @day2
      |> File.stream!([], :line)
      |> Stream.map(&String.trim/1)
      |> Stream.map(&box_id_to_mapset/1)

    # Stop at the first box ID that differs from another in exactly one
    # position. (Renamed the inner binding: the original shadowed `data`.)
    Enum.reduce_while(data, [], fn box_id, acc ->
      case check_box_id(data, box_id) do
        [common] when is_binary(common) ->
          {:halt, [common | acc]}

        _ ->
          {:cont, []}
      end
    end)
  end

  # Change a box ID into a MapSet of {char, index} pairs so set operations
  # can compare IDs position by position.
  def box_id_to_mapset(box_id) do
    box_id
    |> String.to_charlist()
    |> Stream.with_index()
    |> MapSet.new()
  end

  # Helper that checks one box_id against all the other box values.
  # Returns the common characters (as a binary) for every box that differs
  # from `box_id` in exactly one position; comparing a box with itself yields
  # an empty difference and is filtered out.
  def check_box_id(boxes, box_id) do
    boxes
    |> Stream.flat_map(fn box ->
      [{MapSet.intersection(box_id, box), MapSet.difference(box_id, box)}]
    end)
    |> Stream.filter(fn {_intersection, difference} ->
      MapSet.size(difference) == 1
    end)
    |> Enum.map(fn {intersection, _difference} ->
      intersection
      # A MapSet does not preserve insertion order, so restore the original
      # character order via the stored indices before joining.
      |> Enum.sort_by(fn {_char, index} -> index end)
      |> Enum.map(fn {char, _index} -> char end)
      |> IO.iodata_to_binary()
    end)
  end
end
|
2018/elixir/advent_of_code/lib/day02/day02.ex
| 0.697094
| 0.588741
|
day02.ex
|
starcoder
|
defmodule Flawless.Rule do
  @moduledoc """
  Provides helpers to build and evaluate rules. It also defines all the built-in rules.
  """
  defstruct predicate: nil, message: nil

  alias Flawless.Context
  import Flawless.Utils.Interpolation, only: [sigil_t: 2]

  # A predicate receives the value under validation; predicate_result/2
  # documents the accepted return shapes (booleans or :ok/:error style).
  @type predicate() :: (any -> boolean())
  @type error_function() ::
          Flawless.Error.t_message()
          | (any -> Flawless.Error.t_message())
          | (any, list -> Flawless.Error.t_message())
  @type t() :: %__MODULE__{
          predicate: function(),
          message: error_function() | nil
        }

  # Builds a rule from a predicate and an optional error message (a plain
  # message, a template tuple, or a 1- or 2-arity function producing one).
  @spec rule(predicate(), error_function() | nil) :: Flawless.Rule.t()
  def rule(predicate, error_message \\ nil) do
    %__MODULE__{
      predicate: predicate,
      message: error_message
    }
  end

  # Evaluates a rule — or a bare predicate function — against `data`.
  # Returns [] on success, or a Flawless.Error on failure.
  @spec evaluate(Flawless.Rule.t() | function(), any, Context.t()) :: [] | Flawless.Error.t()
  def evaluate(%__MODULE__{predicate: predicate, message: error_message} = _rule, data, context) do
    do_evaluate(predicate, error_message, data, context)
  end

  def evaluate(predicate, data, context) do
    do_evaluate(predicate, nil, data, context)
  end

  defp do_evaluate(predicate, error_message, data, context) do
    case predicate_result(predicate, data) do
      :ok ->
        []

      {:error, error} ->
        # The predicate supplied its own error message; use it directly.
        Flawless.Error.new(error, context)

      :error ->
        # No predicate-supplied message: fall back to the rule's message.
        error_message
        |> evaluate_error_message(data, context)
        |> Flawless.Error.new(context)
    end
  rescue
    # Deliberate catch-all: a raising predicate is treated as a failed rule
    # (see error_on_exception/1) rather than crashing the whole validation.
    _ -> error_on_exception(context)
  end

  # Normalizes the accepted predicate return shapes to :ok / :error /
  # {:error, message}. Any other return value raises (caught above).
  def predicate_result(predicate, data) do
    case predicate.(data) do
      true -> :ok
      false -> :error
      :ok -> :ok
      :error -> :error
      {:ok, _} -> :ok
      {:error, error} -> {:error, error}
    end
  end

  # Resolves the rule's message: nil -> generic text; strings and template
  # tuples pass through; functions are called with the data (and context).
  defp evaluate_error_message(error_message, data, context) do
    cond do
      is_nil(error_message) -> "The predicate failed."
      is_binary(error_message) -> error_message
      is_tuple(error_message) -> error_message
      is_function(error_message, 1) -> error_message.(data)
      is_function(error_message, 2) -> error_message.(data, context)
    end
  end

  defp error_on_exception(context) do
    Flawless.Error.new(
      "An exception was raised while evaluating a rule on that element, so it is likely incorrect.",
      context
    )
  end

  # Rule: the value must be one of the given options.
  @spec one_of(list) :: t()
  def one_of(options) when is_list(options) do
    rule(
      &(&1 in options),
      &{~t"Invalid value: %{value}. Valid options: %{options}",
        value: inspect(&1), options: inspect(options)}
    )
  end

  # Length of a string (graphemes) or list; nil for anything else, which the
  # length rules below treat as "not applicable" (they pass).
  defp value_length(value) when is_binary(value), do: String.length(value)
  defp value_length(value) when is_list(value), do: length(value)
  defp value_length(_), do: nil

  # Rule: minimum length for strings and lists.
  @spec min_length(integer()) :: t()
  def min_length(length) when is_integer(length) do
    rule(
      fn value ->
        case value_length(value) do
          nil -> true
          actual_length -> actual_length >= length
        end
      end,
      &{~t"Minimum length of %{min_length} required (current: %{actual_length}).",
        min_length: length, actual_length: value_length(&1)}
    )
  end

  # Rule: maximum length for strings and lists.
  @spec max_length(integer()) :: t()
  def max_length(length) when is_integer(length) do
    rule(
      fn value ->
        case value_length(value) do
          nil -> true
          actual_length -> actual_length <= length
        end
      end,
      &{~t"Maximum length of %{max_length} required (current: %{actual_length}).",
        max_length: length, actual_length: value_length(&1)}
    )
  end

  # Rule: strings and lists must be non-empty.
  @spec non_empty() :: t()
  def non_empty() do
    rule(
      fn value ->
        case value_length(value) do
          nil -> true
          actual_length -> actual_length > 0
        end
      end,
      "Value cannot be empty."
    )
  end

  # Rule: exact length for strings and lists.
  @spec exact_length(integer()) :: t()
  def exact_length(length) when is_integer(length) do
    rule(
      fn value ->
        case value_length(value) do
          nil -> true
          actual_length -> actual_length == length
        end
      end,
      &{~t"Expected length of %{expected_length} (current: %{actual_length}).",
        expected_length: length, actual_length: value_length(&1)}
    )
  end

  # `list -- Enum.uniq(list)` keeps one copy per extra occurrence; the outer
  # uniq collapses those into the distinct duplicated elements.
  defp duplicates(list) when is_list(list) do
    Enum.uniq(list -- Enum.uniq(list))
  end

  # Rule: the list must not contain duplicate elements.
  @spec no_duplicate() :: t()
  def no_duplicate() do
    rule(
      fn value -> duplicates(value) == [] end,
      &{~t"The list should not contain duplicates (duplicates found: %{duplicates}).",
        duplicates: inspect(duplicates(&1))}
    )
  end

  # Rule: the value must match the given regex (string patterns are compiled).
  @spec match(String.t() | Regex.t()) :: t()
  def match(regex) when is_binary(regex) do
    regex
    |> Regex.compile!()
    |> match()
  end

  def match(%Regex{} = regex) do
    rule(
      fn value -> Regex.match?(regex, value) end,
      &{~t"Value %{value} does not match regex %{regex}.",
        value: inspect(&1), regex: inspect(regex)}
    )
  end

  # Rule: numeric lower bound (inclusive).
  @spec min(number) :: t()
  def min(min_value) when is_number(min_value) do
    rule(
      fn value -> value >= min_value end,
      {~t"Must be greater than or equal to %{min_value}.", min_value: min_value}
    )
  end

  # Rule: numeric upper bound (inclusive).
  @spec max(number) :: t()
  def max(max_value) when is_number(max_value) do
    rule(
      fn value -> value <= max_value end,
      {~t"Must be less than or equal to %{max_value}.", max_value: max_value}
    )
  end

  # Rule: numeric range (inclusive on both ends).
  @spec between(number, number) :: t()
  def between(min, max) when is_number(min) and is_number(max) do
    rule(
      fn value -> value >= min and value <= max end,
      {~t"Must be between %{min_value} and %{max_value}.", min_value: min, max_value: max}
    )
  end

  # Rule: a map must not define both keys at the same time.
  @spec not_both(any, any) :: t()
  def not_both(field1, field2) do
    rule(
      fn map -> not (field1 in Map.keys(map) and field2 in Map.keys(map)) end,
      {~t"Fields %{field1} and %{field2} cannot both be defined.", field1: field1, field2: field2}
    )
  end

  defp get_arity(func) do
    func
    |> Function.info()
    |> Keyword.get(:arity, 0)
  end

  # Rule: the value must be a function of the given arity.
  @spec arity(integer) :: t()
  def arity(arity) do
    rule(
      fn func -> get_arity(func) == arity end,
      &{~t"Expected arity of %{expected_arity}, found: %{actual_arity}.",
        expected_arity: arity, actual_arity: get_arity(&1)}
    )
  end
end
|
lib/flawless/rule.ex
| 0.875667
| 0.473049
|
rule.ex
|
starcoder
|
defmodule DiodeClient do
  @moduledoc ~S"""
  DiodeClient provides secure end-to-end encrypted connections between any two machines. Connections are established
  either through direct peer-to-peer TCP connections or bridged via the Diode network. To learn more about the
  decentralized Diode network visit https://diode.io/
  Example Usage with a simple server:
  ```elixir
  DiodeClient.interface_add("example_server_interface")
  address = DiodeClient.Base16.encode(DiodeClient.address())
  {:ok, port} = DiodeClient.port_listen(5000)
  spawn_link(fn ->
    IO.puts("server #{address} started")
    {:ok, ssl} = DiodeClient.port_accept(port)
    peer = DiodeClient.Port.peer(ssl)
    IO.puts("got a connection from #{Base.encode16(peer)}")
    :ssl.controlling_process(ssl, self())
    :ssl.setopts(ssl, [packet: :line, active: true])
    for x <- 1..10 do
      IO.puts("sending message #{x}")
      :ssl.send(ssl, "Hello #{Base.encode16(peer)} this is message #{x}\n")
    end
    receive do
      {:ssl_closed, _ssl} -> IO.puts("closed!")
    end
  end)
  ```
  And the client. Here insert in the server address the address that has been printed above.
  For example `server_address = "0x389eba94b330140579cdce1feb1a6e905ff876e6"`
  ```elixir
  # Client:
  server_address = "0x389eba94b330140579cdce1feb1a6e905ff876e6"
  DiodeClient.interface_add("example_client_interface")
  spawn_link(fn ->
    {:ok, ssl} = DiodeClient.port_connect(server_address, 5000)
    :ssl.controlling_process(ssl, self())
    :ssl.setopts(ssl, [packet: :line, active: true])
    Enum.reduce_while(1..10, nil, fn _, _ ->
      receive do
        {:ssl, _ssl, msg} -> {:cont, IO.inspect(msg)}
        other -> {:halt, IO.inspect(other)}
      end
    end)
    :ssl.close(ssl)
    IO.puts("closed!")
  end)
  ```
  """
  use Application
  alias DiodeClient.{ETSLru, Port, Wallet, ShellCache, HashCache, Sup}
  require Logger

  @impl true
  def start(_start_type, _start_args) do
    # LRU cache for shell queries; results tagged :error are not cached so
    # they can be retried.
    ETSLru.new(ShellCache, 10_000, fn
      [:error | _] -> false
      _other -> true
    end)

    ETSLru.new(HashCache, 1_000)
    Supervisor.start_link([], strategy: :one_for_one, name: __MODULE__)
  end

  @doc """
  Configures the client identity (wallet) and starts a named network interface.
  """
  def interface_add(wallet \\ "diode_client_interface", name \\ :default) do
    set_wallet(wallet)
    Supervisor.start_child(__MODULE__, {Sup, name})
  end

  @doc """
  Returns `true` when the named interface child is running under the supervisor.
  """
  def interface_online?(name \\ :default) do
    Supervisor.which_children(__MODULE__)
    |> Enum.any?(fn {cname, pid, _type, _mods} -> cname == name and is_pid(pid) end)
  end

  @spec interface_stop(atom()) :: :ok | {:error, :not_found}
  def interface_stop(name \\ :default) do
    Supervisor.terminate_child(__MODULE__, name)
  end

  @spec interface_restart(atom()) :: {:error, any} | {:ok, pid} | {:ok, pid, any}
  def interface_restart(name \\ :default) do
    Supervisor.restart_child(__MODULE__, name)
  end

  # Destination addresses are accepted in three encodings and normalized to
  # the 20-byte binary form: "0x"-prefixed hex (42 chars = 336 bits), bare
  # hex (40 chars = 320 bits), or raw 20 bytes (160 bits).
  @spec port_connect(binary(), integer(), Keyword.t()) :: {:ok, any()} | {:error, any()}
  def port_connect(destination, port, options \\ [])

  def port_connect(destination = <<_::336>>, port, options) do
    port_connect(DiodeClient.Base16.decode(destination), port, options)
  end

  def port_connect(destination = <<_::320>>, port, options) do
    port_connect(Base.decode16!(destination), port, options)
  end

  def port_connect(destination = <<_::160>>, port, options)
      when (is_integer(port) or is_binary(port)) and is_list(options) do
    ensure_wallet()
    Port.connect(destination, port, options)
  end

  def port_listen(portnum, opts \\ []) when is_integer(portnum) or is_binary(portnum) do
    ensure_wallet()
    Port.listen(portnum, opts)
  end

  def port_accept(listener) do
    ensure_wallet()
    Port.accept(listener)
  end

  def port_close(pid) do
    ensure_wallet()
    Port.close(pid)
  end

  # The wallet may be given as an MFA tuple, a wallet tuple, a 0-arity
  # function, or a path to a private-key file (created when missing).
  @doc false
  def set_wallet(mfa = {m, f, a}) when is_atom(m) and is_atom(f) and is_list(a),
    do: do_set_wallet(mfa)

  def set_wallet({:wallet, privkey, _pubkey, _address} = w) when is_binary(privkey),
    do: set_wallet(fn -> w end)

  # Bug fix: the original declared this clause twice back to back; the second,
  # identical clause could never match and only produced a compiler warning.
  def set_wallet(fun) when is_function(fun, 0), do: do_set_wallet(fun)

  def set_wallet(path) when is_binary(path) do
    if not File.exists?(path) do
      Logger.warn("diode_client is creating a new id at #{path}")
      wallet = Wallet.new()
      File.write!(path, Wallet.privkey!(wallet))
      # Private key material: restrict permissions to owner read/write.
      File.chmod!(path, 0o600)
      wallet
    else
      File.read!(path)
      |> Wallet.from_privkey()
    end
    |> set_wallet()
  end

  # The wallet is write-once: it is stored in :persistent_term and cannot be
  # replaced for the lifetime of the VM.
  defp do_set_wallet(w) do
    if :persistent_term.get({__MODULE__, :wallet}, nil) == nil do
      :persistent_term.put({__MODULE__, :wallet}, w)
    else
      {:error, "can't reset wallet"}
    end
  end

  @doc false
  def wallet() do
    case :persistent_term.get({__MODULE__, :wallet}) do
      {module, fun, args} -> apply(module, fun, args)
      cb when is_function(cb) -> cb.()
    end
  end

  # Lazily configures a default wallet/interface when none was set yet.
  @doc false
  def ensure_wallet() do
    if :persistent_term.get({__MODULE__, :wallet}, nil) == nil do
      interface_add()
    end

    wallet()
  end

  @doc """
  Returns the 20-byte address of the configured wallet.
  """
  def address() do
    Wallet.address!(wallet())
  end

  # Blocks (polling once per second) until the connection manager is up,
  # then returns its current default connection.
  @doc false
  def default_conn() do
    case Process.whereis(DiodeClient.Manager) do
      nil ->
        Process.sleep(1_000)
        default_conn()

      _pid ->
        DiodeClient.Manager.get_connection()
    end
  end

  # Returns all known connections, or [] when the manager is not running.
  @doc false
  def connections() do
    case Process.whereis(DiodeClient.Manager) do
      nil -> []
      _pid -> DiodeClient.Manager.connections()
    end
  end
end
|
lib/diode_client.ex
| 0.786213
| 0.743517
|
diode_client.ex
|
starcoder
|
defmodule Calendar do
@moduledoc """
This module defines the responsibilities for working with
calendars, dates, times and datetimes in Elixir.
Currently it defines types and the minimal implementation
for a calendar behaviour in Elixir. The goal of the Calendar
features in Elixir is to provide a base for interoperability
instead of full-featured datetime API.
For the actual date, time and datetime structures, see `Date`,
`Time`, `NaiveDateTime` and `DateTime`.
Note designations for year, month, day, and the like, are overspecified
(i.e. an integer instead of `1..12` for months) because different
calendars may have a different number of days per month, months per year and so on.
"""
@type year :: integer
@type month :: pos_integer
@type day :: pos_integer
@type week :: pos_integer
@type day_of_week :: non_neg_integer
@type era :: non_neg_integer
@typedoc """
A tuple representing the `day` and the `era`.
"""
@type day_of_era :: {day :: non_neg_integer(), era}
@type hour :: non_neg_integer
@type minute :: non_neg_integer
@type second :: non_neg_integer
@typedoc """
The internal time format is used when converting between calendars.
It represents time as a fraction of a day (starting from midnight).
`parts_in_day` specifies how much of the day is already passed,
while `parts_per_day` signifies how many parts there fit in a day.
"""
@type day_fraction :: {parts_in_day :: non_neg_integer, parts_per_day :: pos_integer}
@typedoc """
The internal date format that is used when converting between calendars.
This is the number of days including the fractional part that has passed of
the last day since 0000-01-01+00:00T00:00.000000 in ISO 8601 notation (also
known as midnight 1 January BC 1 of the proleptic Gregorian calendar).
"""
@type iso_days :: {days :: integer, day_fraction}
@typedoc """
Microseconds with stored precision.
The precision represents the number of digits that must be used when
representing the microseconds to external format. If the precision is 0,
it means microseconds must be skipped.
"""
@type microsecond :: {value :: non_neg_integer, precision :: non_neg_integer}
@typedoc "A calendar implementation"
@type calendar :: module
@typedoc "The time zone ID according to the IANA tz database (for example, Europe/Zurich)"
@type time_zone :: String.t()
@typedoc "The time zone abbreviation (for example, CET or CEST or BST, and such)"
@type zone_abbr :: String.t()
@typedoc """
The time zone UTC offset in seconds for standard time.
See also `t:std_offset/0`.
"""
@type utc_offset :: integer
@typedoc """
The time zone standard offset in seconds (typically not zero in summer times).
It must be added to `t:utc_offset/0` to get the total offset from UTC used for "wall time".
"""
@type std_offset :: integer
@typedoc "Any map/struct that contains the date fields"
@type date :: %{optional(any) => any, calendar: calendar, year: year, month: month, day: day}
@typedoc "Any map/struct that contains the time fields"
@type time :: %{
optional(any) => any,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
}
@typedoc "Any map/struct that contains the naive_datetime fields"
@type naive_datetime :: %{
optional(any) => any,
calendar: calendar,
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
}
@typedoc "Any map/struct that contains the datetime fields"
@type datetime :: %{
optional(any) => any,
calendar: calendar,
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond,
time_zone: time_zone,
zone_abbr: zone_abbr,
utc_offset: utc_offset,
std_offset: std_offset
}
@typedoc """
Specifies the time zone database for calendar operations.
Many functions in the `DateTime` module require a time zone database.
By default, it uses the default time zone database returned by
`Calendar.get_time_zone_database/0`, which defaults to
`Calendar.UTCOnlyTimeZoneDatabase` which only handles "Etc/UTC"
datetimes and returns `{:error, :utc_only_time_zone_database}`
for any other time zone.
Other time zone databases (including ones provided by packages)
can be configured as default either via configuration:
config :elixir, :time_zone_database, CustomTimeZoneDatabase
or by calling `Calendar.put_time_zone_database/1`.
See `Calendar.TimeZoneDatabase` for more information on custom
time zone databases.
"""
@type time_zone_database :: module()
@doc """
Returns how many days there are in the given year-month.
"""
@callback days_in_month(year, month) :: day
@doc """
Returns how many months there are in the given year.
"""
@callback months_in_year(year) :: month
@doc """
Returns `true` if the given year is a leap year.
A leap year is a year of a longer length than normal. The exact meaning
is up to the calendar. A calendar must return `false` if it does not support
the concept of leap years.
"""
@callback leap_year?(year) :: boolean
@doc """
Calculates the day of the week from the given `year`, `month`, and `day`.
The `starting_on` represents the starting day of the week. All
calendars must support at least the `:default` value. They may
also support other values representing their days of the week.
"""
@callback day_of_week(year, month, day, starting_on :: :default | atom) ::
{day_of_week(), first_day_of_week :: non_neg_integer(),
last_day_of_week :: non_neg_integer()}
@doc """
Calculates the day of the year from the given `year`, `month`, and `day`.
"""
@callback day_of_year(year, month, day) :: non_neg_integer()
@doc """
Calculates the quarter of the year from the given `year`, `month`, and `day`.
"""
@callback quarter_of_year(year, month, day) :: non_neg_integer()
@doc """
Calculates the year and era from the given `year`.
"""
@callback year_of_era(year, month, day) :: {year, era}
@doc """
Calculates the day and era from the given `year`, `month`, and `day`.
"""
@callback day_of_era(year, month, day) :: day_of_era()
@doc """
Converts the date into a string according to the calendar.
"""
@callback date_to_string(year, month, day) :: String.t()
@doc """
Converts the datetime (without time zone) into a string according to the calendar.
"""
@callback naive_datetime_to_string(year, month, day, hour, minute, second, microsecond) ::
String.t()
@doc """
Converts the datetime (with time zone) into a string according to the calendar.
"""
@callback datetime_to_string(
year,
month,
day,
hour,
minute,
second,
microsecond,
time_zone,
zone_abbr,
utc_offset,
std_offset
) :: String.t()
@doc """
Converts the time into a string according to the calendar.
"""
@callback time_to_string(hour, minute, second, microsecond) :: String.t()
@doc """
Converts the given datetime (without time zone) into the `t:iso_days/0` format.
"""
@callback naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) ::
iso_days
@doc """
Converts `t:iso_days/0` to the Calendar's datetime format.
"""
@callback naive_datetime_from_iso_days(iso_days) ::
{year, month, day, hour, minute, second, microsecond}
@doc """
Converts the given time to the `t:day_fraction/0` format.
"""
@callback time_to_day_fraction(hour, minute, second, microsecond) :: day_fraction
@doc """
Converts `t:day_fraction/0` to the Calendar's time format.
"""
@callback time_from_day_fraction(day_fraction) :: {hour, minute, second, microsecond}
@doc """
Define the rollover moment for the given calendar.
This is the moment, in your calendar, when the current day ends
and the next day starts.
The result of this function is used to check if two calendars rollover at
the same time of day. If they do not, we can only convert datetimes and times
between them. If they do, this means that we can also convert dates as well
as naive datetimes between them.
This day fraction should be in its most simplified form possible, to make comparisons fast.
## Examples
* If, in your Calendar, a new day starts at midnight, return {0, 1}.
* If, in your Calendar, a new day starts at sunrise, return {1, 4}.
* If, in your Calendar, a new day starts at noon, return {1, 2}.
* If, in your Calendar, a new day starts at sunset, return {3, 4}.
"""
@callback day_rollover_relative_to_midnight_utc() :: day_fraction
@doc """
Should return `true` if the given date describes a proper date in the calendar.
"""
@callback valid_date?(year, month, day) :: boolean
@doc """
Should return `true` if the given time describes a proper time in the calendar.
"""
@callback valid_time?(hour, minute, second, microsecond) :: boolean
@doc """
Parses the string representation for a time returned by `c:time_to_string/4`
into a time-tuple.
"""
@doc since: "1.10.0"
@callback parse_time(String.t()) ::
{:ok, {hour, minute, second, microsecond}}
| {:error, atom}
@doc """
Parses the string representation for a date returned by `c:date_to_string/3`
into a date-tuple.
"""
@doc since: "1.10.0"
@callback parse_date(String.t()) ::
{:ok, {year, month, day}}
| {:error, atom}
@doc """
Parses the string representation for a naive datetime returned by
`c:naive_datetime_to_string/7` into a naive-datetime-tuple.
The given string may contain a timezone offset but it is ignored.
"""
@doc since: "1.10.0"
@callback parse_naive_datetime(String.t()) ::
{:ok, {year, month, day, hour, minute, second, microsecond}}
| {:error, atom}
@doc """
Parses the string representation for a datetime returned by
`c:datetime_to_string/11` into a datetime-tuple.
The returned datetime must be in UTC. The original `utc_offset`
it was written in must be returned in the result.
"""
@doc since: "1.10.0"
@callback parse_utc_datetime(String.t()) ::
{:ok, {year, month, day, hour, minute, second, microsecond}, utc_offset}
| {:error, atom}
# General Helpers
@doc """
Returns `true` if two calendars have the same moment of starting a new day,
`false` otherwise.
If two calendars are not compatible, we can only convert datetimes and times
between them. If they are compatible, this means that we can also convert
dates as well as naive datetimes between them.
"""
@doc since: "1.5.0"
@spec compatible_calendars?(Calendar.calendar(), Calendar.calendar()) :: boolean
def compatible_calendars?(calendar1, calendar2) do
  # Identical calendars are trivially compatible; otherwise compare their
  # day-rollover moments.
  calendar1 == calendar2 or
    calendar1.day_rollover_relative_to_midnight_utc() ==
      calendar2.day_rollover_relative_to_midnight_utc()
end
@doc """
Returns a microsecond tuple truncated to a given precision (`:microsecond`,
`:millisecond` or `:second`).
"""
@doc since: "1.6.0"
@spec truncate(Calendar.microsecond(), :microsecond | :millisecond | :second) ::
        Calendar.microsecond()
def truncate(microsecond_tuple, :microsecond), do: microsecond_tuple

def truncate({microsecond, precision}, :millisecond) do
  # Zero out the sub-millisecond digits and cap the precision at 3.
  truncated_value = div(microsecond, 1000) * 1000
  {truncated_value, min(precision, 3)}
end

def truncate(_, :second), do: {0, 0}
@doc """
Sets the current time zone database.
"""
@doc since: "1.8.0"
@spec put_time_zone_database(time_zone_database()) :: :ok
def put_time_zone_database(database) do
  # Stored in the :elixir application environment; read back by
  # get_time_zone_database/0.
  :ok = Application.put_env(:elixir, :time_zone_database, database)
end
@doc """
Gets the current time zone database.
"""
@doc since: "1.8.0"
@spec get_time_zone_database() :: time_zone_database()
def get_time_zone_database() do
  # Falls back to the UTC-only database when none was configured.
  default = Calendar.UTCOnlyTimeZoneDatabase
  Application.get_env(:elixir, :time_zone_database, default)
end
@doc """
Formats received datetime into a string.
The datetime can be any of the Calendar types (`Time`, `Date`,
`NaiveDateTime`, and `DateTime`) or any map, as long as they
contain all of the relevant fields necessary for formatting.
For example, if you use `%Y` to format the year, the datetime
must have the `:year` field. Therefore, if you pass a `Time`,
or a map without the `:year` field to a format that expects `%Y`,
an error will be raised.
## Options
* `:preferred_datetime` - a string for the preferred format to show datetimes,
it can't contain the `%c` format and defaults to `"%Y-%m-%d %H:%M:%S"`
if the option is not received
* `:preferred_date` - a string for the preferred format to show dates,
it can't contain the `%x` format and defaults to `"%Y-%m-%d"`
if the option is not received
* `:preferred_time` - a string for the preferred format to show times,
it can't contain the `%X` format and defaults to `"%H:%M:%S"`
if the option is not received
* `:am_pm_names` - a function that receives either `:am` or `:pm` and returns
the name of the period of the day, if the option is not received it defaults
to a function that returns `"am"` and `"pm"`, respectively
* `:month_names` - a function that receives a number and returns the name of
the corresponding month, if the option is not received it defaults to a
function that returns the month names in English
* `:abbreviated_month_names` - a function that receives a number and returns the
abbreviated name of the corresponding month, if the option is not received it
defaults to a function that returns the abbreviated month names in English
* `:day_of_week_names` - a function that receives a number and returns the name of
the corresponding day of week, if the option is not received it defaults to a
function that returns the day of week names in English
* `:abbreviated_day_of_week_names` - a function that receives a number and returns
the abbreviated name of the corresponding day of week, if the option is not received
it defaults to a function that returns the abbreviated day of week names in English
## Formatting syntax
The formatting syntax for strftime is a sequence of characters in the following format:
%<padding><width><format>
where:
* `%`: indicates the start of a formatted section
* `<padding>`: set the padding (see below)
* `<width>`: a number indicating the minimum size of the formatted section
* `<format>`: the format itself (see below)
### Accepted padding options
* `-`: no padding, removes all padding from the format
* `_`: pad with spaces
* `0`: pad with zeroes
### Accepted formats
The accepted formats are:
Format | Description | Examples (in ISO)
:----- | :-----------------------------------------------------------------------| :------------------------
a | Abbreviated name of day | Mon
A | Full name of day | Monday
b | Abbreviated month name | Jan
B | Full month name | January
c | Preferred date+time representation | 2018-10-17 12:34:56
d | Day of the month | 01, 31
f | Microseconds *(does not support width and padding modifiers)* | 000000, 999999, 0123
H | Hour using a 24-hour clock | 00, 23
I | Hour using a 12-hour clock | 01, 12
j | Day of the year | 001, 366
m | Month | 01, 12
M | Minute | 00, 59
p | "AM" or "PM" (noon is "PM", midnight as "AM") | AM, PM
P | "am" or "pm" (noon is "pm", midnight as "am") | am, pm
q | Quarter | 1, 2, 3, 4
S | Second | 00, 59, 60
u | Day of the week | 1 (Monday), 7 (Sunday)
x | Preferred date (without time) representation | 2018-10-17
X | Preferred time (without date) representation | 12:34:56
y | Year as 2-digits | 01, 01, 86, 18
Y | Year | -0001, 0001, 1986
z | +hhmm/-hhmm time zone offset from UTC (empty string if naive) | +0300, -0530
Z | Time zone abbreviation (empty string if naive) | CET, BRST
% | Literal "%" character | %
Any other character will be interpreted as an invalid format and raise an error
## Examples
Without options:
iex> Calendar.strftime(~U[2019-08-26 13:52:06.0Z], "%y-%m-%d %I:%M:%S %p")
"19-08-26 01:52:06 PM"
iex> Calendar.strftime(~U[2019-08-26 13:52:06.0Z], "%a, %B %d %Y")
"Mon, August 26 2019"
iex> Calendar.strftime(~U[2020-04-02 13:52:06.0Z], "%B %-d, %Y")
"April 2, 2020"
iex> Calendar.strftime(~U[2019-08-26 13:52:06.0Z], "%c")
"2019-08-26 13:52:06"
With options:
iex> Calendar.strftime(~U[2019-08-26 13:52:06.0Z], "%c", preferred_datetime: "%H:%M:%S %d-%m-%y")
"13:52:06 26-08-19"
iex> Calendar.strftime(
...> ~U[2019-08-26 13:52:06.0Z],
...> "%A",
...> day_of_week_names: fn day_of_week ->
...> {"segunda-feira", "terça-feira", "quarta-feira", "quinta-feira",
...> "sexta-feira", "sábado", "domingo"}
...> |> elem(day_of_week - 1)
...> end
...>)
"segunda-feira"
iex> Calendar.strftime(
...> ~U[2019-08-26 13:52:06.0Z],
...> "%B",
...> month_names: fn month ->
...> {"январь", "февраль", "март", "апрель", "май", "июнь",
...> "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}
...> |> elem(month - 1)
...> end
...>)
"август"
"""
@doc since: "1.11.0"
@spec strftime(map(), String.t(), keyword()) :: String.t()
def strftime(date_or_time_or_datetime, string_format, user_options \\ [])
    when is_map(date_or_time_or_datetime) and is_binary(string_format) do
  # Resolve the option map once, walk the format string accumulating
  # iodata, then flatten that iodata into the final binary.
  format_options = options(user_options)

  string_format
  |> parse(date_or_time_or_datetime, format_options, [])
  |> IO.iodata_to_binary()
end
# Walks the format string byte by byte. Literal bytes are copied into the
# reversed accumulator; a "%" hands control to parse_modifiers/4. The
# accumulator is flipped exactly once when the input is exhausted.
defp parse("", _datetime, _format_options, acc) do
  Enum.reverse(acc)
end

defp parse("%" <> rest, datetime, format_options, acc) do
  parse_modifiers(rest, nil, nil, {datetime, format_options, acc})
end

defp parse(<<byte, rest::binary>>, datetime, format_options, acc) do
  parse(rest, datetime, format_options, [byte | acc])
end
# Parses the optional flag and width modifiers that may sit between "%" and
# the format character: "-" (disable padding), "0" (zero pad), "_" (space
# pad), and decimal width digits. Clause order is significant: the two
# catch-all clauses near the bottom fill in the default pad and then the
# default width before handing off to format_modifiers/6.

# "-" disables padding by using the empty string as the pad value.
defp parse_modifiers("-" <> rest, width, nil, parser_data) do
  parse_modifiers(rest, width, "", parser_data)
end

defp parse_modifiers("0" <> rest, nil, nil, parser_data) do
  parse_modifiers(rest, nil, ?0, parser_data)
end

defp parse_modifiers("_" <> rest, width, nil, parser_data) do
  parse_modifiers(rest, width, ?\s, parser_data)
end

# Accumulates a multi-digit width, e.g. "12" -> 12.
defp parse_modifiers(<<digit, rest::binary>>, width, pad, parser_data) when digit in ?0..?9 do
  new_width = (width || 0) * 10 + (digit - ?0)
  parse_modifiers(rest, new_width, pad, parser_data)
end

# set default padding if none was specified
defp parse_modifiers(<<format, _::binary>> = rest, width, nil, parser_data) do
  parse_modifiers(rest, width, default_pad(format), parser_data)
end

# set default width if none was specified
defp parse_modifiers(<<format, _::binary>> = rest, nil, pad, parser_data) do
  parse_modifiers(rest, default_width(format), pad, parser_data)
end

# Both pad and width are resolved by now; format the field itself.
defp parse_modifiers(rest, width, pad, {datetime, format_options, acc}) do
  format_modifiers(rest, width, pad, datetime, format_options, acc)
end
# Resolves the meridiem designator for a 24-hour-clock hour via the
# user-configurable :am_pm_names callback (hour 12 and later is :pm).
defp am_pm(hour, format_options) do
  meridiem = if hour > 11, do: :pm, else: :am
  format_options.am_pm_names.(meridiem)
end
# Name-like fields default to space padding; numeric fields to zero padding.
defp default_pad(format) do
  if format in 'aAbBpPZ', do: ?\s, else: ?0
end
# Minimum field width applied when the format string gives none:
# two digits for most numeric fields, three for day-of-year, four for
# year and offset, and no padding for everything else.
defp default_width(format) do
  cond do
    format in 'dHImMSy' -> 2
    format == ?j -> 3
    format in 'Yz' -> 4
    true -> 0
  end
end
# Dispatches on the format character that follows "%" (and its modifiers),
# emits the formatted field into the accumulator, and hands the remaining
# input back to parse/4. Clause order matters: each `preferred_*_invoked`
# guard clause must precede its formatting clause (it stops "%c" inside a
# preferred_datetime format from recursing forever), and the offset-capable
# "z" clause must precede its naive fallback.

# Literally just %
defp format_modifiers("%" <> rest, width, pad, datetime, format_options, acc) do
  parse(rest, datetime, format_options, [pad_leading("%", width, pad) | acc])
end

# Abbreviated name of day
defp format_modifiers("a" <> rest, width, pad, datetime, format_options, acc) do
  result =
    datetime
    |> Date.day_of_week()
    |> format_options.abbreviated_day_of_week_names.()
    |> pad_leading(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Full name of day
defp format_modifiers("A" <> rest, width, pad, datetime, format_options, acc) do
  result =
    datetime
    |> Date.day_of_week()
    |> format_options.day_of_week_names.()
    |> pad_leading(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Abbreviated month name
defp format_modifiers("b" <> rest, width, pad, datetime, format_options, acc) do
  result =
    datetime.month
    |> format_options.abbreviated_month_names.()
    |> pad_leading(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Full month name
defp format_modifiers("B" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.month |> format_options.month_names.() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Preferred date+time representation — reject "%c" nested inside the
# preferred_datetime format itself, which would otherwise loop forever.
defp format_modifiers(
       "c" <> _rest,
       _width,
       _pad,
       _datetime,
       %{preferred_datetime_invoked: true},
       _acc
     ) do
  raise ArgumentError,
        "tried to format preferred_datetime within another preferred_datetime format"
end

defp format_modifiers("c" <> rest, width, pad, datetime, format_options, acc) do
  result =
    format_options.preferred_datetime
    |> parse(datetime, %{format_options | preferred_datetime_invoked: true}, [])
    |> pad_preferred(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Day of the month
defp format_modifiers("d" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.day |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Microseconds — width/pad modifiers are deliberately ignored; the digit
# count comes from the value's stored precision (at least one digit, even
# for precision 0).
defp format_modifiers("f" <> rest, _width, _pad, datetime, format_options, acc) do
  {microsecond, precision} = datetime.microsecond

  result =
    microsecond
    |> Integer.to_string()
    |> String.pad_leading(6, "0")
    |> binary_part(0, max(precision, 1))

  parse(rest, datetime, format_options, [result | acc])
end

# Hour using a 24-hour clock
defp format_modifiers("H" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.hour |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Hour using a 12-hour clock: rem(h + 23, 12) + 1 maps 0 -> 12, 13 -> 1, etc.
defp format_modifiers("I" <> rest, width, pad, datetime, format_options, acc) do
  result = (rem(datetime.hour() + 23, 12) + 1) |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Day of the year
defp format_modifiers("j" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime |> Date.day_of_year() |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Month
defp format_modifiers("m" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.month |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Minute
defp format_modifiers("M" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.minute |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# "AM" or "PM" (noon is "PM", midnight as "AM")
defp format_modifiers("p" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.hour |> am_pm(format_options) |> String.upcase() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# "am" or "pm" (noon is "pm", midnight as "am")
defp format_modifiers("P" <> rest, width, pad, datetime, format_options, acc) do
  result =
    datetime.hour
    |> am_pm(format_options)
    |> String.downcase()
    |> pad_leading(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Quarter
defp format_modifiers("q" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime |> Date.quarter_of_year() |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Second
defp format_modifiers("S" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.second |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Day of the week
defp format_modifiers("u" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime |> Date.day_of_week() |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Preferred date (without time) representation — same recursion guard as "%c".
defp format_modifiers(
       "x" <> _rest,
       _width,
       _pad,
       _datetime,
       %{preferred_date_invoked: true},
       _acc
     ) do
  raise ArgumentError,
        "tried to format preferred_date within another preferred_date format"
end

defp format_modifiers("x" <> rest, width, pad, datetime, format_options, acc) do
  result =
    format_options.preferred_date
    |> parse(datetime, %{format_options | preferred_date_invoked: true}, [])
    |> pad_preferred(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Preferred time (without date) representation — same recursion guard as "%c".
defp format_modifiers(
       "X" <> _rest,
       _width,
       _pad,
       _datetime,
       %{preferred_time_invoked: true},
       _acc
     ) do
  raise ArgumentError,
        "tried to format preferred_time within another preferred_time format"
end

defp format_modifiers("X" <> rest, width, pad, datetime, format_options, acc) do
  result =
    format_options.preferred_time
    |> parse(datetime, %{format_options | preferred_time_invoked: true}, [])
    |> pad_preferred(width, pad)

  parse(rest, datetime, format_options, [result | acc])
end

# Year as 2-digits — rem/2 keeps the sign of the year, so negative years
# yield a leading "-".
defp format_modifiers("y" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.year |> rem(100) |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Year
defp format_modifiers("Y" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime.year |> Integer.to_string() |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# +hhmm/-hhmm time zone offset from UTC; matches only when the value carries
# utc_offset/std_offset fields (i.e. a DateTime-shaped map).
defp format_modifiers(
       "z" <> rest,
       width,
       pad,
       datetime = %{utc_offset: utc_offset, std_offset: std_offset},
       format_options,
       acc
     ) do
  absolute_offset = abs(utc_offset + std_offset)

  offset_number =
    Integer.to_string(div(absolute_offset, 3600) * 100 + rem(div(absolute_offset, 60), 60))

  sign = if utc_offset + std_offset >= 0, do: "+", else: "-"
  result = "#{sign}#{pad_leading(offset_number, width, pad)}"
  parse(rest, datetime, format_options, [result | acc])
end

# Naive fallback for "z": no offset fields, so emit the empty string.
defp format_modifiers("z" <> rest, _width, _pad, datetime, format_options, acc) do
  parse(rest, datetime, format_options, ["" | acc])
end

# Time zone abbreviation (empty string if naive)
defp format_modifiers("Z" <> rest, width, pad, datetime, format_options, acc) do
  result = datetime |> Map.get(:zone_abbr, "") |> pad_leading(width, pad)
  parse(rest, datetime, format_options, [result | acc])
end

# Any unrecognized format character is an error; report the grapheme that
# followed the "%".
defp format_modifiers(rest, _width, _pad, _datetime, _format_options, _acc) do
  {next, _rest} = String.next_grapheme(rest) || {"", ""}
  raise ArgumentError, "invalid strftime format: %#{next}"
end
# Left-pads a preferred-format result. The result is the iodata list built
# by parse/4, so length/1 counts emitted fields/bytes at the top level of
# that list (mirroring how parse/4 constructs it), not grapheme count.
defp pad_preferred(result, width, pad) do
  if length(result) < width do
    pad_preferred([pad | result], width, pad)
  else
    result
  end
end
# Left-pads `string` with `padding` bytes up to `count` total bytes,
# returning iodata; strings already at least `count` bytes pass through.
defp pad_leading(string, count, padding) do
  case count - byte_size(string) do
    missing when missing > 0 -> do_pad_leading(missing, padding, string)
    _ -> string
  end
end
# Prepends `count` copies of the pad byte onto `acc`. `acc` is typically a
# binary, so the result is an improper-tailed list — still valid iodata for
# IO.iodata_to_binary/1, and term-identical to building it by recursion.
defp do_pad_leading(count, padding, acc) do
  List.duplicate(padding, count) ++ acc
end
# Builds the format-option map: user-supplied options override the
# defaults; any key that is not a known option raises.
defp options(user_options) do
  Enum.reduce(user_options, default_options(), &put_option/2)
end

# Replaces one known option in the accumulated map, raising on unknown keys.
defp put_option({key, value}, acc) do
  if Map.has_key?(acc, key) do
    %{acc | key => value}
  else
    raise ArgumentError, "unknown option #{inspect(key)} given to Calendar.strftime/3"
  end
end

# Built-in defaults: ISO-8601-style preferred formats, English month and
# weekday names, and the recursion-guard flags used by "%c"/"%x"/"%X".
defp default_options do
  %{
    preferred_date: "%Y-%m-%d",
    preferred_time: "%H:%M:%S",
    preferred_datetime: "%Y-%m-%d %H:%M:%S",
    am_pm_names: fn
      :am -> "am"
      :pm -> "pm"
    end,
    month_names: fn month ->
      {"January", "February", "March", "April", "May", "June", "July", "August", "September",
       "October", "November", "December"}
      |> elem(month - 1)
    end,
    day_of_week_names: fn day_of_week ->
      {"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}
      |> elem(day_of_week - 1)
    end,
    abbreviated_month_names: fn month ->
      {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}
      |> elem(month - 1)
    end,
    abbreviated_day_of_week_names: fn day_of_week ->
      {"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"} |> elem(day_of_week - 1)
    end,
    preferred_datetime_invoked: false,
    preferred_date_invoked: false,
    preferred_time_invoked: false
  }
end
end
|
lib/elixir/lib/calendar.ex
| 0.942823
| 0.778144
|
calendar.ex
|
starcoder
|
defmodule Packer do
  @moduledoc """
  Serializes arbitrary Elixir terms into a compact schema+buffer encoding
  (delegating to `Packer.Encode`) and decodes them back (`Packer.Decode`).
  """

  @typedoc "Options accepted by `encode/2`."
  @type encode_options() :: [
          {:compress, boolean},
          {:small_int, boolean},
          {:header, :version | :full | :none},
          {:format, :iolist | :binary}
        ]

  @typedoc "Options accepted by `decode/2`."
  @type decode_options() :: [
          {:compress, boolean},
          {:header, :version | :full | :none}
        ]

  @doc """
  Encodes a term. Returns an array with the version-specific magic string, the
  data schema, and buffer.

  Options supported include:

  * compress: boolean, defaulting to true
  * small_int: boolean, defaulting to true. When false integers that would fit into a single byte
    are instead encoded in 2 bytes; for large terms with a mix of single- and two-byte
    encodable integers, setting small_int to false can result in a significantly
    smaller schema
  * header: atom, defaulting to :version; controls what sort of header information, if any, to
    prepend to return. Recognized values:
    ** :version -> the version number of the encoding is prepended
    ** :full -> a header suitable for durable storage (e.g. a file) is prepended
    ** :none -> no header; fewer bytes, less safety. buckle up, cowboy!
  * format: atom, defaulting to :iolist; controls the format of the resulting encoding
    ** :iolist: returns a list of binaries, [header, schema, data] or [schema, data]
    ** :binary: returns a single binary buffer
  """
  # Fix: the option is documented (twice) as `small_int`, but the previous
  # spec listed `:short_int`; the spec now matches the documentation.
  # NOTE(review): confirm against Packer.Encode.from_term/2.
  # Fix: with `format: :binary` the result is a single binary, so the spec
  # return type is widened to `iolist() | binary()`.
  @spec encode(term :: any(), opts :: encode_options()) :: iolist() | binary()
  defdelegate encode(term, opts \\ []), to: Packer.Encode, as: :from_term

  @doc """
  Decodes iodata to a term. The output of Packer.encode/2 is expected, and this function is
  indeed symmetric to Packer.encode/2. It also supports a similar set of options:

  * compress: boolean, defaults to true; if the payload was not compressed, leaving this as
    true will be less efficient but should not be harmful
  * header: the type of header that is expected, from :version (the default), :full, or :none
  """
  @spec decode(data :: iodata(), opts :: decode_options()) :: any()
  defdelegate decode(data, opts \\ []), to: Packer.Decode, as: :from

  @doc """
  Returns the magic string header prepended to encodings. The type parameter can be :none,
  :full or :version. The default is :version, which is a minimal prefix of one byte containing
  the version of the encoding.
  """
  @spec encoded_term_header(type :: :none | :full | :version) :: String.t()
  defdelegate encoded_term_header(type \\ :version), to: Packer.Encode
end
|
lib/packer.ex
| 0.870446
| 0.76882
|
packer.ex
|
starcoder
|
defmodule Server.Pycomm do
  use GenServer

  @moduledoc """
  This module is a GenServer that manages the Python code that will be executed.
  The general idea is that you will call functions in the library's API and this will usually add
  code here. Then when you call Expyplot.Plot.show(), it will flush the code to the Python3
  interpreter, thus making the plot.
  """

  # TCP port the Python side listens on, and the sentinel string that marks
  # the end of a code payload / reply on the wire.
  @pyport 9849
  @signal "$<>;;<>%"

  ## Client API

  @doc """
  Starts the server.
  """
  def start_link(name) do
    GenServer.start_link(__MODULE__, :ok, name: name)
  end

  @doc """
  Adds the given code to the buffer.

  This is a synchronous call with an `:infinity` timeout: it blocks until
  the Python side replies with a sentinel-terminated result.
  """
  def eval_code(pid, code) do
    GenServer.call(pid, {:eval, code}, :infinity)
  end

  ## Server Callbacks

  # :gen_tcp.connect/3 returns {:ok, socket} | {:error, reason}, which
  # doubles as a valid GenServer init return: the socket becomes the server
  # state, and a failed connect aborts startup.
  def init(:ok) do
    :gen_tcp.connect('localhost', @pyport, [:binary, packet: :line, active: false])
  end

  def handle_call(call, _from, to_python) do
    case call do
      {:eval, ""} ->
        # Empty code is treated as a programmer error: raising here crashes
        # the GenServer (and surfaces in the caller) rather than silently
        # sending nothing.
        raise ArgumentError, message: "Cannot supply Python with empty code string"
      {:eval, code} ->
        return_val = send_to_python(code, to_python)
        {:reply, return_val, to_python}
    end
  end

  ## Helper Functions

  defp send_to_python(code, to_python) do
    do_send_to_python(code, to_python)
  end

  # Streams `code` in chunks that leave room for the sentinel within a
  # 4096-char frame. Only the final chunk is terminated with "\n" <> @signal,
  # after which a single reply is read back.
  # NOTE(review): `String.slice/2` with a `(max_size + 1)..-1` range relies on
  # a negative range bound; recent Elixir versions deprecate this spelling in
  # favor of `..-1//1` — confirm the minimum supported Elixir version before
  # changing it.
  defp do_send_to_python(code, to_python) do
    max_size = 4096 - String.length(@signal)
    chunk = code |> String.slice(0..max_size)
    leftover = code |> String.slice((max_size + 1)..-1)
    cond do
      String.length(chunk) <= max_size ->
        write_line(chunk <> "\n" <> @signal, to_python)
        read_line to_python
      String.length(chunk) > max_size ->
        write_line(chunk, to_python)
        do_send_to_python(leftover, to_python)
    end
  end

  defp read_line(from) do
    do_read_line(from, "")
  end

  # Accumulates `packet: :line` frames until one ends with the sentinel,
  # then returns everything received with the sentinel and its trailing
  # newline stripped. Crashes (MatchError) if the socket errors or closes.
  defp do_read_line(from, acc) do
    {:ok, data} = :gen_tcp.recv(from, 0)
    if String.ends_with?(data, @signal <> "\n") do
      i = String.length(data) - (String.length(@signal <> "\n") + 1)
      last = String.slice(data, 0..i)
      acc <> last
    else
      do_read_line(from, acc <> data)
    end
  end

  defp write_line(line, to) do
    :gen_tcp.send(to, line)
  end
end
|
lib/server/pycomm.ex
| 0.677154
| 0.490175
|
pycomm.ex
|
starcoder
|
defmodule InteropProxy do
  @moduledoc """
  Entrypoint for the Interop Proxy service.
  This service wraps the behavior of the AUVSI SUAS Interoperability
  Server for use from other services in the stack.
  """

  alias InteropProxy.Message.Interop.{InteropTelem, Odlc}
  alias InteropProxy.Request
  alias InteropProxy.Sanitize

  import InteropProxy.Session, only: [url: 0, cookie: 0, mission_id: 0]

  @doc """
  Return the mission on the server.

  A 404 from the server is not treated as an error: it is sanitized from
  `nil` into an empty mission message.
  """
  def get_mission do
    case Request.get_mission url(), cookie(), mission_id() do
      {:ok, mission} ->
        message = mission |> Sanitize.sanitize_mission
        {:ok, message}
      {:error, {:message, 404, _content}} ->
        message = nil |> Sanitize.sanitize_mission
        {:ok, message}
      {:error, _} = other ->
        other
    end
  end

  @doc """
  Same as `get_mission/0`, but raises an exception on failure.
  """
  def get_mission!, do: when_ok get_mission()

  @doc """
  Return the stationary obstacles on the server from the mission.
  """
  def get_obstacles do
    case Request.get_mission url(), cookie(), mission_id() do
      {:ok, mission} ->
        message = mission |> Sanitize.sanitize_obstacles
        {:ok, message}
      {:error, {:message, 404, _content}} ->
        # Bug fix: this 404 fallback previously piped into
        # Sanitize.sanitize_mission, unlike the success branch above; the
        # empty fallback must use the same obstacle sanitizer.
        message = [] |> Sanitize.sanitize_obstacles
        {:ok, message}
      {:error, _} = other -> other
    end
  end

  @doc """
  Same as `get_obstacles/0`, but raises an exception on failure.
  """
  def get_obstacles!, do: when_ok get_obstacles()

  @doc """
  Post telemetry to the server.
  """
  def post_telemetry(%InteropTelem{} = telem) do
    outgoing_telem = Sanitize.sanitize_outgoing_telemetry telem
    case Request.post_telemetry url(), cookie(), outgoing_telem do
      {:ok, message} -> {:ok, Sanitize.sanitize_message(message)}
      {:error, _} = other -> other
    end
  end

  @doc """
  Same as `post_telemetry/1`, but raises an exception on failure.
  """
  def post_telemetry!(telem), do: when_ok post_telemetry(telem)

  @doc """
  Post a new odlc on the server.

  Note that images can be included with the odlc and will be sent in
  another request.
  """
  def post_odlc(%Odlc{} = odlc) do
    {outgoing_odlc, image} = Sanitize.sanitize_outgoing_odlc odlc
    case Request.post_odlc url(), cookie(), outgoing_odlc, mission_id() do
      {:ok, returned} ->
        returned = Sanitize.sanitize_odlc returned
        if image !== <<>> do
          case Request.post_odlc_image url(), cookie(), returned.id, image do
            {:ok, _} -> {:ok, returned}
            {:error, _} ->
              # Delete odlc that was submitted if image submission fails.
              # NOTE(review): a successful cleanup still returns
              # {:ok, returned} even though the odlc no longer exists on the
              # server — confirm this is the intended contract.
              case Request.delete_odlc url(), cookie(), returned.id do
                {:ok, _} -> {:ok, returned}
                {:error, _} = other -> other
              end
          end
        else
          {:ok, returned}
        end
      {:error, _} = other ->
        other
    end
  end

  @doc """
  Same as `post_odlc/1`, but raises an exception on failure.
  """
  def post_odlc!(odlc), do: when_ok post_odlc(odlc)

  @doc """
  Get all odlcs on the server.

  By default images are not also returned for each target, to return
  the images, pass in `image: true` with the options.
  """
  def get_odlcs(opts \\ []) do
    case Request.get_odlcs url(), cookie(), mission_id() do
      {:ok, odlcs} ->
        # Fetch each odlc's image, or an empty binary when images were not
        # requested.
        image_resp_list = Enum.map odlcs, fn odlc ->
          case Keyword.get opts, :image, false do
            true -> Request.get_odlc_image url(), cookie(), odlc["id"]
            false -> {:ok, <<>>}
          end
        end
        # Any single failed image fetch fails the whole call.
        first_error = Enum.find image_resp_list, fn
          {:ok, _} -> false
          {:error, _} -> true
        end
        if first_error === nil do
          message = Enum.zip(odlcs, image_resp_list)
          |> Enum.map(fn {odlc, {:ok, image}} ->
            Sanitize.sanitize_odlc odlc, image
          end)
          |> Sanitize.sanitize_odlc_list
          {:ok, message}
        else
          first_error
        end
      {:error, _} = other ->
        other
    end
  end

  @doc """
  Same as `get_odlcs/1`, but raises an exception on failure.
  """
  def get_odlcs!(opts \\ []), do: when_ok get_odlcs(opts)

  @doc """
  Get an odlc on the server by its id.

  By default images are not also returned for the target, to return
  the image, pass in `image: true` with the options.
  """
  def get_odlc(id, opts \\ []) do
    case Request.get_odlc url(), cookie(), id do
      {:ok, odlc} ->
        image_resp = case Keyword.get opts, :image, false do
          true -> Request.get_odlc_image url(), cookie(), id
          false -> {:ok, <<>>}
        end
        case image_resp do
          {:ok, image} -> {:ok, Sanitize.sanitize_odlc(odlc, image)}
          {:error, _} = other -> other
        end
      {:error, _} = other ->
        other
    end
  end

  @doc """
  Same as `get_odlc/2`, but raises an exception on failure.
  """
  # Consistency fix: the second argument is an option list (as in
  # get_odlc/2), not an odlc, so it is named `opts`.
  def get_odlc!(id, opts \\ []), do: when_ok get_odlc(id, opts)

  @doc """
  Update an odlc on the server.

  Unlike the `PUT /api/odlcs` endpoint on the interop server, the
  odlc passed here is completely replaced by the input.

  Note that images can be included with the odlc and will be sent in
  another request, an empty binary value will not replace the
  image on the servers.
  """
  def put_odlc(id, %Odlc{} = odlc) do
    {outgoing_odlc, image} = Sanitize.sanitize_outgoing_odlc odlc
    case Request.put_odlc url(), cookie(), id, outgoing_odlc, mission_id() do
      {:ok, returned} ->
        returned = Sanitize.sanitize_odlc returned
        if image !== <<>> do
          case Request.put_odlc_image url(), cookie(), returned.id, image do
            {:ok, _} -> {:ok, returned}
            other -> other
          end
        else
          {:ok, returned}
        end
      {:error, _} = other ->
        other
    end
  end

  @doc """
  Same as `put_odlc/2`, but raises an exception on failure.
  """
  def put_odlc!(id, odlc), do: when_ok put_odlc(id, odlc)

  @doc """
  Delete an odlc on the server.
  """
  def delete_odlc(id) do
    case Request.delete_odlc url(), cookie(), id do
      {:ok, message} -> {:ok, Sanitize.sanitize_message(message)}
      {:error, _} = other -> other
    end
  end

  @doc """
  Same as `delete_odlc/1`, but raises an exception on failure.
  """
  def delete_odlc!(id), do: when_ok delete_odlc(id)

  # For the bang methods: unwrap a successful result, or raise with a
  # descriptive message (previously an {:error, _} tuple raised a bare
  # FunctionClauseError with no context).
  defp when_ok({:ok, message}), do: message
  defp when_ok({:error, reason}), do: raise "interop request failed: #{inspect(reason)}"
end
|
services/interop-proxy/lib/interop_proxy.ex
| 0.794425
| 0.486575
|
interop_proxy.ex
|
starcoder
|
defmodule Fsharpy.FromFsharp do
  @moduledoc """
  Helper functions to take FSI output and convert `val` items into their
  approximate Elixir equivalents.
  """

  @doc """
  Extracts every `val ...` binding from raw FSI output and deserializes
  each one, returning a list of `%{name => value}` maps.
  """
  def get_vals raw_output do
    raw_output
    # FSI wraps long list values onto the next line; rejoin them first.
    |> String.replace("list =\n", "list = ")
    |> String.split("\n")
    |> Enum.filter(fn z -> String.starts_with?(z, "val ") end)
    |> Enum.map(fn z -> get_val(z) end)
  end

  @doc """
  Parses a single `val <name> : <type> = <value>` line into a
  `%{name => value}` map, converting the value to the closest Elixir term
  when the type is recognized. Lines with an unrecognized type are returned
  as the raw `%{name: ..., type: ..., value: ...}` parts map.
  """
  def get_val line do
    [name_part | [rest]] = String.split(line, " : ", parts: 2, trim: true)
    [type_part | [value_part]] = String.split(rest, " = ", parts: 2, trim: true)
    variable_name = String.trim_leading(name_part, "val ")
    map = %{name: variable_name, type: type_part, value: value_part}
    attempt_deserialization map
  end

  # One clause per recognized F# type; unknown types fall through unchanged.
  defp attempt_deserialization(%{name: variable_name, type: "int", value: value_part}) do
    %{variable_name => to_int(value_part)}
  end

  defp attempt_deserialization(%{name: variable_name, type: "float", value: value_part}) do
    %{variable_name => to_float(value_part)}
  end

  defp attempt_deserialization(%{name: variable_name, type: "string", value: value_part}) do
    new_value =
      value_part
      |> String.replace_prefix("\"", "")
      |> String.replace_suffix("\"", "")

    %{variable_name => new_value}
  end

  defp attempt_deserialization(%{name: variable_name, type: "bool", value: value_part}) do
    # FSI only prints "true"/"false" for bools; anything else is a bug and
    # crashes loudly (CaseClauseError).
    new_value = case value_part do
      "true" -> true
      "false" -> false
    end
    %{variable_name => new_value}
  end

  defp attempt_deserialization(%{name: variable_name, type: "int * int", value: value_part}) do
    %{variable_name => to_tuple(value_part, &(to_int/1))}
  end

  defp attempt_deserialization(%{name: variable_name, type: "int * int * int", value: value_part}) do
    %{variable_name => to_tuple(value_part, &(to_int/1))}
  end

  defp attempt_deserialization(%{name: variable_name, type: "float * float", value: value_part}) do
    %{variable_name => to_tuple(value_part, &(to_float/1))}
  end

  defp attempt_deserialization(%{name: variable_name, type: "float * float * float", value: value_part}) do
    %{variable_name => to_tuple(value_part, &(to_float/1))}
  end

  defp attempt_deserialization(%{name: variable_name, type: "int list", value: value_part}) do
    # Robustness fix: `trim: true` makes the empty list "[]" deserialize to
    # [] instead of crashing in to_int/1 on Integer.parse("").
    new_value =
      value_part
      |> String.replace_prefix("[", "") |> String.replace_suffix("]", "")
      |> String.split("; ", trim: true)
      |> Enum.map(fn (x) -> to_int(x) end)
    %{variable_name => new_value}
  end

  defp attempt_deserialization(map) do
    map
  end

  # "(1, 2)" -> {1, 2}, applying parser_fun to each element.
  defp to_tuple(value_part, parser_fun) do
    elements = value_part
    |> String.trim_leading("(")
    |> String.trim_trailing(")")
    |> String.split(", ", trim: true)
    list = elements |> Enum.map(fn x -> parser_fun.(x) end)
    list |> List.to_tuple
  end

  # Raises MatchError if the string does not begin with a number.
  defp to_int int_string do
    {result, _} = Integer.parse(int_string)
    result
  end

  defp to_float float_string do
    {result, _} = Float.parse(float_string)
    result
  end
end
|
lib/Fsharpy/from_fsharp.ex
| 0.779448
| 0.436622
|
from_fsharp.ex
|
starcoder
|
defmodule Day02 do
@moduledoc """
Advent of Code 2018, day 2.
"""
@doc """
Read a file of strings;
Check each string for duplicate and triplicate characters;
Multiply the number of words that contain dups with the number of words that contain trips.
Conditions:
A dup must have exactly two chars, i.e. "abca" is a dup, "abcadaga" is not. Same for trips.
If a word has multiple dups or multiple trips, count just one.
If a word has both a dup and a trip, count both.
## Examples
iex> Day02.part1("data/day02.txt")
8715
"""
def part1(file_name) do
# char_groups is a list of lists of lists. Each element in char_groups
# corresponds to a string in the file. The string has been transformed
# into a list of lists. Each inner list is a list of one or more
# identical chars that we can count.
char_groups =
File.stream!(file_name)
# We can leave the newlines because they won't affect the results
|> Enum.map(&group_chars/1)
count_chars(char_groups, 2) * count_chars(char_groups, 3)
end
defp group_chars(s) do
# From "aba" to ["a", "b", "a"]
String.graphemes(s)
# From ["a", "b", "a"] to %{"a" => ["a", "a"], "b" => ["b"]}
|> Enum.group_by(& &1)
# From %{"a" => ["a", "a"], "b" => ["b"]} to [["a", "a"], ["b"]]
|> Map.values()
end
defp count_chars(char_groups, len) do
char_groups
|> Enum.filter(&Enum.any?(&1, fn letter_list -> length(letter_list) == len end))
|> length
end
@doc """
Read a file of strings;
Find the two strings that differ by one char in the same position;
Return the other chars they have in common.
Conditions:
Preserve the original order of the common chars.
Notes:
* Builds all the string pairs in the for form.
* Finds the right strings by filtering in the for form.
* Calls common_chars/2 repeatedly for each string.
## Examples
iex> Day02.part2_for_v1("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_for_v1(file_name) do
ss =
File.stream!(file_name)
|> Stream.map(&String.trim/1)
# From ["abc", "def", "ghi"] to [["abc", "def"], ["abc", "ghi"], ["def", "ghi"]]
# The less-than filter makes sure we don't get dups by comparing both ["a", "b"] and ["b", "a"]
# Then filter to find the pair that differs by 1 char.
# Then extract the common chars.
[h | _] = for(s1 <- ss, s2 <- ss, s1 < s2, differ_by_1?(s1, s2), do: common_chars(s1, s2))
h
end
# True if first string is one char longer than the common chars.
defp differ_by_1?(s1, s2), do: String.length(s1) == String.length(common_chars(s1, s2)) + 1
defp common_chars(s1, s2) when is_binary(s1) and is_binary(s2),
do: common_chars(String.graphemes(s1), String.graphemes(s2)) |> Enum.join()
defp common_chars([], []), do: []
defp common_chars([h1 | t1], [h2 | t2]) when h1 == h2, do: [h1 | common_chars(t1, t2)]
defp common_chars([_ | t1], [_ | t2]), do: common_chars(t1, t2)
@doc """
Notes:
* Builds all the string pairs in the for form.
* Finds the right strings after the for form.
* Calls common_chars/2 once for each string.
## Examples
iex> Day02.part2_for_v2("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_for_v2(file_name) do
ss =
File.stream!(file_name)
|> Stream.map(&String.trim/1)
# From ["abc", "def", "ghi"] to [["abc", "def"], ["abc", "ghi"], ["def", "ghi"]]
# The less-than filter makes sure we don't get dups by comparing both ["a", "b"] and ["b", "a"]
# The for form returns a list whose elements are tuples of {first string, common chars}
{_, winner} =
for(s1 <- ss, s2 <- ss, s1 < s2, do: {s1, common_chars(s1, s2)})
|> Enum.find(fn t ->
# Winner if first string is one char longer than the common chars.
String.length(elem(t, 0)) == String.length(elem(t, 1)) + 1
end)
winner
end
@doc """
Notes:
* No for form
* Doesn't pre-build all the string pairs
* Stops as soon as it finds the right strings
## Examples
iex> Day02.part2_fast("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_fast(file_name) do
ss =
File.stream!(file_name)
|> Enum.map(&String.trim/1)
check_string(ss, ss, ss)
end
# If the second list is empty, we've checked the head of the first list
# against all the other strings, without success. Move on to the next
# value in the first list, and start with a full second list.
defp check_string([_h | t], [], ss), do: check_string(t, ss, ss)
# Check the head of the first list against each value in the second list.
defp check_string([h1 | _t1] = lst1, [h2 | t2], ss) do
cc = common_chars(h1, h2)
# If the common chars between the head of the first list and the
# head of the second list are just one char shorter than the head
# of the first list, the two heads differ by just one char, and
# we're done. Return the common chars.
if String.length(h1) == String.length(cc) + 1 do
cc
else
# Check the head of the first list against the next value in the second list.
check_string(lst1, t2, ss)
end
end
@doc """
Notes:
* Uses an agent to store the original list of strings
* No for form
* Doesn't pre-build all the string pairs
* Stops as soon as it finds the right strings
## Examples
iex> Day02.part2_fast_agent("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_fast_agent(file_name) do
ss =
File.stream!(file_name)
|> Enum.map(&String.trim/1)
# Unless the Agent is already running, start it
unless Process.whereis(SS), do: {:ok, _} = Agent.start_link(fn -> ss end, name: SS)
check_string_fa(ss, ss)
end
# If the second list is empty, we've checked the head of the first list
# against all the other strings, without success. Move on to the next
# value in the first list, and start with a full second list.
defp check_string_fa([_h | t], []) do
ss = Agent.get(SS, & &1)
check_string_fa(t, ss)
end
# Check the head of the first list against each value in the second list.
defp check_string_fa([h1 | _t1] = lst1, [h2 | t2]) do
cc = common_chars(h1, h2)
# If the common chars between the head of the first list and the
# head of the second list are just one char shorter than the head
# of the first list, the two heads differ by just one char, and
# we're done. Return the common chars.
if String.length(h1) == String.length(cc) + 1 do
cc
else
# Check the head of the first list against the next value in the second list.
check_string_fa(lst1, t2)
end
end
@doc """
Notes:
* Uses concurrency to search for the right strings
* Stops as soon as it finds (via a message) the right strings
* Spawns _after_ the for form
## Examples
iex> Day02.part2_conc_v1("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_conc_v1(file_name) do
me = self()
ss =
File.stream!(file_name)
|> Enum.map(&String.trim/1)
# For each pair of strings, spawn a process that diffs them.
for(s1 <- ss, s2 <- ss, s1 < s2, do: {s1, s2})
|> Enum.each(&spawn_link(fn -> send(me, diff_strings(elem(&1, 0), elem(&1, 1))) end))
message_loop()
end
defp message_loop do
receive do
{:found, cc} ->
cc
_ ->
message_loop()
end
end
# If s1 and s2 differ by just 1 char, return :found and the common chars.
defp diff_strings(s1, s2) do
cc = common_chars(s1, s2)
if String.length(s1) == String.length(cc) + 1, do: {:found, cc}, else: :not_found
end
@doc """
Notes:
* Uses concurrency to search for the right strings
* Stops as soon as it finds (via a message) the right strings
* Uses the for form to spawn
## Examples
iex> Day02.part2_conc_v2("data/day02.txt")
"fvstwblgqkhpuixdrnevmaycd"
"""
def part2_conc_v2(file_name) do
me = self()
ss =
File.stream!(file_name)
|> Enum.map(&String.trim/1)
# For each pair of strings, spawn a process that diffs them.
for(
s1 <- ss,
s2 <- ss,
s1 < s2,
do: spawn_link(fn -> send(me, diff_strings(s1, s2)) end)
)
message_loop()
end
@doc """
Notes:
* Uses concurrency to search for the right strings
* Stops as soon as it finds (via a message) the right strings
* Uses reduce_while
## Examples
    iex> Day02.part2_conc_v3("data/day02.txt")
    "fvstwblgqkhpuixdrnevmaycd"
"""
def part2_conc_v3(file_name) do
  parent = self()

  lines =
    file_name
    |> File.stream!()
    |> Enum.map(&String.trim/1)

  # One process per line; each scans the whole list for a near-match.
  Enum.each(lines, fn line ->
    spawn_link(fn -> send(parent, diff_strings_v3(line, lines)) end)
  end)

  message_loop()
end
# Scans `ss` for the first string that differs from `s` by exactly one
# char (early exit via reduce_while); returns {:found, common} or :not_found.
defp diff_strings_v3(s, ss) do
  result =
    Enum.reduce_while(ss, nil, fn candidate, _acc ->
      common = common_chars(s, candidate)

      if String.length(s) == String.length(common) + 1 do
        {:halt, common}
      else
        {:cont, nil}
      end
    end)

  case result do
    nil -> :not_found
    common -> {:found, common}
  end
end
end
|
day02/lib/day02.ex
| 0.874988
| 0.635844
|
day02.ex
|
starcoder
|
defmodule XDR.Int do
  @moduledoc """
  This module manages the `Integer` type based on the RFC4506 XDR Standard.
  """

  @behaviour XDR.Declaration

  alias XDR.Error.Int, as: IntError

  defstruct [:datum]

  @typedoc """
  `XDR.Int` structure type specification.
  """
  @type t :: %XDR.Int{datum: integer}

  @doc """
  Create a new `XDR.Int` structure with the `datum` passed.
  """
  @spec new(datum :: integer) :: t
  def new(datum), do: %XDR.Int{datum: datum}

  @impl XDR.Declaration
  @doc """
  Encode a `XDR.Int` structure into a XDR format (4-byte big-endian
  signed integer). Returns `{:error, reason}` when the datum is not an
  integer or is outside the signed 32-bit range.
  """
  @spec encode_xdr(int :: t) ::
          {:ok, binary} | {:error, :not_integer | :exceed_upper_limit | :exceed_lower_limit}
  def encode_xdr(%XDR.Int{datum: datum}) when not is_integer(datum), do: {:error, :not_integer}

  # 2_147_483_647 is the largest signed 32-bit value (RFC 4506 §4.1).
  def encode_xdr(%XDR.Int{datum: datum}) when datum > 2_147_483_647,
    do: {:error, :exceed_upper_limit}

  # -2_147_483_648 is the smallest signed 32-bit value.
  def encode_xdr(%XDR.Int{datum: datum}) when datum < -2_147_483_648,
    do: {:error, :exceed_lower_limit}

  def encode_xdr(%XDR.Int{datum: datum}), do: {:ok, <<datum::big-signed-integer-size(32)>>}

  @impl XDR.Declaration
  @doc """
  Encode a `XDR.Int` structure into a XDR format.
  If the `int` is not valid, an exception is raised.
  """
  @spec encode_xdr!(int :: t) :: binary()
  def encode_xdr!(int) do
    case encode_xdr(int) do
      {:ok, binary} -> binary
      {:error, reason} -> raise(IntError, reason)
    end
  end

  @impl XDR.Declaration
  @doc """
  Decode the Integer in XDR format to a `XDR.Int` structure.
  Returns `{:ok, {int, rest}}` where `rest` is the unconsumed binary.
  The second argument is unused and exists only to satisfy the
  `XDR.Declaration` behaviour.
  """
  @spec decode_xdr(bytes :: binary(), int :: t | nil) ::
          {:ok, {t, binary}} | {:error, :not_binary}
  def decode_xdr(bytes, int \\ nil)
  def decode_xdr(bytes, _int) when not is_binary(bytes), do: {:error, :not_binary}

  def decode_xdr(<<datum::big-signed-integer-size(32), rest::binary>>, _int),
    do: {:ok, {new(datum), rest}}

  @impl XDR.Declaration
  @doc """
  Decode the Integer in XDR format to a `XDR.Int` structure.
  If the binaries are not valid, an exception is raised.
  """
  @spec decode_xdr!(bytes :: binary(), int :: t | nil) :: {t, binary()}
  def decode_xdr!(bytes, int \\ nil)

  def decode_xdr!(bytes, _int) do
    case decode_xdr(bytes) do
      {:ok, result} -> result
      {:error, reason} -> raise(IntError, reason)
    end
  end
end
|
lib/xdr/int.ex
| 0.929344
| 0.616373
|
int.ex
|
starcoder
|
defmodule Pow.Phoenix.SessionController do
  @moduledoc """
  Controller actions for session.

  The `:request_path` param will automatically be assigned in `:new` and
  `:create` actions, and used for the `pow_session_path(conn, :create)` path.
  """
  use Pow.Phoenix.Controller

  alias Plug.Conn
  alias Pow.Plug

  plug :require_not_authenticated when action in [:new, :create]
  plug :require_authenticated when action in [:delete]
  plug :assign_request_path when action in [:new, :create]
  plug :assign_create_path when action in [:new, :create]
  plug :put_no_cache_header when action in [:new]

  @doc false
  @spec process_new(Conn.t(), map()) :: {:ok, map(), Conn.t()}
  def process_new(conn, _params) do
    {:ok, Plug.change_user(conn), conn}
  end

  @doc false
  @spec respond_new({:ok, map(), Conn.t()}) :: Conn.t()
  def respond_new({:ok, changeset, conn}) do
    conn
    |> assign(:changeset, changeset)
    |> render("new.html")
  end

  @doc false
  @spec process_create(Conn.t(), map()) :: {:ok | :error, Conn.t()}
  def process_create(conn, %{"user" => user_params}) do
    Plug.authenticate_user(conn, user_params)
  end

  @doc false
  @spec respond_create({:ok | :error, Conn.t()}) :: Conn.t()
  def respond_create({:ok, conn}) do
    conn
    |> put_flash(:info, messages(conn).signed_in(conn))
    |> redirect(to: routes(conn).after_sign_in_path(conn))
  end

  # On failed authentication, re-render the form with an error flash.
  def respond_create({:error, conn}) do
    conn
    |> assign(:changeset, Plug.change_user(conn, conn.params["user"]))
    |> put_flash(:error, messages(conn).invalid_credentials(conn))
    |> render("new.html")
  end

  @doc false
  @spec process_delete(Conn.t(), map()) :: {:ok, Conn.t()}
  def process_delete(conn, _params), do: Plug.clear_authenticated_user(conn)

  @doc false
  @spec respond_delete({:ok, Conn.t()}) :: Conn.t()
  def respond_delete({:ok, conn}) do
    conn
    |> put_flash(:info, messages(conn).signed_out(conn))
    |> redirect(to: routes(conn).after_sign_out_path(conn))
  end

  # Copies the `request_path` param into the assigns so the view can
  # round-trip it through the sign-in form.
  defp assign_request_path(%{params: %{"request_path" => request_path}} = conn, _opts) do
    Conn.assign(conn, :request_path, request_path)
  end

  defp assign_request_path(conn, _opts), do: conn

  defp assign_create_path(conn, _opts) do
    Conn.assign(conn, :action, create_path(conn))
  end

  # Fix: the original paired an explicit `create_path/1` clause with a
  # `create_path/2` default argument, which the compiler rejects
  # ("def create_path/1 conflicts with defaults from create_path/2").
  # The default is replaced by an explicit one-argument clause.
  defp create_path(%{assigns: %{request_path: request_path}} = conn) do
    create_path(conn, request_path: request_path)
  end

  defp create_path(conn), do: create_path(conn, [])

  defp create_path(conn, query_params) do
    routes(conn).path_for(conn, __MODULE__, :create, [], query_params)
  end
end
|
lib/pow/phoenix/controllers/session_controller.ex
| 0.730386
| 0.415225
|
session_controller.ex
|
starcoder
|
defmodule Clickhousex.Codec.RowBinary.Old do
  @moduledoc false

  alias Clickhousex.{Codec, Codec.Binary}

  require Record

  # Decoder state: column metadata plus rows accumulated in reverse order
  # (prepended for O(1) insert; reversed once in decode/1).
  Record.defrecord(:state, column_count: 0, column_names: [], column_types: [], rows: [], count: 0)

  @behaviour Codec

  @impl Codec
  def response_format do
    "RowBinaryWithNamesAndTypes"
  end

  @impl Codec
  def request_format do
    "Values"
  end

  @impl Codec
  # Encodes query params; DateTime values are sent as unix timestamps.
  def encode(query, replacements, params) do
    params =
      Enum.map(params, fn
        %DateTime{} = dt -> DateTime.to_unix(dt)
        other -> other
      end)

    Codec.Values.encode(query, replacements, params)
  end

  @impl Codec
  def new do
    nil
  end

  @impl Codec
  # First chunk: read the column count (varint), then the header of column
  # names and types. A `{:resume, fun}` is returned whenever the chunk ends
  # mid-value; feeding the fun more data continues decoding.
  def append(nil, data) do
    case Binary.decode(data, :varint) do
      {:ok, column_count, rest} ->
        state = state(column_count: column_count)

        case decode_column_names(rest, column_count, state) do
          state() = state -> state
          {:resume, _} = resumer -> resumer
        end

      {:resume, _} ->
        {:resume, &append(nil, data <> &1)}
    end
  end

  # Header already consumed: subsequent chunks are row data.
  def append(state() = state, data) do
    decode_rows(data, state)
  end

  # A previous chunk ended mid-value: hand the new data to the saved
  # continuation.
  def append({:resume, resumer}, data) do
    case resumer.(data) do
      {:resume, _} = resumer -> resumer
      state() = state -> state
    end
  end

  @impl Codec
  def decode(state(column_names: column_names, rows: rows, count: count)) do
    {:ok, %{column_names: column_names, rows: Enum.reverse(rows), count: count}}
  end

  # No data was ever appended: decode an empty state.
  def decode(nil) do
    decode(state())
  end

  # All names read; switch to reading the column types.
  defp decode_column_names(
         bytes,
         0,
         state(column_names: names, column_count: column_count) = state
       ) do
    decode_column_types(bytes, column_count, state(state, column_names: Enum.reverse(names)))
  end

  defp decode_column_names(bytes, remaining_columns, state(column_names: names) = state) do
    case Binary.decode(bytes, :string) do
      {:ok, column_name, rest} ->
        decode_column_names(
          rest,
          remaining_columns - 1,
          state(state, column_names: [column_name | names])
        )

      {:resume, _} ->
        {:resume,
         fn more_data -> decode_column_names(bytes <> more_data, remaining_columns, state) end}
    end
  end

  # All types read; switch to reading row data.
  defp decode_column_types(bytes, 0, state(column_types: types) = state) do
    decode_rows(bytes, state(state, column_types: Enum.reverse(types)))
  end

  defp decode_column_types(bytes, remaining_columns, state(column_types: types) = state) do
    case Binary.decode(bytes, :string) do
      {:ok, column_type, rest} ->
        column_type = parse_type(column_type)
        column_types = [column_type | types]
        decode_column_types(rest, remaining_columns - 1, state(state, column_types: column_types))

      {:resume, _} ->
        {:resume,
         fn more_data -> decode_column_types(bytes <> more_data, remaining_columns, state) end}
    end
  end

  defp decode_rows(<<>>, state() = state) do
    state
  end

  defp decode_rows(bytes, state(column_types: column_types, rows: rows, count: count) = state) do
    case decode_row(bytes, column_types, []) do
      {:ok, row, rest} ->
        decode_rows(rest, state(state, rows: [row | rows], count: count + 1))

      {:resume, _} ->
        {:resume, fn more_data -> decode_rows(bytes <> more_data, state) end}
    end
  end

  # All columns of the row consumed: emit it as a tuple.
  defp decode_row(<<bytes::bits>>, [], row) do
    row_tuple =
      row
      |> Enum.reverse()
      |> List.to_tuple()

    {:ok, row_tuple, bytes}
  end

  # Nullable column, null marker (1): value is nil.
  defp decode_row(<<1, rest::binary>>, [{:nullable, _} | types], row) do
    decode_row(rest, types, [nil | row])
  end

  # Nullable column, present marker (0): decode as the wrapped type.
  defp decode_row(<<0, rest::binary>>, [{:nullable, actual_type} | types], row) do
    decode_row(rest, [actual_type | types], row)
  end

  defp decode_row(<<bytes::bits>>, [{:fixed_string, length} | types], row)
       when byte_size(bytes) >= length do
    <<value::binary-size(length), rest::binary>> = bytes
    decode_row(rest, types, [value | row])
  end

  # Not enough bytes for the fixed string yet: wait for more data.
  defp decode_row(<<bytes::bits>>, [{:fixed_string, _} | _] = current_types, row) do
    {:resume, fn more_data -> decode_row(bytes <> more_data, current_types, row) end}
  end

  defp decode_row(<<bytes::bits>>, [{:array, elem_type} | types] = current_types, row) do
    case Binary.decode(bytes, {:list, elem_type}) do
      {:ok, value, rest} ->
        decode_row(rest, types, [value | row])

      {:resume, _} ->
        {:resume, fn more_data -> decode_row(bytes <> more_data, current_types, row) end}
    end
  end

  defp decode_row(<<bytes::bits>>, [type | types] = current_types, row) do
    case Binary.decode(bytes, type) do
      {:ok, value, rest} ->
        decode_row(rest, types, [value | row])

      {:resume, _} ->
        {:resume, fn more_data -> decode_row(bytes <> more_data, current_types, row) end}
    end
  end

  # Turns ClickHouse type strings into internal tags, recursing into
  # parameterized types like Nullable(...), FixedString(N) and Array(...).
  defp parse_type(<<"Nullable(", type::binary>>) do
    rest_type =
      type
      |> String.replace_suffix(")", "")
      |> parse_type()

    {:nullable, rest_type}
  end

  defp parse_type(<<"FixedString(", rest::binary>>) do
    # Fix: the original piped `rest` through String.replace_suffix/3 and
    # discarded the result (dead code); only the parsed length matters.
    case Integer.parse(rest) do
      {length, _rest} ->
        {:fixed_string, length}
    end
  end

  defp parse_type(<<"Array(", type::binary>>) do
    rest_type =
      type
      |> String.replace_suffix(")", "")
      |> parse_type()

    {:array, rest_type}
  end

  @clickhouse_mappings [
    {"Int64", :i64},
    {"Int32", :i32},
    {"Int16", :i16},
    {"Int8", :i8},
    {"UInt64", :u64},
    {"UInt32", :u32},
    {"UInt16", :u16},
    {"UInt8", :u8},
    {"Float64", :f64},
    {"Float32", :f32},
    {"Float16", :f16},
    {"Float8", :f8},
    {"String", :string},
    {"Date", :date},
    {"DateTime", :datetime}
  ]

  # Generate one parse_type/1 clause per scalar type mapping.
  for {clickhouse_type, local_type} <- @clickhouse_mappings do
    defp parse_type(unquote(clickhouse_type)) do
      unquote(local_type)
    end
  end
end
|
lib/clickhousex/codec/row_binary_old.ex
| 0.675658
| 0.550668
|
row_binary_old.ex
|
starcoder
|
defmodule ExPixBRCode.BRCodes.Decoder do
  @moduledoc """
  Decode iodata that represent a BRCode.
  """

  alias ExPixBRCode.BRCodes.Models.BRCode
  alias ExPixBRCode.Changesets

  # EMV-style tag table: tag -> key name, or {key name, nested tag table}.
  @keys %{
    "00" => "payload_format_indicator",
    "01" => "point_of_initiation_method",
    "26" =>
      {"merchant_account_information",
       %{
         "00" => "gui",
         "01" => "chave",
         "02" => "info_adicional",
         "03" => "fss",
         "25" => "url"
       }},
    "52" => "merchant_category_code",
    "53" => "transaction_currency",
    "54" => "transaction_amount",
    "58" => "country_code",
    "59" => "merchant_name",
    "60" => "merchant_city",
    "61" => "postal_code",
    "62" => {"additional_data_field_template", %{"05" => "reference_label"}},
    "63" => "crc",
    "80" => {"unreserved_templates", %{"00" => "gui"}}
  }

  @max_size_in_bytes 512

  @doc """
  Decode input into a map with string keys with known keys for BRCode.
  There is no actual validation about the values. If you want to coerce values and validate see
  `decode_to/3` function.
  ## Errors
  Known validation errors result in a tuple with `{:validation, reason}`. Reason might be an atom
  or a string.
  ## Options
  The following options are currently supported:
  - `strict_validation` (`t:boolean()` - default `false`): whether to allow unknown values or not
  ## Example
      iex> brcode = "00020126580014br.gov.bcb.pix0136123e4567-e12b-12d1-a456-426655440000" <>
      ...> "5204000053039865802BR5913Fulano de Tal6008BRASILIA62070503***63041D3D"
      ...> decode(brcode)
      {:ok, %{"additional_data_field_template" => "0503***",
         "country_code" => "BR",
         "crc" => "1D3D",
         "merchant_account_information" => %{
           "gui" => "br.gov.bcb.pix",
           "key" => "<KEY>"
         },
         "merchant_category_code" => "0000",
         "merchant_city" => "BRASILIA",
         "merchant_name" => "<NAME>",
         "payload_format_indicator" => "01",
         "transaction_currency" => "986"
      }}
  """
  @spec decode(input :: iodata(), Keyword.t()) ::
          {:ok, term()}
          | {:error,
             {:validation,
              :invalid_tag_length_value
              | {:unexpected_value_length_for_key, String.t()}
              | {:unknown_key, String.t()}
              | {:invalid_length_for_tag, String.t()}}
             | :unknown_error
             | :invalid_crc
             | :invalid_input_length
             | :invalid_input_size}
  def decode(input, opts \\ []) do
    brcode = IO.iodata_to_binary(input)

    with {:ok, brcode} <- check_brcode_size(brcode),
         {:ok, {contents, crc}} <- extract_crc(brcode),
         :ok <- validate_crc(contents, crc) do
      parse(brcode, opts)
    end
  end

  # This is basically String.split_at(binary, -4),
  # optimized using the facts that we only have
  # 1-byte characters in the CRC and that we can count
  # right to left. The guard is needed so that
  # binary_part/3 doesn't raise
  defp extract_crc(binary) when byte_size(binary) > 4 do
    len = byte_size(binary)
    crc = binary_part(binary, len, -4)
    val = binary_part(binary, 0, len - 4)
    {:ok, {val, crc}}
  end

  defp extract_crc(_binary), do: {:error, :invalid_input_length}

  # Recomputes CRC16-CCITT over the payload (everything but the last 4
  # chars) and compares with the uppercase hex CRC carried by the code.
  defp validate_crc(contents, received_crc) do
    calculated_crc =
      contents
      |> CRC.ccitt_16()
      |> Integer.to_string(16)
      |> String.pad_leading(4, "0")

    if received_crc == calculated_crc do
      :ok
    else
      {:error, :invalid_crc}
    end
  end

  defp parse(brcode, opts) do
    case do_parse(brcode, opts) do
      result when is_map(result) ->
        {:ok, result}

      error ->
        error
    end
  end

  defp check_brcode_size(brcode) when byte_size(brcode) <= @max_size_in_bytes, do: {:ok, brcode}
  defp check_brcode_size(_brcode), do: {:error, :invalid_input_size}

  @doc """
  Decode an iodata to a given schema module.
  This calls `decode/2` and then casts it into an `t:Ecto.Schema` module.
  It must have a changeset/2 public function.
  """
  @spec decode_to(input :: iodata(), Keyword.t(), schema :: module()) ::
          {:ok, struct()} | {:error, term()}
  def decode_to(input, opts \\ [], schema \\ BRCode) do
    case decode(input, opts) do
      {:ok, result} -> Changesets.cast_and_apply(schema, result)
      err -> err
    end
  end

  # This guard ensures a number is the integer representation of the
  # ASCII characters 0 through 9, inclusive
  defguardp is_digit(value) when is_integer(value) and value >= ?0 and value <= ?9

  defp do_parse(brcode, opts, keys \\ @keys, acc \\ %{})

  defp do_parse(<<>>, _opts, _keys, acc), do: acc

  # Elixir uses Binaries as the underlying representation for String.
  # The first argument used Elixir's Binary-matching syntax.
  # The matches mean:
  # - key::binary-size(2) -> key is a 2-byte string
  # - size_tens::size(8) -> size_tens is an 8-bit integer
  # - size_units::size(8) -> size_units is an 8-bit integer
  # - rest::binary -> rest is an undefined-length string
  # Also the is_digit guard is used to ensure that both size_tens and
  # size_units are ASCII encoded digits
  defp do_parse(
         <<key::binary-size(2), size_tens::size(8), size_units::size(8), rest::binary>>,
         opts,
         keys,
         acc
       )
       when is_digit(size_tens) and is_digit(size_units) do
    # Having the tens-place digit and the units-place digit,
    # we need to multiply size_tens by 10 to shift it to the
    # left (e.g. if size_tens is "3", we want to add 30).
    # However, since they are ascii encoded, we also need to subtract
    # ?0 (the ASCII value for "0") from both `size_tens` and `size_units`.
    len = (size_tens - ?0) * 10 + (size_units - ?0)
    {value, rest} = String.split_at(rest, len)
    effective_length = String.length(value)
    strict_validation = Keyword.get(opts, :strict_validation, false)
    result = Map.get(keys, key)

    case {effective_length, result, strict_validation} do
      {effective_length, _result, _strict_validation} when effective_length != len ->
        {:error, {:validation, {:unexpected_value_length_for_key, key}}}

      # Nested template: recurse with the sub-table.
      {_, {key, sub_keys}, _} ->
        value = do_parse(value, opts, sub_keys, %{})
        acc = Map.put(acc, key, value)
        do_parse(rest, opts, keys, acc)

      {_, key, _} when is_binary(key) ->
        acc = Map.put(acc, key, value)
        do_parse(rest, opts, keys, acc)

      # Unknown tag, lenient mode: skip it.
      {_, nil, false} ->
        do_parse(rest, opts, keys, acc)

      # Fix: the original returned a malformed 3-tuple
      # `{:error, :validation, {:unknown_key, key}}`, which contradicts
      # both the @spec and every other error path in this module.
      _ ->
        {:error, {:validation, {:unknown_key, key}}}
    end
  end

  defp do_parse(
         <<key::binary-size(2), size_tens::size(8), size_units::size(8), _rest::binary>>,
         _opts,
         _keys,
         _acc
       )
       when not is_digit(size_tens) or not is_digit(size_units) do
    {:error, {:validation, {:invalid_length_for_tag, key}}}
  end

  defp do_parse(_, _, _, _), do: {:error, {:validation, :invalid_tag_length_value}}
end
|
lib/ex_pix_brcode/brcode/decoder.ex
| 0.884277
| 0.42913
|
decoder.ex
|
starcoder
|
defmodule ExHash.CLI do
  @moduledoc """
  A simple command-line interface for hashing text and files with the
  algorithms provided by Erlang's `:crypto` module.
  """

  # OptionParser switch definitions.
  defp switches() do
    [
      file: :string,
      text: :string,
      alg: :string,
      help: :boolean,
    ]
  end

  # Single-letter aliases for the switches above.
  defp aliases() do
    [
      f: :file,
      t: :text,
      a: :alg,
      h: :help,
    ]
  end

  # Whitelist of supported algorithm names (must map to :crypto atoms).
  defp algorithms() do
    [
      "sha",
      "sha224",
      "sha256",
      "sha384",
      "sha512",
      "md4",
      "md5",
      "ripemd160",
    ]
  end

  defp help_msg() do
    """
    Welcome to ExHash, a simple CLI for hashing text and files!
    File Usage:
      ./ex_hash (-f|--file) <filepath> (-a|--alg) <algorithm>
    Text Usage:
      ./ex_hash (-t|--text) <text> (-a|--alg) <algorithm>
    Display this Help Message:
      ./ex_hash (-h|--help)
    Supported Hash Algorithms:
    """ <> Enum.map_join(algorithms(), &(" - #{&1}\n"))
  end

  @doc """
  Entry point: parses argv and dispatches to file or text hashing.
  Exits the VM (halt 1) on missing/invalid options or when help is shown.
  """
  def main(args) do
    {opts, _, _} = OptionParser.parse(args, switches: switches(), aliases: aliases())
    opts = Enum.into(opts, %{})

    case opts do
      %{help: true} ->
        IO.puts(help_msg())
        System.halt(1)

      %{file: file_name, alg: alg} ->
        hash_file(file_name, alg)

      %{text: text, alg: alg} ->
        hash_text(text, alg)

      %{file: _} ->
        IO.puts("You must specify a hashing algorithm to use")
        IO.puts("Here are the available hashing algorithms:")
        algorithms() |> Enum.map(&(IO.puts(" - #{&1}")))
        System.halt(1)

      %{text: _} ->
        IO.puts("You must specify a hashing algorithm to use")
        IO.puts("Here are the available hashing algorithms:")
        algorithms() |> Enum.map(&(IO.puts(" - #{&1}")))
        System.halt(1)

      %{alg: _} ->
        IO.puts("You must specify either a file or text to hash")
        System.halt(1)

      _ ->
        IO.puts(help_msg())
        System.halt(1)
    end
  end

  @doc """
  Halts the VM with an error message unless `file_name` exists.
  """
  def check_file(file_name) do
    full_path = Path.expand(file_name)
    exists = File.exists?(full_path)

    unless exists do
      IO.puts("File #{full_path} does not exist")
      System.halt(1)
    end
  end

  @doc """
  Halts the VM with an error message unless `alg` is a supported algorithm.
  """
  def check_alg(alg) do
    unless alg in algorithms() do
      IO.puts("Hashing algorithm #{alg} not supported.")
      IO.puts("Available algorithms are:")
      algorithms() |> Enum.map(&(IO.puts(" - #{&1}")))
      System.halt(1)
    end
  end

  @doc """
  Reads `file_name` and prints its hex-encoded digest under `algorithm`.
  """
  def hash_file(file_name, algorithm) do
    check_file(file_name)
    check_alg(algorithm)
    # Safe: check_alg/1 guarantees algorithm is in the whitelist above,
    # so String.to_atom/1 cannot be driven by arbitrary input here.
    alg = String.to_atom(algorithm)
    IO.puts("hashing #{truncate(file_name)} with #{algorithm} algorithm...")
    data = File.read!(Path.expand(file_name))
    hash = :crypto.hash(alg, data) |> Base.encode16 |> String.downcase
    IO.puts("\n#{hash}")
  end

  @doc """
  Prints the hex-encoded digest of `text` under `algorithm`.
  """
  def hash_text(text, algorithm) do
    check_alg(algorithm)
    alg = String.to_atom(algorithm)
    IO.puts("hashing #{truncate(text)} with #{algorithm} algorithm...")
    hash = :crypto.hash(alg, text) |> Base.encode16 |> String.downcase
    IO.puts("\n#{hash}")
  end

  # Shortens `str` to at most `max_chars` characters, appending "..."
  # when truncation occurred.
  # Fix: the original called String.slice(str, 0, max_chars - 3) for any
  # max_chars >= 0, which crashes on a negative length when max_chars < 3.
  defp truncate(str, max_chars \\ 10) when max_chars >= 0 do
    cond do
      String.length(str) <= max_chars -> str
      max_chars < 3 -> String.slice(str, 0, max_chars)
      true -> String.slice(str, 0, max_chars - 3) <> "..."
    end
  end
end
|
lib/cli.ex
| 0.563978
| 0.425098
|
cli.ex
|
starcoder
|
defmodule KittenBlue.JWK do
  @moduledoc """
  Structure containing `kid`, `alg`, `JOSE.JWK` and handling functions
  """

  defstruct [
    :kid,
    :alg,
    :key
  ]

  @type t :: %__MODULE__{kid: String.t(), alg: String.t(), key: JOSE.JWK.t()}

  @doc """
  ```Elixir
  kid = "sample_201804"
  alg = "RS256"
  key = JOSE.JWK.from_pem_file("rsa-2048.pem")
  kb_jwk = KittenBlue.JWK.new([kid, alg, key])
  kb_jwk = KittenBlue.JWK.new([kid: kid, alg: alg, key: key])
  kb_jwk = KittenBlue.JWK.new(%{kid: kid, alg: alg, key: key})
  ```
  """
  # NOTE: the original specs referenced undefined types (Keywords.t(),
  # List.t(), Map.t()); they are corrected to real types below.
  @spec new(params :: Keyword.t()) :: t
  def new(params = [kid: _, alg: _, key: _]) do
    struct(__MODULE__, Map.new(params))
  end

  @spec new(params :: list()) :: t
  def new([kid, alg, key]) do
    struct(__MODULE__, %{kid: kid, alg: alg, key: key})
  end

  @spec new(params :: map()) :: t
  def new(params = %{kid: _, alg: _, key: _}) do
    struct(__MODULE__, params)
  end

  @doc """
  Convert `KittenBlue.JWK` list to `JSON Web Key Sets` format public keys.
  ```Elixir
  kb_jwk_list = [kb_jwk]
  public_jwk_sets = KittenBlue.JWK.list_to_public_jwk_sets(kb_jwk_list)
  ```
  """
  @spec list_to_public_jwk_sets(jwk_list :: [t()]) :: map | nil
  def list_to_public_jwk_sets([]) do
    nil
  end

  def list_to_public_jwk_sets(jwk_list) when is_list(jwk_list) do
    %{
      "keys" =>
        jwk_list
        |> Enum.map(fn jwk -> to_public_jwk_set(jwk) end)
        |> Enum.filter(&(!is_nil(&1)))
    }
  end

  @doc """
  Convert `KittenBlue.JWK` to `JSON Web Key Sets` format public key.
  ```Elixir
  public_jwk_set = KittenBlue.JWK.to_public_jwk_set(kb_jwk)
  ```
  """
  @spec to_public_jwk_set(jwk :: t) :: map | nil
  def to_public_jwk_set(jwk = %__MODULE__{}) do
    jwk.key
    |> JOSE.JWK.to_public()
    |> JOSE.JWK.to_map()
    |> elem(1)
    |> Map.put("alg", jwk.alg)
    |> Map.put("kid", jwk.kid)
  end

  def to_public_jwk_set(_) do
    nil
  end

  @doc """
  Convert `JSON Web Key Sets` format public keys to `KittenBlue.JWK` list.
  ```
  kb_jwk_list = KittenBlue.JWK.public_jwk_sets_to_list(public_jwk_sets)
  ```
  """
  @spec public_jwk_sets_to_list(public_json_web_key_sets :: map) :: [t()]
  def public_jwk_sets_to_list(_public_json_web_key_sets = %{"keys" => public_jwk_sets})
      when is_list(public_jwk_sets) do
    public_jwk_sets
    |> Enum.map(fn public_jwk_set -> from_public_jwk_set(public_jwk_set) end)
    |> Enum.filter(&(!is_nil(&1)))
  end

  def public_jwk_sets_to_list(_public_json_web_key_sets) do
    []
  end

  @doc """
  Convert `JSON Web Key Sets` format public key to `KittenBlue.JWK`.
  ```
  kb_jwk = KittenBlue.JWK.from_public_jwk_set(public_jwk_set)
  ```
  """
  @spec from_public_jwk_set(public_json_web_key_set :: map) :: t | nil
  def from_public_jwk_set(jwk_map) when is_map(jwk_map) do
    try do
      with alg when alg != nil <- jwk_map["alg"],
           kid when kid != nil <- jwk_map["kid"],
           key = %JOSE.JWK{} <- jwk_map |> JOSE.JWK.from_map() do
        new(kid: kid, alg: alg, key: key)
      else
        _ -> nil
      end
    rescue
      # JOSE may raise on malformed key material; treat it as "no key".
      _ -> nil
    end
  end

  def from_public_jwk_set(_) do
    nil
  end

  @doc """
  Convert `KittenBlue.JWK` List to compact storable format for configuration.
  ```
  kb_jwk_list = [kb_jwk]
  kb_jwk_list_config = KittenBlue.JWK.list_to_compact(kb_jwk_list)
  ```
  """
  @spec list_to_compact(jwk_list :: [t()]) :: [list()]
  def list_to_compact(jwk_list) do
    jwk_list
    |> Enum.map(fn jwk -> to_compact(jwk) end)
  end

  @doc """
  Convert `KittenBlue.JWK` to compact storable format for configuration.
  ```
  kb_jwk_config = KittenBlue.JWK.to_compact(kb_jwk)
  ```
  """
  @spec to_compact(jwk :: t()) :: list()
  def to_compact(jwk) do
    case jwk.alg do
      # Symmetric keys are stored as base64 of the octet sequence.
      alg when alg in ["HS256", "HS384", "HS512"] ->
        [jwk.kid, jwk.alg, jwk.key |> JOSE.JWK.to_oct() |> elem(1) |> Base.encode64(padding: false)]

      # RSA keys are stored as PEM.
      alg when alg in ["RS256", "RS384", "RS512"] ->
        [jwk.kid, jwk.alg, jwk.key |> JOSE.JWK.to_pem() |> elem(1)]

      # Anything else falls back to a JWK map.
      _ ->
        [jwk.kid, jwk.alg, jwk.key |> JOSE.JWK.to_map() |> elem(1)]
    end
  end

  @doc """
  Convert compact storable format list to `KittenBlue.JWK` list.
  ```
  kb_jwk_list = KittenBlue.JWK.compact_to_list(kb_jwk_list_config)
  ```
  """
  @spec compact_to_list(jwk_compact_list :: [list()]) :: [t()]
  def compact_to_list(jwk_compact_list) when is_list(jwk_compact_list) do
    jwk_compact_list
    |> Enum.map(fn jwk_compact -> from_compact(jwk_compact) end)
    |> Enum.filter(&(!is_nil(&1)))
  end

  @doc """
  Convert compact storable format to `KittenBlue.JWK`.
  ```
  kb_jwk = KittenBlue.JWK.from_compact(kb_jwk_config)
  ```
  """
  @spec from_compact(jwk_compact :: list()) :: t()
  def from_compact(_jwk_compact = [kid, alg, key]) do
    cond do
      alg in ["HS256", "HS384", "HS512"] ->
        [kid, alg, key |> Base.decode64!(padding: false) |> JOSE.JWK.from_oct()] |> new()

      alg in ["RS256", "RS384", "RS512"] ->
        [kid, alg, key |> JOSE.JWK.from_pem()] |> new()

      true ->
        [kid, alg, key |> JOSE.JWK.from_map()] |> new()
    end
  end
end
|
lib/kitten_blue/jwk.ex
| 0.798698
| 0.833019
|
jwk.ex
|
starcoder
|
defmodule Cased.Export do
  @moduledoc """
  Data modeling a Cased export.
  """
  import Norm

  @enforce_keys [
    :id,
    :audit_trails,
    :download_url,
    :events_found_count,
    :fields,
    :format,
    :phrase,
    :state,
    :updated_at,
    :created_at
  ]
  defstruct [
    :id,
    :audit_trails,
    :download_url,
    :events_found_count,
    :fields,
    :format,
    :phrase,
    :state,
    :updated_at,
    :created_at
  ]

  @type t :: %__MODULE__{
          id: String.t(),
          audit_trails: [String.t()],
          download_url: String.t(),
          events_found_count: non_neg_integer(),
          fields: [String.t()],
          format: String.t(),
          phrase: nil | String.t(),
          state: String.t(),
          updated_at: DateTime.t(),
          created_at: DateTime.t()
        }

  @type create_opts :: [create_opt()]
  @type create_opt ::
          {:audit_trails, [atom()]}
          | {:audit_trail, atom()}
          | {:fields, [String.t()]}
          | {:key, String.t()}

  @doc """
  Build a request to create an export of audit trail fields.
  ## Options
  The following options are available:
  - `:audit_trails` — The list of audit trails to export
  - `:audit_trail` — When passing a single audit trail, you can use this instead of `:audit_trails`.
  - `:fields` — The fields to export
  - `:key` — The Cased policy key allowing access to the audit trails and fields.
  The only required option is `:fields`.
  - When both `:audit_trail` and `:audit_trails` are omitted, `:audit_trail` is assumed to be `default`.
  - When `:key` is omitted, the key configured for the `:audit_trail` (or first of `:audit_trails`) in
    the client is used.
  ## Response
  When the resulting request is successfully executed by `Cased.Request.run/2` or
  `Cased.Request.run/1`, a `Cased.Export` struct is returned.
  """
  @spec create(client :: Cased.Client.t(), opts :: create_opts()) ::
          Cased.Request.t() | no_return()
  def create(client, opts \\ []) do
    opts = normalize_create_opts(opts)

    with {:ok, params} <- validate_create_opts(opts, client) do
      # When no explicit :key was given, fall back to the client key
      # configured for the first audit trail in the (validated) params.
      {key, params} =
        Map.pop_lazy(params, :key, fn ->
          primary_audit_trail = params.audit_trails |> List.first()
          client.keys[primary_audit_trail]
        end)

      %Cased.Request{
        client: client,
        id: :export_create,
        method: :post,
        path: "/exports",
        key: key,
        body: Map.put(params, :format, "json")
      }
    else
      {:error, details} ->
        raise %Cased.RequestError{details: details}
    end
  end

  # Defaults to the :default audit trail when neither :audit_trail nor
  # :audit_trails was provided by the caller.
  @spec normalize_create_opts(opts :: keyword()) :: keyword()
  defp normalize_create_opts(opts) do
    if Keyword.has_key?(opts, :audit_trail) || Keyword.has_key?(opts, :audit_trails) do
      opts
    else
      opts
      |> Keyword.put(:audit_trail, :default)
    end
  end

  # Validates the options against the Norm schema and normalizes the
  # single-audit-trail form into the plural :audit_trails list.
  @spec validate_create_opts(opts :: keyword(), client :: Cased.Client.t()) ::
          {:ok, map()} | {:error, list()}
  defp validate_create_opts(opts, client) do
    conform(Map.new(opts), create_opts_schema(client))
    |> case do
      {:ok, {:multiple_audit_trails, params}} ->
        {:ok, params}

      {:ok, {:single_audit_trail, params}} ->
        {audit_trail, params} = Map.pop(params, :audit_trail)
        {:ok, Map.put(params, :audit_trails, [audit_trail])}

      {:error, _} = err ->
        err
    end
  end

  # Option schema for `create/2`.
  # An audit trail is valid only if the client holds a key for it.
  @spec create_opts_schema(client :: Cased.Client.t()) :: struct()
  defp create_opts_schema(client) do
    audit_trail_spec = spec(is_atom() and (&Map.has_key?(client.keys, &1)))
    fields_spec = coll_of(spec(is_binary() or is_atom()))

    single_schema =
      schema(%{
        fields: fields_spec,
        audit_trail: audit_trail_spec
      })

    multi_schema =
      schema(%{
        fields: fields_spec,
        audit_trails: coll_of(audit_trail_spec, min_count: 1, distinct: true)
      })

    # Accept either shape; the tag tells validate_create_opts/2 which
    # normalization branch to take.
    alt(
      single_audit_trail: single_schema |> selection(),
      multiple_audit_trails: multi_schema |> selection()
    )
  end

  @type get_opts :: [get_opt()]
  @type get_opt :: {:key, String.t()}

  @doc """
  Build a request to retrieve data about an export.
  ## Options
  The following options are available:
  - `:key` — A Cased policy key allowing access to the export.
  When `:key` is omitted, the key for the `:default` audit trail is used.
  ## Response
  When the resulting request is successfully executed by `Cased.Request.run/2` or
  `Cased.Request.run/1`, a `Cased.Export` struct is returned.
  ## Example
  A basic example:
  ```
  iex> client
  ...> |> Cased.Export.get("export_...")
  ...> |> Cased.Request.run!
  %Cased.Export{id: "export_...", ...}
  ```
  """
  @spec get(client :: Cased.Client.t(), id :: String.t(), opts :: get_opts()) :: Cased.Request.t()
  def get(client, id, opts \\ []) do
    %Cased.Request{
      client: client,
      id: :export,
      method: :get,
      path: "/exports/#{id}",
      key: opts[:key] || Map.fetch!(client.keys, :default)
    }
  end

  @doc """
  Build a request to retrieve the export download.
  ## Options
  The following options are available:
  - `:key` — A Cased policy key allowing access to the export.
  When `:key` is omitted, the key for the `:default` audit trail is used.
  ## Response
  When the resulting request is successfully executed by `Cased.Request.run/2` or
  `Cased.Request.run/1`, a raw JSON string is returned. The JSON is _not_ decoded
  automatically.
  ## Example
  A basic example:
  ```
  iex> client
  ...> |> Cased.Export.get_download("export_...")
  ...> |> Cased.Request.run!
  "{...}"
  ```
  """
  @spec get_download(client :: Cased.Client.t(), id :: String.t(), opts :: get_opts()) ::
          Cased.Request.t()
  def get_download(client, id, opts \\ []) do
    %Cased.Request{
      client: client,
      id: :export_download,
      method: :get,
      path: "/exports/#{id}/download",
      key: opts[:key] || Map.fetch!(client.keys, :default)
    }
  end

  @doc false
  # Builds a Cased.Export struct from a decoded API response map,
  # parsing the ISO8601 timestamps (raises on malformed timestamps).
  @spec from_json!(map()) :: t() | no_return()
  def from_json!(data) do
    data =
      data
      |> Map.update("created_at", nil, &normalize_datetime/1)
      |> Map.update("updated_at", nil, &normalize_datetime/1)

    %__MODULE__{
      id: data["id"],
      audit_trails: Map.get(data, "audit_trails", []),
      download_url: data["download_url"],
      events_found_count: data["events_found_count"],
      fields: Map.get(data, "fields", []),
      format: data["format"],
      phrase: data["phrase"],
      state: data["state"],
      updated_at: data["updated_at"],
      created_at: data["created_at"]
    }
  end

  @spec normalize_datetime(nil | String.t()) :: DateTime.t()
  defp normalize_datetime(nil), do: nil

  defp normalize_datetime(datetime) do
    case DateTime.from_iso8601(datetime) do
      {:ok, result, _} ->
        result

      {:error, _} ->
        raise ArgumentError, "Bad datetime: #{datetime}"
    end
  end
end
|
lib/cased/export.ex
| 0.895682
| 0.686337
|
export.ex
|
starcoder
|
defmodule Adventofcode.Day03CrossedWires do
use Adventofcode
alias __MODULE__.{CentralPort, Parser, Twister, Wire}
def part_1(input) do
closest_intersection_distance(input)
end
def part_2(input) do
fewest_combined_steps_to_intersection(input)
end
def closest_intersection_distance(input) do
input
|> Parser.parse()
|> Twister.twist()
|> intersections()
|> Map.keys()
|> Enum.map(&manhattan_distance({0, 0}, &1))
|> Enum.min()
end
def fewest_combined_steps_to_intersection(input) do
input
|> Parser.parse()
|> Twister.twist()
|> intersections()
|> Map.values()
|> Enum.min()
end
defp intersections(state) do
wire1 = Enum.at(state.wires, 0)
wire2 = Enum.at(state.wires, 1)
positions1 = MapSet.new(Map.keys(wire1.visited))
positions2 = MapSet.new(Map.keys(wire2.visited))
MapSet.intersection(positions1, positions2)
|> Enum.map(&{&1, wire1.visited[&1] + wire2.visited[&1]})
|> Enum.into(%{})
end
def manhattan_distance({x1, y1}, {x2, y2}), do: abs(x1 - x2) + abs(y1 - y2)
defmodule Wire do
defstruct position: {0, 0}, visited: %{}, instructions: [], steps: 0
end
defmodule CentralPort do
defstruct wires: []
end
defmodule Twister do
def twist(state) do
%{state | wires: twist_wires(state)}
end
defp twist_wires(state) do
state.wires
|> Enum.map(&twist_wire(state, &1))
end
defp twist_wire(state, %Wire{instructions: []}), do: state
defp twist_wire(_state, wire) do
wire.instructions
|> Enum.flat_map(&split_instruction/1)
|> Enum.reduce(wire, fn instruction, wire ->
next_pos = move(wire.position, instruction)
steps = wire.steps + 1
%{
wire
| position: next_pos,
visited: Map.put(wire.visited, next_pos, steps),
steps: steps
}
end)
end
defp move({x, y}, "L"), do: {x - 1, y}
defp move({x, y}, "R"), do: {x + 1, y}
defp move({x, y}, "U"), do: {x, y - 1}
defp move({x, y}, "D"), do: {x, y + 1}
defp split_instruction(instruction) do
direction = String.at(instruction, 0)
distance = instruction |> String.slice(1..-1) |> String.to_integer()
List.duplicate(direction, distance)
end
end
defmodule Printer do
  @moduledoc "Renders a fixed window of the grid as ASCII art for debugging."

  # Window of the grid that gets printed (@rows are y values, @cols are x).
  @rows -8..1
  @cols -1..9

  def print(state) do
    IO.puts(build(state))
  end

  defp build(state) do
    Enum.map_join(@rows, "\n", &build_line(state, &1))
  end

  defp build_line(state, y) do
    Enum.map_join(@cols, &get_value(state, {&1, y}))
  end

  # "o" marks the central port at the origin.
  defp get_value(_state, {0, 0}), do: "o"

  defp get_value(state, {x, y}) do
    if Enum.any?(state.wires, &has_visited(&1, {x, y})) do
      "#"
    else
      "."
    end
  end

  # PERF FIX: the original built a fresh MapSet of every visited key for
  # each printed cell; a direct Map.has_key?/2 lookup is equivalent and O(1).
  defp has_visited(wire, position) do
    Map.has_key?(wire.visited, position)
  end
end
defmodule Parser do
  @moduledoc "Parses puzzle input into a central port holding one wire per line."

  alias Adventofcode.Day03CrossedWires.CentralPort

  def parse(input) do
    input
    |> String.split("\n")
    |> Enum.map(&build_wire/1)
    |> build_central_port()
  end

  # One comma-separated line of instructions becomes one wire.
  defp build_wire(line) do
    %Wire{instructions: String.split(line, ",")}
  end

  def build_central_port(wires) do
    %CentralPort{wires: wires}
  end
end
end
|
lib/day_03_crossed_wires.ex
| 0.631481
| 0.701636
|
day_03_crossed_wires.ex
|
starcoder
|
defmodule Adventofcode.Day16ChronalClassification do
use Adventofcode
defmodule Part1 do
  @moduledoc "Part 1: count samples matching three or more operations."

  alias Adventofcode.Day16ChronalClassification.Samples

  def solve(input) do
    input
    |> Samples.detect_matching_operations()
    |> Enum.count(fn {_opcode, operation_names} -> length(operation_names) >= 3 end)
  end
end
defmodule Part2 do
  @moduledoc """
  Part 2: deduce which opcode number maps to which operation name by
  elimination, then run the test program and return register 0.
  """

  alias Adventofcode.Day16ChronalClassification.{Classification, Samples}

  def solve(input) do
    input
    |> map_opcodes_to_operation_names()
    |> run_test_program(parse_test_program(input))
    |> Map.get(0)
  end

  # Resolves every opcode number to exactly one operation name.
  def map_opcodes_to_operation_names(input) do
    input
    |> Samples.detect_matching_operations()
    |> do_map_opcodes([], %{}, length(Classification.operation_names()))
  end

  # items: candidates still to examine this pass; rest: deferred candidates;
  # acc: resolved opcode => name map; expected_count: names left to resolve.
  defp do_map_opcodes(items, rest, acc, expected_count)

  # Done if there's nothing left to map
  defp do_map_opcodes([], [], acc, _), do: acc

  # Done if all codes have been mapped
  defp do_map_opcodes(_, _, acc, goal) when map_size(acc) == goal, do: acc

  # Wrap around and check rest when items have all been checked
  defp do_map_opcodes([], rest, acc, goal) do
    do_map_opcodes(Enum.reverse(rest), [], acc, goal)
  end

  # Found opcode with only a single match, collect and eliminate possibilities
  defp do_map_opcodes([{opcode, [name]} | tail], rest, acc, goal) do
    tail
    |> eliminate_possibilities(opcode, name)
    |> do_map_opcodes(rest, Map.put(acc, opcode, name), goal)
  end

  # Nothing can be done with this one, check the next one
  defp do_map_opcodes([head | tail], rest, acc, goal) do
    do_map_opcodes(tail, [head | rest], acc, goal)
  end

  # Drops the resolved opcode and removes its name from all other candidates.
  defp eliminate_possibilities(opcodes, opcode, name) do
    opcodes
    |> Enum.reject(fn {code, _} -> code == opcode end)
    |> Enum.map(fn {code, names} -> {code, names -- [name]} end)
  end

  # Runs each instruction against an (initially empty) register map.
  defp run_test_program(opcode_mapper, test_program) do
    Enum.reduce(test_program, %{}, fn {opcode, a, b, c}, acc ->
      operation_name = Map.fetch!(opcode_mapper, opcode)
      apply(Classification, operation_name, [a, b, c, acc])
    end)
  end

  # The test program is the section after the blank-line-separated samples.
  defp parse_test_program(input) do
    input
    |> String.split("\n\n\n")
    |> Enum.at(1)
    |> String.trim("\n")
    |> String.split("\n")
    |> Enum.map(&Samples.parse_line/1)
  end
end
defmodule Samples do
  @moduledoc """
  Parses "Before / instruction / After" samples and determines which
  operations are consistent with each sample.
  """

  alias Adventofcode.Day16ChronalClassification.Classification

  # Returns one {opcode, matching_operation_names} pair per sample.
  def detect_matching_operations(input) do
    input
    |> parse()
    |> Enum.map(fn {_before, {opcode, _a, _b, _c}, _after} = sample ->
      {opcode, matching_operations(sample)}
    end)
  end

  defp matching_operations({registers_before, instruction, registers_after}) do
    state = tuple_to_map(registers_before)
    expected = tuple_to_map(registers_after)

    Enum.filter(
      Classification.operation_names(),
      &matching_operation?(&1, state, instruction, expected)
    )
  end

  # An operation matches when applying it to the before-state yields
  # exactly the expected after-state.
  defp matching_operation?(operation_name, state, {_code, a, b, c}, expected) do
    apply(Classification, operation_name, [a, b, c, state]) == expected
  end

  # {3, 2, 1, 1} -> %{0 => 3, 1 => 2, 2 => 1, 3 => 1}
  defp tuple_to_map(tuple) do
    tuple
    |> Tuple.to_list()
    |> Enum.with_index()
    |> Map.new(fn {value, index} -> {index, value} end)
  end

  def parse(input) do
    [samples_section | _] = String.split(input, "\n\n\n")

    samples_section
    |> String.trim_trailing("\n")
    |> String.split("\n\n")
    |> Enum.map(&parse_sample/1)
  end

  # One sample is three lines: before-registers, instruction, after-registers.
  defp parse_sample(sample_input) do
    sample_input
    |> String.split("\n")
    |> Enum.map(&parse_line/1)
    |> List.to_tuple()
  end

  # Extracts every run of digits on a line into a tuple of integers.
  def parse_line(line) do
    ~r/\d+/
    |> Regex.scan(line)
    |> Enum.map(fn [digits] -> String.to_integer(digits) end)
    |> List.to_tuple()
  end
end
defmodule Classification do
  @moduledoc """
  The sixteen wrist-device operations. Each takes inputs `a` and `b`
  plus an output register `c`, and returns the updated register map.
  Registers not present in the map read as 0.
  """

  # MODERNIZATION: `use Bitwise` is deprecated; `import Bitwise` brings
  # band/2 and bor/2 into scope with identical behavior.
  import Bitwise

  # All sixteen operations share arity 4, so the list can be built by
  # reflection over this module's function table.
  def operation_names do
    __info__(:functions)
    |> Enum.filter(fn {_name, arity} -> arity == 4 end)
    |> Enum.map(fn {name, _arity} -> name end)
  end

  def operation_functions do
    Enum.map(operation_names(), &Function.capture(__MODULE__, &1, 4))
  end

  # Addition (register/immediate)
  def addr(a, b, c, state), do: Map.put(state, c, Map.get(state, a, 0) + Map.get(state, b, 0))
  def addi(a, b, c, state), do: Map.put(state, c, Map.get(state, a, 0) + b)

  # Multiplication
  def mulr(a, b, c, state), do: Map.put(state, c, Map.get(state, a, 0) * Map.get(state, b, 0))
  def muli(a, b, c, state), do: Map.put(state, c, Map.get(state, a, 0) * b)

  # Bitwise AND / OR
  def banr(a, b, c, state), do: Map.put(state, c, band(Map.get(state, a, 0), Map.get(state, b, 0)))
  def bani(a, b, c, state), do: Map.put(state, c, band(Map.get(state, a, 0), b))
  def borr(a, b, c, state), do: Map.put(state, c, bor(Map.get(state, a, 0), Map.get(state, b, 0)))
  def bori(a, b, c, state), do: Map.put(state, c, bor(Map.get(state, a, 0), b))

  # Assignment
  def setr(a, _b, c, state), do: Map.put(state, c, Map.get(state, a, 0))
  def seti(a, _b, c, state), do: Map.put(state, c, a)

  # Greater-than tests: write 1 when true, 0 otherwise.
  def gtir(a, b, c, state), do: Map.put(state, c, if(a > Map.get(state, b, 0), do: 1, else: 0))
  def gtri(a, b, c, state), do: Map.put(state, c, if(Map.get(state, a, 0) > b, do: 1, else: 0))

  def gtrr(a, b, c, state) do
    value = if(Map.get(state, a, 0) > Map.get(state, b, 0), do: 1, else: 0)
    Map.put(state, c, value)
  end

  # Equality tests: write 1 when true, 0 otherwise.
  def eqir(a, b, c, state), do: Map.put(state, c, if(a == Map.get(state, b, 0), do: 1, else: 0))
  def eqri(a, b, c, state), do: Map.put(state, c, if(Map.get(state, a, 0) == b, do: 1, else: 0))

  def eqrr(a, b, c, state) do
    value = if(Map.get(state, a, 0) == Map.get(state, b, 0), do: 1, else: 0)
    Map.put(state, c, value)
  end
end
end
|
lib/day_16_chronal_classification.ex
| 0.745676
| 0.561335
|
day_16_chronal_classification.ex
|
starcoder
|
defmodule SimpleSchema do
@moduledoc """
#{File.read!("README.md")}
"""
@type simple_schema :: SimpleSchema.Schema.simple_schema()
@callback schema(opts :: Keyword.t()) :: simple_schema
@callback from_json(schema :: simple_schema, json :: any, opts :: Keyword.t()) ::
{:ok, any} | {:error, any}
@callback to_json(schema :: simple_schema, value :: any, opts :: Keyword.t()) ::
{:ok, any} | {:error, any}
@undefined_default :simple_schema_default_undefined
# Fetches the declared :default for a schema entry, falling back to the
# @undefined_default sentinel when the entry declares none.
defp get_default({_type, opts}), do: Keyword.get(opts, :default, @undefined_default)
defp get_default(_type), do: @undefined_default
@doc ~S"""
Generate a struct and implement SimpleSchema behaviour by the specified schema.

```
defmodule MySchema do
  defschema [
    username: {:string, min_length: 4},
    email: {:string, default: "", optional: true, format: :email},
  ]
end
```

is converted to:

```
defmodule MySchema do
  @enforce_keys [:username]
  defstruct [:username, email: ""]

  @behaviour SimpleSchema

  @simple_schema %{
    username: {:string, min_length: 4},
    email: {:string, default: "", optional: true, format: :email},
  }

  @impl SimpleSchema
  def schema(opts) do
    {@simple_schema, opts}
  end

  @impl SimpleSchema
  def from_json(schema, value, _opts) do
    SimpleSchema.Type.json_to_struct(__MODULE__, schema, value)
  end

  @impl SimpleSchema
  def to_json(schema, value, _opts) do
    SimpleSchema.Type.struct_to_json(__MODULE__, schema, value)
  end
end
```
"""
defmacro defschema(schema, options \\ []) do
  # Keys without a :default option become @enforce_keys (required fields).
  enforce_keys =
    schema
    |> Enum.filter(fn {_key, value} ->
      get_default(value) == @undefined_default
    end)
    |> Enum.map(fn {key, _} -> key end)

  # defstruct entries: a bare key when required, {key, default} otherwise.
  structs =
    schema
    |> Enum.map(fn {key, value} ->
      default = get_default(value)

      if default == @undefined_default do
        key
      else
        {key, default}
      end
    end)

  simple_schema = schema

  quote do
    @enforce_keys unquote(enforce_keys)
    defstruct unquote(structs)

    @behaviour SimpleSchema

    @simple_schema Enum.into(unquote(simple_schema), %{})

    @impl SimpleSchema
    def schema(opts) do
      {@simple_schema, unquote(options) ++ opts}
    end

    @impl SimpleSchema
    def from_json(schema, value, _opts) do
      SimpleSchema.Type.json_to_struct(__MODULE__, schema, value)
    end

    @impl SimpleSchema
    def to_json(schema, value, _opts) do
      SimpleSchema.Type.struct_to_json(__MODULE__, schema, value)
    end
  end
end
@doc """
Convert JSON value to a simple schema value.

JSON value is validated before it is converted to a simple schema value.
If `optimistic: true` is specified in `opts`, JSON value is not validated before it is converted.
"""
def from_json(schema, json, opts \\ []) do
  if Keyword.get(opts, :optimistic, false) do
    SimpleSchema.Schema.from_json(schema, json)
  else
    cache_key = Keyword.get(opts, :cache_key, schema)

    get_json_schema =
      Keyword.get(opts, :get_json_schema, fn schema, opts ->
        SimpleSchema.Schema.to_json_schema(schema, opts)
      end)

    case SimpleSchema.Validator.validate(get_json_schema, schema, opts, json, cache_key) do
      :ok -> SimpleSchema.Schema.from_json(schema, json)
      {:error, reason} -> {:error, reason}
    end
  end
end
@doc """
Same as `from_json/3`, but raises on failure instead of returning an
`{:error, reason}` tuple.
"""
def from_json!(schema, json, opts \\ []) do
  case from_json(schema, json, opts) do
    {:ok, value} ->
      value

    {:error, reason} ->
      # BUG FIX: the message referenced from_json/2, but this function and
      # its sibling are arity 3 (matching to_json!/3's message).
      raise "failed from_json/3: #{inspect(reason)}"
  end
end
@doc """
Convert a simple schema value to JSON value.

If `optimistic: true` is specified in `opts`, JSON value is not validated after it is converted.
Otherwise, JSON value is validated after it is converted.
"""
def to_json(schema, value, opts \\ []) do
  optimistic = Keyword.get(opts, :optimistic, false)

  case SimpleSchema.Schema.to_json(schema, value) do
    {:error, reason} ->
      {:error, reason}

    {:ok, json} when optimistic ->
      {:ok, json}

    {:ok, json} ->
      cache_key = Keyword.get(opts, :cache_key, schema)

      get_json_schema =
        Keyword.get(opts, :get_json_schema, fn schema, opts ->
          SimpleSchema.Schema.to_json_schema(schema, opts)
        end)

      case SimpleSchema.Validator.validate(get_json_schema, schema, opts, json, cache_key) do
        :ok -> {:ok, json}
        {:error, reason} -> {:error, reason}
      end
  end
end
@doc """
Same as `to_json/3`, but raises on failure instead of returning an
`{:error, reason}` tuple.
"""
def to_json!(schema, value, opts \\ []) do
  case to_json(schema, value, opts) do
    {:error, reason} -> raise "failed to_json/3: #{inspect(reason)}"
    {:ok, json} -> json
  end
end
end
|
lib/simple_schema.ex
| 0.811078
| 0.718422
|
simple_schema.ex
|
starcoder
|
defmodule FarmbotFirmware.Param do
@moduledoc "decodes/encodes integer id to name and vice versa"
require Logger
@type t() :: atom()
@doc "Decodes an integer parameter id to an atom parameter name"
def decode(parameter_id)

# Firmware / configuration parameters
def decode(0), do: :param_version
def decode(1), do: :param_test
def decode(2), do: :param_config_ok
def decode(3), do: :param_use_eeprom
def decode(4), do: :param_e_stop_on_mov_err
def decode(5), do: :param_mov_nr_retry

# Movement timeouts (per axis)
def decode(11), do: :movement_timeout_x
def decode(12), do: :movement_timeout_y
def decode(13), do: :movement_timeout_z

# Motor/homing behaviour flags
def decode(15), do: :movement_keep_active_x
def decode(16), do: :movement_keep_active_y
def decode(17), do: :movement_keep_active_z
def decode(18), do: :movement_home_at_boot_x
def decode(19), do: :movement_home_at_boot_y
def decode(20), do: :movement_home_at_boot_z

# Endstop configuration
def decode(21), do: :movement_invert_endpoints_x
def decode(22), do: :movement_invert_endpoints_y
def decode(23), do: :movement_invert_endpoints_z
def decode(25), do: :movement_enable_endpoints_x
def decode(26), do: :movement_enable_endpoints_y
def decode(27), do: :movement_enable_endpoints_z

# Motor direction / secondary motor
def decode(31), do: :movement_invert_motor_x
def decode(32), do: :movement_invert_motor_y
def decode(33), do: :movement_invert_motor_z
def decode(36), do: :movement_secondary_motor_x
def decode(37), do: :movement_secondary_motor_invert_x

# Acceleration, homing, steps-per-mm and speed limits
def decode(41), do: :movement_steps_acc_dec_x
def decode(42), do: :movement_steps_acc_dec_y
def decode(43), do: :movement_steps_acc_dec_z
def decode(45), do: :movement_stop_at_home_x
def decode(46), do: :movement_stop_at_home_y
def decode(47), do: :movement_stop_at_home_z
def decode(51), do: :movement_home_up_x
def decode(52), do: :movement_home_up_y
def decode(53), do: :movement_home_up_z
def decode(55), do: :movement_step_per_mm_x
def decode(56), do: :movement_step_per_mm_y
def decode(57), do: :movement_step_per_mm_z
def decode(61), do: :movement_min_spd_x
def decode(62), do: :movement_min_spd_y
def decode(63), do: :movement_min_spd_z
def decode(65), do: :movement_home_spd_x
def decode(66), do: :movement_home_spd_y
def decode(67), do: :movement_home_spd_z
def decode(71), do: :movement_max_spd_x
def decode(72), do: :movement_max_spd_y
def decode(73), do: :movement_max_spd_z
def decode(75), do: :movement_invert_2_endpoints_x
def decode(76), do: :movement_invert_2_endpoints_y
def decode(77), do: :movement_invert_2_endpoints_z

# Motor current, stall detection and microstepping
def decode(81), do: :movement_motor_current_x
def decode(82), do: :movement_motor_current_y
def decode(83), do: :movement_motor_current_z
def decode(85), do: :movement_stall_sensitivity_x
def decode(86), do: :movement_stall_sensitivity_y
def decode(87), do: :movement_stall_sensitivity_z
def decode(91), do: :movement_microsteps_x
def decode(92), do: :movement_microsteps_y
def decode(93), do: :movement_microsteps_z

# Encoder configuration
def decode(101), do: :encoder_enabled_x
def decode(102), do: :encoder_enabled_y
def decode(103), do: :encoder_enabled_z
def decode(105), do: :encoder_type_x
def decode(106), do: :encoder_type_y
def decode(107), do: :encoder_type_z
def decode(111), do: :encoder_missed_steps_max_x
def decode(112), do: :encoder_missed_steps_max_y
def decode(113), do: :encoder_missed_steps_max_z
def decode(115), do: :encoder_scaling_x
def decode(116), do: :encoder_scaling_y
def decode(117), do: :encoder_scaling_z
def decode(121), do: :encoder_missed_steps_decay_x
def decode(122), do: :encoder_missed_steps_decay_y
def decode(123), do: :encoder_missed_steps_decay_z
def decode(125), do: :encoder_use_for_pos_x
def decode(126), do: :encoder_use_for_pos_y
def decode(127), do: :encoder_use_for_pos_z
def decode(131), do: :encoder_invert_x
def decode(132), do: :encoder_invert_y
def decode(133), do: :encoder_invert_z

# Axis length and travel limits
def decode(141), do: :movement_axis_nr_steps_x
def decode(142), do: :movement_axis_nr_steps_y
def decode(143), do: :movement_axis_nr_steps_z
def decode(145), do: :movement_stop_at_max_x
def decode(146), do: :movement_stop_at_max_y
def decode(147), do: :movement_stop_at_max_z

# Pin guards 1-5 (pin number, timeout, safe state)
def decode(201), do: :pin_guard_1_pin_nr
def decode(202), do: :pin_guard_1_time_out
def decode(203), do: :pin_guard_1_active_state
def decode(205), do: :pin_guard_2_pin_nr
def decode(206), do: :pin_guard_2_time_out
def decode(207), do: :pin_guard_2_active_state
def decode(211), do: :pin_guard_3_pin_nr
def decode(212), do: :pin_guard_3_time_out
def decode(213), do: :pin_guard_3_active_state
def decode(215), do: :pin_guard_4_pin_nr
def decode(216), do: :pin_guard_4_time_out
def decode(217), do: :pin_guard_4_active_state
def decode(221), do: :pin_guard_5_pin_nr
def decode(222), do: :pin_guard_5_time_out
def decode(223), do: :pin_guard_5_active_state

# Unknown ids are logged and mapped to a sentinel atom rather than raising.
def decode(unknown) when is_integer(unknown) do
  Logger.error("unknown firmware parameter: #{unknown}")
  :unknown_parameter
end
@doc "Encodes an atom parameter name to an integer parameter id."
def encode(parameter)

# Firmware / configuration parameters
def encode(:param_version), do: 0
def encode(:param_test), do: 1
def encode(:param_config_ok), do: 2
def encode(:param_use_eeprom), do: 3
def encode(:param_e_stop_on_mov_err), do: 4
def encode(:param_mov_nr_retry), do: 5

# Movement timeouts (per axis)
def encode(:movement_timeout_x), do: 11
def encode(:movement_timeout_y), do: 12
def encode(:movement_timeout_z), do: 13

# Motor/homing behaviour flags
def encode(:movement_keep_active_x), do: 15
def encode(:movement_keep_active_y), do: 16
def encode(:movement_keep_active_z), do: 17
def encode(:movement_home_at_boot_x), do: 18
def encode(:movement_home_at_boot_y), do: 19
def encode(:movement_home_at_boot_z), do: 20

# Endstop configuration
def encode(:movement_invert_endpoints_x), do: 21
def encode(:movement_invert_endpoints_y), do: 22
def encode(:movement_invert_endpoints_z), do: 23
def encode(:movement_enable_endpoints_x), do: 25
def encode(:movement_enable_endpoints_y), do: 26
def encode(:movement_enable_endpoints_z), do: 27

# Motor direction / secondary motor
def encode(:movement_invert_motor_x), do: 31
def encode(:movement_invert_motor_y), do: 32
def encode(:movement_invert_motor_z), do: 33
def encode(:movement_secondary_motor_x), do: 36
def encode(:movement_secondary_motor_invert_x), do: 37

# Acceleration, homing, steps-per-mm and speed limits
def encode(:movement_steps_acc_dec_x), do: 41
def encode(:movement_steps_acc_dec_y), do: 42
def encode(:movement_steps_acc_dec_z), do: 43
def encode(:movement_stop_at_home_x), do: 45
def encode(:movement_stop_at_home_y), do: 46
def encode(:movement_stop_at_home_z), do: 47
def encode(:movement_home_up_x), do: 51
def encode(:movement_home_up_y), do: 52
def encode(:movement_home_up_z), do: 53
def encode(:movement_step_per_mm_x), do: 55
def encode(:movement_step_per_mm_y), do: 56
def encode(:movement_step_per_mm_z), do: 57
def encode(:movement_min_spd_x), do: 61
def encode(:movement_min_spd_y), do: 62
def encode(:movement_min_spd_z), do: 63
def encode(:movement_home_spd_x), do: 65
def encode(:movement_home_spd_y), do: 66
def encode(:movement_home_spd_z), do: 67
def encode(:movement_max_spd_x), do: 71
def encode(:movement_max_spd_y), do: 72
def encode(:movement_max_spd_z), do: 73
def encode(:movement_invert_2_endpoints_x), do: 75
def encode(:movement_invert_2_endpoints_y), do: 76
def encode(:movement_invert_2_endpoints_z), do: 77

# Motor current, stall detection and microstepping
def encode(:movement_motor_current_x), do: 81
def encode(:movement_motor_current_y), do: 82
def encode(:movement_motor_current_z), do: 83
def encode(:movement_stall_sensitivity_x), do: 85
def encode(:movement_stall_sensitivity_y), do: 86
def encode(:movement_stall_sensitivity_z), do: 87
def encode(:movement_microsteps_x), do: 91
def encode(:movement_microsteps_y), do: 92
def encode(:movement_microsteps_z), do: 93

# Encoder configuration
def encode(:encoder_enabled_x), do: 101
def encode(:encoder_enabled_y), do: 102
def encode(:encoder_enabled_z), do: 103
def encode(:encoder_type_x), do: 105
def encode(:encoder_type_y), do: 106
def encode(:encoder_type_z), do: 107
def encode(:encoder_missed_steps_max_x), do: 111
def encode(:encoder_missed_steps_max_y), do: 112
def encode(:encoder_missed_steps_max_z), do: 113
def encode(:encoder_scaling_x), do: 115
def encode(:encoder_scaling_y), do: 116
def encode(:encoder_scaling_z), do: 117
def encode(:encoder_missed_steps_decay_x), do: 121
def encode(:encoder_missed_steps_decay_y), do: 122
def encode(:encoder_missed_steps_decay_z), do: 123
def encode(:encoder_use_for_pos_x), do: 125
def encode(:encoder_use_for_pos_y), do: 126
def encode(:encoder_use_for_pos_z), do: 127
def encode(:encoder_invert_x), do: 131
def encode(:encoder_invert_y), do: 132
def encode(:encoder_invert_z), do: 133

# Axis length and travel limits
def encode(:movement_axis_nr_steps_x), do: 141
def encode(:movement_axis_nr_steps_y), do: 142
def encode(:movement_axis_nr_steps_z), do: 143
def encode(:movement_stop_at_max_x), do: 145
def encode(:movement_stop_at_max_y), do: 146
def encode(:movement_stop_at_max_z), do: 147

# Pin guards 1-5 (pin number, timeout, safe state).
# NOTE(review): unlike decode/1, there is no fallback clause here, so
# encode(:unknown_parameter) raises FunctionClauseError — confirm intended.
def encode(:pin_guard_1_pin_nr), do: 201
def encode(:pin_guard_1_time_out), do: 202
def encode(:pin_guard_1_active_state), do: 203
def encode(:pin_guard_2_pin_nr), do: 205
def encode(:pin_guard_2_time_out), do: 206
def encode(:pin_guard_2_active_state), do: 207
def encode(:pin_guard_3_pin_nr), do: 211
def encode(:pin_guard_3_time_out), do: 212
def encode(:pin_guard_3_active_state), do: 213
def encode(:pin_guard_4_pin_nr), do: 215
def encode(:pin_guard_4_time_out), do: 216
def encode(:pin_guard_4_active_state), do: 217
def encode(:pin_guard_5_pin_nr), do: 221
def encode(:pin_guard_5_time_out), do: 222
def encode(:pin_guard_5_active_state), do: 223
@typedoc "The human readable name of a param"
@type human() :: String.t()

@typedoc "Human readable units for param"
@type unit() :: String.t()

# Unit suffix strings displayed next to translated values.
@seconds "(seconds)"
@steps "(steps)"
@steps_per_mm "(steps/mm)"
@milliamps "(milliamps)"

@doc "Translates a param to a human readable string"
# SPEC FIX: every clause below returns {human, unit-or-nil, value}, but the
# original spec declared {human, value, unit}; the value may also be a
# boolean (format_bool/1) or "HIGH"/"LOW" (format_high_low_inverted/1).
@spec to_human(parameter :: t() | number(), value :: number()) ::
        {human(), nil | unit(), value :: String.t() | boolean()}
def to_human(parameter, value)

# Integer ids are first decoded to their atom name.
def to_human(id, value) when is_number(id), do: decode(id) |> to_human(value)

def to_human(:param_version, value),
  do: {"param_version", nil, format_float(value)}
# Firmware / configuration parameters
def to_human(:param_test, value),
  do: {"param_test", nil, format_bool(value)}

def to_human(:param_config_ok, value),
  do: {"param_config_ok", nil, format_bool(value)}

def to_human(:param_use_eeprom, value),
  do: {"use eeprom", nil, format_bool(value)}

def to_human(:param_e_stop_on_mov_err, value),
  do: {"e-stop on movement errors", nil, format_bool(value)}

def to_human(:param_mov_nr_retry, value),
  do: {"max retries", nil, format_float(value)}

# Movement timeouts
def to_human(:movement_timeout_x, value),
  do: {"timeout after, x-axis", @seconds, format_float(value)}

def to_human(:movement_timeout_y, value),
  do: {"timeout after, y-axis", @seconds, format_float(value)}

def to_human(:movement_timeout_z, value),
  do: {"timeout after, z-axis", @seconds, format_float(value)}

# Motor / homing behaviour flags
def to_human(:movement_keep_active_x, value),
  do: {"always power motors, x-axis", nil, format_bool(value)}

def to_human(:movement_keep_active_y, value),
  do: {"always power motors, y-axis", nil, format_bool(value)}

def to_human(:movement_keep_active_z, value),
  do: {"always power motors, z-axis", nil, format_bool(value)}

def to_human(:movement_home_at_boot_x, value),
  do: {"find home on boot, x-axis", nil, format_bool(value)}

def to_human(:movement_home_at_boot_y, value),
  do: {"find home on boot, y-axis", nil, format_bool(value)}

def to_human(:movement_home_at_boot_z, value),
  do: {"find home on boot, z-axis", nil, format_bool(value)}

# Endstop configuration
def to_human(:movement_invert_endpoints_x, value),
  do: {"swap endstops, x-axis", nil, format_bool(value)}

def to_human(:movement_invert_endpoints_y, value),
  do: {"swap endstops, y-axis", nil, format_bool(value)}

def to_human(:movement_invert_endpoints_z, value),
  do: {"swap endstops, z-axis", nil, format_bool(value)}

def to_human(:movement_enable_endpoints_x, value),
  do: {"enable endstops, x-axis", nil, format_bool(value)}

def to_human(:movement_enable_endpoints_y, value),
  do: {"enable endstops, y-axis", nil, format_bool(value)}

def to_human(:movement_enable_endpoints_z, value),
  do: {"enable endstops, z-axis", nil, format_bool(value)}

# Motor direction / secondary motor
def to_human(:movement_invert_motor_x, value),
  do: {"invert motor, x-axis", nil, format_bool(value)}

def to_human(:movement_invert_motor_y, value),
  do: {"invert motor, y-axis", nil, format_bool(value)}

def to_human(:movement_invert_motor_z, value),
  do: {"invert motor, z-axis", nil, format_bool(value)}

def to_human(:movement_secondary_motor_x, value),
  do: {"enable 2nd x motor", nil, format_bool(value)}

def to_human(:movement_secondary_motor_invert_x, value),
  do: {"invert 2nd x motor", nil, format_bool(value)}

# Acceleration, homing, steps-per-mm and speed limits
def to_human(:movement_steps_acc_dec_x, value),
  do: {"accelerate for, x-axis", @steps, format_float(value)}

def to_human(:movement_steps_acc_dec_y, value),
  do: {"accelerate for, y-axis", @steps, format_float(value)}

def to_human(:movement_steps_acc_dec_z, value),
  do: {"accelerate for, z-axis", @steps, format_float(value)}

def to_human(:movement_stop_at_home_x, value),
  do: {"stop at home, x-axis", nil, format_bool(value)}

def to_human(:movement_stop_at_home_y, value),
  do: {"stop at home, y-axis", nil, format_bool(value)}

def to_human(:movement_stop_at_home_z, value),
  do: {"stop at home, z-axis", nil, format_bool(value)}

def to_human(:movement_home_up_x, value),
  do: {"negative coordinates only, x-axis", nil, format_bool(value)}

def to_human(:movement_home_up_y, value),
  do: {"negative coordinates only, y-axis", nil, format_bool(value)}

def to_human(:movement_home_up_z, value),
  do: {"negative coordinates only, z-axis", nil, format_bool(value)}

def to_human(:movement_step_per_mm_x, value),
  do: {"steps per mm, x-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_step_per_mm_y, value),
  do: {"steps per mm, y-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_step_per_mm_z, value),
  do: {"steps per mm, z-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_min_spd_x, value),
  do: {"minimum speed, x-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_min_spd_y, value),
  do: {"minimum speed, y-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_min_spd_z, value),
  do: {"minimum speed, z-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_home_spd_x, value),
  do: {"homing speed, x-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_home_spd_y, value),
  do: {"homing speed, y-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_home_spd_z, value),
  do: {"homing speed, z-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_max_spd_x, value),
  do: {"max speed, x-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_max_spd_y, value),
  do: {"max speed, y-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_max_spd_z, value),
  do: {"max speed, z-axis", @steps_per_mm, format_float(value)}

def to_human(:movement_invert_2_endpoints_x, value),
  do: {"invert endstops, x-axis", nil, format_bool(value)}

def to_human(:movement_invert_2_endpoints_y, value),
  do: {"invert endstops, y-axis", nil, format_bool(value)}

def to_human(:movement_invert_2_endpoints_z, value),
  do: {"invert endstops, z-axis", nil, format_bool(value)}

# Motor current, stall detection and microstepping
def to_human(:movement_motor_current_x, value),
  do: {"motor current, x-axis", @milliamps, format_float(value)}

def to_human(:movement_motor_current_y, value),
  do: {"motor current, y-axis", @milliamps, format_float(value)}

def to_human(:movement_motor_current_z, value),
  do: {"motor current, z-axis", @milliamps, format_float(value)}

def to_human(:movement_stall_sensitivity_x, value),
  do: {"stall sensitivity, x-axis", nil, format_float(value)}

def to_human(:movement_stall_sensitivity_y, value),
  do: {"stall sensitivity, y-axis", nil, format_float(value)}

def to_human(:movement_stall_sensitivity_z, value),
  do: {"stall sensitivity, z-axis", nil, format_float(value)}

def to_human(:movement_microsteps_x, value),
  do: {"microsteps, x-axis", nil, format_float(value)}

def to_human(:movement_microsteps_y, value),
  do: {"microsteps, y-axis", nil, format_float(value)}

def to_human(:movement_microsteps_z, value),
  do: {"microsteps, z-axis", nil, format_float(value)}

# Encoder configuration
def to_human(:encoder_enabled_x, value),
  do: {"enable encoders, x-axis", nil, format_bool(value)}

def to_human(:encoder_enabled_y, value),
  do: {"enable encoders, y-axis", nil, format_bool(value)}

def to_human(:encoder_enabled_z, value),
  do: {"enable encoders, z-axis", nil, format_bool(value)}

def to_human(:encoder_type_x, value),
  do: {"encoder type, x-axis", nil, format_float(value)}

def to_human(:encoder_type_y, value),
  do: {"encoder type, y-axis", nil, format_float(value)}

def to_human(:encoder_type_z, value),
  do: {"encoder type, z-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_max_x, value),
  do: {"max missed steps, x-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_max_y, value),
  do: {"max missed steps, y-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_max_z, value),
  do: {"max missed steps, z-axis", nil, format_float(value)}

def to_human(:encoder_scaling_x, value),
  do: {"encoder scaling, x-axis", nil, format_float(value)}

def to_human(:encoder_scaling_y, value),
  do: {"encoder scaling, y-axis", nil, format_float(value)}

def to_human(:encoder_scaling_z, value),
  do: {"encoder scaling, z-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_decay_x, value),
  do: {"encoder missed steps decay, x-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_decay_y, value),
  do: {"encoder missed steps decay, y-axis", nil, format_float(value)}

def to_human(:encoder_missed_steps_decay_z, value),
  do: {"encoder missed steps decay, z-axis", nil, format_float(value)}

def to_human(:encoder_use_for_pos_x, value),
  do: {"use encoders for positioning, x-axis", nil, format_bool(value)}

def to_human(:encoder_use_for_pos_y, value),
  do: {"use encoders for positioning, y-axis", nil, format_bool(value)}

def to_human(:encoder_use_for_pos_z, value),
  do: {"use encoders for positioning, z-axis", nil, format_bool(value)}

def to_human(:encoder_invert_x, value),
  do: {"invert encoders, x-axis", nil, format_bool(value)}

def to_human(:encoder_invert_y, value),
  do: {"invert encoders, y-axis", nil, format_bool(value)}

def to_human(:encoder_invert_z, value),
  do: {"invert encoders, z-axis", nil, format_bool(value)}

# Axis length and travel limits
def to_human(:movement_axis_nr_steps_x, value),
  do: {"axis length, x-axis", @steps, format_float(value)}

def to_human(:movement_axis_nr_steps_y, value),
  do: {"axis length, y-axis", @steps, format_float(value)}

def to_human(:movement_axis_nr_steps_z, value),
  do: {"axis length, z-axis", @steps, format_float(value)}

def to_human(:movement_stop_at_max_x, value),
  do: {"stop at max, x-axis", nil, format_bool(value)}

def to_human(:movement_stop_at_max_y, value),
  do: {"stop at max, y-axis", nil, format_bool(value)}

def to_human(:movement_stop_at_max_z, value),
  do: {"stop at max, z-axis", nil, format_bool(value)}

# Pin guards 1-5
def to_human(:pin_guard_1_pin_nr, value),
  do: {"pin guard 1 pin number", nil, format_float(value)}

def to_human(:pin_guard_1_time_out, value),
  do: {"pin guard 1 timeout", @seconds, format_float(value)}

def to_human(:pin_guard_1_active_state, value),
  do: {"pin guard 1 safe state", nil, format_high_low_inverted(value)}

def to_human(:pin_guard_2_pin_nr, value),
  do: {"pin guard 2 pin number", nil, format_float(value)}

def to_human(:pin_guard_2_time_out, value),
  do: {"pin guard 2 timeout", @seconds, format_float(value)}

def to_human(:pin_guard_2_active_state, value),
  do: {"pin guard 2 safe state", nil, format_high_low_inverted(value)}

def to_human(:pin_guard_3_pin_nr, value),
  do: {"pin guard 3 pin number", nil, format_float(value)}

def to_human(:pin_guard_3_time_out, value),
  do: {"pin guard 3 timeout", @seconds, format_float(value)}

def to_human(:pin_guard_3_active_state, value),
  do: {"pin guard 3 safe state", nil, format_high_low_inverted(value)}

def to_human(:pin_guard_4_pin_nr, value),
  do: {"pin guard 4 pin number", nil, format_float(value)}

def to_human(:pin_guard_4_time_out, value),
  do: {"pin guard 4 timeout", @seconds, format_float(value)}

def to_human(:pin_guard_4_active_state, value),
  do: {"pin guard 4 safe state", nil, format_high_low_inverted(value)}

def to_human(:pin_guard_5_pin_nr, value),
  do: {"pin guard 5 pin number", nil, format_float(value)}

def to_human(:pin_guard_5_time_out, value),
  do: {"pin guard 5 timeout", @seconds, format_float(value)}

def to_human(:pin_guard_5_active_state, value),
  do: {"pin guard 5 safe state", nil, format_high_low_inverted(value)}
@doc """
Formats a numeric value: whole numbers render without decimals ("2"),
fractional values render with one decimal place ("1.5").
"""
def format_float(value) when is_integer(value) do
  # Promote to float so one clause handles the actual formatting.
  format_float(value / 1)
end

def format_float(value) when is_float(value) do
  # BUG FIX: the original guard was `rem <= 0.0`, which also matched
  # negative fractional remainders (e.g. -1.5 -> fmod of -0.5), silently
  # rounding negative values to whole numbers. `==` still matches -0.0.
  case :math.fmod(value, 1) do
    # value has no remainder
    rem when rem == 0.0 -> :erlang.float_to_binary(value, decimals: 0)
    _ -> :erlang.float_to_binary(value, decimals: 1)
  end
end
# Converts a firmware 1/0 value to a boolean. Uses numeric comparison
# guards (not literal patterns) so 1.0 and 0.0 also match.
def format_bool(value) when value == 1, do: true
def format_bool(value) when value == 0, do: false
# Renders a pin-guard safe state; note the mapping is inverted
# (0 -> "HIGH", 1 -> "LOW"). Numeric guards let 0.0/1.0 match too.
def format_high_low_inverted(value) when value == 0, do: "HIGH"
def format_high_low_inverted(value) when value == 1, do: "LOW"
end
|
farmbot_firmware/lib/farmbot_firmware/param.ex
| 0.693058
| 0.578151
|
param.ex
|
starcoder
|
defmodule AWS.GroundStation do
@moduledoc """
Welcome to the AWS Ground Station API Reference.
AWS Ground Station is a fully managed service that enables you to control
satellite communications, downlink and process satellite data, and scale your
satellite operations efficiently and cost-effectively without having to build or
manage your own ground station infrastructure.
"""
@doc """
Cancels a contact with a specified contact ID.
"""
def cancel_contact(client, contact_id, input, options \\ []) do
  path = "/contact/#{URI.encode(contact_id)}"
  request(client, :delete, path, [], [], input, options, 200)
end
@doc """
Creates a `Config` with the specified `configData` parameters.
Only one type of `configData` can be specified.
"""
def create_config(client, input, options \\ []) do
path_ = "/config"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Creates a `DataflowEndpoint` group containing the specified list of
`DataflowEndpoint` objects.
The `name` field in each endpoint is used in your mission profile
`DataflowEndpointConfig` to specify which endpoints to use during a contact.
When a contact uses multiple `DataflowEndpointConfig` objects, each `Config`
must match a `DataflowEndpoint` in the same group.
"""
def create_dataflow_endpoint_group(client, input, options \\ []) do
path_ = "/dataflowEndpointGroup"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Creates a mission profile.
`dataflowEdges` is a list of lists of strings. Each lower level list of strings
has two elements: a *from* ARN and a *to* ARN.
"""
def create_mission_profile(client, input, options \\ []) do
path_ = "/missionprofile"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Deletes a `Config`.
"""
def delete_config(client, config_id, config_type, input, options \\ []) do
path_ = "/config/#{URI.encode(config_type)}/#{URI.encode(config_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Deletes a dataflow endpoint group.
"""
def delete_dataflow_endpoint_group(client, dataflow_endpoint_group_id, input, options \\ []) do
path_ = "/dataflowEndpointGroup/#{URI.encode(dataflow_endpoint_group_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Deletes a mission profile.
"""
def delete_mission_profile(client, mission_profile_id, input, options \\ []) do
path_ = "/missionprofile/#{URI.encode(mission_profile_id)}"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Describes an existing contact.
"""
def describe_contact(client, contact_id, options \\ []) do
path_ = "/contact/#{URI.encode(contact_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns `Config` information.
Only one `Config` response can be returned.
"""
def get_config(client, config_id, config_type, options \\ []) do
path_ = "/config/#{URI.encode(config_type)}/#{URI.encode(config_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns the dataflow endpoint group.
"""
def get_dataflow_endpoint_group(client, dataflow_endpoint_group_id, options \\ []) do
path_ = "/dataflowEndpointGroup/#{URI.encode(dataflow_endpoint_group_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns the number of minutes used by account.
"""
def get_minute_usage(client, input, options \\ []) do
path_ = "/minute-usage"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Returns a mission profile.
"""
def get_mission_profile(client, mission_profile_id, options \\ []) do
path_ = "/missionprofile/#{URI.encode(mission_profile_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a satellite.
"""
def get_satellite(client, satellite_id, options \\ []) do
path_ = "/satellite/#{URI.encode(satellite_id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of `Config` objects.
"""
def list_configs(client, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/config"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of contacts.
If `statusList` contains AVAILABLE, the request must include `groundStation`,
`missionprofileArn`, and `satelliteArn`.
"""
def list_contacts(client, input, options \\ []) do
path_ = "/contacts"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Returns a list of `DataflowEndpoint` groups.
"""
def list_dataflow_endpoint_groups(client, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/dataflowEndpointGroup"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of ground stations.
"""
def list_ground_stations(client, max_results \\ nil, next_token \\ nil, satellite_id \\ nil, options \\ []) do
path_ = "/groundstation"
headers = []
query_ = []
query_ = if !is_nil(satellite_id) do
[{"satelliteId", satellite_id} | query_]
else
query_
end
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of mission profiles.
"""
def list_mission_profiles(client, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/missionprofile"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of satellites.
"""
def list_satellites(client, max_results \\ nil, next_token \\ nil, options \\ []) do
path_ = "/satellite"
headers = []
query_ = []
query_ = if !is_nil(next_token) do
[{"nextToken", next_token} | query_]
else
query_
end
query_ = if !is_nil(max_results) do
[{"maxResults", max_results} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Returns a list of tags for a specified resource.
"""
def list_tags_for_resource(client, resource_arn, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, 200)
end
@doc """
Reserves a contact using specified parameters.
"""
def reserve_contact(client, input, options \\ []) do
path_ = "/contact"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Assigns a tag to a resource.
"""
def tag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 200)
end
@doc """
Deassigns a resource tag.
"""
def untag_resource(client, resource_arn, input, options \\ []) do
path_ = "/tags/#{URI.encode(resource_arn)}"
headers = []
{query_, input} =
[
{"tagKeys", "tagKeys"},
]
|> AWS.Request.build_params(input)
request(client, :delete, path_, query_, headers, input, options, 200)
end
@doc """
Updates the `Config` used when scheduling contacts.
Updating a `Config` will not update the execution parameters for existing future
contacts scheduled with this `Config`.
"""
def update_config(client, config_id, config_type, input, options \\ []) do
path_ = "/config/#{URI.encode(config_type)}/#{URI.encode(config_id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@doc """
Updates a mission profile.
Updating a mission profile will not update the execution parameters for existing
future contacts.
"""
def update_mission_profile(client, mission_profile_id, input, options \\ []) do
path_ = "/missionprofile/#{URI.encode(mission_profile_id)}"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, 200)
end
@spec request(AWS.Client.t(), binary(), binary(), list(), list(), map(), list(), pos_integer()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "groundstation"}
host = build_host("groundstation", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "application/x-amz-json-1.1"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :json) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/ground_station.ex
| 0.85984
| 0.551936
|
ground_station.ex
|
starcoder
|
defmodule DarkFlows.Workflows.Stages.StepOkStage do
  @moduledoc """
  Adds `:ok` tuple monad handling step to `Opus.Pipeline`.
  """
  @moduledoc since: "1.0.0"

  alias DarkFlows.Workflow
  alias DarkFlows.Workflows.Stages.StepOkStage

  require Opus.Pipeline

  @type opts() :: Keyword.t()

  # A step callback after wrapping: takes the pipeline context, returns a
  # normalized Opus step result.
  @type wrapped_step_ok_fun() :: (Workflow.context() -> Workflow.step_result())

  # The raw shapes a user-written step may return before normalization.
  @type step_result() ::
          Workflow.context()
          | :ok
          | {:ok, Workflow.context()}
          | {:error, any()}
          | {:error, failed_operation :: atom(), failed_value :: any(), changes_so_far :: map()}

  @doc """
  Adds `step_ok` to the `Opus.Pipeline` allowing for `:ok` unboxing
  """
  @spec step_ok(atom(), opts()) :: Macro.t()
  defmacro step_ok(name, opts \\ []) when is_atom(name) do
    # Default :with callback: call the function of the same `name` defined in
    # the module that invoked this macro (__CALLER__.module).
    fallback_with =
      quote do
        &apply(unquote(__CALLER__.module), unquote(name), [&1])
      end

    quote do
      Opus.Pipeline.step(
        unquote(name),
        unquote(
          # Rebuild the option list: the :with callback (explicit or the
          # fallback above) is wrapped in StepOkStage.wrap_ok/1 so its result
          # gets normalized; every other option passes through unchanged.
          for {key, val} <- Keyword.put_new(opts, :with, fallback_with), into: [] do
            case key do
              :with -> {key, quote(do: StepOkStage.wrap_ok(unquote(val)))}
              _ -> {key, val}
            end
          end
        )
      )
    end
  end

  @doc """
  Handles unboxing `:ok` monads for ease-of-use with `Opus.Pipeline`.
  """
  @spec wrap_ok(Workflow.step_fun()) :: wrapped_step_ok_fun()
  def wrap_ok(fun) when is_function(fun, 1) do
    # Returns a 1-arity closure that runs the step and normalizes its result.
    fn context when is_map(context) ->
      result = fun.(context)
      handle_result(context, result)
    end
  end

  @doc """
  Normalize the result of `:ok` monad steps
  """
  @spec handle_result(Workflow.context(), step_result()) :: Workflow.step_result()
  # Bare :ok -> keep the context unchanged.
  def handle_result(context, :ok) when is_map(context) do
    context
  end

  # {:ok, map} -> merge the returned map into the context (result keys win).
  def handle_result(context, {:ok, results}) when is_map(context) and is_map(results) do
    Map.merge(context, results)
  end

  # A bare map result is merged the same way as {:ok, map}.
  def handle_result(context, results) when is_map(context) and is_map(results) do
    Map.merge(context, results)
  end

  # Four-element error tuple (operation, value, changes so far) is flattened
  # into a single {:error, map} for uniform downstream handling.
  def handle_result(context, {:error, failed_operation, failed_value, changes_so_far})
      when is_map(context) and is_atom(failed_operation) and is_map(changes_so_far) do
    {:error,
     %{
       failed_operation: failed_operation,
       failed_value: failed_value,
       changes_so_far: changes_so_far
     }}
  end

  # Two-element error tuples pass through unchanged.
  def handle_result(context, {:error, failed_value})
      when is_map(context) do
    {:error, failed_value}
  end
end
|
lib/dark_flows/workflows/stages/step_ok_stage.ex
| 0.831143
| 0.500488
|
step_ok_stage.ex
|
starcoder
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.