if Code.ensure_loaded?(:telemetry) do
defmodule Tesla.Middleware.Telemetry do
@moduledoc """
Emits events using the `:telemetry` library to expose instrumentation.
## Example usage
```
defmodule MyClient do
use Tesla
plug Tesla.Middleware.Telemetry
end
:telemetry.attach("my-tesla-telemetry", [:tesla, :request, :stop], fn event, measurements, meta, config ->
# Do something with the event
end)
```
## Telemetry Events
* `[:tesla, :request, :start]` - emitted at the beginning of the request.
* Measurement: `%{system_time: System.system_time()}`
* Metadata: `%{env: Tesla.Env.t()}`
* `[:tesla, :request, :stop]` - emitted at the end of the request.
* Measurement: `%{duration: native_time}`
* Metadata: `%{env: Tesla.Env.t()} | %{env: Tesla.Env.t(), error: term()}`
* `[:tesla, :request, :exception]` - emitted when an exception has been raised.
* Measurement: `%{duration: native_time}`
* Metadata: `%{kind: Exception.kind(), reason: term(), stacktrace: Exception.stacktrace()}`
## Legacy Telemetry Events
* `[:tesla, :request]` - This event is emitted for backwards compatibility only and should be considered deprecated.
This event can be disabled by setting `config :tesla, Tesla.Middleware.Telemetry, disable_legacy_event: true` in your config. Be sure to run `mix deps.compile --force tesla` after changing this setting to ensure the change is picked up.
See the [telemetry](https://hexdocs.pm/telemetry/) documentation for further usage.
"""
@disable_legacy_event Application.get_env(:tesla, Tesla.Middleware.Telemetry,
disable_legacy_event: false
)[:disable_legacy_event]
@behaviour Tesla.Middleware
@impl Tesla.Middleware
def call(env, next, _opts) do
start_time = System.monotonic_time()
emit_start(%{env: env})
try do
Tesla.run(env, next)
catch
kind, reason ->
stacktrace = __STACKTRACE__
duration = System.monotonic_time() - start_time
emit_exception(duration, %{kind: kind, reason: reason, stacktrace: stacktrace})
:erlang.raise(kind, reason, stacktrace)
else
{:ok, env} = result ->
duration = System.monotonic_time() - start_time
emit_stop(duration, %{env: env})
emit_legacy_event(duration, result)
result
{:error, reason} = result ->
duration = System.monotonic_time() - start_time
emit_stop(duration, %{env: env, error: reason})
emit_legacy_event(duration, result)
result
end
end
defp emit_start(metadata) do
:telemetry.execute(
[:tesla, :request, :start],
%{system_time: System.system_time()},
metadata
)
end
defp emit_stop(duration, metadata) do
:telemetry.execute(
[:tesla, :request, :stop],
%{duration: duration},
metadata
)
end
defp emit_legacy_event(duration, result) do
if !@disable_legacy_event do
duration_µs = System.convert_time_unit(duration, :native, :microsecond)
:telemetry.execute(
[:tesla, :request],
%{request_time: duration_µs},
%{result: result}
)
end
end
defp emit_exception(duration, metadata) do
:telemetry.execute(
[:tesla, :request, :exception],
%{duration: duration},
metadata
)
end
end
end
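# Hedged usage sketch (not part of the original file): attaching a handler for the
# [:tesla, :request, :stop] event documented above. The handler id, module name, and
# log format are illustrative assumptions; only the event name, measurement, and
# metadata keys come from the middleware itself.
defmodule MyApp.TeslaTelemetryHandler do
  require Logger

  def attach do
    :telemetry.attach("my-app-tesla-stop", [:tesla, :request, :stop], &__MODULE__.handle_event/4, nil)
  end

  def handle_event([:tesla, :request, :stop], %{duration: duration}, %{env: env}, _config) do
    # Durations are emitted in native time units; convert before logging.
    ms = System.convert_time_unit(duration, :native, :millisecond)
    Logger.info("#{env.method} #{env.url} completed in #{ms}ms")
  end
end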
# source: lib/tesla/middleware/telemetry.ex
defmodule ExWire.Packet.Hello do
@moduledoc """
This packet establishes capabilities and other session parameters between two
peer-to-peer clients. It is generally required to be the first signed packet
communicated after the handshake is complete.
```
**Hello** `0x00` [`p2pVersion`: `P`, `clientId`: `B`, [[`cap1`: `B_3`, `capVersion1`: `P`], [`cap2`: `B_3`, `capVersion2`: `P`], ...], `listenPort`: `P`, `nodeId`: `B_64`]
First packet sent over the connection, and sent once by both sides. No other messages
may be sent until a `Hello` is received.
* `p2pVersion` Specifies the implemented version of the P2P protocol. Currently must be 1.
* `clientId` Specifies the client software identity, as a human-readable string (e.g. "Ethereum(++)/1.0.0").
* `cap` Specifies a peer capability name as a length-3 ASCII string. Currently supported capabilities are eth, shh.
* `capVersion` Specifies a peer capability version as a positive integer. Currently supported versions are 34 for eth, and 1 for shh.
* `listenPort` specifies the port that the client is listening on (on the interface that the present connection traverses). If 0 it indicates the client is not listening.
* `nodeId` is the Unique Identity of the node and specifies a 512-bit hash that identifies this node.
```
"""
require Logger
alias ExWire.Config
alias ExWire.Struct.Endpoint
@behaviour ExWire.Packet
@type cap :: {String.t(), integer()}
@type t :: %__MODULE__{
p2p_version: integer(),
client_id: String.t(),
caps: [cap],
listen_port: integer(),
node_id: ExWire.node_id()
}
defstruct [
:p2p_version,
:client_id,
:caps,
:listen_port,
:node_id
]
@doc """
Given a Hello packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.Hello{p2p_version: 10, client_id: "Exthereum/Test", caps: [{"eth", 1}, {"par", 2}], listen_port: 5555, node_id: <<5>>}
...> |> ExWire.Packet.Hello.serialize
...> |> Enum.take(5)
[10, "Exthereum/Test", [["eth", 1], ["par", 2]], 5555, <<5>>]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(packet = %__MODULE__{}) do
[
packet.p2p_version,
packet.client_id,
for({cap, ver} <- packet.caps, do: [cap, ver]),
packet.listen_port,
packet.node_id,
"#{Config.local_ip() |> Endpoint.ip_to_string()}:#{Config.listen_port()}"
]
end
@doc """
Given an RLP-encoded Hello packet from Eth Wire Protocol,
decodes into a Hello struct.
## Examples
iex> ExWire.Packet.Hello.deserialize([<<10>>, "Exthereum/Test", [["eth", <<1>>], ["par", <<2>>]], <<55>>, <<5>>])
%ExWire.Packet.Hello{p2p_version: 10, client_id: "Exthereum/Test", caps: [{"eth", 1}, {"par", 2}], listen_port: 55, node_id: <<5>>}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[p2p_version, client_id, caps, listen_port, node_id | _rest] = rlp
%__MODULE__{
p2p_version: p2p_version |> :binary.decode_unsigned(),
client_id: client_id,
caps: for([cap, ver] <- caps, do: {cap, ver |> :binary.decode_unsigned()}),
listen_port: listen_port |> :binary.decode_unsigned(),
node_id: node_id
}
end
@doc """
Handles a Hello message. We can mark a peer as active for communication
after we receive this message.
## Examples
iex> %ExWire.Packet.Hello{p2p_version: 10, client_id: "Exthereum/Test", caps: [{"eth", 1}, {"par", 2}], listen_port: 5555, node_id: <<5>>}
...> |> ExWire.Packet.Hello.handle()
:activate
# When no caps
iex> %ExWire.Packet.Hello{p2p_version: 10, client_id: "Exthereum/Test", caps: [], listen_port: 5555, node_id: <<5>>}
...> |> ExWire.Packet.Hello.handle()
{:disconnect, :useless_peer}
"""
@spec handle(ExWire.Packet.packet()) :: ExWire.Packet.handle_response()
def handle(packet = %__MODULE__{}) do
_ =
if System.get_env("TRACE"),
do: _ = Logger.debug(fn -> "[Packet] Got Hello: #{inspect(packet)}" end)
if packet.caps == [] do
_ =
Logger.debug(fn ->
"[Packet] Disconnecting due to no matching peer caps (#{inspect(packet.caps)})"
end)
{:disconnect, :useless_peer}
else
# TODO: Add a bunch more checks
:activate
end
end
end
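# Hedged usage sketch (not part of the original file): serializing a Hello packet.
# The capability and port values are illustrative, and the call assumes ExWire's
# application config is available for the trailing "ip:port" field that serialize/1
# appends after the five struct-derived fields.
hello = %ExWire.Packet.Hello{
  p2p_version: 10,
  client_id: "Exthereum/Test",
  caps: [{"eth", 62}],
  listen_port: 30303,
  node_id: <<5>>
}

[10, "Exthereum/Test", [["eth", 62]], 30303, <<5>>] =
  hello |> ExWire.Packet.Hello.serialize() |> Enum.take(5)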
# source: apps/ex_wire/lib/ex_wire/packet/hello.ex
defmodule Bolt.Cogs.Temprole do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.Converters
alias Bolt.ErrorFormatters
alias Bolt.Events.Handler
alias Bolt.Helpers
alias Bolt.Humanizer
alias Bolt.ModLog
alias Bolt.Parsers
alias Bolt.Repo
alias Bolt.Schema.Infraction
alias Nosedrum.Predicates
alias Nostrum.Api
import Ecto.Query, only: [from: 2]
@impl true
def usage, do: ["temprole <user:member> <role:role> <duration:duration> [reason:str...]"]
@impl true
def description,
do: """
Temporarily apply the given role to the given user.
An infraction is stored in the infraction database, and can be retrieved later.
Requires the `MANAGE_ROLES` permission.
**Examples**:
```rs
// apply the role "Shitposter" to Dude for 24 hours
temprole @Dude#0001 Shitposter 24h
// the same thing, but with a specified reason
temprole @Dude#0001 Shitposter 24h spamming pictures
```
"""
@impl true
def predicates,
do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_roles)]
@impl true
def command(msg, [user, role, duration | reason_list]) do
response =
with reason <- Enum.join(reason_list, " "),
{:ok, member} <- Converters.to_member(msg.guild_id, user),
{:ok, role} <- Converters.to_role(msg.guild_id, role),
query <-
from(
infr in Infraction,
where:
infr.active and infr.user_id == ^member.user.id and
infr.guild_id == ^msg.guild_id and infr.type == "temprole" and
fragment("data->'role_id' = ?", ^role.id),
limit: 1,
select: {infr.id, infr.expires_at}
),
[] <- Repo.all(query),
{:ok, expiry} <- Parsers.human_future_date(duration),
{:ok} <-
Api.add_guild_member_role(
msg.guild_id,
member.user.id,
role.id
),
infraction_map <- %{
type: "temprole",
guild_id: msg.guild_id,
user_id: member.user.id,
actor_id: msg.author.id,
reason: if(reason != "", do: reason, else: nil),
expires_at: expiry,
data: %{
"role_id" => role.id
}
},
{:ok, _created_infraction} <- Handler.create(infraction_map) do
ModLog.emit(
msg.guild_id,
"INFRACTION_CREATE",
"#{Humanizer.human_user(msg.author)} applied temporary role" <>
" #{Humanizer.human_role(msg.guild_id, role)} to #{Humanizer.human_user(member.user)}" <>
" until #{Helpers.datetime_to_human(expiry)}" <>
if(reason != "", do: " with reason `#{reason}`", else: "")
)
response =
"👌 temporary role #{Humanizer.human_role(msg.guild_id, role)} applied to " <>
"#{Humanizer.human_user(member.user)} until #{Helpers.datetime_to_human(expiry)}"
if reason != "" do
response <> " with reason `#{Helpers.clean_content(reason)}`"
else
response
end
else
{:ok, false} ->
"🚫 you need to be above the target user in the role hierarchy"
[{existing_id, existing_expiry}] ->
"❌ there already is an infraction applying that role under ID ##{existing_id}" <>
" which will expire on #{Helpers.datetime_to_human(existing_expiry)}"
error ->
ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, _incorrect_args) do
response = "ℹ️ usage: `temprole <user:member> <role:role> <duration:duration> [reason:str...]`"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
# source: lib/bolt/cogs/temprole.ex
defmodule RakNet.ReliabilityLayer.Reliability do
@moduledoc "Taken from RakNet's PacketPriority.h"
@names_and_vals %{
:unreliable => 0,
:unreliable_sequenced => 1,
:reliable => 2,
:reliable_ordered => 3,
:reliable_sequenced => 4,
# These are the same as unreliable/reliable/reliable ordered, except that the business logic provider
# will get an :ack message when the client acknowledges receipt
:unreliable_ack_receipt => 5,
:reliable_ack_receipt => 6,
:reliable_ordered_ack_receipt => 7
}
@vals_and_names Map.new(@names_and_vals, fn {name, val} -> {val, name} end)
@doc """
The message name atom for this binary message; :error if we don't recognize it
"""
def name(reliability_binary) when is_integer(reliability_binary), do: Map.get(@vals_and_names, reliability_binary, :error)
def binary(reliability_name) when is_atom(reliability_name), do: Map.fetch!(@names_and_vals, reliability_name)
# Packet stores the wire value in its struct, so accept both integers and atoms here.
def valid?(reliability_binary) when is_integer(reliability_binary), do: Map.has_key?(@vals_and_names, reliability_binary)
def valid?(reliability_atom) when is_atom(reliability_atom), do: Map.has_key?(@names_and_vals, reliability_atom)
def is_reliable?(reliability_binary) when is_integer(reliability_binary), do: reliability_binary in [2, 3, 4, 6, 7]
def is_reliable?(reliability_atom) when is_atom(reliability_atom), do: binary(reliability_atom) in [2, 3, 4, 6, 7]
def is_ordered?(reliability_binary) when is_integer(reliability_binary), do: reliability_binary == 3
def is_ordered?(reliability_atom) when is_atom(reliability_atom), do: reliability_atom == :reliable_ordered
def is_sequenced?(reliability_binary) when is_integer(reliability_binary), do: reliability_binary in [1, 3, 4, 7]
def is_sequenced?(reliability_atom) when is_atom(reliability_atom), do: binary(reliability_atom) in [1, 3, 4, 7]
def needs_client_ack?(reliability_binary) when is_integer(reliability_binary), do: reliability_binary in [5, 6, 7]
def needs_client_ack?(reliability_atom) when is_atom(reliability_atom), do: binary(reliability_atom) in [5, 6, 7]
end
defmodule RakNet.ReliabilityLayer.Packet do
@moduledoc "See ReliabilityLayer.cpp, ReliabilityLayer::WriteToBitStreamFromInternalPacket()"
alias RakNet.ReliabilityLayer.Reliability
@enforce_keys [:reliability, :buffer]
defstruct priority: 4,
reliability: Reliability.binary(:reliable_ordered),
has_split: 0,
length: -1,
# Used for internal packets only
identifier_ack: nil,
# Used for all reliable types
message_index: nil,
# Used for UNRELIABLE_SEQUENCED, RELIABLE_SEQUENCED
sequencing_index: nil,
# Used for UNRELIABLE_SEQUENCED, RELIABLE_SEQUENCED, RELIABLE_ORDERED.
order_index: nil,
order_channel: 0,
# Split packets only
split_count: nil,
split_id: nil,
split_index: nil,
# The actual (encapsulated) packet data
buffer: nil
# credo:disable-for-next-line
def valid?(%RakNet.ReliabilityLayer.Packet{} = p) do
msg_idx_ok = not Reliability.is_reliable?(p.reliability) or (is_integer(p.message_index) and p.message_index >= 0)
order_idx_ok = not Reliability.is_sequenced?(p.reliability) or (is_integer(p.order_index) and p.order_index >= 0)
split_ok =
p.has_split == 0 or
(is_integer(p.split_count) and p.split_count > 0 and
is_integer(p.split_id) and is_integer(p.split_index))
msg_idx_ok and order_idx_ok and split_ok and
p.priority >= 0 and p.priority < 0xF and
Reliability.valid?(p.reliability) and p.buffer != nil
end
end
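# Hedged usage sketch (not part of the original files): mapping reliability names to
# wire values and validating a reliable-ordered packet. The buffer bytes are illustrative.
alias RakNet.ReliabilityLayer.{Packet, Reliability}

3 = Reliability.binary(:reliable_ordered)
:reliable_ordered = Reliability.name(3)
true = Reliability.is_reliable?(:reliable_ordered)

packet = %Packet{
  reliability: Reliability.binary(:reliable_ordered),
  # Reliable types need a message index; ordered/sequenced types need an order index.
  message_index: 0,
  order_index: 0,
  buffer: <<0x01, 0x02, 0x03>>
}

true = Packet.valid?(packet)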
# source: lib/reliability_layer.ex
defmodule Temple.Elements do
@moduledoc """
This module contains the primitives used to generate the macros in the `Temple.Html` and `Temple.Svg` modules.
"""
@doc """
Defines an element.
*Note*: Underscores are converted to dashes.
```elixir
defmodule MyElements do
import Temple.Elements
defelement :super_select, :nonvoid # <super-select></super-select>
defelement :super_input, :void # <super-input>
end
```
"""
defmacro defelement(name, type)
defmacro defelement(name, :nonvoid) do
quote location: :keep do
defmacro unquote(name)() do
Temple.Elements.nonvoid_element(unquote(name))
end
@doc false
defmacro unquote(name)(attrs_or_content_or_block)
defmacro unquote(name)([{:do, _inner}] = block) do
Temple.Elements.nonvoid_element(unquote(name), block)
end
defmacro unquote(name)(attrs_or_content) do
Temple.Elements.nonvoid_element(unquote(name), attrs_or_content)
end
@doc false
defmacro unquote(name)(attrs_or_content, block_or_attrs)
defmacro unquote(name)(attrs, [{:do, _inner}] = block) do
Temple.Elements.nonvoid_element(unquote(name), attrs, block)
end
defmacro unquote(name)(content, attrs) do
Temple.Elements.nonvoid_element(unquote(name), content, attrs)
end
end
end
defmacro defelement(name, :void) do
quote location: :keep do
defmacro unquote(name)(attrs \\ []) do
Temple.Elements.void_element(unquote(name), attrs)
end
end
end
@doc false
def nonvoid_element(el) do
quote location: :keep do
Temple.Utils.put_open_tag(var!(buff, Temple.Html), unquote(el), [])
Temple.Utils.put_close_tag(var!(buff, Temple.Html), unquote(el))
end
end
@doc false
def nonvoid_element(el, attrs_or_content_or_block)
def nonvoid_element(el, [{:do, inner}]) do
quote location: :keep do
Temple.Utils.put_open_tag(var!(buff, Temple.Html), unquote(el), [])
_ = unquote(inner)
Temple.Utils.put_close_tag(var!(buff, Temple.Html), unquote(el))
end
end
def nonvoid_element(el, attrs_or_content) do
quote location: :keep do
Temple.Utils.put_open_tag(var!(buff, Temple.Html), unquote(el), unquote(attrs_or_content))
Temple.Utils.put_close_tag(var!(buff, Temple.Html), unquote(el))
end
end
@doc false
def nonvoid_element(el, attrs_or_content, block_or_attrs)
def nonvoid_element(el, attrs, [{:do, inner}] = _block) do
quote location: :keep do
Temple.Utils.put_open_tag(var!(buff, Temple.Html), unquote_splicing([el, attrs]))
_ = unquote(inner)
Temple.Utils.put_close_tag(var!(buff, Temple.Html), unquote(el))
end
end
def nonvoid_element(el, content, attrs) do
quote location: :keep do
Temple.Utils.put_open_tag(var!(buff, Temple.Html), unquote_splicing([el, attrs]))
text unquote(content)
Temple.Utils.put_close_tag(var!(buff, Temple.Html), unquote(el))
end
end
@doc false
def void_element(el, attrs \\ []) do
quote location: :keep do
Temple.Utils.put_void_tag(var!(buff, Temple.Html), unquote_splicing([el, attrs]))
end
end
end
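# Hedged usage sketch (not part of the original file): defining the custom elements
# from the moduledoc example. Rendering them relies on Temple's buffer conventions
# (the `buff` variable in the Temple.Html context), which are assumed rather than
# shown here.
defmodule MyElements do
  import Temple.Elements

  defelement :super_select, :nonvoid  # renders <super-select>...</super-select>
  defelement :super_input, :void      # renders <super-input>
end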
# source: lib/temple/elements.ex
defmodule Detector do
alias GameSettings, as: GS
@type game_state_result :: {:win, atom, atom} | {:draw} | {:none}
@spec game_state(list, {integer, integer}, atom) :: game_state_result
def game_state(board, coord, colour) do
detection_results = {
(vertical_win? board, colour, coord),
(horizontal_win? board, colour, coord),
(diagonal_win? board, colour, coord),
(draw? board)
}
case detection_results do
{true, _, _, _} -> {:win, colour, :vertical}
{false, true, _, _} -> {:win, colour, :horizontal}
{_, _, true, _} -> {:win, colour, :diagonal}
{false, false, false, true} -> {:draw}
{false, false, false, false} -> {:none}
end
end
def vertical_win?(board, colour, {x, _y}) do
Enum.at(board, x) |> is_group_a_winner?(colour)
end
def horizontal_win?(board, colour, {_x, y}) do
Board.convert(board, :horizontal)
|> Enum.at(y)
|> is_group_a_winner?(colour)
end
def diagonal_win?(board, colour, coord) do
{move_column_index, move_row_index} = coord
flat_board = List.flatten board
max_grid_index = length(flat_board) - 1
starting_index = (move_column_index * GS.max_column_index) + move_row_index
[7,9,-7,-9]
|> get_all_indexes(starting_index, max_grid_index)
|> indexes_to_grid_entries(flat_board, [])
|> is_any_row_a_winner?(colour)
end
def draw?(board) do
draw =
board
|> List.flatten()
|> Enum.any?(&(&1 == :empty))
!draw
end
def is_group_a_winner?(row, colour) do
winning_count =
List.foldl(row, 0, fn entry, acc ->
case {entry, acc} do
{_, 4} -> 4
{^colour, _} -> acc + 1
{_, _} -> 0
end
end)
winning_count >= 4
end
def get_all_indexes(increment_list, starting_index, max_grid_index) do
Enum.map(increment_list, fn i -> diagonal_indexes(i, starting_index, max_grid_index, []) end)
end
def indexes_to_grid_entries([], _flat_board, acc), do: acc
def indexes_to_grid_entries([head|tail], flat_board, acc) do
updated_acc = acc ++ [(Enum.map(head, fn (i) -> Enum.at(flat_board, i) end))]
indexes_to_grid_entries(tail, flat_board, updated_acc)
end
def diagonal_indexes(increment, current_index, max_grid_index, acc) do
case current_index do
i when i < 0 or i > max_grid_index -> acc
i ->
this_acc = acc ++ [i]
diagonal_indexes(increment, (i + increment), max_grid_index, this_acc)
end
end
def is_any_row_a_winner?(board, colour) do
Enum.any?(board, fn row -> is_group_a_winner?(row, colour) end)
end
end
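# Hedged usage sketch (not part of the original file): checking one column for four
# in a row. Cells are assumed to be colour atoms or :empty, matching draw?/1 above.
column = [:red, :red, :red, :red, :empty, :empty]
true = Detector.is_group_a_winner?(column, :red)
false = Detector.is_group_a_winner?(column, :yellow)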
# source: lib/detector.ex
defmodule Plotex.Output.Svg do
require Logger
alias Plotex.Output.Options
alias Plotex.Output.Formatter
use Phoenix.HTML
@doc """
Default example CSS Styling.
"""
def default_css() do
"""
.plx-labels {
text-anchor: middle;
dominant-baseline: central;
}
.plx-graph {
height: 500px;
width: 800px;
stroke-width: 1.0;
}
.plx-graph .plx-grid {
stroke: #ccc;
stroke-dasharray: 0;
stroke-width: 1.0;
}
.plx-grid-lines {
stroke-width: 0.1;
}
.plx-ticks {
stroke: #ccc;
stroke-dasharray: 0;
stroke-width: 0.5;
}
.plx-labels {
font-size: 3px;
}
.plx-labels .plx-x-labels {
font-size: 1px;
}
.plx-label-title {
font-size: 8px;
font-weight: bold;
text-transform: uppercase;
fill: black;
}
.plx-data .plx-data-point {
fill: darkblue;
stroke-width: 1.0;
}
.plx-data .plx-data-line {
stroke: #0074d9;
stroke-width: 0.05em;
stroke-linecap: round;
fill: none;
}
"""
end
@doc """
Primary function to generate SVG plots from a given Plotex structure. The SVG can be
styled using standard CSS. Options include the ability to set the tick rotation and offset.
The overall SVG structure and CSS classes that can be used to style the SVG graph are:
```sass
.plx-graph
.plx-title
.plx-label-title
.plx-labels
.plx-x-labels
.plx-y-labels
.plx-grid
.plx-data
.plx-dataset-<n>
.plx-data-point
.plx-data-line
```
The generated SVG includes both a polyline connecting each dataset and the individual
data points rendered as either `rect` or `circle` markers via `opts.data.type = :rect | :circle`.
"""
def generate(%Plotex{} = plot, %Options{} = opts) do
xaxis = plot.config.xaxis
yaxis = plot.config.yaxis
xfmt = plot.config.xaxis.formatter
yfmt = plot.config.yaxis.formatter
assigns =
plot
|> Map.from_struct()
|> Map.put(:opts, opts)
|> Map.put(:ds, 1.5)
~E"""
<svg version="1.2" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
viewBox="0 -100 <%= @opts.width %> <%= @opts.height %>"
<%= for {attr, val} <- @opts.svg_attrs do %> <%= raw ~s{#{attr}="#{val}"} %> <% end %> >
<title class="plx-title"><%= @config.title %></title>
<%= for item <- @opts.custom_svg do %>
<%= item %>
<% end %>
<defs>
<%= for {dataset, idx} <- @datasets do %>
<%= case Options.data(@opts,idx).shape do %>
<% :circle -> %>
<!-- simple dot marker definition -->
<marker id="marker-<%= idx %>" viewBox="0 0 <%= 2 * Options.data(@opts, idx).width %> <%= 2 * Options.data(@opts, idx).width %>"
refX="<%= Options.data(@opts, idx).width %>" refY="<%= Options.data(@opts, idx).width %>"
markerWidth="<%= Options.data(@opts, idx).width %>" markerHeight="<%= Options.data(@opts, idx).width %>">
<circle class="plx-data-point "
cx="<%= Options.data(@opts, idx).width %>"
cy="<%= Options.data(@opts, idx).width %>"
r="<%= Options.data(@opts, idx).width %>"
/>
</marker>
<% :arrow -> %>
<!-- arrowhead marker definition -->
<marker id="marker-<%= idx %>" viewBox="0 0 <%= 2 * Options.data(@opts, idx).width %> <%= 2 * Options.data(@opts, idx).height %>"
refX="<%= Options.data(@opts, idx).width %>" refY="<%= Options.data(@opts, idx).height %>"
markerWidth="<%= Options.data(@opts, idx).width %>" markerHeight="<%= Options.data(@opts, idx).height %>"
orient="auto-start-reverse">
<path d="M 0 0 L <%= Options.data(@opts, idx).width %> <%= Options.data(@opts, idx).width/2 %> L 0 <%= Options.data(@opts, idx).width %> z" />
<rect class="plx-data-point "
x="<%= Options.data(@opts, idx).width %>"
y="<%= Options.data(@opts, idx).height %>"
width="<%= Options.data(@opts, idx).width %>"
height="<%= Options.data(@opts, idx).height %>"
/>
</marker>
<% _rect_default -> %>
<!-- simple dot marker definition -->
<marker id="marker-<%= idx %>" viewBox="0 0 <%= 2 * Options.data(@opts, idx).width %> <%= 2 * Options.data(@opts, idx).height %>"
refX="<%= Options.data(@opts, idx).width %>" refY="<%= Options.data(@opts, idx).height %>"
markerWidth="<%= Options.data(@opts, idx).width %>" markerHeight="<%= Options.data(@opts, idx).height %>">
<rect class="plx-data-point "
x="<%= Options.data(@opts, idx).width/2 %>"
y="<%= Options.data(@opts, idx).height/2 %>"
width="<%= Options.data(@opts, idx).width %>"
height="<%= Options.data(@opts, idx).height %>"
/>
</marker>
<% end %>
<% end %>
</defs>
<!-- X Axis -->
<g class="plx-grid plx-x-axis ">
<g class="plx-border">
<line x1="<%= @config.xaxis.view.start %>"
x2="<%= @config.xaxis.view.stop %>"
y1="-<%= @config.yaxis.view.start %>"
y2="-<%= @config.yaxis.view.start %>" >
</line>
</g>
<g class="plx-ticks">
<%= for {_xl, xp} <- @xticks do %>
<line
x1="<%= xp %>"
y1="-<%= @config.yaxis.view.start %>"
x2="<%= xp %>"
y2="-<%= @config.yaxis.view.start + @opts.xaxis.ticks.size %>"
>
</line>
<% end %>
</g>
<g class="plx-grid-lines">
<%= for {_xl, xp} <- @xticks do %>
<line
x1="<%= xp %>"
y1="-<%= @config.yaxis.view.start %>"
x2="<%= xp %>"
y2="-<%= @config.yaxis.view.stop %>"
>
</line>
<% end %>
</g>
</g>
<g class="plx-labels plx-x-labels">
<%= for {xl, xp} <- @xticks do %>
<text x="<%= xp %>"
y="-<%= @config.yaxis.view.start %>"
transform="rotate(<%= @opts.xaxis.label.rotate %>, <%= xp %>, -<%= @config.yaxis.view.start - @opts.xaxis.label.offset %>)"
dy="<%= @opts.xaxis.label.offset %>">
<%= Formatter.output(xfmt, xaxis, xl) %>
</text>
<% end %>
<text x="<%= (@config.xaxis.view.stop - @config.xaxis.view.start)/2.0 %>"
y="-<%= @config.yaxis.view.start/2.0 %>"
class="label-title">
<%= @config.xaxis.name %>
</text>
</g>
<!-- Y Axis -->
<g class="plx-grid plx-y-axis">
<g class="plx-border">
<line x1="<%= @config.xaxis.view.start %>"
x2="<%= @config.xaxis.view.start %>"
y1="-<%= @config.yaxis.view.start %>"
y2="-<%= @config.yaxis.view.stop %>" >
</line>
</g>
<g class="plx-ticks">
<%= for {_yl, yp} <- @yticks do %>
<line
x1="<%= @config.xaxis.view.start %>"
y1="-<%= yp %>"
x2="<%= @config.xaxis.view.start + @opts.yaxis.ticks.size %>"
y2="-<%= yp %>"
>
</line>
<% end %>
</g>
<g class="plx-grid-lines">
<%= for {_yl, yp} <- @yticks do %>
<line
x1="<%= @config.xaxis.view.start %>"
y1="-<%= yp %>"
x2="<%= @config.xaxis.view.stop %>"
y2="-<%= yp %>"
>
</line>
<% end %>
</g>
</g>
<g class="plx-labels plx-y-labels">
<%= for {yl, yp} <- @yticks do %>
<text y="-<%= yp %>"
x="<%= @config.xaxis.view.start %>"
transform="rotate(<%= @opts.yaxis.label.rotate %>, <%= @config.xaxis.view.start - @opts.yaxis.label.offset %>, -<%= yp %>)"
dx="-<%= @opts.yaxis.label.offset %>">
<%= Formatter.output(yfmt, yaxis, yl) %>
</text>
<% end %>
<text y="-<%= (@config.yaxis.view.stop - @config.yaxis.view.start)/2.0 %>"
x="<%= @config.xaxis.view.start/2.0 %>"
class="label-title">
<%= @config.yaxis.name %>
</text>
</g>
<!-- Data -->
<g class="plx-data">
<%= for {dataset, idx} <- @datasets do %>
<g class="plx-dataset-<%= idx %>" data-setname="plx-data-<%= idx %>">
<polyline class="plx-data-line"
points=" <%= for {{_xl, xp}, {_yl, yp}} <- dataset do %> <%= float(xp) %>,-<%= float(yp) %><% end %> "
marker-start="url(#marker-<%= idx %>)"
marker-mid="url(#marker-<%= idx %>)"
marker-end="url(#marker-<%= idx %>)" />
</g>
<% end %>
</g>
</svg>
"""
end
defp float(f), do: :erlang.float_to_binary(f, decimals: 3)
end
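# Hedged usage sketch (not part of the original file): rendering a plot structure to
# an SVG string. `Plotex.plot/2` and the default `Options` fields are assumptions
# about the wider Plotex API; only generate/2 and default_css/0 are defined above.
data = [{[1.0, 2.0, 3.0], [10.0, 20.0, 15.0]}]
plot = Plotex.plot(data, xaxis: [], yaxis: [])

svg_string =
  plot
  |> Plotex.Output.Svg.generate(%Plotex.Output.Options{})
  |> Phoenix.HTML.safe_to_string()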
# source: lib/plotex/output/svg.ex
defmodule JSON.LD.Decoder do
@moduledoc """
"""
use RDF.Serialization.Decoder
import JSON.LD.{NodeIdentifierMap, Utils}
alias JSON.LD.{NodeIdentifierMap, Options}
alias RDF.{BlankNode, Dataset, Graph, IRI, Literal, NS, Statement, XSD}
@impl RDF.Serialization.Decoder
@spec decode(String.t(), keyword) :: {:ok, Dataset.t() | Graph.t()} | {:error, any}
def decode(content, opts \\ []) do
with {:ok, json_ld_object} <- parse_json(content) do
dataset = to_rdf(json_ld_object, opts)
{:ok, dataset}
end
end
@dialyzer {:nowarn_function, to_rdf: 2}
@spec to_rdf(map, Options.t() | Enum.t()) :: Dataset.t() | Graph.t()
def to_rdf(element, options \\ %Options{}) do
{:ok, node_id_map} = NodeIdentifierMap.start_link()
options = Options.new(options)
try do
element
|> JSON.LD.expand(options)
|> JSON.LD.node_map(node_id_map)
|> Enum.sort_by(fn {graph_name, _} -> graph_name end)
|> Enum.reduce(Dataset.new(), fn {graph_name, graph}, dataset ->
unless relative_iri?(graph_name) do
rdf_graph =
graph
|> Enum.sort_by(fn {subject, _} -> subject end)
|> Enum.reduce(Graph.new(), fn {subject, node}, rdf_graph ->
unless relative_iri?(subject) do
node
|> Enum.sort_by(fn {property, _} -> property end)
|> Enum.reduce(rdf_graph, fn {property, values}, rdf_graph ->
cond do
property == "@type" ->
Graph.add(
rdf_graph,
{node_to_rdf(subject), NS.RDF.type(), Enum.map(values, &node_to_rdf/1)}
)
JSON.LD.keyword?(property) ->
rdf_graph
not options.produce_generalized_rdf and blank_node_id?(property) ->
rdf_graph
relative_iri?(property) ->
rdf_graph
true ->
Enum.reduce(values, rdf_graph, fn
%{"@list" => list}, rdf_graph ->
with {list_triples, first} <- list_to_rdf(list, node_id_map) do
rdf_graph
|> Graph.add({node_to_rdf(subject), node_to_rdf(property), first})
|> Graph.add(list_triples)
end
item, rdf_graph ->
case object_to_rdf(item) do
nil ->
rdf_graph
object ->
Graph.add(
rdf_graph,
{node_to_rdf(subject), node_to_rdf(property), object}
)
end
end)
end
end)
else
rdf_graph
end
end)
if Enum.empty?(rdf_graph) do
dataset
else
graph_name = if graph_name == "@default", do: nil, else: graph_name
Dataset.add(dataset, rdf_graph, graph: graph_name)
end
else
dataset
end
end)
after
NodeIdentifierMap.stop(node_id_map)
end
end
@spec parse_json(String.t(), [Jason.decode_opt()]) ::
{:ok, map} | {:error, Jason.DecodeError.t()}
def parse_json(content, _opts \\ []) do
Jason.decode(content)
end
@spec parse_json!(String.t(), [Jason.decode_opt()]) :: map
def parse_json!(content, _opts \\ []) do
Jason.decode!(content)
end
@spec node_to_rdf(String.t()) :: IRI.t() | BlankNode.t()
def node_to_rdf(node) do
if blank_node_id?(node) do
node
|> String.trim_leading("_:")
|> RDF.bnode()
else
RDF.uri(node)
end
end
@spec object_to_rdf(map) :: IRI.t() | BlankNode.t() | Literal.t() | nil
defp object_to_rdf(%{"@id" => id}) do
unless relative_iri?(id), do: node_to_rdf(id)
end
defp object_to_rdf(%{"@value" => value} = item) do
datatype = item["@type"]
{value, datatype} =
cond do
is_boolean(value) ->
value =
value
|> XSD.Boolean.new()
|> XSD.Boolean.canonical()
|> XSD.Boolean.lexical()
datatype = if is_nil(datatype), do: NS.XSD.boolean(), else: datatype
{value, datatype}
is_float(value) or (is_number(value) and datatype == to_string(NS.XSD.double())) ->
value =
value
|> XSD.Double.new()
|> XSD.Double.canonical()
|> XSD.Double.lexical()
datatype = if is_nil(datatype), do: NS.XSD.double(), else: datatype
{value, datatype}
is_integer(value) or (is_number(value) and datatype == to_string(NS.XSD.integer())) ->
value =
value
|> XSD.Integer.new()
|> XSD.Integer.canonical()
|> XSD.Integer.lexical()
datatype = if is_nil(datatype), do: NS.XSD.integer(), else: datatype
{value, datatype}
is_nil(datatype) ->
datatype =
if Map.has_key?(item, "@language"), do: RDF.langString(), else: NS.XSD.string()
{value, datatype}
true ->
{value, datatype}
end
if language = item["@language"] do
Literal.new(value, language: language, canonicalize: true)
else
Literal.new(value, datatype: datatype, canonicalize: true)
end
end
@spec list_to_rdf([map], pid) :: {[Statement.t()], IRI.t() | BlankNode.t()}
defp list_to_rdf(list, node_id_map) do
{list_triples, first, last} =
Enum.reduce(list, {[], nil, nil}, fn item, {list_triples, first, last} ->
case object_to_rdf(item) do
nil ->
{list_triples, first, last}
object ->
bnode = node_to_rdf(generate_blank_node_id(node_id_map))
if last do
{
list_triples ++
[{last, NS.RDF.rest(), bnode}, {bnode, NS.RDF.first(), object}],
first,
bnode
}
else
{
list_triples ++ [{bnode, NS.RDF.first(), object}],
bnode,
bnode
}
end
end
end)
if last do
{list_triples ++ [{last, NS.RDF.rest(), NS.RDF.nil()}], first}
else
{[], NS.RDF.nil()}
end
end
# This is a much nicer and faster version, but the blank node numbering is reversed.
# Although this isn't relevant, I prefer to be more spec conform (for now).
# defp list_to_rdf(list, node_id_map) do
# list
# |> Enum.reverse
# |> Enum.reduce({[], RDF.NS.RDF.nil}, fn (item, {list_triples, last}) ->
# case object_to_rdf(item) do
# nil -> {list_triples, last}
# object ->
# with bnode = node_to_rdf(generate_blank_node_id(node_id_map)) do
# {
# [{bnode, RDF.NS.RDF.first, object},
# {bnode, RDF.NS.RDF.rest, last } | list_triples],
# bnode
# }
# end
# end
# end)
# end
end
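# Hedged usage sketch (not part of the original file): decoding a small JSON-LD
# document into an RDF.Dataset. The schema.org IRI and example IDs are illustrative.
json = """
{
  "@context": {"name": "http://schema.org/name"},
  "@id": "http://example.com/alice",
  "name": "Alice"
}
"""

{:ok, dataset} = JSON.LD.Decoder.decode(json)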
# source: lib/json/ld/decoder.ex
defmodule CanvasAPI.Markdown do
@moduledoc """
A block-level Markdown parser for creating canvases from Markdown.
"""
@cl_item ~r/\A(?<indent>\s*)[*+\-] \[(?<check>[xX ])\] (?<content>.+)\z/
@code_fence ~r/\A```(?<lang>\S*)\z/
@heading ~r/\A(?<hashes>\#{1,6})\s+(?<content>.+)\z/
@horizontal_rule ~r/\A(?:- *)+\z/
@image ~r[\Ahttps?://\S*\.(?:gif|jpg|jpeg|png)(?:\?\S*)?\z]i
@ul_item ~r/\A(?<indent>\s*)[*+\-] (?<content>.+)\z/
@url ~r[\Ahttps?://.*\z]i
@spec parse(String.t) :: [map]
def parse(lines) do
lines
|> String.split("\n")
|> do_parse()
end
@spec do_parse([String.t], [map], Keyword.t) :: [map]
defp do_parse(lines, result \\ [], state \\ [state: :null])
# Final state: all lines parsed
defp do_parse([], result, _state), do: Enum.reverse(result)
# Title
defp do_parse([line | tail] = lines, [], state: :null) do
parsed = match_heading(line)
if parsed && parsed[:meta][:level] == 1 do
title =
parsed
|> Map.put(:type, "title")
|> Map.delete(:meta)
do_parse(tail, [title], state: :garbage)
else
do_parse(lines, [], state: :garbage)
end
end
# Blank line
defp do_parse(["" | tail], result, state: :garbage) do
do_parse(tail, result, state: :garbage)
end
# Garbage state
@lint {Credo.Check.Refactor.CyclomaticComplexity, false}
defp do_parse([line | tail], result, state: :garbage) do
{parsed, new_state} =
cond do
parsed = match_heading(line) ->
{parsed, state: :garbage}
Regex.match?(@horizontal_rule, line) ->
{%{type: "horizontal-rule"}, state: :garbage}
match = match_code_fence(line) ->
{%{type: "code", content: "", meta: %{language: elem(match, 1)}},
state: :code}
stripped_line = match_indented_code(line, "\t") ->
{%{type: "code", content: stripped_line, meta: %{language: nil}},
state: :code, indent: "\t"}
stripped_line = match_indented_code(line, " ") ->
{%{type: "code", content: stripped_line, meta: %{language: nil}},
state: :code, indent: " "}
parsed = match_list_item(line) ->
{%{type: "list", blocks: [parsed]}, state: :list}
Regex.match?(@image, line) ->
{%{type: "image", meta: %{url: line}}, state: :garbage}
Regex.match?(@url, line) ->
{%{type: "url", meta: %{url: line}}, state: :garbage}
true ->
{%{type: "paragraph", content: line}, state: :garbage}
end
do_parse(tail, [parsed | result], new_state)
end
# Code state
defp do_parse([line | tail] = lines, [code | result_tail] = result, state: :code, indent: indent) do
if line = match_indented_code(line, indent) do
content = "#{code[:content]}\n#{line}"
code = Map.put(code, :content, content)
do_parse(tail, [code | result_tail], state: :code, indent: indent)
else
do_parse(lines, result, state: :garbage)
end
end
defp do_parse([line | tail], [code | result_tail] = result, state: :code) do
if match_code_fence(line) do
do_parse(tail, result, state: :garbage)
else
content =
if code[:content] == "" do
line
else
"#{code[:content]}\n#{line}"
end
code = Map.put(code, :content, content)
do_parse(tail, [code | result_tail], state: :code)
end
end
# List state
defp do_parse([line | tail] = lines, [list | result_tail] = result, state: :list) do
if parsed = match_list_item(line) do
blocks = list[:blocks] ++ [parsed]
list = Map.put(list, :blocks, blocks)
do_parse(tail, [list | result_tail], state: :list)
else
do_parse(lines, result, state: :garbage)
end
end
@spec match_indented_code(String.t, String.t) :: String.t | nil
defp match_indented_code(line, indent) do
if String.starts_with?(line, indent) do
String.replace_prefix(line, indent, "")
else
nil
end
end
@spec match_code_fence(String.t) :: {:ok, String.t | nil} | nil
defp match_code_fence(line) do
if match = Regex.named_captures(@code_fence, line) do
lang = if match["lang"] == "", do: nil, else: match["lang"]
{:ok, lang}
end
end
@spec match_heading(String.t) :: map | nil
defp match_heading(line) do
if match = Regex.named_captures(@heading, line) do
meta = %{level: String.length(match["hashes"])}
%{type: "heading", content: match["content"], meta: meta}
end
end
@spec match_list_item(String.t) :: map | nil
defp match_list_item(line) do
cond do
match = Regex.named_captures(@cl_item, line) ->
level = match["indent"] |> String.length |> div(2) |> Kernel.+(1)
checked = match["check"] |> String.downcase |> String.contains?("x")
meta = %{level: level, checked: checked}
%{type: "checklist-item", content: match["content"], meta: meta}
match = Regex.named_captures(@ul_item, line) ->
level = match["indent"] |> String.length |> div(2) |> Kernel.+(1)
meta = %{level: level}
%{type: "unordered-list-item", content: match["content"], meta: meta}
true ->
nil
end
end
end
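# Hedged usage sketch (not part of the original file): the first level-1 heading
# becomes a "title" block, checklist items are grouped into a "list" block, and
# remaining lines fall through to "paragraph" blocks.
[
  %{type: "title", content: "Notes"},
  %{type: "list", blocks: [%{type: "checklist-item", content: "ship it", meta: %{level: 1, checked: true}}]},
  %{type: "paragraph", content: "Plain text."}
] = CanvasAPI.Markdown.parse("# Notes\n\n- [x] ship it\nPlain text.")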
# source: lib/canvas_api/markdown.ex
defmodule Tempo do
@moduledoc """
Documentation for `Tempo`.
### Terminology
The following terms, defined by ISO 8601, are used throughout
Tempo. For further information consult:
* [ISO Online browsing platform](https://www.iso.org/obp)
* [IEC Electropedia](http://www.electropedia.org/)
#### Date
A [time](#time) on the calendar time scale. Common forms of date include calendar date,
ordinal date or week date.
#### Time
A mark attributed to an [instant](#instant) or a [time interval](#time_interval) on a specified
[time scale](#time_scale).
The term “time” is often used in common language. However, it should only be used if the
meaning is clearly visible from the context.
On a time scale consisting of successive time intervals, such as a clock or calendar,
distinct instants may be expressed by the same time.
This definition corresponds with the definition of the term “date” in
IEC 60050-113:2011, 113-01-12.
#### Instant
A point on the [time axis](#time_axis). An instantaneous event occurs at a specific instant.
#### Time axis
A mathematical representation of the succession in time according to the space-time model
of instantaneous events along a unique axis.
According to the theory of special relativity, the time axis depends on the choice of a
spatial reference frame.
In IEC 60050-113:2011, 113-01-03, time according to the space-time model is defined to be
the one-dimensional subspace of space-time, locally orthogonal to space.
#### Time scale
A system of ordered marks which can be attributed to [instants](#instant) on the
[time axis](#time_axis), one instant being chosen as the origin.
A time scale may amongst others be chosen as:
* continuous, e.g. international atomic time (TAI) (see IEC 60050-713:1998, 713-05-18);
* continuous with discontinuities, e.g. UTC due to leap seconds, standard time due
to summer time and winter time;
* successive steps, e.g. [calendars](#calendar), where the [time axis](#time_axis) is split
up into a succession of consecutive time intervals and the same mark is attributed to all
instants of each time interval;
* discrete, e.g. in digital techniques.
#### Time interval
A part of the [time axis](#time_axis) limited by two [instants](#instant) *including, unless
otherwise stated, the limiting instants themselves*.
#### Time scale unit
A unit of measurement of a [duration](#duration)
For example:
* Calendar year, calendar month and calendar day are time scale units
of the Gregorian calendar.
* Clock hour, clock minutes and clock seconds are time scale units of the 24-hour clock.
In Tempo, time scale units are referred to by the shortened term "unit". When a "unit" is
combined with a value, the combination is referred to as a "component".
#### Duration
A non-negative quantity of time equal to the difference between the final and initial
[instants](#instant) of a [time interval](#time_interval)
The duration is one of the base quantities in the International System of Quantities (ISQ)
on which the International System of Units (SI) is based. The term “time” instead of
“duration” is often used in this context and also for an infinitesimal duration.
For the term “duration”, expressions such as “time” or “time interval” are often used,
but the term “time” is not recommended in this sense and the term “time interval” is
deprecated in this sense to avoid confusion with the concept of “time interval”.
The exact duration of a [time scale unit](#time_scale_unit) depends on the
[time scale](#time_scale) used. For example, the durations of a year, month, week,
day, hour or minute, may depend on when they occur (in a Gregorian calendar, a
calendar month can have a duration of 28, 29, 30, or 31 days; in a 24-hour clock, a
clock minute can have a duration of 59, 60, or 61 seconds, etc.). Therefore,
the exact duration can only be evaluated if the exact duration of each time scale unit involved is known.
"""
alias Tempo.Iso8601.Parser
def from_iso8601(string) do
case Parser.iso8601(string) do
{:ok, parsed, "", %{}, _line, _char} -> {:ok, parsed}
{:ok, _parsed, _rest, %{}, _line, _char} -> {:error, :invalid_format}
{:error, _message, _rest, %{}, _line, _char} -> {:error, :invalid_format}
end
end
end
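# Hedged usage sketch (not part of the original file): from_iso8601/1 only succeeds
# when the parser consumes the whole string; the shape of the parsed term is owned
# by Tempo.Iso8601.Parser and is not assumed here.
case Tempo.from_iso8601("2018-08-01") do
  {:ok, _parsed} -> :valid
  {:error, :invalid_format} -> :invalid
end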
# source: lib/tempo.ex
defmodule SteamEx.ISteamUser do
@moduledoc """
Used to access information and interact with users.
See [IPlayerService](https://partner.steamgames.com/doc/webapi/IPlayerService) for additional methods.
For more info on how to use the Steamworks Web API please see the [Web API Overview](https://partner.steamgames.com/doc/webapi_overview).
"""
import SteamEx.API.Base
@interface "ISteamUser"
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key. |
| steamid | uint64 | ✔ | SteamID of user |
| relationship | string | | relationship type (ex: friend) |
See other: [https://partner.steamgames.com/doc/webapi/ISteamUser#GetFriendList](https://partner.steamgames.com/doc/webapi/ISteamUser#GetFriendList)
"""
def get_friend_list(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetFriendList/v1/", access_key, params, headers)
end
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key. |
| steamids | string | ✔ | Comma-delimited list of SteamIDs |
See other: [https://partner.steamgames.com/doc/webapi/ISteamUser#GetPlayerBans](https://partner.steamgames.com/doc/webapi/ISteamUser#GetPlayerBans)
"""
def get_player_bans(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetPlayerBans/v1/", access_key, params, headers)
end
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key. |
| steamids | string | ✔ | Comma-delimited list of SteamIDs (max: 100) |
See other: [https://partner.steamgames.com/doc/webapi/ISteamUser#GetPlayerSummaries](https://partner.steamgames.com/doc/webapi/ISteamUser#GetPlayerSummaries)
"""
def get_player_summaries(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetPlayerSummaries/v2/", access_key, params, headers)
end
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key. |
| steamid | uint64 | ✔ | SteamID of user |
See other: [https://partner.steamgames.com/doc/webapi/ISteamUser#GetUserGroupList](https://partner.steamgames.com/doc/webapi/ISteamUser#GetUserGroupList)
"""
def get_user_group_list(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/GetUserGroupList/v1/", access_key, params, headers)
end
@doc """
| Name | Type | Required | Description |
| ---- | ---- | -------- | ----------- |
| key | string | ✔ | Steamworks Web API user authentication key. |
| vanityurl | string | ✔ | The vanity URL to get a SteamID for |
| url_type | int32 | | The type of vanity URL. 1 (default): Individual profile, 2: Group, 3: Official game group |
See other: [https://partner.steamgames.com/doc/webapi/ISteamUser#ResolveVanityURL](https://partner.steamgames.com/doc/webapi/ISteamUser#ResolveVanityURL)
"""
def resolve_vanity_url(access_key, params \\ %{}, headers \\ %{}) do
get(@interface <> "/ResolveVanityURL/v1/", access_key, params, headers)
end
end
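# Hedged usage sketch (not part of the original file): the key and SteamID are
# placeholders, and the response shape depends on SteamEx.API.Base.get/4, which
# is not shown here.
SteamEx.ISteamUser.get_player_summaries("YOUR_WEB_API_KEY", %{steamids: "76561197960435530"})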
# source: lib/interfaces/i_steam_user.ex
defmodule AWS.Macie do
@moduledoc """
Amazon Macie Classic
Amazon Macie Classic is a security service that uses machine learning to
automatically discover, classify, and protect sensitive data in AWS.
Macie Classic recognizes sensitive data such as personally identifiable
information (PII) or intellectual property, and provides you with dashboards and
alerts that give visibility into how this data is being accessed or moved. For
more information, see the [Amazon Macie Classic User Guide](https://docs.aws.amazon.com/macie/latest/userguide/what-is-macie.html).
A new Amazon Macie is now available with significant design improvements and
additional features, at a lower price and in most AWS Regions. We encourage you
to explore and use the new and improved features, and benefit from the reduced
cost. To learn about features and pricing for the new Amazon Macie, see [Amazon Macie](https://aws.amazon.com/macie/).
"""
@doc """
Associates a specified AWS account with Amazon Macie Classic as a member
account.
"""
def associate_member_account(client, input, options \\ []) do
request(client, "AssociateMemberAccount", input, options)
end
@doc """
Associates specified S3 resources with Amazon Macie Classic for monitoring and
data classification.
If memberAccountId isn't specified, the action associates specified S3 resources
with Macie Classic for the current master account. If memberAccountId is
specified, the action associates specified S3 resources with Macie Classic for
the specified member account.
"""
def associate_s3_resources(client, input, options \\ []) do
request(client, "AssociateS3Resources", input, options)
end
@doc """
Removes the specified member account from Amazon Macie Classic.
"""
def disassociate_member_account(client, input, options \\ []) do
request(client, "DisassociateMemberAccount", input, options)
end
@doc """
Removes specified S3 resources from being monitored by Amazon Macie Classic.
If memberAccountId isn't specified, the action removes specified S3 resources
from Macie Classic for the current master account. If memberAccountId is
specified, the action removes specified S3 resources from Macie Classic for the
specified member account.
"""
def disassociate_s3_resources(client, input, options \\ []) do
request(client, "DisassociateS3Resources", input, options)
end
@doc """
Lists all Amazon Macie Classic member accounts for the current Amazon Macie
Classic master account.
"""
def list_member_accounts(client, input, options \\ []) do
request(client, "ListMemberAccounts", input, options)
end
@doc """
Lists all the S3 resources associated with Amazon Macie Classic.
If memberAccountId isn't specified, the action lists the S3 resources associated
with Amazon Macie Classic for the current master account. If memberAccountId is
specified, the action lists the S3 resources associated with Amazon Macie
Classic for the specified member account.
"""
def list_s3_resources(client, input, options \\ []) do
request(client, "ListS3Resources", input, options)
end
@doc """
Updates the classification types for the specified S3 resources.
If memberAccountId isn't specified, the action updates the classification types
of the S3 resources associated with Amazon Macie Classic for the current master
account. If memberAccountId is specified, the action updates the classification
types of the S3 resources associated with Amazon Macie Classic for the specified
member account.
"""
def update_s3_resources(client, input, options \\ []) do
request(client, "UpdateS3Resources", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "macie"}
host = build_host("macie", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "MacieService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
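# Hedged usage sketch (not part of the original file): listing Macie Classic member
# accounts. `AWS.Client.create/3` is assumed from the aws-elixir library; the
# credentials and region are placeholders.
client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
{:ok, result, _http_response} = AWS.Macie.list_member_accounts(client, %{})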
# source: lib/aws/generated/macie.ex
defmodule PatternMetonyms do
@moduledoc """
Reusing the description from the paper:
Pattern synonyms allow to abstract over patterns used in pattern matching,
notably by allowing to use computation instead of being limited to concrete data.
Pattern metonyms are an implementations of this, but because of the limitation of the language,
it can not be considered the same, so metonyms was chosen as a synonym to synonyms.
Unlike in Haskell, few metonym definitions can be used with `case/2`,
other forms of patterns can only be used in combination with `view/2`.
"""
@doc """
Macro used to define pattern metonyms.
There are three types of pattern, listed below with examples:
1) Implicitly Bidirectional
The simplest type of pattern, because of its symmetry requirement, it can only
be defined using concrete data (or adapted macro). Therefore no computation is
allowed, and they are thus compatible with `case` and function heads.
They take the form:
```
pattern <name>(<[variables]>) = <pattern>
```
where _pattern_ reuses the _variables_
iex> defmodule DoctestTPX do
...> import PatternMetonyms
...>
...> pattern ok(x) = {:ok, x}
...> pattern error(x) = {:error, x}
...>
...> pattern cons(x, xs) = [x | xs]
...>
...> def foo(x) do
...> view x do
...> ok(a) -> a
...> error(b) -> b
...> end
...> end
...>
...> def bar(x) do
...> case x do
...> ok(a) -> a
...> error(b) -> b
...> end
...> end
...>
...> def baz(ok(a) ), do: a
...> def baz(error(b)), do: b
...>
...> def mk_ok(x), do: ok(x)
...>
...> def blorg(xs) do
...> view xs do
...> cons(x, xs) -> cons(x, Enum.map(xs, fn x -> -x end))
...> end
...> end
...> end
iex> DoctestTPX.foo({:ok, :banana})
:banana
iex> DoctestTPX.foo({:error, :split})
:split
iex> DoctestTPX.bar({:ok, :peach})
:peach
iex> DoctestTPX.baz({:error, :melba})
:melba
iex> DoctestTPX.mk_ok(:melba)
{:ok, :melba}
iex> DoctestTPX.blorg([1, 2, 3])
[1, -2, -3]
2) Unidirectional
This type of pattern is read only, it may be used as abstraction over pattern matching on concrete data type
that can not be reused to construct data, or as abstraction over views, as explained in `view/2`.
They take the form:
```
pattern <name>(<[variables]>) <- <pattern>
pattern <name>(<[variables]>) <- (<function> -> <pattern>)
```
where _pattern_ reuses the _variables_.
`(function -> pattern)` is called a view
iex> defmodule DoctestTPY do
...> import PatternMetonyms
...>
...> pattern head(x) <- [x | _]
...>
...> pattern rev_head(x) <- (reverse() -> head(x))
...>
...> def reverse(xs), do: Enum.reverse(xs)
...>
...> def foo(xs) do
...> view xs do
...> head(x) -> x
...> [] -> []
...> end
...> end
...>
...> def bar(x) do
...> case x do
...> head(a) -> a
...> [] -> []
...> end
...> end
...>
...> def baz(head(a)), do: a
...>
...> def blorg(xs) do
...> view xs do
...> rev_head(x) -> x
...> end
...> end
...> end
iex> DoctestTPY.foo([1, 2, 3])
1
iex> DoctestTPY.bar([1, 2, 3])
1
iex> DoctestTPY.baz([1, 2, 3])
1
iex> DoctestTPY.blorg([1, 2, 3])
3
3) Explicitly bidirectional
This type of pattern allows the same kind of abstraction as unidirectional one, but also permit defining
how to construct data from computation (if necessary).
They take the form:
```
pattern (<name>(<[variables]>) <- (<function> -> <pattern>)) when <name>(<[variables]>) = <builder>
```
where _pattern_ and _builder_ reuse the _variables_.
`(function -> pattern)` is called a view
iex> defmodule DoctestTPZ do
...> import PatternMetonyms
...>
...> pattern (snoc(x, xs) <- (unsnoc() -> {x, xs}))
...> when snoc(x, xs) = Enum.reverse([x | Enum.reverse(xs)])
...>
...> defp unsnoc([]), do: :error
...> defp unsnoc(xs) do
...> [x | rev_tail] = Enum.reverse(xs)
...> {x, Enum.reverse(rev_tail)}
...> end
...>
...> def foo(xs) do
...> view xs do
...> snoc(x, _) -> x
...> [] -> []
...> end
...> end
...>
...> def bar(xs) do
...> view xs do
...> snoc(x, xs) -> snoc(-x, xs)
...> [] -> []
...> end
...> end
...> end
iex> DoctestTPZ.foo([1, 2, 3])
3
iex> DoctestTPZ.bar([1, 2, 3])
[1, 2, -3]
Patterns using a view cannot be used with `case`.
Remote functions can be used within a view, but the `__MODULE__` alias won't work
because the expansion is not done at the usage site. It is not yet determined
which behavior is desired.
iex> defmodule DoctestTPA do
...> import PatternMetonyms
...>
...> pattern rev_head(x) <- (Enum.reverse -> [x | _])
...>
...> def blorg(xs) do
...> view xs do
...> rev_head(x) -> x
...> end
...> end
...> end
iex> DoctestTPA.blorg([1, 2, 3])
3
It is not yet known whether anonymous functions can be supported.
Guards within a pattern definition are considered undefined behavior:
they may work, but it depends on the context.
Consider that if the behavior gets a specification, it would be the removal of
the possibility of using them. Patterns using a view pattern are the recommend
approach. For example:
```
pattern heart(n) <- (less_than_3 -> {:ok, n})
```
Patterns can be documented:
```
@doc \"\"\"
heart matches when the number is heartfelt <3
\"\"\"
pattern heart(n) <- (less_than_3() -> {:ok, n})
```
You can then access the doc as usual: `h heart`, or `h Module.heart`.
"""
defmacro pattern(ast) do
PatternMetonyms.Pattern.pattern_builder(ast, __CALLER__)
end
# view
@doc """
Macro substitute for `case/2` capable of using pattern metonyms.
Custom `case` able to use pattern metonyms defined with this module.
Largely unoptimized; avoid side effects in your pattern definitions, as using them multiple
times in `view` will currently repeat them (this may change later).
View pattern (`(function -> pattern)`) may be used raw in here.
View patterns are simply a pair of a function associated with a pattern
where the function will be applied to the data passed to `view`
and the result will be matched with the pattern.
iex> import PatternMetonyms
iex> view self() do
...> (is_pid() -> true) -> :ok
...> _ -> :ko
...> end
:ok
Guards can be used outside of the view pattern or the pattern metonym.
iex> import PatternMetonyms
iex> view -3 - :rand.uniform(2) do
...> (abs() -> x) when x > 3 -> :ok
...> (abs() -> x) when x < 3 -> :ko
...> x -> x
...> end
:ok
Remote calls can be used directly within a view pattern.
iex> import PatternMetonyms
iex> view :banana do
...> (Atom.to_string() -> "ba" <> _) -> :ok
...> _ -> :ko
...> end
:ok
Anonymous functions can be used within a view pattern.
They can be either used as stored within a variable:
iex> import PatternMetonyms
iex> fun = &inspect(&1, pretty: &2)
iex> view :banana do
...> (fun.(true) -> str) -> str
...> end
":banana"
Or defined directly using `Kernel.SpecialForms.fn/1` only:
iex> import PatternMetonyms
iex> view 3 do
...> (fn x -> x + 2 end -> n) -> n + 1
...> end
6
"""
defmacro view(data, do: clauses) when is_list(clauses) do
PatternMetonyms.View.builder(data, clauses, __CALLER__)
#|> case do x -> _ = IO.puts(Macro.to_string(x)) ; x end
end
@doc """
View with anonymous functions
```elixir
import PatternMetonyms
id = fnv do (Function.identity() -> x) -> x end
```
"""
defmacro fnv(do: clauses) when is_list(clauses) do
PatternMetonyms.Fnv.builder(clauses, __CALLER__)
#|> case do x -> _ = IO.puts(Macro.to_string(x)) ; x end
end
@doc false
defmacro __using__(_opts) do
quote do
@before_compile {unquote(__MODULE__), :before_compile_defv}
import unquote(__MODULE__)
end
end
@doc """
view with named functions
```elixir
use PatternMetonyms
defv id((Function.identity() -> x)), do: x
```
"""
defmacro defv(call, [{:do, body} | rest]) do
call = case call do
{name, meta, nil} -> {name, meta, []}
call -> call
end
x = PatternMetonyms.Defv.streamline(call, body, rest)
attribute = :defv_accumulator
_ = Module.register_attribute(__CALLER__.module, attribute, accumulate: true)
_ = Module.put_attribute(__CALLER__.module, attribute, x)
end
@doc false
defmacro before_compile_defv(_env) do
defv_accumulator = Module.get_attribute(__CALLER__.module, :defv_accumulator, [])
_ = Module.delete_attribute(__CALLER__.module, :defv_accumulator)
Enum.reverse(defv_accumulator)
|> Enum.chunk_by(fn {name, arity, _clause} -> {name, arity} end)
|> Enum.map(fn xs ->
{name, _, _} = hd(xs)
clauses = Enum.map(xs, fn {_, _, clause} -> clause end)
PatternMetonyms.Defv.builder(name, clauses, __CALLER__)
end)
#|> case do x -> _ = IO.puts(Macro.to_string(x)) ; x end
end
end
# source: lib/pattern_metonyms.ex
defmodule ProxerEx.Api.User do
@moduledoc """
Contains helper methods to build requests for the user api.
"""
use ProxerEx.Api.Base, api_class: "user"
api_func "about" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get About``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.about(uid: 1337)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "about",
authorization: false,
extra_header: [],
get_args: %{uid: 1337},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.about()
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "about",
authorization: false,
extra_header: [],
get_args: %{},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, optional: true, not_with: ["username"])
parameter("username", :get, optional: true, not_with: ["uid"])
end
api_func "checkauth" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Check Authentification``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> code = 1..5 |> Enum.reduce("", fn _, acc -> acc <> "0" end)
iex> ProxerEx.Api.User.checkauth(name: "<NAME>", code: code)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "checkauth",
authorization: false,
extra_header: [],
get_args: %{name: "<NAME>"},
method: :post,
post_args: [
code: "00000"
]
}}
""")
parameter("name", :get)
parameter("code", :post)
end
api_func "comments" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get Latest Comments``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.comments(uid: 163825, kat: "manga", length: 250, p: 1, limit: 12)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "comments",
authorization: false,
extra_header: [],
get_args: %{kat: "manga", uid: 163825, length: 250, p: 1, limit: 12},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.comments(uid: 163825)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "comments",
authorization: false,
extra_header: [],
get_args: %{uid: 163825},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, not_with: ["username"])
parameter("username", :get, not_with: ["uid"])
parameter("kat", :get, optional: true)
parameter("length", :get, optional: true)
paging_parameters()
end
api_func "friends" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get Friends``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.friends(uid: 1337)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "friends",
authorization: false,
extra_header: [],
get_args: %{uid: 1337},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.friends()
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "friends",
authorization: false,
extra_header: [],
get_args: %{},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, optional: true, not_with: ["username"])
parameter("username", :get, optional: true, not_with: ["uid"])
end
api_func "history" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get History``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.history(uid: 154371, isH: true, p: 5, limit: 24)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "history",
authorization: false,
extra_header: [],
get_args: %{uid: 154371, isH: true, p: 5, limit: 24},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.history(uid: 154371)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "history",
authorization: false,
extra_header: [],
get_args: %{uid: 154371},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, not_with: ["username"])
parameter("username", :get, not_with: ["uid"])
parameter("isH", :get, optional: true)
paging_parameters()
end
api_func "list" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get List``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.list(username: "Username", kat: "manga", search: "test",
...> search_start: "test_start", isH: true, sort: "nameASC", filter: "stateFilter1",
...> p: 1, limit: 10)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "list",
authorization: false,
extra_header: [],
get_args: %{
username: "Username",
kat: "manga",
search: "test",
search_start: "test_start",
isH: true,
sort: "nameASC",
filter: "stateFilter1",
p: 1,
limit: 10
},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.list(uid: 157584)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "list",
authorization: false,
extra_header: [],
get_args: %{uid: 157584},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, not_with: ["username"])
parameter("username", :get, not_with: ["uid"])
parameter("kat", :get, optional: true)
parameter("search", :get, optional: true)
parameter("search_start", :get, optional: true)
parameter("isH", :get, optional: true)
parameter("sort", :get, optional: true)
parameter("filter", :get, optional: true)
paging_parameters()
end
api_func "login" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Login``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> ProxerEx.Api.User.login(username: "name", password: "<PASSWORD>", secretkey: "918247")
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "login",
authorization: false,
extra_header: [],
get_args: %{},
method: :post,
post_args: [username: "name", password: "<PASSWORD>", secretkey: "918247"]
}}
""")
parameter("username", :post)
parameter("password", :post)
parameter("secretkey", :post, optional: true)
end
api_func "logout", authorization: true do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Logout``` api.
## Examples
iex> ProxerEx.Api.User.logout()
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "logout",
authorization: true,
extra_header: [],
get_args: %{},
method: :get,
post_args: []
}}
""")
end
api_func "requestauth" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Request Authentification``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
## Examples
iex> code = 1..100 |> Enum.reduce("", fn _, acc -> acc <> "0" end)
iex> ProxerEx.Api.User.requestauth(uid: 177103, name: "Example App", code: code)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "requestauth",
authorization: false,
extra_header: [],
get_args: %{name: "Example App", uid: 177103},
method: :post,
post_args: [
code: "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
}}
""")
parameter("uid", :get, not_with: ["username"])
parameter("username", :get, not_with: ["uid"])
parameter("name", :get)
parameter("code", :post)
end
api_func "topten" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Get Topten``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
To make sure the user is authorized to view the content the program is trying to access, it is
recommended to set `ProxerEx.Request.authorization` to `true`. However this is not required and if
left untouched the program must account for the possibility that the server may return an error if
the information is not accessible to an anonymous user.
## Examples
iex> ProxerEx.Api.User.topten(username: "Username", kat: "manga", isH: true)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "topten",
authorization: false,
extra_header: [],
get_args: %{isH: true, kat: "manga", username: "Username"},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.topten(username: "Username")
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "topten",
authorization: false,
extra_header: [],
get_args: %{username: "Username"},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, not_with: ["username"])
parameter("username", :get, not_with: ["uid"])
parameter("kat", :get, optional: true)
parameter("isH", :get, optional: true)
end
api_func "userinfo" do
api_doc("""
Constructs a `ProxerEx.Request` that can be used to send a request to the ```User/Userinfo``` api.
This method receives an optional keyword list as its only argument which represents the information sent to
the respective api. All keys must be named as seen in the official documentation. For further information
take a look at the examples below.
If neither `uid` nor `username` is given, information about the authenticated user is returned.
In this case it is required to set `ProxerEx.Request.authorization` to `true`.
## Examples
iex> ProxerEx.Api.User.userinfo(uid: 1337)
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "userinfo",
authorization: false,
extra_header: [],
get_args: %{uid: 1337},
method: :get,
post_args: []
}}
iex> ProxerEx.Api.User.userinfo()
{:ok,
%ProxerEx.Request{
api_class: "user",
api_func: "userinfo",
authorization: false,
extra_header: [],
get_args: %{},
method: :get,
post_args: []
}}
""")
parameter("uid", :get, optional: true, not_with: ["username"])
parameter("username", :get, optional: true, not_with: ["uid"])
end
end
|
lib/api_classes/user_api.ex
| 0.881309
| 0.737796
|
user_api.ex
|
starcoder
|
defmodule ShopifyPlug.Sigv do
@moduledoc """
When Shopify receives an HTTP request for a proxied path, it will forward that request to the specified Proxy URL.
Cookies are not supported for the application proxy, since the application is accessed through the shop's domain.
Shopify will strip the Cookie header from the request and Set-Cookie from the response.
For example, when the following HTTP request is sent from the client user agent:
```GET /apps/awesome_reviews/extra/path/components?extra=1&extra=2 HTTP/1.1
Host: Cogn.myshopify.com
Cookie: csrftoken=<KEY>;
_session_id=1234456789abcdef0123456789abcdef;
_secure_session_id=234456789abcdef0123456789abcdef0```
Given that the Proxy URL is set to https://genie.cogcloud.net/proxy,
the client's IP address is 172.16.58.3 and the application's shared secret is hush,
the forwarded request will look like the following:
```GET /proxy/extra/path/components?extra=1&extra=2&shop=Cogn.myshopify.com&path_prefix=%2Fapps%2Fawesome_reviews×tamp=1317327555&signature=a9718877bea71c2484f91608a7eaea1532bdf71f5c56825065fa4ccabe549ef3 HTTP/1.1
Host: genie.cogcloud.net
X-Forwarded-For: 172.16.58.3```
Shopify: How proxy requests work: https://help.shopify.com/api/tutorials/application-proxies#proxy-request
Shopify Security: Calculate a digital signature: https://help.shopify.com/api/tutorials/application-proxies#security
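## Example usage

A minimal sketch of verifying proxied requests in a `Plug.Router` pipeline
(the router module and shared secret below are hypothetical):

```
defmodule MyApp.ProxyRouter do
  use Plug.Router

  plug :fetch_query_params
  plug ShopifyPlug.Sigv, signature: "hush"
  plug :match
  plug :dispatch

  get "/proxy/*_rest" do
    send_resp(conn, 200, "signature verified")
  end
end
```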
"""
@behaviour Plug
def init(), do: raise(ArgumentError, message: "missing required options")
def init(opts) do
unless opts[:signature],
do: raise(ArgumentError, message: "missing required argument 'signature'")
opts
end
@doc """
Check that all the parameters we need are set in the connection.
"""
def call(%Plug.Conn{ params: %{ "signature" => signature }, query_string: query_string } = conn, opts) do
calculated_signature =
create_calculated_signature(query_string, opts)
case SecureCompare.compare(signature, calculated_signature) do
true ->
conn
|> Plug.Conn.put_req_header("x-spap-sigv", "pass")
false ->
ShopifyPlug.Errors.failed_connection(conn, :sigv)
end
end
@doc """
`generate_hmac/2` proxies to `:crypto.hmac/3`
"""
def generate_hmac(query_hash, opts), do: :crypto.hmac(:sha256, opts[:signature], query_hash)
def create_calculated_signature(params, opts) do
params
|> parse_query()
|> remove_signature()
|> Enum.sort()
|> Enum.map(fn {k, v} -> stringify(k, v) end)
|> Enum.join("")
|> generate_hmac(opts)
|> Base.encode16()
|> String.downcase()
end
def parse_query(params) do
params
|> split_query_strings()
|> split_query_key_string()
|> decode_query_string()
|> list_of_kv_pair()
|> group_by_unique_key()
end
defp split_query_strings(string), do: String.split(string, "&")
defp split_query_key_string(value), do: Enum.map(value, fn v -> String.split(v, "=") end)
defp decode_query_string(value), do: Enum.map(value, fn [k, v] -> [k, URI.decode(v)] end)
defp list_of_kv_pair(value), do: Enum.flat_map(value, fn [k, v] -> ["#{k}": v] end)
defp group_by_unique_key(value), do: Enum.group_by(value, fn {k, _} -> k end, fn {_, v} -> v end)
defp remove_signature(value), do: Map.delete(value, :signature)
defp stringify(key, value) when is_list(value) or is_map(value) do
csv = Enum.join(value, ",")
"#{key}=#{csv}"
end
defp stringify(key, value) do
"#{key}=#{value}"
end
end
|
lib/sigv.ex
| 0.747708
| 0.7287
|
sigv.ex
|
starcoder
|
defmodule Artemis.Drivers.PagerDuty.ListOnCalls do
require Logger
alias Artemis.Drivers.PagerDuty
defmodule Result do
defstruct data: [],
meta: %{}
end
@moduledoc """
Fetches on-call records from the PagerDuty API.
## Paginated Results
The PagerDuty API sets a low limit for how many records may be returned in a
single request. As a consequence, it's common for many requests to be sent in
order to return the complete result set.
This module will automatically request the next page of results until it
has returned all matching records.
The default behaviour is to return the complete result set at the end.
Optionally, a callback function can be sent to batch process results after
each page.
## Options
:callback
:request_headers
:request_params
:request_path
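## Example

A minimal sketch, assuming PagerDuty API credentials are already configured
for `Artemis.Drivers.PagerDuty.Request`:

    {:ok, result} =
      Artemis.Drivers.PagerDuty.ListOnCalls.call(request_params: [limit: 100])

    result.data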
"""
@fetch_limit 50
@request_path "/oncalls"
def call(options \\ []) do
initial_data = %Result{
data: [],
meta: %{}
}
fetch_data(initial_data, options)
end
defp fetch_data(acc, options) do
with {:ok, response} <- get_page(options),
200 <- response.status_code,
{:ok, all_on_calls} <- process_response(response) do
callback_results = apply_callback(all_on_calls, options)
on_calls = Map.get(callback_results, :on_calls, all_on_calls)
options = Map.get(callback_results, :options, options)
acc =
acc
|> Map.update!(:data, &Kernel.++(&1, on_calls))
|> Map.update!(:meta, &Map.put(&1, :api_response, response))
more? = Artemis.Helpers.deep_get(response, [:body, "more"], false)
case more? do
false -> {:ok, acc}
true -> fetch_data(acc, get_updated_options(options, all_on_calls))
end
else
{:error, %HTTPoison.Error{id: nil, reason: :closed}} ->
fetch_data(acc, options)
{:error, %HTTPoison.Error{id: nil, reason: :timeout}} ->
fetch_data(acc, options)
error ->
Logger.info("Error fetching on calls from PagerDuty API: " <> inspect(error))
return_error(error)
end
rescue
error ->
Logger.info("Error fetching on calls from PagerDuty API: " <> inspect(error))
{:error, "Exception raised while fetching on callsfrom PagerDuty API"}
end
defp get_page(options) do
path = Keyword.get(options, :request_path, @request_path)
headers = Keyword.get(options, :request_headers, [])
options = [params: get_request_params(options)]
PagerDuty.Request.get(path, headers, options)
end
defp get_request_params(options) do
default_request_params = [
limit: @fetch_limit,
offset: 0
]
custom_request_params = Keyword.get(options, :request_params, [])
Keyword.merge(default_request_params, custom_request_params)
end
defp process_response(%HTTPoison.Response{body: %{"oncalls" => entries}}) do
{:ok, entries}
end
defp process_response(response), do: return_error(response)
defp apply_callback(on_calls, options) do
case Keyword.get(options, :callback) do
nil -> %{on_calls: on_calls, options: options}
callback -> callback.(on_calls, options)
end
end
defp get_updated_options(options, on_calls) do
request_params = Keyword.get(options, :request_params, [])
current_offset = Keyword.get(request_params, :offset, 0)
# PagerDuty's `offset` counts records, not pages, so advance by the number of records just returned
next_offset = current_offset + length(on_calls)
updated_request_params = Keyword.put(request_params, :offset, next_offset)
Keyword.put(options, :request_params, updated_request_params)
end
defp return_error({:error, message}), do: {:error, message}
defp return_error(error), do: {:error, error}
end
|
apps/artemis/lib/artemis/drivers/pager_duty/list_on_calls.ex
| 0.601945
| 0.400163
|
list_on_calls.ex
|
starcoder
|
defmodule Cldr.Calendar.Interval do
@moduledoc """
Implements functions to return intervals and compare
date intervals.
In particular it provides functions which return an
interval (as a `Date.Range.t`) for years, quarters,
months, weeks and days.
In general, the intervals created with the package
[calendar_interval](https://hex.pm/packages/calendar_interval)
are to be preferred since they can be used over different
time precisions, whereas the functions in this module all
return intervals with a precision of a day. In order to be used with `ex_cldr_calendars`,
version "~> 0.2" of [calendar_interval](https://hex.pm/packages/calendar_interval)
is required.
Note however that as of release `0.2`, [calendar_interval](https://hex.pm/packages/calendar_interval) does
not support intervals of `quarters` or `weeks`.
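Since the returned intervals are `Date.Range.t`s, they can be enumerated
directly. A sketch using the default `Cldr.Calendar.Gregorian` calendar:

    Cldr.Calendar.Interval.month(2019, 1) |> Enum.count()
    #=> 31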
"""
@doc """
Returns a `Date.Range.t` that represents
the `year`.
The range is enumerable.
## Arguments
* `year` is any `year` for `calendar`
* `calendar` is any module that implements
the `Calendar` and `Cldr.Calendar`
behaviours. The default is `Cldr.Calendar.Gregorian`.
## Returns
* A `Date.Range.t()` representing the
enumerable days in the `year`
## Examples
iex> Cldr.Calendar.Interval.year 2019, Cldr.Calendar.Fiscal.UK
#DateRange<~D[2019-01-01 Cldr.Calendar.Fiscal.UK], ~D[2019-12-31 Cldr.Calendar.Fiscal.UK]>
iex> Cldr.Calendar.Interval.year 2019, Cldr.Calendar.NRF
#DateRange<~D[2019-W01-1 Cldr.Calendar.NRF], ~D[2019-W52-7 Cldr.Calendar.NRF]>
"""
@spec year(Calendar.year(), Cldr.Calendar.calendar()) :: Date.Range.t()
@spec year(Date.t()) :: Date.Range.t()
def year(%{calendar: Calendar.ISO} = date) do
%{date | calendar: Cldr.Calendar.Gregorian}
|> year
|> coerce_iso_calendar
end
def year(%{year: _, month: _, day: _} = date) do
year(date.year, date.calendar)
end
def year(year, calendar \\ Cldr.Calendar.Gregorian) do
calendar.year(year)
end
@doc """
Returns a `Date.Range.t` that represents
the `quarter`.
The range is enumerable.
## Arguments
* `year` is any `year` for `calendar`
* `quarter` is any `quarter` in the
`year` for `calendar`
* `calendar` is any module that implements
the `Calendar` and `Cldr.Calendar`
behaviours. The default is `Cldr.Calendar.Gregorian`.
## Returns
* A `Date.Range.t()` representing the
enumerable days in the `quarter`
## Examples
iex> Cldr.Calendar.Interval.quarter 2019, 2, Cldr.Calendar.Fiscal.UK
#DateRange<~D[2019-04-01 Cldr.Calendar.Fiscal.UK], ~D[2019-06-30 Cldr.Calendar.Fiscal.UK]>
iex> Cldr.Calendar.Interval.quarter 2019, 2, Cldr.Calendar.ISOWeek
#DateRange<~D[2019-W14-1 Cldr.Calendar.ISOWeek], ~D[2019-W26-7 Cldr.Calendar.ISOWeek]>
"""
@spec quarter(Calendar.year(), Cldr.Calendar.quarter(), Cldr.Calendar.calendar()) ::
Date.Range.t()
@spec quarter(Date.t()) :: Date.Range.t()
def quarter(%{calendar: Calendar.ISO} = date) do
%{date | calendar: Cldr.Calendar.Gregorian}
|> quarter
|> coerce_iso_calendar
end
def quarter(date) do
quarter = Cldr.Calendar.quarter_of_year(date)
quarter(date.year, quarter, date.calendar)
end
def quarter(year, quarter, calendar \\ Cldr.Calendar.Gregorian) do
calendar.quarter(year, quarter)
end
@doc """
Returns a `Date.Range.t` that represents
the `month`.
The range is enumerable.
## Arguments
* `year` is any `year` for `calendar`
* `month` is any `month` in the `year`
for `calendar`
* `calendar` is any module that implements
the `Calendar` and `Cldr.Calendar`
behaviours. The default is `Cldr.Calendar.Gregorian`.
## Returns
* A `Date.Range.t()` representing the
enumerable days in the `month`
## Examples
iex> Cldr.Calendar.Interval.month 2019, 3, Cldr.Calendar.Fiscal.UK
#DateRange<~D[2019-03-01 Cldr.Calendar.Fiscal.UK], ~D[2019-03-30 Cldr.Calendar.Fiscal.UK]>
iex> Cldr.Calendar.Interval.month 2019, 3, Cldr.Calendar.Fiscal.US
#DateRange<~D[2019-03-01 Cldr.Calendar.Fiscal.US], ~D[2019-03-31 Cldr.Calendar.Fiscal.US]>
"""
@spec month(Calendar.year(), Calendar.month(), Cldr.Calendar.calendar()) :: Date.Range.t()
@spec month(Date.t()) :: Date.Range.t()
def month(%{calendar: Calendar.ISO} = date) do
%{date | calendar: Cldr.Calendar.Gregorian}
|> month
|> coerce_iso_calendar
end
def month(date) do
month = Cldr.Calendar.month_of_year(date)
month(date.year, month, date.calendar)
end
def month(year, month, calendar \\ Cldr.Calendar.Gregorian) do
calendar.month(year, month)
end
@doc """
Returns a `Date.Range.t` that represents
the `week`.
The range is enumerable.
## Arguments
* `year` is any `year` for `calendar`
* `week` is any `week` in the `year`
for `calendar`
* `calendar` is any module that implements
the `Calendar` and `Cldr.Calendar`
behaviours. The default is `Cldr.Calendar.Gregorian`.
## Returns
* A `Date.Range.t()` representing the
enumerable days in the `week` or
* `{:error, :not_defined}` if the calendar
does not support the concept of weeks
## Examples
iex> Cldr.Calendar.Interval.week 2019, 52, Cldr.Calendar.Fiscal.US
#DateRange<~D[2019-12-22 Cldr.Calendar.Fiscal.US], ~D[2019-12-28 Cldr.Calendar.Fiscal.US]>
iex> Cldr.Calendar.Interval.week 2019, 52, Cldr.Calendar.NRF
#DateRange<~D[2019-W52-1 Cldr.Calendar.NRF], ~D[2019-W52-7 Cldr.Calendar.NRF]>
iex> Cldr.Calendar.Interval.week 2019, 52, Cldr.Calendar.ISOWeek
#DateRange<~D[2019-W52-1 Cldr.Calendar.ISOWeek], ~D[2019-W52-7 Cldr.Calendar.ISOWeek]>
iex> Cldr.Calendar.Interval.week 2019, 52, Cldr.Calendar.Julian
{:error, :not_defined}
"""
@spec week(Calendar.year(), Cldr.Calendar.week(), Cldr.Calendar.calendar()) :: Date.Range.t()
@spec week(Date.t()) :: Date.Range.t()
def week(%{calendar: Calendar.ISO} = date) do
%{date | calendar: Cldr.Calendar.Gregorian}
|> week
|> coerce_iso_calendar
end
def week(date) do
{year, week} = Cldr.Calendar.week_of_year(date)
week(year, week, date.calendar)
end
def week(year, week, calendar \\ Cldr.Calendar.Gregorian) do
calendar.week(year, week)
end
@doc """
Returns a `Date.Range.t` that represents
the `day`.
The range is enumerable.
## Arguments
* `year` is any `year` for `calendar`
* `day` is any `day` in the `year`
for `calendar`
* `calendar` is any module that implements
the `Calendar` and `Cldr.Calendar`
behaviours. The default is `Cldr.Calendar.Gregorian`.
## Returns
* A `Date.Range.t()` representing the
single `day` as an enumerable range
## Examples
iex> Cldr.Calendar.Interval.day 2019, 52, Cldr.Calendar.Fiscal.US
#DateRange<~D[2019-02-21 Cldr.Calendar.Fiscal.US], ~D[2019-02-21 Cldr.Calendar.Fiscal.US]>
iex> Cldr.Calendar.Interval.day 2019, 92, Cldr.Calendar.NRF
#DateRange<~D[2019-W14-1 Cldr.Calendar.NRF], ~D[2019-W14-1 Cldr.Calendar.NRF]>
Cldr.Calendar.Interval.day 2019, 8, Cldr.Calendar.ISOWeek
#DateRange<%Date{calendar: Cldr.Calendar.ISOWeek, day: 1, month: 2, year: 2019}, %Date{calendar: Cldr.Calendar.ISOWeek, day: 1, month: 2, year: 2019}>
"""
@spec day(Calendar.year(), Calendar.day(), Cldr.Calendar.calendar()) :: Date.Range.t()
@spec day(Date.t()) :: Date.Range.t()
def day(%{calendar: Calendar.ISO} = date) do
%{date | calendar: Cldr.Calendar.Gregorian}
|> day
|> coerce_iso_calendar
end
def day(date) do
Date.range(date, date)
end
def day(year, day, calendar \\ Cldr.Calendar.Gregorian) do
if day <= calendar.days_in_year(year) do
iso_days = calendar.first_gregorian_day_of_year(year) + day - 1
with {year, month, day} = calendar.date_from_iso_days(iso_days),
{:ok, date} <- Date.new(year, month, day, calendar) do
day(date)
end
else
{:error, :invalid_date}
end
end
@doc """
Compare two date ranges.
Uses [Allen's Interval Algebra](https://en.wikipedia.org/wiki/Allen%27s_interval_algebra)
to return one of 13 different relationships:
Relation | Converse
---------- | --------------
:precedes | :preceded_by
:meets | :met_by
:overlaps | :overlapped_by
:finished_by | :finishes
:contains | :during
:starts | :started_by
:equals | :equals
## Arguments
* `range_1` is a `Date.Range.t`
* `range_2` is a `Date.Range.t`
## Returns
An atom representing the relationship between the two ranges.
## Examples
iex> Cldr.Calendar.Interval.compare Cldr.Calendar.Interval.day(~D[2019-01-01]),
...> Cldr.Calendar.Interval.day(~D[2019-01-02])
:meets
iex> Cldr.Calendar.Interval.compare Cldr.Calendar.Interval.day(~D[2019-01-01]),
...> Cldr.Calendar.Interval.day(~D[2019-01-03])
:precedes
iex> Cldr.Calendar.Interval.compare Cldr.Calendar.Interval.day(~D[2019-01-03]),
...> Cldr.Calendar.Interval.day(~D[2019-01-01])
:preceded_by
iex> Cldr.Calendar.Interval.compare Cldr.Calendar.Interval.day(~D[2019-01-02]),
...> Cldr.Calendar.Interval.day(~D[2019-01-01])
:met_by
iex> Cldr.Calendar.Interval.compare Cldr.Calendar.Interval.day(~D[2019-01-02]),
...> Cldr.Calendar.Interval.day(~D[2019-01-02])
:equals
"""
@spec compare(range_1 :: Date.Range.t(), range_2 :: Date.Range.t()) ::
Cldr.Calendar.interval_relation()
def compare(
%Date.Range{first_in_iso_days: first, last_in_iso_days: last},
%Date.Range{first_in_iso_days: first, last_in_iso_days: last}
) do
:equals
end
def compare(%Date.Range{} = r1, %Date.Range{} = r2) do
cond do
r1.last_in_iso_days - r2.first_in_iso_days < -1 ->
:precedes
r1.last_in_iso_days - r2.first_in_iso_days == -1 ->
:meets
r1.first_in_iso_days < r2.first_in_iso_days && r1.last_in_iso_days > r2.last_in_iso_days ->
:contains
r1.last_in_iso_days == r2.last_in_iso_days && r1.first_in_iso_days < r2.first_in_iso_days ->
:finished_by
r1.first_in_iso_days < r2.first_in_iso_days && r1.last_in_iso_days > r2.first_in_iso_days ->
:overlaps
r1.first_in_iso_days == r2.first_in_iso_days && r1.last_in_iso_days < r2.last_in_iso_days ->
:starts
r2.last_in_iso_days - r1.first_in_iso_days < -1 ->
:preceded_by
r2.last_in_iso_days - r1.first_in_iso_days == -1 ->
:met_by
r2.last_in_iso_days == r1.last_in_iso_days && r2.first_in_iso_days < r1.first_in_iso_days ->
:finishes
r1.first_in_iso_days > r2.first_in_iso_days && r1.last_in_iso_days < r2.last_in_iso_days ->
:during
r2.first_in_iso_days == r1.first_in_iso_days && r1.last_in_iso_days > r2.last_in_iso_days ->
:started_by
r2.last_in_iso_days > r1.first_in_iso_days && r2.last_in_iso_days < r1.last_in_iso_days ->
:overlapped_by
end
end
@doc false
def to_iso_calendar(%Date.Range{first: first, last: last}) do
Date.range(Date.convert!(first, Calendar.ISO), Date.convert!(last, Calendar.ISO))
end
@doc false
def coerce_iso_calendar(%Date.Range{first: first, last: last}) do
first = %{first | calendar: Calendar.ISO}
last = %{last | calendar: Calendar.ISO}
Date.range(first, last)
end
end
|
lib/cldr/calendar/interval.ex
| 0.953998
| 0.839142
|
interval.ex
|
starcoder
|
defmodule AWS.CloudWatch.Events do
@moduledoc """
Amazon CloudWatch Events helps you to respond to state changes in your AWS
resources. When your resources change state, they automatically send events
into an event stream. You can create rules that match selected events in
the stream and route them to targets to take action. You can also use rules
to take action on a pre-determined schedule. For example, you can configure
rules to:
* Automatically invoke an AWS Lambda function to update DNS entries
  when an event notifies you that an Amazon EC2 instance enters the running
  state.
* Direct specific API records from CloudTrail to an Amazon Kinesis
  stream for detailed analysis of potential security or availability risks.
* Periodically invoke a built-in target to create a snapshot of an
  Amazon EBS volume.

For more information about the features of Amazon CloudWatch
Events, see the [Amazon CloudWatch Events User
Guide](http://docs.aws.amazon.com/AmazonCloudWatch/latest/events).
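## Example

A minimal sketch of listing rules. The `client` map is assumed to carry the
fields used by the request functions (`:region`, `:endpoint`, `:proto`,
`:port`) plus the credentials expected by `AWS.Request.sign_v4`:

    {:ok, result, _response} = AWS.CloudWatch.Events.list_rules(client, %{})
    result["Rules"]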
"""
@doc """
Deletes the specified rule.
You must remove all targets from a rule using `RemoveTargets` before you
can delete the rule.
When you delete a rule, incoming events might continue to match to the
deleted rule. Please allow a short period of time for changes to take
effect.
"""
def delete_rule(client, input, options \\ []) do
request(client, "DeleteRule", input, options)
end
@doc """
Displays the external AWS accounts that are permitted to write events to
your account using your account's event bus, and the associated policy. To
enable your account to receive events from other accounts, use
`PutPermission`.
"""
def describe_event_bus(client, input, options \\ []) do
request(client, "DescribeEventBus", input, options)
end
@doc """
Describes the specified rule.
"""
def describe_rule(client, input, options \\ []) do
request(client, "DescribeRule", input, options)
end
@doc """
Disables the specified rule. A disabled rule won't match any events, and
won't self-trigger if it has a schedule expression.
When you disable a rule, incoming events might continue to match to the
disabled rule. Please allow a short period of time for changes to take
effect.
"""
def disable_rule(client, input, options \\ []) do
request(client, "DisableRule", input, options)
end
@doc """
Enables the specified rule. If the rule does not exist, the operation
fails.
When you enable a rule, incoming events might not immediately start
matching to a newly enabled rule. Please allow a short period of time for
changes to take effect.
"""
def enable_rule(client, input, options \\ []) do
request(client, "EnableRule", input, options)
end
@doc """
Lists the rules for the specified target. You can see which of the rules in
Amazon CloudWatch Events can invoke a specific target in your account.
"""
def list_rule_names_by_target(client, input, options \\ []) do
request(client, "ListRuleNamesByTarget", input, options)
end
@doc """
Lists your Amazon CloudWatch Events rules. You can either list all the
rules or you can provide a prefix to match to the rule names.
"""
def list_rules(client, input, options \\ []) do
request(client, "ListRules", input, options)
end
@doc """
Lists the targets assigned to the specified rule.
"""
def list_targets_by_rule(client, input, options \\ []) do
request(client, "ListTargetsByRule", input, options)
end
@doc """
Sends custom events to Amazon CloudWatch Events so that they can be matched
to rules.
"""
def put_events(client, input, options \\ []) do
request(client, "PutEvents", input, options)
end
@doc """
Running `PutPermission` permits the specified AWS account to put events to
your account's default *event bus*. CloudWatch Events rules in your account
are triggered by these events arriving at your default event bus.
For another account to send events to your account, that external account
must have a CloudWatch Events rule with your account's default event bus as
a target.
To enable multiple AWS accounts to put events to your default event bus,
run `PutPermission` once for each of these accounts.
The permission policy on the default event bus cannot exceed 10KB in size.
"""
def put_permission(client, input, options \\ []) do
request(client, "PutPermission", input, options)
end
@doc """
Creates or updates the specified rule. Rules are enabled by default, or
based on value of the state. You can disable a rule using `DisableRule`.
If you are updating an existing rule, the rule is completely replaced with
what you specify in this `PutRule` command. If you omit arguments in
`PutRule`, the old values for those arguments are not kept. Instead, they
are replaced with null values.
When you create or update a rule, incoming events might not immediately
start matching to new or updated rules. Please allow a short period of time
for changes to take effect.
A rule must contain at least an EventPattern or ScheduleExpression. Rules
with EventPatterns are triggered when a matching event is observed. Rules
with ScheduleExpressions self-trigger based on the given schedule. A rule
can have both an EventPattern and a ScheduleExpression, in which case the
rule triggers on matching events as well as on a schedule.
Most services in AWS treat : or / as the same character in Amazon Resource
Names (ARNs). However, CloudWatch Events uses an exact match in event
patterns and rules. Be sure to use the correct ARN characters when creating
event patterns so that they match the ARN syntax in the event you want to
match.
"""
def put_rule(client, input, options \\ []) do
request(client, "PutRule", input, options)
end
@doc """
Adds the specified targets to the specified rule, or updates the targets if
they are already associated with the rule.
Targets are the resources that are invoked when a rule is triggered.
You can configure the following as targets for CloudWatch Events:
* EC2 instances
* AWS Lambda functions
* Streams in Amazon Kinesis Streams
* Delivery streams in Amazon Kinesis Firehose
* Amazon ECS tasks
* AWS Step Functions state machines
* AWS Batch jobs
* Pipelines in Amazon Code Pipeline
* Amazon Inspector assessment templates
* Amazon SNS topics
* Amazon SQS queues, including FIFO queues
* The default event bus of another AWS account

Note that creating rules with built-in targets is supported
only in the AWS Management Console.
For some target types, `PutTargets` provides target-specific parameters. If
the target is an Amazon Kinesis stream, you can optionally specify which
shard the event goes to by using the `KinesisParameters` argument. To
invoke a command on multiple EC2 instances with one rule, you can use the
`RunCommandParameters` field.
To be able to make API calls against the resources that you own, Amazon
CloudWatch Events needs the appropriate permissions. For AWS Lambda and
Amazon SNS resources, CloudWatch Events relies on resource-based policies.
For EC2 instances, Amazon Kinesis streams, and AWS Step Functions state
machines, CloudWatch Events relies on IAM roles that you specify in the
`RoleARN` argument in `PutTargets`. For more information, see
[Authentication and Access
Control](http://docs.aws.amazon.com/AmazonCloudWatch/latest/events/auth-and-access-control-cwe.html)
in the *Amazon CloudWatch Events User Guide*.
If another AWS account is in the same region and has granted you permission
(using `PutPermission`), you can send events to that account by setting
that account's event bus as a target of the rules in your account. To send
the matched events to the other account, specify that account's event bus
as the `Arn` when you run `PutTargets`. If your account sends events to
another account, your account is charged for each sent event. Each event
sent to another account is charged as a custom event. The account receiving
the event is not charged. For more information on pricing, see [Amazon
CloudWatch Pricing](https://aws.amazon.com/cloudwatch/pricing/).
For more information about enabling cross-account events, see
`PutPermission`.
**Input**, **InputPath** and **InputTransformer** are mutually exclusive
and optional parameters of a target. When a rule is triggered due to a
matched event:
* If none of the following arguments are specified for a target,
  then the entire event is passed to the target in JSON form (unless the
  target is Amazon EC2 Run Command or Amazon ECS task, in which case nothing
  from the event is passed to the target).
* If **Input** is specified in the form of valid JSON, then the
  matched event is overridden with this constant.
* If **InputPath** is specified in the form of JSONPath (for
  example, `$.detail`), then only the part of the event specified in the path
  is passed to the target (for example, only the detail part of the event is
  passed).
* If **InputTransformer** is specified, then one or more specified
  JSONPaths are extracted from the event and used as values in a template
  that you specify as the input to the target.

When you specify `InputPath` or `InputTransformer`, you must
use JSON dot notation, not bracket notation.
When you add targets to a rule and the associated rule triggers soon after,
new or updated targets might not be immediately invoked. Please allow a
short period of time for changes to take effect.
This action can partially fail if too many requests are made at the same
time. If that happens, `FailedEntryCount` is non-zero in the response and
each entry in `FailedEntries` provides the ID of the failed target and the
error code.
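A sketch of the input shape (field names follow the CloudWatch Events API;
the rule name and ARN below are placeholders):

    AWS.CloudWatch.Events.put_targets(client, %{
      "Rule" => "my-rule",
      "Targets" => [%{"Id" => "target-1", "Arn" => "arn:aws:lambda:..."}]
    })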
"""
def put_targets(client, input, options \\ []) do
request(client, "PutTargets", input, options)
end
@doc """
Revokes the permission of another AWS account to be able to put events to
your default event bus. Specify the account to revoke by the `StatementId`
value that you associated with the account when you granted it permission
with `PutPermission`. You can find the `StatementId` by using
`DescribeEventBus`.
"""
def remove_permission(client, input, options \\ []) do
request(client, "RemovePermission", input, options)
end
@doc """
Removes the specified targets from the specified rule. When the rule is
triggered, those targets are no longer invoked.
After you remove a target, it might continue to be invoked when the
associated rule triggers. Please allow a short period of time
for changes to take effect.
This action can partially fail if too many requests are made at the same
time. If that happens, `FailedEntryCount` is non-zero in the response and
each entry in `FailedEntries` provides the ID of the failed target and the
error code.
"""
def remove_targets(client, input, options \\ []) do
request(client, "RemoveTargets", input, options)
end
@doc """
Tests whether the specified event pattern matches the provided event.
Most services in AWS treat : or / as the same character in Amazon Resource
Names (ARNs). However, CloudWatch Events uses an exact match in event
patterns and rules. Be sure to use the correct ARN characters when creating
event patterns so that they match the ARN syntax in the event you want to
match.
"""
def test_event_pattern(client, input, options \\ []) do
request(client, "TestEventPattern", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "events"}
host = get_host("events", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSEvents.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/cloudwatch_events.ex
| 0.895537
| 0.566978
|
cloudwatch_events.ex
|
starcoder
|
defmodule Scenic do
@moduledoc """
Scenic is an application framework written directly on the Elixir/Erlang/OTP stack.
With it you can build client-side applications that operate identically across all
supported operating systems, including MacOS, Ubuntu, Nerves/Linux, and more.
Scenic is primarily aimed at fixed screen connected devices (IoT), but can also be
used to build portable applications.
## Helpful Guides
[General Overview](overview_general.html) | [**Getting Started**](getting_started.html) | [Nerves](getting_started_nerves.html)
[Structure of a Scene](scene_structure.html) | [Lifecycle of a Scene](scene_lifecycle.html) | [Standard Components](Scenic.Components.html)
[Graph Overview](overview_graph.html) | [ViewPort Overview](overview_viewport.html) | [Driver Overview](overview_driver)
[Primitives](Scenic.Primitives.html) | [Styles](styles_overview.html) | [Transforms](transforms_overview.html)
[Contributing](contributing.html) | [Code of Conduct](code_of_conduct.html) |
If you are new to scenic, then you should read the [General Overview](overview_general.html),
[Getting Started](getting_started.html), and [Structure of a Scene](scene_structure.html) guides first.
## Configure Scenic
In order to start Scenic, you should first build a configuration for one or more
ViewPorts. These configuration maps will be passed in to the main Scenic
supervisor. These configurations should live in your app's config.exs file.
use Mix.Config
# Configure the main viewport for the Scenic application
config :my_application, :viewport, %{
name: :main_viewport,
size: {700, 600},
default_scene: {MyApplication.Scene.Example, nil},
drivers: [
%{
module: Scenic.Driver.Glfw,
name: :glfw,
opts: [resizeable: false, title: "Example Application"],
}
]
}
In the ViewPort configuration you can do things like set a name for the
ViewPort process, its size, the default scene and start one or more drivers.
See the documentation for [ViewPort Configuration](Scenic.ViewPort.Config.html)
to learn more about how to set the options on a viewport.
Note that all the drivers are in separate Hex packages so that you can choose the
correct one for your application. For example, the `Scenic.Driver.Glfw` driver draws
your scenes into a window under MacOS and Ubuntu. It should work on other
OS's as well, such as other flavors of Unix or Windows, but I haven't worked
on or tested those yet.
## Supervise Scenic
The Scenic module itself is a supervisor that manages all the machinery that
makes the [Scenes](overview_scene.html), [ViewPorts](overview_viewport.html),
and [Drivers](overview_driver.html) run.
In order to run any Scenic application, you will need to start the Scenic
supervisor in your supervision tree.
Load a configuration for one or more ViewPorts, then add Scenic to your root supervisor.
defmodule MyApplication do
def start(_type, _args) do
import Supervisor.Spec, warn: false
# load the viewport configuration from config
main_viewport_config = Application.get_env(:my_application, :viewport)
# start the application with the viewport
children = [
supervisor(Scenic, [viewports: [main_viewport_config]]),
]
Supervisor.start_link(children, strategy: :one_for_one)
end
end
Note that you can start the Scenic supervisor without any ViewPort
Configurations. In that case, you are responsible for supervising
the ViewPorts yourself. This is not recommended for devices
as Scenic should know how to restart the main ViewPort in the event
of an error.
"""
use Supervisor
@viewports :scenic_dyn_viewports
# --------------------------------------------------------
@doc false
def child_spec(opts) do
%{
start: {__MODULE__, :start_link, [opts]},
type: :supervisor,
restart: :permanent,
shutdown: 500
}
end
# --------------------------------------------------------
@doc false
def start_link(opts \\ [])
def start_link({a, b}), do: start_link([{a, b}])
def start_link(opts) when is_list(opts) do
Supervisor.start_link(__MODULE__, opts, name: :scenic)
end
# --------------------------------------------------------
@doc false
def init(opts) do
opts
|> Keyword.get(:viewports, [])
|> do_init
end
# --------------------------------------------------------
# init with no default viewports
defp do_init([]) do
[
{Scenic.ViewPort.Tables, nil},
supervisor(Scenic.Cache.Supervisor, []),
{DynamicSupervisor, name: @viewports, strategy: :one_for_one}
]
|> Supervisor.init(strategy: :one_for_one)
end
# --------------------------------------------------------
# init with default viewports
defp do_init(viewports) do
[
{Scenic.ViewPort.Tables, nil},
supervisor(Scenic.Cache.Supervisor, []),
supervisor(Scenic.ViewPort.SupervisorTop, [viewports]),
{DynamicSupervisor, name: @viewports, strategy: :one_for_one}
]
|> Supervisor.init(strategy: :one_for_one)
end
end
|
lib/scenic.ex
| 0.799325
| 0.605099
|
scenic.ex
|
starcoder
|
defmodule RDF.Serialization.Format do
@moduledoc """
A behaviour for RDF serialization formats.
A serialization format can be implemented like this
defmodule SomeFormat do
use RDF.Serialization.Format
import RDF.Sigils
@id ~I<http://example.com/some_format>
@name :some_format
@extension "ext"
@media_type "application/some-format"
end
When `@id`, `@name`, `@extension` and `@media_type` module attributes are
defined the resp. behaviour functions are generated automatically and return
these values.
Then you'll have to do the main work by implementing a
`RDF.Serialization.Encoder` and a `RDF.Serialization.Decoder` for the format.
By default it is assumed that these are defined in `Encoder` and `Decoder`
modules under the `RDF.Serialization.Format` module of the format, i.e. in the
example above in `SomeFormat.Encoder` and `SomeFormat.Decoder`. If you want
them in another module, you'll have to override the `encoder/0` and/or
`decoder/0` functions in your `RDF.Serialization.Format` module.
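Once the format module is defined, it exposes the generated reader and writer
functions. A sketch using the `SomeFormat` module above:

    {:ok, graph} = SomeFormat.read_file("data.ext")
    string = SomeFormat.write_string!(graph)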
"""
alias RDF.{Dataset, Graph}
alias RDF.Serialization.{Reader, Writer}
@doc """
An IRI of the serialization format.
"""
@callback id :: RDF.IRI.t()
@doc """
An name atom of the serialization format.
"""
@callback name :: atom
@doc """
The usual file extension for the serialization format.
"""
@callback extension :: String.t()
@doc """
The MIME type of the serialization format.
"""
@callback media_type :: String.t()
@doc """
The `RDF.Serialization.Decoder` module for the serialization format.
"""
@callback decoder :: module
@doc """
The `RDF.Serialization.Encoder` module for the serialization format.
"""
@callback encoder :: module
defmacro __using__(_) do
quote bind_quoted: [], unquote: true do
@behaviour unquote(__MODULE__)
@decoder __MODULE__.Decoder
@encoder __MODULE__.Encoder
@impl unquote(__MODULE__)
def decoder, do: @decoder
@impl unquote(__MODULE__)
def encoder, do: @encoder
defoverridable unquote(__MODULE__)
@decoder_doc_ref """
See the [module documentation of the decoder](`#{@decoder}`) for the
available format-specific options, all of which can be used in this
function and will be passed through to the decoder.
"""
@doc """
Deserializes a graph or dataset from a string.
It returns an `{:ok, data}` tuple, with `data` being the deserialized graph or
dataset, or `{:error, reason}` if an error occurs.
#{@decoder_doc_ref}
"""
@spec read_string(String.t(), keyword) :: {:ok, Graph.t() | Dataset.t()} | {:error, any}
def read_string(content, opts \\ []), do: Reader.read_string(decoder(), content, opts)
@doc """
Deserializes a graph or dataset from a string.
As opposed to `read_string/2`, it raises an exception if an error occurs.
#{@decoder_doc_ref}
"""
@spec read_string!(String.t(), keyword) :: Graph.t() | Dataset.t()
def read_string!(content, opts \\ []), do: Reader.read_string!(decoder(), content, opts)
@doc """
Deserializes a graph or dataset from a stream.
It returns an `{:ok, data}` tuple, with `data` being the deserialized graph or
dataset, or `{:error, reason}` if an error occurs.
#{@decoder_doc_ref}
"""
@spec read_stream(Enumerable.t(), keyword) :: {:ok, Graph.t() | Dataset.t()} | {:error, any}
def read_stream(stream, opts \\ []), do: Reader.read_stream(decoder(), stream, opts)
@doc """
Deserializes a graph or dataset from a stream.
As opposed to `read_stream/2`, it raises an exception if an error occurs.
#{@decoder_doc_ref}
"""
@spec read_stream!(Enumerable.t(), keyword) :: Graph.t() | Dataset.t()
def read_stream!(stream, opts \\ []), do: Reader.read_stream!(decoder(), stream, opts)
@doc """
Deserializes a graph or dataset from a file.
It returns an `{:ok, data}` tuple, with `data` being the deserialized graph or
dataset, or `{:error, reason}` if an error occurs.
## Options
General serialization-independent options:
- `:stream`: Allows to enable reading the data from a file directly via a
stream (default: `false` on this function, `true` on the bang version)
- `:gzip`: Allows to read directly from a gzipped file (default: `false`)
- `:file_mode`: A list with the Elixir `File.open` modes to be used for reading
(default: `[:read, :utf8]`)
#{@decoder_doc_ref}
"""
@spec read_file(Path.t(), keyword) :: {:ok, Graph.t() | Dataset.t()} | {:error, any}
def read_file(file, opts \\ []), do: Reader.read_file(decoder(), file, opts)
@doc """
Deserializes a graph or dataset from a file.
As opposed to `read_file/2`, it raises an exception if an error occurs and
defaults to `stream: true`.
See `read_file/2` for the available format-independent options.
#{@decoder_doc_ref}
"""
@spec read_file!(Path.t(), keyword) :: Graph.t() | Dataset.t()
def read_file!(file, opts \\ []), do: Reader.read_file!(decoder(), file, opts)
@encoder_doc_ref """
See the [module documentation of the encoder](`#{@encoder}`) for the
available format-specific options, all of which can be used in this
function and will be passed through to the encoder.
"""
@doc """
Serializes a RDF data structure to a string.
It returns an `{:ok, string}` tuple, with `string` being the serialized graph or
dataset, or `{:error, reason}` if an error occurs.
#{@encoder_doc_ref}
"""
@spec write_string(RDF.Data.t(), keyword) :: {:ok, String.t()} | {:error, any}
def write_string(data, opts \\ []), do: Writer.write_string(encoder(), data, opts)
@doc """
Serializes a RDF data structure to a string.
As opposed to `write_string/2`, it raises an exception if an error occurs.
#{@encoder_doc_ref}
"""
@spec write_string!(RDF.Data.t(), keyword) :: String.t()
def write_string!(data, opts \\ []), do: Writer.write_string!(encoder(), data, opts)
if @encoder.stream_support?() do
@doc """
Serializes a RDF data structure to a stream.
#{@encoder_doc_ref}
"""
@spec write_stream(RDF.Data.t(), keyword) :: Enumerable.t()
def write_stream(data, opts \\ []), do: Writer.write_stream(encoder(), data, opts)
end
@doc """
Serializes a RDF data structure to a file.
It returns `:ok` if successful or `{:error, reason}` if an error occurs.
## Options
General serialization-independent options:
- `:stream`: Allows to enable writing the serialized data to the file directly
via a stream. Possible values: `:string` or `:iodata` for writing to the file
with a stream of strings respective IO lists, `true` if you want to use streams,
but don't care for the exact method or `false` for not writing with
a stream (default: `false` on this function, `:iodata` on the bang version)
- `:gzip`: Allows to write directly to a gzipped file (default: `false`)
- `:force`: If not set to `true`, an error is raised when the given file
already exists (default: `false`)
- `:file_mode`: A list with the Elixir `File.open` modes to be used for writing
(default: `[:write, :exclusive]`)
#{@encoder_doc_ref}
"""
@spec write_file(RDF.Data.t(), Path.t(), keyword) :: :ok | {:error, any}
def write_file(data, path, opts \\ []), do: Writer.write_file(encoder(), data, path, opts)
@doc """
Serializes a RDF data structure to a file.
As opposed to `write_file/3`, it raises an exception if an error occurs.
See `write_file/3` for the available format-independent options.
#{@encoder_doc_ref}
"""
@spec write_file!(RDF.Data.t(), Path.t(), keyword) :: :ok
def write_file!(data, path, opts \\ []), do: Writer.write_file!(encoder(), data, path, opts)
@before_compile unquote(__MODULE__)
end
end
defmacro __before_compile__(_env) do
quote do
if !Module.defines?(__MODULE__, {:id, 0}) &&
Module.get_attribute(__MODULE__, :id) do
@impl unquote(__MODULE__)
def id, do: @id
end
if !Module.defines?(__MODULE__, {:name, 0}) &&
Module.get_attribute(__MODULE__, :name) do
@impl unquote(__MODULE__)
def name, do: @name
end
if !Module.defines?(__MODULE__, {:extension, 0}) &&
Module.get_attribute(__MODULE__, :extension) do
@impl unquote(__MODULE__)
def extension, do: @extension
end
if !Module.defines?(__MODULE__, {:media_type, 0}) &&
Module.get_attribute(__MODULE__, :media_type) do
@impl unquote(__MODULE__)
def media_type, do: @media_type
end
end
end
end
|
lib/rdf/serialization/format.ex
| 0.959386
| 0.608245
|
format.ex
|
starcoder
|
defmodule Mix.Tasks.Compile.Boundary do
# credo:disable-for-this-file Credo.Check.Readability.Specs
use Boundary, deps: [Boundary]
use Mix.Task.Compiler
alias Boundary.Xref
@moduledoc """
Verifies cross-module function calls according to defined boundaries.
This compiler reports all cross-boundary function calls which are not permitted, according to
the current definition of boundaries. For details on defining boundaries, see the docs for the
`Boundary` module.
## Usage
Once you have configured the boundaries, you need to include the compiler in `mix.exs`:
```
defmodule MySystem.MixProject do
# ...
def project do
[
compilers: [:boundary] ++ Mix.compilers(),
# ...
]
end
# ...
end
```
When developing a library, it's advised to use this compiler only in `:dev` and `:test`
environments:
```
defmodule Boundary.MixProject do
# ...
def project do
[
compilers: extra_compilers(Mix.env()) ++ Mix.compilers(),
# ...
]
end
# ...
defp extra_compilers(:prod), do: []
defp extra_compilers(_env), do: [:boundary]
end
```
## Warnings
Every invalid cross-boundary call is reported as a compiler warning. Consider the following example:
```
defmodule MySystem.User do
def auth() do
MySystemWeb.Endpoint.url()
end
end
```
Assuming that calls from `MySystem` to `MySystemWeb` are not allowed, you'll get the following warning:
```
$ mix compile
warning: forbidden call to MySystemWeb.Endpoint.url/0
(calls from MySystem to MySystemWeb are not allowed)
(call originated from MySystem.User)
lib/my_system/user.ex:3
```
Since the compiler emits warnings, `mix compile` will still succeed, and you can normally start
your system, even if some boundary rules are violated. The compiler doesn't force you to immediately
fix these violations, which is a deliberate decision made to avoid disrupting the development flow.
At the same time, it's worth enforcing boundaries on the CI. This can easily be done by providing
the `--warnings-as-errors` option to `mix compile`.
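For example, a CI-only alias along these lines can be used (a sketch, not part of this library):
```
defp aliases do
[ci: ["compile --warnings-as-errors", "test"]]
end
```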
"""
@recursive true
@impl Mix.Task.Compiler
def run(argv) do
Xref.start_link(path())
Mix.Task.Compiler.after_compiler(:app, &after_compiler(&1, argv))
tracers = Code.get_compiler_option(:tracers)
Code.put_compiler_option(:tracers, [__MODULE__ | tracers])
{:ok, []}
end
@doc false
def trace({remote, meta, callee_module, name, arity}, env) when remote in ~w/remote_function remote_macro/a do
if env.module != nil do
Xref.add_call(
env.module,
%{callee: {callee_module, name, arity}, file: Path.relative_to_cwd(env.file), line: meta[:line]}
)
end
:ok
end
def trace(_event, _env), do: :ok
defp after_compiler({:error, _} = status, _argv), do: status
defp after_compiler({status, diagnostics}, argv) when status in [:ok, :noop] do
tracers = Enum.reject(Code.get_compiler_option(:tracers), &(&1 == __MODULE__))
Code.put_compiler_option(:tracers, tracers)
calls = Xref.calls(path(), app_modules())
errors = Boundary.MixCompiler.check(calls: calls)
print_diagnostic_errors(errors)
{status(errors, argv), diagnostics ++ errors}
end
defp app_modules do
app = Keyword.fetch!(Mix.Project.config(), :app)
Application.load(app)
Application.spec(app, :modules)
end
defp status([], _), do: :ok
defp status([_ | _], argv), do: if(warnings_as_errors?(argv), do: :error, else: :ok)
defp warnings_as_errors?(argv) do
{parsed, _argv, _errors} = OptionParser.parse(argv, strict: [warnings_as_errors: :boolean])
Keyword.get(parsed, :warnings_as_errors, false)
end
defp print_diagnostic_errors(errors) do
if errors != [], do: IO.puts("")
Enum.each(errors, &print_diagnostic_error/1)
end
defp print_diagnostic_error(error) do
Mix.shell().info([severity(error.severity), error.message, location(error)])
end
defp location(error) do
if error.file != nil and error.file != "" do
pos = if error.position != nil, do: ":#{error.position}", else: ""
"\n #{error.file}#{pos}\n"
else
"\n"
end
end
defp severity(severity), do: [:bright, color(severity), "#{severity}: ", :reset]
defp color(:error), do: :red
defp color(:warning), do: :yellow
defp path, do: Path.join(Mix.Project.compile_path(), "boundary_calls.ets")
end
|
lib/mix/tasks/compile/boundary.ex
| 0.856947
| 0.735713
|
boundary.ex
|
starcoder
|
defmodule Mix.Deps.Retriever do
@moduledoc false
@doc """
Returns all dependencies for the current Mix.Project
as a `Mix.Dep` record.
## Exceptions
This function raises an exception in case the developer
provides a dependency in the wrong format.
"""
def all(post_config // []) do
{ deps, _ } = all(nil, post_config, fn(dep, acc) -> { dep, acc } end)
deps
end
@doc """
Like `all/0` but takes a callback that is invoked for
each dependency and must return an updated dependency
in case some processing is done.
"""
def all(rest, post_config // [], callback) do
Enum.map_reduce children, rest, fn (dep, rest) ->
{ dep, rest } = callback.(dep, rest)
if Mix.Deps.available?(dep) and mixfile?(dep) do
{ dep, rest } = Mix.Deps.in_dependency dep, post_config, fn project ->
{ deps, rest } = all(rest, callback)
# We need to call with_mix_project once again
# here in case the dependency was not available
# the first time and the callback hook just
# happened to fetch it.
{ with_mix_project(dep, project).deps(deps), rest }
end
end
{ dep, rest }
end
end
@doc """
Gets all direct children for the current Mix.Project
as a `Mix.Dep` record. Unlike with `all/0`, the `deps`
field is not populated.
"""
def children() do
deps = Mix.project[:deps] || []
scms = Mix.SCM.available
Enum.map deps, fn dep ->
dep = with_scm_and_status(dep, scms)
if Mix.Deps.available?(dep) and mixfile?(dep) do
Mix.Deps.in_dependency dep, fn project ->
with_mix_project(dep, project)
end
else
dep
end
end
end
@doc """
Receives a dependency and updates its status.
"""
def update(Mix.Dep[scm: scm, app: app, requirement: req, opts: opts]) do
with_scm_and_status({ app, req, opts }, [scm])
end
## Helpers
defp with_mix_project(Mix.Dep[project: nil] = dep, project) do
if match?({ :noappfile, _ }, dep.status) and Mix.Project.umbrella? do
dep = dep.update_opts(Keyword.put(&1, :app, false)).status({ :ok, nil })
end
dep.project(project)
end
defp with_mix_project(dep, _project), do: dep
defp with_scm_and_status({ app, opts }, scms) when is_atom(app) and is_list(opts) do
with_scm_and_status({ app, nil, opts }, scms)
end
defp with_scm_and_status({ app, req, opts }, scms) when is_atom(app) and
(is_binary(req) or is_regex(req) or req == nil) and is_list(opts) do
path = Path.join(Mix.project[:deps_path], app)
opts = Keyword.put(opts, :dest, path)
{ scm, opts } = Enum.find_value scms, fn(scm) ->
(new = scm.accepts_options(opts)) && { scm, new }
end
if scm do
Mix.Dep[
scm: scm,
app: app,
requirement: req,
status: status(scm, app, req, opts),
opts: opts
]
else
supported = Enum.join scms, ", "
raise Mix.Error, message: "did not specify a supported scm, expected one of: " <> supported
end
end
defp with_scm_and_status(other, _scms) do
raise Mix.Error, message: %b(dependency specified in the wrong format: #{inspect other}, ) <>
%b(expected { :app, scm: "location" } | { :app, "requirement", scm: "location" })
end
defp status(scm, app, req, opts) do
if scm.checked_out? opts do
opts_app = opts[:app]
if opts_app == false do
{ :ok, nil }
else
path = if is_binary(opts_app), do: opts_app, else: "ebin/#{app}.app"
path = Path.join(opts[:dest], path)
validate_app_file(path, app, req)
end
else
{ :unavailable, opts[:dest] }
end
end
defp validate_app_file(app_path, app, req) do
case :file.consult(app_path) do
{ :ok, [{ :application, ^app, config }] } ->
case List.keyfind(config, :vsn, 0) do
{ :vsn, actual } ->
actual = list_to_binary(actual)
if vsn_match?(req, actual) do
{ :ok, actual }
else
{ :invalidvsn, actual }
end
nil -> { :invalidvsn, nil }
end
{ :ok, _ } -> { :invalidapp, app_path }
{ :error, _ } -> { :noappfile, app_path }
end
end
defp vsn_match?(nil, _actual), do: true
defp vsn_match?(expected, actual) when is_binary(expected), do: actual == expected
defp vsn_match?(expected, actual) when is_regex(expected), do: actual =~ expected
defp mixfile?(dep) do
File.regular?(Path.join dep.opts[:dest], "mix.exs")
end
end
|
lib/mix/lib/mix/deps/retriever.ex
| 0.735642
| 0.401043
|
retriever.ex
|
starcoder
|
defmodule DenseNN do
import Nx.Defn
defn init_random_params do
# 3 layers
# 1. Dense(64) with sigmoid
# 2. Dense(32) with sigmoid
# 3. Dense(10) with softmax
w1 = Nx.random_normal({1024, 64}, 0.0, 0.1, names: [:input, :layer1])
b1 = Nx.random_normal({64}, 0.0, 0.1, names: [:layer1])
w2 = Nx.random_normal({64, 32}, 0.0, 0.1, names: [:layer1, :layer2])
b2 = Nx.random_normal({32}, 0.0, 0.1, names: [:layer2])
w3 = Nx.random_normal({32, 10}, 0.0, 0.1, names: [:layer2, :output])
b3 = Nx.random_normal({10}, 0.0, 0.1, names: [:output])
{w1, b1, w2, b2, w3, b3}
end
defn softmax(logits) do
Nx.exp(logits) /
Nx.sum(Nx.exp(logits), axes: [:output], keep_axes: true)
end
defn predict({w1, b1, w2, b2, w3, b3}, batch) do
batch
|> Nx.dot(w1)
|> Nx.add(b1)
|> Nx.logistic()
|> Nx.dot(w2)
|> Nx.add(b2)
|> Nx.logistic()
|> Nx.dot(w3)
|> Nx.add(b3)
|> softmax()
end
defn accuracy({w1, b1, w2, b2, w3, b3}, batch_images, batch_labels) do
Nx.mean(
Nx.equal(
Nx.argmax(batch_labels, axis: :output),
Nx.argmax(predict({w1, b1, w2, b2, w3, b3}, batch_images), axis: :output)
)
|> Nx.as_type({:s, 8})
)
end
defn loss({w1, b1, w2, b2, w3, b3}, batch_images, batch_labels) do
preds = predict({w1, b1, w2, b2, w3, b3}, batch_images)
-Nx.sum(Nx.mean(Nx.log(preds) * batch_labels, axes: [:output]))
end
defn compute_gradient({_, _, _, _, _, _} = params, batch_images, batch_labels) do
grad(params, &loss(&1, batch_images, batch_labels))
end
defn update({w1, b1, w2, b2, w3, b3} = _params, batch_grad, step) do
{grad_w1, grad_b1, grad_w2, grad_b2, grad_w3, grad_b3} = batch_grad
{
w1 - grad_w1 * step,
b1 - grad_b1 * step,
w2 - grad_w2 * step,
b2 - grad_b2 * step,
w3 - grad_w3 * step,
b3 - grad_b3 * step
}
end
defn update_with_averages(
{_, _, _, _, _, _} = cur_params,
batch_grad,
batch_loss,
batch_acc,
avg_loss,
avg_accuracy,
total
) do
avg_loss = avg_loss + batch_loss / total
avg_accuracy = avg_accuracy + batch_acc / total
{update(cur_params, batch_grad, 0.01), avg_loss, avg_accuracy}
end
defn update_with_averages(
{_, _, _, _, _, _} = cur_params,
imgs,
tar,
avg_loss,
avg_accuracy,
total
) do
batch_loss = loss(cur_params, imgs, tar)
batch_accuracy = accuracy(cur_params, imgs, tar)
avg_loss = avg_loss + batch_loss / total
avg_accuracy = avg_accuracy + batch_accuracy / total
batch_grad = compute_gradient(cur_params, imgs, tar)
{update(cur_params, batch_grad, 0.01), avg_loss, avg_accuracy}
end
def train_epoch(cur_params, x, labels, Nx.BinaryBackend, n_jobs) when n_jobs >= 1 do
total_batches = Enum.count(x)
x
|> Enum.zip(labels)
|> Enum.reduce({cur_params, Nx.tensor(0.0), Nx.tensor(0.0)}, fn
{x, tar}, {cur_params, avg_loss, avg_accuracy} ->
[n_samples|_] = Nx.shape(x) |> Tuple.to_list()
split_len =
n_samples
|> :erlang.div(n_jobs)
|> round()
x_splits = Nx.to_batched_list(x, split_len)
tar_splits = Nx.to_batched_list(tar, split_len)
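# Fan the splits out to concurrent tasks (data parallelism over the batch):
# each task computes gradients, loss, and accuracy for its split, and the
# results are summed/averaged back together below.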
[[first_grad, first_loss, first_acc]|rest_splits] =
Enum.zip(x_splits, tar_splits)
|> Enum.map(fn {imgs, labels} ->
Task.async(fn ->
Nx.default_backend(Nx.BinaryBackend)
split_loss = loss(cur_params, imgs, labels)
split_acc = accuracy(cur_params, imgs, labels)
[compute_gradient(cur_params, imgs, labels), split_loss, split_acc]
end)
end)
|> Enum.map(&Task.await(&1, :infinity))
[batch_grad, batch_loss, batch_acc] =
rest_splits
|> Enum.reduce([Tuple.to_list(first_grad), first_loss, first_acc],
fn [grad, loss, acc], [acc_grad, acc_loss, acc_acc] ->
acc_grad =
grad
|> Tuple.to_list()
|> Enum.zip(acc_grad)
|> Enum.map(fn {current, total} -> Nx.add(total, current) end)
[acc_grad, Nx.add(loss, acc_loss), Nx.add(acc, acc_acc)]
end)
batch_loss = Nx.divide(batch_loss, n_jobs)
batch_acc = Nx.divide(batch_acc, n_jobs)
update_with_averages(cur_params, List.to_tuple(batch_grad), batch_loss, batch_acc, avg_loss, avg_accuracy, total_batches)
end)
end
def train_epoch(cur_params, x, labels, _backend, _n_jobs) do
total_batches = Enum.count(x)
x
|> Enum.zip(labels)
|> Enum.reduce({cur_params, Nx.tensor(0.0), Nx.tensor(0.0)}, fn
{x, tar}, {cur_params, avg_loss, avg_accuracy} ->
update_with_averages(cur_params, x, tar, avg_loss, avg_accuracy, total_batches)
end)
end
def train(x, labels, params, opts \\ []) do
epochs = opts[:epochs] || 5
n_jobs = opts[:n_jobs] || 1
backend = opts[:backend] || Nx.BinaryBackend
for epoch <- 1..epochs, reduce: {params, [], [], []} do
{cur_params, history_acc, history_loss, history_time} ->
{time, {new_params, epoch_avg_loss, epoch_avg_acc}} =
:timer.tc(__MODULE__, :train_epoch, [cur_params, x, labels, backend, n_jobs])
epoch_avg_loss =
epoch_avg_loss
|> Nx.backend_transfer()
|> Nx.to_scalar()
epoch_avg_acc =
epoch_avg_acc
|> Nx.backend_transfer()
|> Nx.to_scalar()
epoch_time = time / 1_000_000
history_acc = history_acc ++ [epoch_avg_acc]
history_loss = history_loss ++ [epoch_avg_loss]
history_time = history_time ++ [epoch_time]
IO.puts("Epoch #{epoch} Time: #{Float.round(epoch_time, 3)}s, loss: #{Float.round(epoch_avg_loss, 3)}, acc: #{Float.round(epoch_avg_acc, 3)}")
{new_params, history_acc, history_loss, history_time}
end
end
end
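# A minimal usage sketch (illustrative only; assumes `train_images` and
# `train_labels` are lists of batched Nx tensors matching the 1024-input
# network defined above):
#
#   params = DenseNN.init_random_params()
#   {trained_params, acc_history, loss_history, time_history} =
#     DenseNN.train(train_images, train_labels, params, epochs: 5, n_jobs: 4)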
|
lib/densenn.ex
| 0.675978
| 0.593491
|
densenn.ex
|
starcoder
|
defmodule ExRabbitMQ.Config.Session do
@moduledoc """
A structure holding the necessary information about a queue that is to be consumed.
#### Queue configuration example:
```elixir
# :my_session_config is this session's configuration name
config :exrabbitmq, :my_session_config,
# name of the queue from which we wish to consume (optional, default: "")
queue: "my_queue",
# properties set on the queue when it is declared (optional, default: [])
queue_opts: [
durable: true
],
# the exchange name to declare and bind (optional, default: nil)
exchange: "my_exchange",
# the options to use when one wants to declare the exchange (optional, default: [])
exchange_opts: [
# the exchange type to declare (optional, default: :direct)
# this is an atom that can have one of the following values:
# :direct, :fanout, :topic or :headers
type: :fanout,
# other exchange declare options as documented in the Options paragraph of
# https://hexdocs.pm/amqp/AMQP.Exchange.html#declare/4, eg.:
durable: true,
auto_delete: true,
passive: false,
internal: false
]
# the options to use when binding the queue to the exchange (optional, default: [])
bind_opts: [
routing_key: "my_routing_key",
nowait: false,
arguments: []
],
# the options to use for specifying QoS properties on a channel (optional, default: [])
qos_opts: [
prefetch_size: 1,
prefetch_count: 1,
global: true
],
# properties set on the call to consume from the queue (optional, default: [])
consume_opts: [
no_ack: false
]
```
"""
alias ExRabbitMQ.Config.Bind, as: XRMQBindConfig
alias ExRabbitMQ.Config.Exchange, as: XRMQExchangeConfig
alias ExRabbitMQ.Config.Queue, as: XRMQQueueConfig
defstruct [:queue, :consume_opts, :qos_opts, :declarations]
@type t :: %__MODULE__{
queue: String.t(),
consume_opts: keyword,
qos_opts: keyword,
declarations: list
}
@doc """
Returns the part of the `app` configuration section identified by the
`session_config` argument, as an `ExRabbitMQ.Config.Session` struct.
If the `app` argument is omitted, it defaults to `:exrabbitmq`.
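## Example
A usage sketch, referring to the `:my_session_config` section shown in the module documentation:
```
session_config = ExRabbitMQ.Config.Session.get(:my_session_config)
```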
"""
@spec get(atom, atom | t()) :: t()
def get(app \\ :exrabbitmq, session_config) do
session_config
|> case do
session_config when is_atom(session_config) -> from_env(app, session_config)
_ -> session_config
end
|> merge_defaults()
end
def validate_bindings(%{bindings: bindings} = config) do
bindings
|> Enum.reduce_while(config, fn
binding, %XRMQBindConfig{exchange: nil} ->
raise ArgumentError, "invalid source exchange: #{inspect(binding)}"
_, _ ->
{:cont, config}
end)
end
defp from_env(app, key) do
config = Application.get_env(app, key, [])
%__MODULE__{
queue: config[:queue],
consume_opts: config[:consume_opts],
qos_opts: config[:qos_opts],
declarations: get_declarations(config[:declarations] || [])
}
end
# Merges an existing `ExRabbitMQ.Config.Session` struct the default values when these are `nil`.
defp merge_defaults(%__MODULE__{} = config) do
%__MODULE__{
queue: config.queue || "",
consume_opts: config.consume_opts || [],
qos_opts: config.qos_opts || [],
declarations: get_declarations(config.declarations || [])
}
end
defp get_declarations(declarations) do
declarations
|> Enum.map(fn
{:exchange, config} -> {:exchange, XRMQExchangeConfig.get(config)}
{:queue, config} -> {:queue, XRMQQueueConfig.get(config)}
declaration -> raise ArgumentError, "invalid declaration #{inspect(declaration)}"
end)
end
end
|
lib/ex_rabbit_m_q/config/session.ex
| 0.836388
| 0.753263
|
session.ex
|
starcoder
|
defmodule Zaryn.SharedSecrets.NodeRenewalScheduler do
@moduledoc """
Schedule the renewal of node shared secrets
At each `interval - trigger_offset`, a new node shared secrets transaction is created with
the new authorized nodes and broadcast to the validation nodes, to include
them as new authorized nodes and update the daily nonce seed.
The trigger offset determines how many seconds before the interval
the transaction is created and sent
(this offset can be tuned by the prediction module to ensure correctness despite network latencies).
For example, with a daily interval (00:00) and a 10 minute offset:
At 23:50 UTC, an elected node builds and sends the node shared secrets renewal transaction.
At 00:00 UTC, the receiving nodes apply the new node shared secrets for transaction mining.
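## Example configuration
A hypothetical configuration sketch matching the description above (the cron expressions include a leading seconds field):
```
config :zaryn, Zaryn.SharedSecrets.NodeRenewalScheduler,
interval: "0 50 23 * * *",
application_interval: "0 0 0 * * *"
```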
"""
alias Crontab.CronExpression.Parser, as: CronParser
alias Crontab.DateChecker, as: CronDateChecker
alias Crontab.Scheduler, as: CronScheduler
alias Zaryn
alias Zaryn.Crypto
alias Zaryn.P2P.Node
alias Zaryn.PubSub
alias Zaryn.SharedSecrets.NodeRenewal
alias Zaryn.Utils
require Logger
use GenServer
@doc """
Start the node renewal scheduler process. Scheduling does not begin until the node becomes an authorized node.
Options:
- interval: Cron-like interval at which the node renewal will occur
- trigger_offset: How many seconds before the interval the node renewal must be done and sent to all the nodes
"""
@spec start_link(
args :: [interval: binary()],
opts :: Keyword.t()
) ::
{:ok, pid()}
def start_link(args \\ [], opts \\ [name: __MODULE__]) do
GenServer.start_link(__MODULE__, args, opts)
end
@doc false
def init(opts) do
interval = Keyword.get(opts, :interval)
PubSub.register_to_node_update()
{:ok, %{interval: interval}, :hibernate}
end
def handle_info(
{:node_update, %Node{first_public_key: first_public_key, authorized?: true}},
state = %{interval: interval}
) do
if Crypto.first_node_public_key() == first_public_key do
Logger.info("Start node shared secrets scheduling")
timer = schedule_renewal_message(interval)
Logger.info(
"Node shared secrets will be renewed in #{Utils.remaining_seconds_from_timer(timer)}"
)
{:noreply, Map.put(state, :timer, timer), :hibernate}
else
{:noreply, state, :hibernate}
end
end
def handle_info(
{:node_update, %Node{first_public_key: first_public_key, authorized?: false}},
state
) do
with ^first_public_key <- Crypto.first_node_public_key(),
timer when timer != nil <- Map.get(state, :timer) do
Process.cancel_timer(timer)
{:noreply, Map.delete(state, :timer), :hibernate}
else
_ ->
{:noreply, state, :hibernate}
end
end
def handle_info(:make_renewal, state = %{interval: interval}) do
timer = schedule_renewal_message(interval)
Logger.info(
"Node shared secrets will be renewed in #{Utils.remaining_seconds_from_timer(timer)}"
)
if NodeRenewal.initiator?() do
Logger.info("Node shared secrets renewal creation...")
make_renewal()
end
{:noreply, Map.put(state, :timer, timer), :hibernate}
end
def handle_cast({:new_conf, conf}, state) do
case Keyword.get(conf, :interval) do
nil ->
{:noreply, state}
new_interval ->
{:noreply, Map.put(state, :interval, new_interval)}
end
end
defp make_renewal do
NodeRenewal.next_authorized_node_public_keys()
|> NodeRenewal.new_node_shared_secrets_transaction(
:crypto.strong_rand_bytes(32),
:crypto.strong_rand_bytes(32)
)
|> Zaryn.send_new_transaction()
Logger.info(
"Node shared secrets renewal transaction sent (#{Crypto.number_of_node_shared_secrets_keys()})"
)
end
defp schedule_renewal_message(interval) do
Process.send_after(self(), :make_renewal, Utils.time_offset(interval) * 1000)
end
def config_change(nil), do: :ok
def config_change(changed_config) do
GenServer.cast(__MODULE__, {:new_conf, changed_config})
end
@doc """
Get the next shared secrets application date from a given date
"""
@spec next_application_date(DateTime.t()) :: DateTime.t()
def next_application_date(date_from = %DateTime{}) do
if renewal_date?(date_from) do
get_application_date_interval()
|> CronParser.parse!(true)
|> CronScheduler.get_next_run_date!(DateTime.to_naive(date_from))
|> DateTime.from_naive!("Etc/UTC")
else
date_from
end
end
defp renewal_date?(date) do
get_trigger_interval()
|> CronParser.parse!(true)
|> CronDateChecker.matches_date?(DateTime.to_naive(date))
end
defp get_trigger_interval do
Application.get_env(:zaryn, __MODULE__) |> Keyword.fetch!(:interval)
end
defp get_application_date_interval do
Application.get_env(:zaryn, __MODULE__)
|> Keyword.fetch!(:application_interval)
end
end
|
lib/zaryn/shared_secrets/node_renewal_scheduler.ex
| 0.860105
| 0.551695
|
node_renewal_scheduler.ex
|
starcoder
|
defmodule AeppSDK.Channel.OffChain do
@moduledoc """
Module for Aeternity Off-chain channel activities, see: [https://github.com/aeternity/protocol/blob/master/channels/OFF-CHAIN.md](https://github.com/aeternity/protocol/blob/master/channels/OFF-CHAIN.md)
Contains Off-Chain channel-related functionality.
"""
alias AeppSDK.Utils.Serialization
@update_vsn 1
@updates_version 1
@no_updates_version 2
@meta_fields_template [data: :binary]
@transfer_fields_template [from: :id, to: :id, amount: :int]
@deposit_fields_template [from: :id, amount: :int]
@withdraw_fields_template [to: :id, amount: :int]
@create_contract_fields_template [
owner: :id,
ct_version: :int,
code: :binary,
deposit: :int,
call_data: :binary
]
@call_contract_fields_template [
caller: :id,
contract: :id,
abi_version: :int,
amount: :int,
gas: :int,
gas_price: :int,
call_data: :binary,
call_stack: [:int]
]
@doc """
Creates new off-chain transactions, supporting updates, with given parameters.
## Example
iex> channel = "ch_11111111111111111111111111111111273Yts"
iex> state_hash = "st_11111111111111111111111111111111273Yts"
iex> AeppSDK.Channel.OffChain.new(channel, 1, state_hash, 1, [])
%{
channel_id: "ch_11111111111111111111111111111111273Yts",
round: 1,
state_hash: "st_11111111111111111111111111111111273Yts",
updates: [],
version: 1
}
"""
@spec new(String.t(), integer(), String.t(), integer(), list()) :: map()
def new(
<<"ch_", _channel_id::binary>> = channel_id,
round,
<<"st_", _state_hash::binary>> = encoded_state_hash,
@updates_version,
updates
)
when is_list(updates) do
%{
channel_id: channel_id,
round: round,
state_hash: encoded_state_hash,
version: @updates_version,
updates: serialize_updates(updates)
}
end
@doc """
Creates new off-chain transactions, without supporting updates, with given parameters.
## Example
iex> channel = "ch_11111111111111111111111111111111273Yts"
iex> state_hash = "st_11111111111111111111111111111111273Yts"
iex> AeppSDK.Channel.OffChain.new(channel, 1, state_hash, 2)
%{
channel_id: "ch_11111111111111111111111111111111273Yts",
round: 1,
state_hash: "st_11111111111111111111111111111111273Yts",
version: 2
}
"""
@spec new(String.t(), integer(), String.t(), integer()) :: map()
def new(
<<"ch_", _channel_id::binary>> = channel_id,
round,
<<"st_", _state_hash::binary>> = encoded_state_hash,
@no_updates_version
) do
%{
channel_id: channel_id,
round: round,
version: @no_updates_version,
state_hash: encoded_state_hash
}
end
@doc """
Serializes off-chain transactions, supports both updates and no-updates versions.
## Example
iex> channel = "ch_11111111111111111111111111111111273Yts"
iex> state_hash = "st_11111111111111111111111111111111273Yts"
iex> channel_off_chain_tx = AeppSDK.Channel.OffChain.new(channel, 1, state_hash, 2)
iex> AeppSDK.Channel.OffChain.serialize_tx(channel_off_chain_tx)
<<248,70,57,2,161,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
160,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0>>
"""
@spec serialize_tx(map()) :: binary()
def serialize_tx(
%{
channel_id: _channel_id,
round: _round,
state_hash: _state_hash,
version: _version
} = offchain_tx
) do
Serialization.serialize(offchain_tx)
end
@doc """
Serializes off-chain updates.
## Example
iex> update = %{type: :transfer, from: {:id, :account, <<0::256>>}, to: {:id, :account, <<0::256>>}, amount: 100}
iex> AeppSDK.Channel.OffChain.serialize_updates(update)
[<<248,73,130,2,58,1,161,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
161,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,100>>]
"""
@spec serialize_updates(list()) :: list(binary())
def serialize_updates(update) when is_list(update) do
serialize_update(update, [])
end
def serialize_updates(%{type: _} = valid_update_struct) do
serialize_update([valid_update_struct], [])
end
defp serialize_update([], acc) do
acc
end
defp serialize_update(
[
%{
type: :transfer,
from: {:id, _, _from_id},
to: {:id, _, _to_id},
amount: _amount
} = update
| updates
],
acc
) do
template = @transfer_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(
:channel_offchain_update_transfer,
@update_vsn,
template,
fields
)
serialize_update(updates, [serialized_update | acc])
end
defp serialize_update(
[
%{type: :withdraw, to: {:id, _, _account_id}, amount: _amount} = update
| updates
],
acc
) do
template = @withdraw_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(
:channel_offchain_update_withdraw,
@update_vsn,
template,
fields
)
serialize_update(updates, [serialized_update | acc])
end
defp serialize_update(
[
%{type: :deposit, from: {:id, _, _caller_id}, amount: _amount} = update
| updates
],
acc
) do
template = @deposit_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(
:channel_offchain_update_deposit,
@update_vsn,
template,
fields
)
serialize_update(updates, [serialized_update | acc])
end
defp serialize_update(
[
%{
type: :create_contract,
owner: {:id, _, _owner_id},
ct_version: _ct_version,
code: _code,
deposit: _deposit,
call_data: _call_data
} = update
| updates
],
acc
) do
template = @create_contract_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(
:channel_offchain_update_create_contract,
@update_vsn,
template,
fields
)
serialize_update(updates, [serialized_update | acc])
end
defp serialize_update(
[
%{
type: :call_contract,
caller: {:id, _, _caller_id},
contract: {:id, _, _contract_id},
abi_version: _abi_version,
amount: _amount,
call_data: _call_data,
call_stack: _call_stack,
gas_price: _gas_price,
gas: _gas
} = update
| updates
],
acc
) do
template = @call_contract_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(
:channel_offchain_update_call_contract,
@update_vsn,
template,
fields
)
serialize_update(updates, [serialized_update | acc])
end
defp serialize_update([%{type: :meta} = update | updates], acc) do
template = @meta_fields_template
fields = prepare_fields(update, template)
serialized_update =
:aeser_chain_objects.serialize(:channel_offchain_update_meta, @update_vsn, template, fields)
serialize_update(updates, [serialized_update | acc])
end
defp prepare_fields(update, template) do
for {field, _type} <- template do
{field, Map.get(update, field)}
end
end
end
|
lib/core/channel/offchain.ex
| 0.839454
| 0.47658
|
offchain.ex
|
starcoder
|
defmodule Arrow.Disruption.DayOfWeek do
@moduledoc """
The day of the week that a disruption takes place.
"""
use Ecto.Schema
import Ecto.Changeset
@type t :: %__MODULE__{
day_name: String.t() | nil,
start_time: Time.t() | nil,
end_time: Time.t() | nil,
disruption_revision: Arrow.DisruptionRevision | Ecto.Association.NotLoaded.t(),
inserted_at: DateTime.t() | nil,
updated_at: DateTime.t() | nil
}
schema "disruption_day_of_weeks" do
field(:day_name, :string)
field(:start_time, :time)
field(:end_time, :time)
belongs_to(:disruption_revision, Arrow.DisruptionRevision)
timestamps(type: :utc_datetime)
end
@doc false
@spec changeset(t(), map()) :: Ecto.Changeset.t()
def changeset(day_of_week, attrs) do
day_of_week
|> cast(attrs, [:day_name, :start_time, :end_time])
|> validate_inclusion(:day_name, [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday"
])
|> unique_constraint(:day_name, name: "unique_disruption_weekday")
|> validate_start_time_before_end_time()
end
@spec validate_start_time_before_end_time(Ecto.Changeset.t(t())) :: Ecto.Changeset.t(t())
defp validate_start_time_before_end_time(changeset) do
start_time = get_field(changeset, :start_time)
end_time = get_field(changeset, :end_time)
cond do
is_nil(start_time) or is_nil(end_time) ->
changeset
Time.compare(start_time, end_time) != :lt ->
add_error(changeset, :days_of_week, "start time should be before end time")
true ->
changeset
end
end
@spec day_number(t()) :: 1 | 2 | 3 | 4 | 5 | 6 | 7
def day_number(%{day_name: "monday"}), do: 1
def day_number(%{day_name: "tuesday"}), do: 2
def day_number(%{day_name: "wednesday"}), do: 3
def day_number(%{day_name: "thursday"}), do: 4
def day_number(%{day_name: "friday"}), do: 5
def day_number(%{day_name: "saturday"}), do: 6
def day_number(%{day_name: "sunday"}), do: 7
@spec day_name(1 | 2 | 3 | 4 | 5 | 6 | 7) :: String.t()
def day_name(1), do: "monday"
def day_name(2), do: "tuesday"
def day_name(3), do: "wednesday"
def day_name(4), do: "thursday"
def day_name(5), do: "friday"
def day_name(6), do: "saturday"
def day_name(7), do: "sunday"
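@doc """
Converts a `Date` to its lowercase English day name.
## Example
iex> Arrow.Disruption.DayOfWeek.date_to_day_name(~D[2021-01-04])
"monday"
"""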
@spec date_to_day_name(Date.t()) :: String.t()
def date_to_day_name(date), do: date |> Date.day_of_week() |> day_name()
end
|
lib/arrow/disruption/day_of_week.ex
| 0.806777
| 0.42316
|
day_of_week.ex
|
starcoder
|
defmodule Conform.Schema.Mapping do
@moduledoc """
This module defines a struct which represents a mapping definition.
## Definitions
- "path" - A string of dot-separated tokens which represent the path to a setting,
a path can contain variables which represent wildcard values such that any value at
that point in the path is valid.
## Fields
- `name` is the name of the setting in the .conf, it should be a path as defined above
- `to` is the name of the setting in the sys.config, it should be a path as defined above
- `datatype` is the type of the value this setting will be mapped to, see the documentation
for information on what datatypes are available. User-defined types are also possible.
- `default` the default value to use if one is not provided in the .conf
- `env_var` if set, will use the value of the given environment variable as the default value for this option
- `doc` the documentation comment which will accompany this setting in the generated .conf
- `see` the name of another setting which you wrote a `doc` for, but which also describes this
setting. Used when describing how multiple settings work together, as it's just a pointer to
additional documentation. It will be output as a comment in the generated .conf
- `commented` is a boolean which determines whether this setting should be commented by default
in the generated .conf
- `hidden` is a boolean which determines whether this setting will be placed in the generated .conf,
in this way you can provide "advanced" settings which can be configured, but only by those who know
they exist.
- `include_default` if set to a string, it will be used in place of a wildcard value when generating a .conf,
if nil, the generated .conf will contain a commented default which contains the wildcard in the path.
- `validators` a list of validator names which will be executed against the value of this setting once it has
been mapped to the defined datatype.
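## Example
A hypothetical mapping entry, as it could appear in a schema file:
```
"myapp.port": [
to: "myapp.http.port",
datatype: :integer,
default: 8080,
doc: "The port the HTTP listener binds to"
]
```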
"""
alias Conform.Schema.Mapping
defstruct name: "",
to: nil,
datatype: :binary,
default: nil,
env_var: nil,
doc: "",
see: "",
commented: false,
hidden: false,
include_default: nil,
validators: [],
persist: true
def from_quoted({name, mapping}) when is_list(mapping) do
case Keyword.keyword?(mapping) do
false -> raise Conform.Schema.SchemaError, message: "Invalid mapping for #{name}: `#{inspect(mapping)}`."
true ->
do_from(mapping, %Mapping{name: Atom.to_string(name)})
end
end
defp do_from([{:to, to}|rest], mapping) when is_binary(to), do: do_from(rest, %{mapping | :to => to})
defp do_from([{:datatype, dt}|rest], mapping), do: do_from(rest, %{mapping | :datatype => dt})
defp do_from([{:default, default}|rest], mapping), do: do_from(rest, %{mapping | :default => default})
defp do_from([{:env_var, env_var}|rest], mapping), do: do_from(rest, %{mapping | :env_var => env_var})
defp do_from([{:doc, doc}|rest], mapping) when is_binary(doc), do: do_from(rest, %{mapping | :doc => doc})
defp do_from([{:see, see}|rest], mapping) when is_binary(see), do: do_from(rest, %{mapping | :see => see})
defp do_from([{:hidden, h}|rest], mapping) when is_boolean(h), do: do_from(rest, %{mapping | :hidden => h})
defp do_from([{:commented, c}|rest], mapping) when is_boolean(c), do: do_from(rest, %{mapping | :commented => c})
defp do_from([{:include_default, default}|rest], mapping) when is_binary(default),
do: do_from(rest, %{mapping | :include_default => default})
defp do_from([{:validators, vs}|rest], mapping) when is_list(vs), do: do_from(rest, %{mapping | :validators => vs})
defp do_from([_|rest], mapping), do: do_from(rest, mapping)
defp do_from([], mapping), do: mapping
end
|
lib/conform/schema/mapping.ex
| 0.722429
| 0.663594
|
mapping.ex
|
starcoder
|
defmodule Trash.Repo do
@moduledoc """
Provides functions for discarding and keeping records and querying for them
via `Ecto.Repo` functions.
"""
require Ecto.Query
alias Ecto.Query
alias Ecto.Queryable
alias Ecto.Changeset
alias Trash.Query, as: TrashQuery
@doc """
Imports functions from `Trash.Repo`.
It's not required to `use` this module in order to use `Trash`. Doing so
will import shorthand functions into your app's `Repo` module with the repo
implicitly passed. It's a bit more convenient, but the functions are public
on `Trash.Repo`, so if preferred they can be called directly.
```
# Shorthand with `use`
MyRepo.all_discarded(Post)
# Long form without
Trash.Repo.all_discarded(Post, [], MyRepo)
```
## Options
- `repo` - A module reference to an `Ecto.Repo`; raises `ArgumentError` if
missing
## Examples
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
use Trash.Repo, repo: __MODULE__
end
"""
# credo:disable-for-this-file Credo.Check.Refactor.LongQuoteBlocks
# credo:disable-for-this-file Credo.Check.Consistency.ParameterPatternMatching
@spec __using__(opts :: list()) :: Macro.t()
defmacro __using__(opts) do
quote do
import unquote(__MODULE__)
@__trash_repo__ unquote(compile_config(opts))
@doc """
Fetches all entries matching the given query that have been discarded.
## Examples
iex> MyRepo.all_discarded(Post)
[%Post{
title: "<NAME>",
discarded_at: %DateTime{},
discarded?: nil
}]
"""
@spec all_discarded(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: [Ecto.Schema.t()]
def all_discarded(queryable, opts \\ []) do
all_discarded(queryable, opts, @__trash_repo__)
end
@doc """
Fetches all entries matching the given query that have been kept.
## Examples
iex> MyRepo.all_kept(Post)
[%Post{title: "Hello World", discarded_at: nil, discarded?: nil}]
"""
@spec all_kept(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: [Ecto.Schema.t()]
def all_kept(queryable, opts \\ []) do
all_kept(queryable, opts, @__trash_repo__)
end
@doc """
Updates a record as discarded.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a
struct is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to
`DateTime.utc_now/1`. It calls `repo.update/2` to finalize the changes.
It returns `{:ok, struct}` if the struct has been successfully updated
or `{:error, changeset}` if there was an error.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> MyRepo.discard()
{:ok,
%Post{title: "[Archived] Hello, world", discarded_at: %DateTime{}}}
"""
@spec discard(changeset_or_schema :: Changeset.t() | Ecto.Schema.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Changeset.t()}
def discard(changeset = %Changeset{}) do
discard(changeset, @__trash_repo__)
end
def discard(%{__struct__: _} = struct) do
discard(struct, @__trash_repo__)
end
@doc """
Updates a record as discarded.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to
`DateTime.utc_now/1`. It calls `repo.update/2` to finalize the changes.
Raises `Ecto.InvalidChangesetError` if the changeset is invalid.
Note: since an `Ecto.Schema` struct can be passed which generates a bare
changeset, this will never raise when given a struct.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> MyRepo.discard!
%Post{title: "[Archived] Hello, world", discarded_at: %DateTime{}}
iex> Post.changeset(post, %{}) |> MyRepo.discard!
** (Ecto.InvalidChangesetError)
"""
@spec discard!(changeset_or_schema :: Changeset.t() | Ecto.Schema.t()) ::
Ecto.Schema.t()
def discard!(changeset = %Changeset{}) do
discard!(changeset, @__trash_repo__)
end
def discard!(%{__struct__: _} = struct) do
discard!(struct, @__trash_repo__)
end
@doc """
Checks if there exists an entry that matches the given query that has been
discarded.
Returns a boolean.
## Examples
iex> MyRepo.discarded?(post)
true
"""
@spec discarded?(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: boolean
def discarded?(queryable, opts \\ []) do
discarded?(queryable, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result where the primary key matches the given
`id`.
Returns `nil` if no result was found.
## Examples
iex> MyRepo.get_discarded(Post)
%Post{
title: "<NAME>",
discarded_at: %DateTime{},
discarded?: nil
}
"""
@spec get_discarded(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_discarded(queryable, id, opts \\ []) do
get_discarded(queryable, id, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result where the primary key matches the given
`id`.
Raises `Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.get_discarded!(Post, 1)
%Post{
title: "<NAME>",
discarded_at: %DateTime{},
discarded?: nil
}
iex> MyRepo.get_discarded!(Post, 2)
** (Ecto.NoResultsError)
"""
@spec get_discarded!(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_discarded!(queryable, id, opts \\ []) do
get_discarded!(queryable, id, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result from the query.
Returns `nil` if no result was found or raises
`Ecto.MultipleResultsError` if more than one entry.
## Examples
iex> MyRepo.get_discarded_by(Post, [title: "<NAME>"])
%Post{title: "Hello World", discarded_at: %DateTime{}}
"""
@spec get_discarded_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_discarded_by(queryable, clauses, opts \\ []) do
get_discarded_by(queryable, clauses, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.get_discarded_by!(Post, [title: "<NAME>"])
%Post{title: "<NAME>", discarded_at: %DateTime{}}
iex> MyRepo.get_discarded_by!(Post, [title: "Unwritten"])
** (Ecto.NoResultsError)
"""
@spec get_discarded_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t()
def get_discarded_by!(queryable, clauses, opts \\ []) do
get_discarded_by!(queryable, clauses, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result where the primary key matches the given `id`.
Returns `nil` if no result was found.
## Examples
iex> MyRepo.get_kept(Post, 1)
%Post{title: "Hello World", discarded_at: nil}
"""
@spec get_kept(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_kept(queryable, id, opts \\ []) do
get_kept(queryable, id, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result where the primary key matches the given `id`.
Raises `Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.get_kept!(Post, 1)
%Post{title: "Hello World", discarded_at: nil}
iex> MyRepo.get_kept!(Post, 2)
** (Ecto.NoResultsError)
"""
@spec get_kept!(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_kept!(queryable, id, opts \\ []) do
get_kept!(queryable, id, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result from the query.
Returns `nil` if no result was found or raises
`Ecto.MultipleResultsError` if more than one entry.
## Examples
iex> MyRepo.get_kept_by(Post, title: "Hello World")
%Post{title: "Hello World", discarded_at: nil}
"""
@spec get_kept_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def get_kept_by(queryable, clauses, opts \\ []) do
get_kept_by(queryable, clauses, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.get_kept_by!(Post, title: "<NAME>")
%Post{title: "<NAME>", discarded_at: nil}
iex> MyRepo.get_kept_by!(Post, title: "Not Written")
** (Ecto.NoResultsError)
"""
@spec get_kept_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t()
) :: Ecto.Schema.t()
def get_kept_by!(queryable, clauses, opts \\ []) do
get_kept_by!(queryable, clauses, opts, @__trash_repo__)
end
@doc """
Checks if there exists an entry that matches the given query that has been
kept.
Returns a boolean.
## Examples
iex> MyRepo.kept?(post)
true
"""
@spec kept?(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: boolean
def kept?(queryable, opts \\ []) do
kept?(queryable, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result from the query.
Returns `nil` if no result was found or raises
`Ecto.MultipleResultsError` if more than one entry.
## Examples
iex> MyRepo.one_discarded(Post)
%Post{title: "<NAME>", discarded_at: %DateTime{}}
"""
@spec one_discarded(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def one_discarded(queryable, opts \\ []) do
one_discarded(queryable, opts, @__trash_repo__)
end
@doc """
Fetches a single discarded result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.one_discarded!(Post)
%Post{title: "<NAME>", discarded_at: %DateTime{}}
iex> MyRepo.one_discarded!(Post)
** (Ecto.NoResultsError)
"""
@spec one_discarded!(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
def one_discarded!(queryable, opts \\ []) do
one_discarded!(queryable, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result from the query.
Returns `nil` if no result was found or raises
`Ecto.MultipleResultsError` if more than one entry.
## Examples
iex> MyRepo.one_kept(Post)
%Post{title: "<NAME>", discarded_at: nil}
"""
@spec one_kept(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t() | nil
def one_kept(queryable, opts \\ []) do
one_kept(queryable, opts, @__trash_repo__)
end
@doc """
Fetches a single kept result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> MyRepo.one_kept!(Post)
%Post{title: "<NAME>", discarded_at: nil}
iex> MyRepo.one_kept!(Post)
** (Ecto.NoResultsError)
"""
@spec one_kept!(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t()
) :: Ecto.Schema.t()
def one_kept!(queryable, opts \\ []) do
one_kept!(queryable, opts, @__trash_repo__)
end
@doc """
Updates a record as kept.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to `nil`. It calls
`repo.update/2` to finalize the changes.
It returns `{:ok, struct}` if the struct has been successfully updated or
`{:error, changeset}` if there was an error.
## Examples
iex> Post.changeset(post, %{title: "Hello, world"})
|> MyRepo.restore()
{:ok, %Post{title: "Hello, world", discarded_at: nil}}
"""
@spec restore(changeset_or_schema :: Changeset.t() | Ecto.Schema.t()) ::
{:ok, Ecto.Schema.t()} | {:error, Changeset.t()}
def restore(changeset = %Changeset{}) do
restore(changeset, @__trash_repo__)
end
def restore(%{__struct__: _} = struct) do
restore(struct, @__trash_repo__)
end
@doc """
Updates a record as kept.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to `nil`. It calls
`repo.update/2` to finalize the changes.
Raises `Ecto.InvalidChangesetError` if the changeset is invalid.
Note: since an `Ecto.Schema` struct can be passed which generates a bare
changeset, this will never raise when given a struct.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> MyRepo.restore!()
%Post{title: "[Archived] Hello, world", discarded_at: nil}
iex> Post.changeset(post, %{}) |> MyRepo.restore!()
** (Ecto.InvalidChangesetError)
"""
@spec restore!(changeset_or_schema :: Changeset.t() | Ecto.Schema.t()) ::
Ecto.Schema.t()
def restore!(changeset = %Changeset{}) do
restore!(changeset, @__trash_repo__)
end
def restore!(%{__struct__: _} = struct) do
restore!(struct, @__trash_repo__)
end
end
end
@doc """
Fetches all entries matching the given query that have been discarded.
## Examples
iex> Trash.Repo.all_discarded(Post, [], MyApp.Repo)
[%Post{title: "Hello World", discarded_at: %DateTime{}, discarded?: nil}]
"""
@spec all_discarded(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: [Ecto.Schema.t()]
def all_discarded(queryable, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.all(opts)
end
@doc """
Fetches all entries matching the given query that have been kept.
## Examples
iex> Trash.Repo.all_kept(Post, [], MyApp.Repo)
[%Post{title: "Hello World", discarded_at: nil, discarded?: nil}]
"""
@spec all_kept(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: [Ecto.Schema.t()]
def all_kept(queryable, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.all(opts)
end
@doc """
Updates a record as discarded.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to
`DateTime.utc_now/1`. It calls `repo.update/2` to finalize the changes.
It returns `{:ok, struct}` if the struct has been successfully updated or
`{:error, changeset}` if there was an error.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> Trash.Repo.discard(MyApp.Repo)
{:ok, %Post{title: "[Archived] Hello, world", discarded_at: %DateTime{}}}
"""
@spec discard(
changeset_or_schema :: Changeset.t() | Ecto.Schema.t(),
repo :: atom
) :: {:ok, Ecto.Schema.t()} | {:error, Changeset.t()}
def discard(changeset = %Changeset{}, repo) do
changeset
|> Changeset.put_change(
:discarded_at,
DateTime.truncate(DateTime.utc_now(), :second)
)
|> repo.update()
end
def discard(%{__struct__: _} = struct, repo) do
struct
|> Changeset.change()
|> discard(repo)
end
@doc """
Updates a record as discarded.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to
`DateTime.utc_now/1`. It calls `repo.update/2` to finalize the changes.
Raises `Ecto.InvalidChangesetError` if the changeset is invalid.
Note: since an `Ecto.Schema` struct can be passed which generates a bare
changeset, this will never raise when given a struct.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> Trash.Repo.discard!(MyApp.Repo)
%Post{title: "[Archived] Hello, world", discarded_at: %DateTime{}}
iex> Post.changeset(post, %{}) |> Trash.Repo.discard!(MyApp.Repo)
** (Ecto.InvalidChangesetError)
"""
@spec discard!(
changeset_or_schema :: Changeset.t() | Ecto.Schema.t(),
repo :: atom
) :: Ecto.Schema.t()
def discard!(changeset = %Changeset{}, repo) do
case discard(changeset, repo) do
{:ok, struct} ->
struct
{:error, changeset} ->
raise Ecto.InvalidChangesetError,
action: :discard,
changeset: changeset
end
end
def discard!(%{__struct__: _} = struct, repo) do
{:ok, struct} = discard(struct, repo)
struct
end
@doc """
Checks if there exists an entry that matches the given query that has been
discarded.
Returns a boolean.
## Examples
iex> Trash.Repo.discarded?(post, [], MyApp.Repo)
true
"""
@spec discarded?(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: boolean
def discarded?(queryable, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.exists?(opts)
end
@doc """
Fetches a single discarded result where the primary key matches the given
`id`.
Returns `nil` if no result was found.
## Examples
iex> Trash.Repo.get_discarded(Post, 1, [], MyApp.Repo)
%Post{}
"""
@spec get_discarded(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_discarded(queryable, id, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.get(id, opts)
end
@doc """
Fetches a single discarded result where the primary key matches the given
`id`.
Raises `Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.get_discarded!(Post, 1, [], MyApp.Repo)
%Post{}
iex> Trash.Repo.get_discarded!(Post, 2, [], MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec get_discarded!(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_discarded!(queryable, id, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.get!(id, opts)
end
@doc """
Fetches a single discarded result from the query.
Returns `nil` if no result was found or raises `Ecto.MultipleResultsError` if
more than one entry.
## Examples
iex> Trash.Repo.get_discarded_by(Post, [title: "Hello World"], [],
MyApp.Repo)
%Post{title: "Hello World", discarded_at: %DateTime{}}
"""
@spec get_discarded_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_discarded_by(queryable, clauses, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.get_by(clauses, opts)
end
@doc """
Fetches a single discarded result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.get_discarded_by!(Post, [title: "<NAME>"], [],
MyApp.Repo)
%Post{title: "<NAME>", discarded_at: %DateTime{}}
iex> Trash.Repo.get_discarded_by!(Post, [title: "<NAME>"], [],
MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec get_discarded_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t()
def get_discarded_by!(queryable, clauses, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.get_by!(clauses, opts)
end
@doc """
Fetches a single kept result where the primary key matches the given `id`.
Returns `nil` if no result was found.
## Examples
iex> Trash.Repo.get_kept(Post, 1, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
"""
@spec get_kept(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_kept(queryable, id, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.get(id, opts)
end
@doc """
Fetches a single kept result where the primary key matches the given `id`.
Raises `Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.get_kept!(Post, 1, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
iex> Trash.Repo.get_kept!(Post, 2, [], MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec get_kept!(
queryable :: Ecto.Queryable.t(),
id :: term(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_kept!(queryable, id, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.get!(id, opts)
end
@doc """
Fetches a single kept result from the query.
Returns `nil` if no result was found or raises `Ecto.MultipleResultsError` if
more than one entry.
## Examples
iex> Trash.Repo.get_kept_by(Post, [title: "<NAME>"], [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
"""
@spec get_kept_by(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def get_kept_by(queryable, clauses, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.get_by(clauses, opts)
end
@doc """
Fetches a single kept result from the query.
Raises `Ecto.MultipleResultsError` if more than one result. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.get_kept_by!(Post, [title: "<NAME>"], [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
iex> Trash.Repo.get_kept_by!(Post, [title: "Not Written"], [], MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec get_kept_by!(
queryable :: Ecto.Queryable.t(),
clauses :: Keyword.t() | map,
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t()
def get_kept_by!(queryable, clauses, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.get_by!(clauses, opts)
end
@doc """
Checks if there exists an entry that matches the given query that has been
kept.
Returns a boolean.
## Examples
iex> Trash.Repo.kept?(post, [], MyApp.Repo)
true
"""
@spec kept?(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: boolean
def kept?(queryable, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.exists?(opts)
end
@doc """
Fetches a single discarded result from the query.
Returns `nil` if no result was found or raises `Ecto.MultipleResultsError` if
more than one entry.
## Examples
iex> Trash.Repo.one_discarded(Post, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: %DateTime{}}
"""
@spec one_discarded(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def one_discarded(queryable, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.one(opts)
end
@doc """
Fetches a single discarded result from the query.
Raises `Ecto.MultipleResultsError` if more than one entry. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.one_discarded!(Post, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: %DateTime{}}
iex> Trash.Repo.one_discarded!(Post, [], MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec one_discarded!(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t()
def one_discarded!(queryable, opts \\ [], repo) do
queryable
|> discarded_queryable()
|> repo.one!(opts)
end
@doc """
Fetches a single kept result from the query.
Returns `nil` if no result was found or raises `Ecto.MultipleResultsError` if
more than one entry.
## Examples
iex> Trash.Repo.one_kept(Post, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
"""
@spec one_kept(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t() | nil
def one_kept(queryable, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.one(opts)
end
@doc """
Fetches a single kept result from the query.
Raises `Ecto.MultipleResultsError` if more than one entry. Raises
`Ecto.NoResultsError` if no result was found.
## Examples
iex> Trash.Repo.one_kept!(Post, [], MyApp.Repo)
%Post{title: "<NAME>", discarded_at: nil}
iex> Trash.Repo.one_kept!(Post, [], MyApp.Repo)
** (Ecto.NoResultsError)
"""
@spec one_kept!(
queryable :: Ecto.Queryable.t(),
opts :: Keyword.t(),
repo :: atom
) :: Ecto.Schema.t()
def one_kept!(queryable, opts \\ [], repo) do
queryable
|> kept_queryable()
|> repo.one!(opts)
end
@doc """
Updates a record as kept.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to `nil`. It calls
`repo.update/2` to finalize the changes.
It returns `{:ok, struct}` if the struct has been successfully updated or
`{:error, changeset}` if there was an error.
## Examples
iex> Post.changeset(post, %{title: "Hello, world"})
|> Trash.Repo.restore(MyApp.Repo)
{:ok, %Post{title: "Hello, world", discarded_at: nil}}
"""
@spec restore(
changeset_or_schema :: Changeset.t() | Ecto.Schema.t(),
repo :: atom
) :: {:ok, Ecto.Schema.t()} | {:error, Changeset.t()}
def restore(changeset = %Changeset{}, repo) do
changeset
|> Changeset.put_change(:discarded_at, nil)
|> repo.update()
end
def restore(%{__struct__: _} = struct, repo) do
struct
|> Changeset.change()
|> restore(repo)
end
@doc """
Updates a record as kept.
This takes either an `Ecto.Changeset` or an `Ecto.Schema` struct. If a struct
is given a bare changeset is generated first.
A change is added to the changeset to set `discarded_at` to `nil`. It calls
`repo.update/2` to finalize the changes.
Raises `Ecto.InvalidChangesetError` if the changeset is invalid.
Note: since an `Ecto.Schema` struct can be passed which generates a bare
changeset, this will never raise when given a struct.
## Examples
iex> Post.changeset(post, %{title: "[Archived] Hello, world"})
|> Trash.Repo.restore!(MyApp.Repo)
%Post{title: "[Archived] Hello, world", discarded_at: nil}
iex> Post.changeset(post, %{}) |> Trash.Repo.restore!(MyApp.Repo)
** (Ecto.InvalidChangesetError)
"""
@spec restore!(
changeset_or_schema :: Changeset.t() | Ecto.Schema.t(),
repo :: atom
) :: Ecto.Schema.t()
def restore!(changeset = %Changeset{}, repo) do
case restore(changeset, repo) do
{:ok, struct} ->
struct
{:error, changeset} ->
raise Ecto.InvalidChangesetError,
action: :restore,
changeset: changeset
end
end
def restore!(%{__struct__: _} = struct, repo) do
{:ok, struct} = restore(struct, repo)
struct
end
defp compile_config(opts) do
case Keyword.fetch(opts, :repo) do
{:ok, value} ->
value
:error ->
raise ArgumentError, "missing :repo option on use Trash.Repo"
end
end
defp discarded_queryable(queryable) do
queryable
|> Queryable.to_query()
|> Query.from()
|> TrashQuery.where_discarded()
end
defp kept_queryable(queryable) do
queryable
|> Queryable.to_query()
|> Query.from()
|> TrashQuery.where_kept()
end
end
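# A minimal usage sketch (assuming a `Post` schema with a `discarded_at`
# field and an app repo named `MyApp.Repo`): fetch a discarded record and
# bring it back with `restore/2`.
#
#     post = Trash.Repo.one_discarded!(Post, [], MyApp.Repo)
#     {:ok, post} = Trash.Repo.restore(post, MyApp.Repo)
#     post.discarded_at
#     #=> nil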
|
lib/trash/repo.ex
| 0.911333
| 0.646321
|
repo.ex
|
starcoder
|
defmodule Exexif.Decode do
@moduledoc """
Decode tags and (in some cases) their parameters
"""
def tag(:tiff, 0x0100, value), do: {:image_width, value}
def tag(:tiff, 0x0101, value), do: {:image_height, value}
def tag(:tiff, 0x010d, value), do: {:document_name, value}
def tag(:tiff, 0x010e, value), do: {:image_description, value}
def tag(:tiff, 0x010f, value), do: {:make, value}
def tag(:tiff, 0x0110, value), do: {:model, value}
def tag(:tiff, 0x0112, value), do: {:orientation, orientation(value)}
def tag(:tiff, 0x011a, value), do: {:x_resolution, value}
def tag(:tiff, 0x011b, value), do: {:y_resolution, value}
def tag(:tiff, 0x0128, value), do: {:resolution_units, resolution(value)}
def tag(:tiff, 0x0131, value), do: {:software, value}
def tag(:tiff, 0x0132, value), do: {:modify_date, inspect(value)}
def tag(:tiff, 0x8769, value), do: {:exif, value}
def tag(:tiff, 0x8825, value), do: {:gps, value}
def tag(:exif, 0x0201, value), do: {:thumbnail_offset, value}
def tag(:exif, 0x0202, value), do: {:thumbnail_size, value}
def tag(_, 0x829a, value), do: {:exposure_time, value}
def tag(_, 0x829d, value), do: {:f_number, value}
def tag(_, 0x8822, value), do: {:exposure_program, exposure_program(value)}
def tag(_, 0x8824, value), do: {:spectral_sensitivity, value}
def tag(_, 0x8827, value), do: {:iso_speed_ratings, value}
def tag(_, 0x8828, value), do: {:oecf, value}
def tag(_, 0x8830, value), do: {:sensitivity_type, sensitivity_type(value)}
def tag(_, 0x8831, value), do: {:standard_output_sensitivity, value}
def tag(_, 0x8832, value), do: {:recommended_exposure, value}
def tag(_, 0x9000, value), do: {:exif_version, version(value)}
def tag(_, 0x9003, value), do: {:datetime_original, value}
def tag(_, 0x9004, value), do: {:datetime_digitized, value}
def tag(_, 0x9101, value), do: {:component_configuration, component_configuration(value)}
def tag(_, 0x9102, value), do: {:compressed_bits_per_pixel, value}
def tag(_, 0x9201, value), do: {:shutter_speed_value, value}
def tag(_, 0x9202, value), do: {:aperture_value, value}
def tag(_, 0x9203, value), do: {:brightness_value, value}
def tag(_, 0x9204, value), do: {:exposure_bias_value, value}
def tag(_, 0x9205, value), do: {:max_aperture_value, value}
def tag(_, 0x9206, value), do: {:subject_distance, value}
def tag(_, 0x9207, value), do: {:metering_mode, metering_mode(value)}
def tag(_, 0x9208, value), do: {:light_source, value}
def tag(_, 0x9209, value), do: {:flash, flash(value)}
def tag(_, 0x920a, value), do: {:focal_length, value}
def tag(_, 0x9214, value), do: {:subject_area, value}
def tag(_, 0x927c, value), do: {:maker_note, value}
def tag(_, 0x9286, value), do: {:user_comment, value}
def tag(_, 0x9290, value), do: {:subsec_time, value}
def tag(_, 0x9291, value), do: {:subsec_time_original, value}
def tag(_, 0x9292, value), do: {:subsec_time_digitized, value}
def tag(_, 0xa000, value), do: {:flash_pix_version, version(value)}
def tag(_, 0xa001, value), do: {:color_space, color_space(value)}
def tag(_, 0xa002, value), do: {:exif_image_width, value}
def tag(_, 0xa003, value), do: {:exif_image_height, value}
def tag(_, 0xa004, value), do: {:related_sound_file, value}
def tag(_, 0xa20b, value), do: {:flash_energy, value}
def tag(_, 0xa20c, value), do: {:spatial_frequency_response, value}
def tag(_, 0xa20e, value), do: {:focal_plane_x_resolution, value}
def tag(_, 0xa20f, value), do: {:focal_plane_y_resolution, value}
def tag(_, 0xa210, value), do: {:focal_plane_resolution_unit, focal_plane_resolution_unit(value)}
def tag(_, 0xa214, value), do: {:subject_location, value}
def tag(_, 0xa215, value), do: {:exposure_index, value}
def tag(_, 0xa217, value), do: {:sensing_method, sensing_method(value)}
def tag(_, 0xa300, value), do: {:file_source, file_source(value)}
def tag(_, 0xa301, value), do: {:scene_type, scene_type(value)}
def tag(_, 0xa302, value), do: {:cfa_pattern, value}
def tag(_, 0xa401, value), do: {:custom_rendered, custom_rendered(value)}
def tag(_, 0xa402, value), do: {:exposure_mode, exposure_mode(value)}
def tag(_, 0xa403, value), do: {:white_balance, white_balance(value)}
def tag(_, 0xa404, value), do: {:digital_zoom_ratio, value}
def tag(_, 0xa405, value), do: {:focal_length_in_35mm_film, value}
def tag(_, 0xa406, value), do: {:scene_capture_type, scene_capture_type(value)}
def tag(_, 0xa407, value), do: {:gain_control, gain_control(value)}
def tag(_, 0xa408, value), do: {:contrast, contrast(value)}
def tag(_, 0xa409, value), do: {:saturation, saturation(value)}
def tag(_, 0xa40a, value), do: {:sharpness, sharpness(value)}
def tag(_, 0xa40b, value), do: {:device_setting_description, value}
def tag(_, 0xa40c, value), do: {:subject_distance_range, subject_distance_range(value)}
def tag(_, 0xa420, value), do: {:image_unique_id, value}
def tag(_, 0xa432, value), do: {:lens_info, value}
def tag(_, 0xa433, value), do: {:lens_make, value}
def tag(_, 0xa434, value), do: {:lens_model, value}
def tag(_, 0xa435, value), do: {:lens_serial_number, value}
# http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/GPS.html
Exexif.Data.Gps.fields
|> Enum.with_index
|> Enum.each(fn {e, i} ->
def tag(:gps, unquote(i), value), do: {unquote(e), value}
end)
def tag(type, tag, value) do
  {~s[#{type} tag(0x#{:io_lib.format("~.16B", [tag])})], inspect(value)}
end
# Value decodes
defp orientation(1), do: "Horizontal (normal)"
defp orientation(2), do: "Mirror horizontal"
defp orientation(3), do: "Rotate 180"
defp orientation(4), do: "Mirror vertical"
defp orientation(5), do: "Mirror horizontal and rotate 270 CW"
defp orientation(6), do: "Rotate 90 CW"
defp orientation(7), do: "Mirror horizontal and rotate 90 CW"
defp orientation(8), do: "Rotate 270 CW"
defp resolution(1), do: "None"
defp resolution(2), do: "Pixels/in"
defp resolution(3), do: "Pixels/cm"
defp exposure_program(0), do: "Unknown"
defp exposure_program(1), do: "Manual"
defp exposure_program(2), do: "Program AE"
defp exposure_program(3), do: "Aperture-priority AE"
defp exposure_program(4), do: "Shutter speed priority AE"
defp exposure_program(5), do: "Creative (Slow speed)"
defp exposure_program(6), do: "Action (High speed)"
defp exposure_program(7), do: "Portrait"
defp exposure_program(8), do: "Landscape"
defp exposure_program(9), do: "Bulb"
defp sensitivity_type(0), do: "Unknown"
defp sensitivity_type(1), do: "Standard Output Sensitivity"
defp sensitivity_type(2), do: "Recommended Exposure Index"
defp sensitivity_type(3), do: "ISO Speed"
defp sensitivity_type(4), do: "Standard Output Sensitivity and Recommended Exposure Index"
defp sensitivity_type(5), do: "Standard Output Sensitivity and ISO Speed"
defp sensitivity_type(6), do: "Recommended Exposure Index and ISO Speed"
defp sensitivity_type(7), do: "Standard Output Sensitivity, Recommended Exposure Index and ISO Speed"
@comp_conf {"-", "Y", "Cb", "Cr", "R", "G", "B"}
defp component_configuration(list) do
  Enum.map_join(list, ",", &elem(@comp_conf, &1))
end
defp metering_mode(0), do: "Unknown"
defp metering_mode(1), do: "Average"
defp metering_mode(2), do: "Center-weighted average"
defp metering_mode(3), do: "Spot"
defp metering_mode(4), do: "Multi-spot"
defp metering_mode(5), do: "Multi-segment"
defp metering_mode(6), do: "Partial"
defp metering_mode(_), do: "Other"
defp color_space(0x1), do: "sRGB"
defp color_space(0x2), do: "Adobe RGB"
defp color_space(0xfffd), do: "Wide Gamut RGB"
defp color_space(0xfffe), do: "ICC Profile"
defp color_space(0xffff), do: "Uncalibrated"
defp focal_plane_resolution_unit(1), do: "None"
defp focal_plane_resolution_unit(2), do: "inches"
defp focal_plane_resolution_unit(3), do: "cm"
defp focal_plane_resolution_unit(4), do: "mm"
defp focal_plane_resolution_unit(5), do: "um"
defp sensing_method(1), do: "Not defined"
defp sensing_method(2), do: "One-chip color area"
defp sensing_method(3), do: "Two-chip color area"
defp sensing_method(4), do: "Three-chip color area"
defp sensing_method(5), do: "Color sequential area"
defp sensing_method(7), do: "Trilinear"
defp sensing_method(8), do: "Color sequential linear"
defp file_source(1), do: "Film Scanner"
defp file_source(2), do: "Reflection Print Scanner"
defp file_source(3), do: "Digital Camera"
defp file_source(0x03000000), do: "Sigma Digital Camera"
defp custom_rendered(0), do: "Normal"
defp custom_rendered(1), do: "Custom"
defp custom_rendered(v) when is_number(v), do: "Unknown (#{v})"
defp scene_type(1), do: "Directly photographed"
defp exposure_mode(0), do: "Auto"
defp exposure_mode(1), do: "Manual"
defp exposure_mode(2), do: "Auto bracket"
defp white_balance(0), do: "Auto"
defp white_balance(1), do: "Manual"
defp scene_capture_type(0), do: "Standard"
defp scene_capture_type(1), do: "Landscape"
defp scene_capture_type(2), do: "Portrait"
defp scene_capture_type(3), do: "Night"
defp gain_control(0), do: "None"
defp gain_control(1), do: "Low gain up"
defp gain_control(2), do: "High gain up"
defp gain_control(3), do: "Low gain down"
defp gain_control(4), do: "High gain down"
defp contrast(0), do: "Normal"
defp contrast(1), do: "Low"
defp contrast(2), do: "High"
defp saturation(0), do: "Normal"
defp saturation(1), do: "Low"
defp saturation(2), do: "High"
defp sharpness(0), do: "Normal"
defp sharpness(1), do: "Soft"
defp sharpness(2), do: "Hard"
defp subject_distance_range(0), do: "Unknown"
defp subject_distance_range(1), do: "Macro"
defp subject_distance_range(2), do: "Close"
defp subject_distance_range(3), do: "Distant"
defp flash(0x0), do: "No Flash"
defp flash(0x1), do: "Fired"
defp flash(0x5), do: "Fired, Return not detected"
defp flash(0x7), do: "Fired, Return detected"
defp flash(0x8), do: "On, Did not fire"
defp flash(0x9), do: "On, Fired"
defp flash(0xd), do: "On, Return not detected"
defp flash(0xf), do: "On, Return detected"
defp flash(0x10), do: "Off, Did not fire"
defp flash(0x14), do: "Off, Did not fire, Return not detected"
defp flash(0x18), do: "Auto, Did not fire"
defp flash(0x19), do: "Auto, Fired"
defp flash(0x1d), do: "Auto, Fired, Return not detected"
defp flash(0x1f), do: "Auto, Fired, Return detected"
defp flash(0x20), do: "No flash function"
defp flash(0x30), do: "Off, No flash function"
defp flash(0x41), do: "Fired, Red-eye reduction"
defp flash(0x45), do: "Fired, Red-eye reduction, Return not detected"
defp flash(0x47), do: "Fired, Red-eye reduction, Return detected"
defp flash(0x49), do: "On, Red-eye reduction"
defp flash(0x4d), do: "On, Red-eye reduction, Return not detected"
defp flash(0x4f), do: "On, Red-eye reduction, Return detected"
defp flash(0x50), do: "Off, Red-eye reduction"
defp flash(0x58), do: "Auto, Did not fire, Red-eye reduction"
defp flash(0x59), do: "Auto, Fired, Red-eye reduction"
defp flash(0x5d), do: "Auto, Fired, Red-eye reduction, Return not detected"
defp flash(0x5f), do: "Auto, Fired, Red-eye reduction, Return detected"
defp version([ ?0, major, minor1, minor2 ]) do
<< major, ?., minor1, minor2 >>
end
defp version([ major1, major2, minor1, minor2 ]) do
<< major1, major2, ?., minor1, minor2 >>
end
end
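# Decoding sketch: known tags resolve to a named atom plus a decoded value,
# while unknown tags fall through to the hex-formatted catch-all clause.
#
#     Exexif.Decode.tag(:tiff, 0x0112, 1)
#     #=> {:orientation, "Horizontal (normal)"}
#
#     Exexif.Decode.tag(:tiff, 0xBEEF, 42)
#     #=> {"tiff tag(0xBEEF)", "42"}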
|
lib/exexif/decode.ex
| 0.773259
| 0.695312
|
decode.ex
|
starcoder
|
defmodule Bolt.Sips.Internals.PackStream.EncoderV1 do
@moduledoc false
alias Bolt.Sips.Internals.PackStream.Encoder
use Bolt.Sips.Internals.PackStream.Markers
@int8 -127..-17
@int16_low -32_768..-129
@int16_high 128..32_767
@int32_low -2_147_483_648..-32_769
@int32_high 32_768..2_147_483_647
@int64_low -9_223_372_036_854_775_808..-2_147_483_649
@int64_high 2_147_483_648..9_223_372_036_854_775_807
@doc """
Encode an atom into Bolt binary format.
Encoding:
`Marker`
with
| Value | Marker |
| ------- | -------- |
| nil | `0xC0` |
| false | `0xC2` |
| true | `0xC3` |
Other atoms are converted to string before encoding.
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_atom(nil, 1)
<<0xC0>>
iex> EncoderV1.encode_atom(true, 1)
<<0xC3>>
iex> EncoderV1.encode_atom(:guten_tag, 1)
<<0x89, 0x67, 0x75, 0x74, 0x65, 0x6E, 0x5F, 0x74, 0x61, 0x67>>
"""
@spec encode_atom(atom(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_atom(nil, _bolt_version), do: <<@null_marker>>
def encode_atom(true, _bolt_version), do: <<@true_marker>>
def encode_atom(false, _bolt_version), do: <<@false_marker>>
def encode_atom(other, bolt_version) do
other |> Atom.to_string() |> encode_string(bolt_version)
end
@doc """
Encode a string into Bolt binary format.
Encoding:
`Marker` `Size` `Content`
with
| Marker | Size | Max data size |
|--------|------|---------------|
| `0x80`..`0x8F` | None (contained in marker) | 15 bytes |
| `0xD0` | 8-bit integer | 255 bytes |
| `0xD1` | 16-bit integer | 65_535 bytes |
| `0xD2` | 32-bit integer | 4_294_967_295 bytes |
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_string("guten tag", 1)
<<0x89, 0x67, 0x75, 0x74, 0x65, 0x6E, 0x20, 0x74, 0x61, 0x67>>
"""
@spec encode_string(String.t(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_string(string, _bolt_version) when byte_size(string) <= 15 do
<<@tiny_bitstring_marker::4, byte_size(string)::4>> <> string
end
def encode_string(string, _bolt_version) when byte_size(string) <= 255 do
<<@bitstring8_marker, byte_size(string)::8>> <> string
end
def encode_string(string, _bolt_version) when byte_size(string) <= 65_535 do
<<@bitstring16_marker, byte_size(string)::16>> <> string
end
def encode_string(string, _bolt_version) when byte_size(string) <= 4_294_967_295 do
<<@bitstring32_marker, byte_size(string)::32>> <> string
end
@doc """
Encode an integer into Bolt binary format.
Encoding:
`Marker` `Value`
with
| | Marker |
|---|--------|
| tiny int | the value itself, no marker (e.g. `0x2A` for 42) |
| int8 | `0xC8` |
| int16 | `0xC9` |
| int32 | `0xCA` |
| int64 | `0xCB` |
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_integer(74, 1)
<<0x4A>>
iex> EncoderV1.encode_integer(-74_789, 1)
<<0xCA, 0xFF, 0xFE, 0xDB, 0xDB>>
"""
@spec encode_integer(integer(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_integer(integer, _bolt_version) when integer in -16..127 do
<<integer>>
end
def encode_integer(integer, _bolt_version) when integer in @int8 do
<<@int8_marker, integer>>
end
def encode_integer(integer, _bolt_version)
when integer in @int16_low or integer in @int16_high do
<<@int16_marker, integer::16>>
end
def encode_integer(integer, _bolt_version)
when integer in @int32_low or integer in @int32_high do
<<@int32_marker, integer::32>>
end
def encode_integer(integer, _bolt_version)
when integer in @int64_low or integer in @int64_high do
<<@int64_marker, integer::64>>
end
@doc """
Encode a float into Bolt binary format.
Encoding: `Marker` `8 byte Content`.
Marker: `0xC1`
Formated according to the IEEE 754 floating-point "double format" bit layout.
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_float(42.42, 1)
<<0xC1, 0x40, 0x45, 0x35, 0xC2, 0x8F, 0x5C, 0x28, 0xF6>>
"""
@spec encode_float(float(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_float(number, _bolt_version), do: <<@float_marker, number::float>>
@doc """
Encode a list into Bolt binary format.
Encoding:
`Marker` `Size` `Content`
with
| Marker | Size | Max list size |
|--------|------|---------------|
| `0x90`..`0x9F` | None (contained in marker) | 15 items |
| `0xD4` | 8-bit integer | 255 items |
| `0xD5` | 16-bit integer | 65_535 items |
| `0xD6` | 32-bit integer | 4_294_967_295 items |
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_list(["hello", "world"], 1)
<<0x92, 0x85, 0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x85, 0x77, 0x6F, 0x72, 0x6C, 0x64>>
"""
@spec encode_list(list(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_list(list, bolt_version) when length(list) <= 15 do
<<@tiny_list_marker::4, length(list)::4>> <> encode_list_data(list, bolt_version)
end
def encode_list(list, bolt_version) when length(list) <= 255 do
<<@list8_marker, length(list)::8>> <> encode_list_data(list, bolt_version)
end
def encode_list(list, bolt_version) when length(list) <= 65_535 do
<<@list16_marker, length(list)::16>> <> encode_list_data(list, bolt_version)
end
def encode_list(list, bolt_version) when length(list) <= 4_294_967_295 do
<<@list32_marker, length(list)::32>> <> encode_list_data(list, bolt_version)
end
@spec encode_list_data(list(), integer()) :: binary()
defp encode_list_data(data, bolt_version) do
Enum.map_join(data, &Encoder.encode(&1, bolt_version))
end
@doc """
Encode a map into Bolt binary format.
Note that Elixir structs are converted to maps for encoding purposes.
Encoding:
`Marker` `Size` `Content`
with
| Marker | Size | Max map size |
|--------|------|---------------|
| `0xA0`..`0xAF` | None (contained in marker) | 15 entries |
| `0xD8` | 8-bit integer | 255 entries |
| `0xD9` | 16-bit integer | 65_535 entries |
| `0xDA` | 32-bit integer | 4_294_967_295 entries |
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_map(%{id: 345, value: "hello world"}, 1)
<<0xA2, 0x82, 0x69, 0x64, 0xC9, 0x1, 0x59, 0x85, 0x76, 0x61, 0x6C, 0x75,
0x65, 0x8B, 0x68, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64>>
"""
@spec encode_map(map(), integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_map(map, bolt_version) when map_size(map) <= 15 do
<<@tiny_map_marker::4, map_size(map)::4>> <> encode_kv(map, bolt_version)
end
def encode_map(map, bolt_version) when map_size(map) <= 255 do
<<@map8_marker, map_size(map)::8>> <> encode_kv(map, bolt_version)
end
def encode_map(map, bolt_version) when map_size(map) <= 65_535 do
<<@map16_marker, map_size(map)::16>> <> encode_kv(map, bolt_version)
end
def encode_map(map, bolt_version) when map_size(map) <= 4_294_967_295 do
<<@map32_marker, map_size(map)::32>> <> encode_kv(map, bolt_version)
end
@spec encode_kv(map(), integer()) :: binary()
defp encode_kv(map, bolt_version) do
Enum.reduce(map, <<>>, fn data, acc ->
acc <> do_reduce_kv(data, bolt_version)
end)
end
@spec do_reduce_kv({atom(), any()}, integer()) :: binary()
defp do_reduce_kv({key, value}, bolt_version) do
Encoder.encode(key, bolt_version) <> Encoder.encode(value, bolt_version)
end
@doc """
Encode a struct into Bolt binary format.
This concerns Bolt structs as defined in the Bolt PackStream specification.
Elixir structs are just converted to regular maps before encoding
Encoding:
`Marker` `Size` `Signature` `Content`
with
| Marker | Size | Max structure size |
|--------|------|---------------|
| `0xB0`..`0xBF` | None (contained in marker) | 15 fields |
| `0xDC` | 8-bit integer | 255 fields |
| `0xDD` | 16-bit integer | 65_535 fields |
## Example
iex> alias Bolt.Sips.Internals.PackStream.EncoderV1
iex> EncoderV1.encode_struct({0x01, ["two", "params"]}, 1)
<<0xB2, 0x1, 0x83, 0x74, 0x77, 0x6F, 0x86, 0x70, 0x61, 0x72, 0x61, 0x6D, 0x73>>
"""
@spec encode_struct({integer(), list()}, integer()) :: Bolt.Sips.Internals.PackStream.value()
def encode_struct({signature, list}, bolt_version) when length(list) <= 15 do
<<@tiny_struct_marker::4, length(list)::4, signature>> <> encode_list_data(list, bolt_version)
end
def encode_struct({signature, list}, bolt_version) when length(list) <= 255 do
<<@struct8_marker::8, length(list)::8, signature>> <> encode_list_data(list, bolt_version)
end
def encode_struct({signature, list}, bolt_version) when length(list) <= 65_535 do
<<@struct16_marker::8, length(list)::16, signature>> <> encode_list_data(list, bolt_version)
end
end
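# Size-based marker selection sketch: the same value type picks a different
# marker once it crosses a threshold, here a 15-byte vs. a 16-byte string.
#
#     EncoderV1.encode_string(String.duplicate("a", 15), 1)
#     #=> <<0x8F, "aaaaaaaaaaaaaaa">>        (tiny string, size in the marker)
#
#     EncoderV1.encode_string(String.duplicate("a", 16), 1)
#     #=> <<0xD0, 16, "aaaaaaaaaaaaaaaa">>   (string8, size as an 8-bit integer)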
|
lib/bolt_sips/internals/pack_stream/encoder_v1.ex
| 0.876191
| 0.492127
|
encoder_v1.ex
|
starcoder
|
defmodule AWS.QLDBSession do
@moduledoc """
The transactional data APIs for Amazon QLDB
Instead of interacting directly with this API, we recommend using the QLDB
driver or the QLDB shell to execute data transactions on a ledger.
If you are working with an AWS SDK, use the QLDB driver. The driver
provides a high-level abstraction layer above this *QLDB Session* data plane and
manages `SendCommand` API calls for you. For information and a list of supported
programming languages, see [Getting started with the driver](https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html)
in the *Amazon QLDB Developer Guide*.
If you are working with the AWS Command Line Interface (AWS CLI),
use the QLDB shell. The shell is a command line interface that uses the QLDB
driver to interact with a ledger. For information, see [Accessing Amazon QLDB using the QLDB
shell](https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "QLDB Session",
api_version: "2019-07-11",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "session.qldb",
global?: false,
protocol: "json",
service_id: "QLDB Session",
signature_version: "v4",
signing_name: "qldb",
target_prefix: "QLDBSession"
}
end
@doc """
Sends a command to an Amazon QLDB ledger.
Instead of interacting directly with this API, we recommend using the QLDB
driver or the QLDB shell to execute data transactions on a ledger.
If you are working with an AWS SDK, use the QLDB driver. The driver
provides a high-level abstraction layer above this *QLDB Session* data plane and
manages `SendCommand` API calls for you. For information and a list of supported
programming languages, see [Getting started with the driver](https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html)
in the *Amazon QLDB Developer Guide*.
If you are working with the AWS Command Line Interface (AWS CLI),
use the QLDB shell. The shell is a command line interface that uses the QLDB
driver to interact with a ledger. For information, see [Accessing Amazon QLDB using the QLDB
shell](https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html).
"""
def send_command(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "SendCommand", input, options)
end
end
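# Usage sketch (credentials, region, and ledger name below are placeholders):
# `StartSession` is one of the command types accepted by `SendCommand`.
#
#     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
#     input = %{"StartSession" => %{"LedgerName" => "my-ledger"}}
#     AWS.QLDBSession.send_command(client, input)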
|
lib/aws/generated/qldb_session.ex
| 0.868144
| 0.499329
|
qldb_session.ex
|
starcoder
|
defprotocol Realm.Monoid do
@moduledoc ~S"""
Monoid extends the semigroup with the concept of an "empty" or "zero" element.
## Type Class
An instance of `Realm.Monoid` must also implement `Realm.Semigroup`,
and define `Realm.Monoid.empty/1`.
Semigroup [append/2]
↓
Monoid [empty/1]
"""
@doc ~S"""
An "emptied out" or "starting position" of the passed data.
## Example
iex> Realm.Monoid.empty(10)
0
iex> Realm.Monoid.empty [1, 2, 3, 4, 5]
[]
"""
@spec empty(t()) :: t()
def empty(monoid)
end
defmodule Realm.Monoid.Algebra do
alias Realm.{Monoid, Functor}
def zero(monoid), do: Monoid.empty(monoid)
@doc """
Check if a value is the empty element of that type.
## Examples
iex> import Realm.Monoid.Algebra
...> empty?([])
true
iex> import Realm.Monoid.Algebra
...> empty?([1])
false
"""
@spec empty?(Monoid.t()) :: boolean
def empty?(monoid), do: Monoid.empty(monoid) == monoid
@doc ~S"""
`map` with its arguments flipped.
## Examples
iex> import Realm.Monoid.Algebra
...> across(fn x -> x + 1 end, [1, 2, 3])
[2, 3, 4]
iex> import Realm.Monoid.Algebra
...> fn
...> int when is_integer(int) -> int * 100
...> value -> inspect(value)
...> end
...> |> across(%{a: 2, b: [1, 2, 3]})
%{a: 200, b: "[1, 2, 3]"}
"""
@spec across((any() -> any()), Functor.t()) :: Functor.t()
def across(fun, functor), do: Functor.map(functor, fun)
end
defimpl Realm.Monoid, for: Function do
def empty(monoid) when is_function(monoid), do: &Quark.id/1
end
defimpl Realm.Monoid, for: Integer do
def empty(_), do: 0
end
defimpl Realm.Monoid, for: Float do
def empty(_), do: 0.0
end
defimpl Realm.Monoid, for: BitString do
def empty(_), do: ""
end
defimpl Realm.Monoid, for: List do
def empty(_), do: []
end
defimpl Realm.Monoid, for: Map do
def empty(_), do: %{}
end
defimpl Realm.Monoid, for: Tuple do
def empty(monoid), do: Realm.Functor.map(monoid, &Realm.Monoid.empty/1)
end
defimpl Realm.Monoid, for: MapSet do
def empty(_), do: MapSet.new()
end
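# A quick sketch of the instances above: `empty/1` inspects only the type of
# its argument, and `empty?/1` compares a value against that identity. The
# tuple line assumes the matching `Realm.Functor` instance for tuples maps
# over each element.
#
#     Realm.Monoid.empty(%{a: 1})                 #=> %{}
#     Realm.Monoid.empty({1, [2], "3"})           #=> {0, [], ""}
#     Realm.Monoid.Algebra.empty?(MapSet.new())   #=> true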
|
lib/realm/monoid.ex
| 0.839109
| 0.53048
|
monoid.ex
|
starcoder
|
defmodule Bonny.Server.Scheduler.Binding do
@moduledoc """
Kubernetes binding interface.
Currently [undocumented](https://github.com/kubernetes/kubernetes/issues/75749) in Kubernetes docs.
## Links
* [Example using curl](https://gist.github.com/kelseyhightower/2349c9c645d32a3fcbe385082de74668)
* [Example using golang](https://banzaicloud.com/blog/k8s-custom-scheduler/)
"""
require Logger
@doc """
Returns a map representing a `Binding` kubernetes resource
## Example
iex> pod = %{"metadata" => %{"name" => "nginx", "namespace" => "default"}}
...> node = %{"metadata" => %{"name" => "kewl-node"}}
iex> Bonny.Server.Scheduler.Binding.new(pod, node)
%{"apiVersion" => "v1", "kind" => "Binding", "metadata" => %{"name" => "nginx", "namespace" => "default"}, "target" => %{"apiVersion" => "v1", "kind" => "Node", "name" => "kewl-node"}}
"""
@spec new(map(), map()) :: map()
def new(pod, node) do
pod_name = K8s.Resource.name(pod)
pod_namespace = K8s.Resource.namespace(pod)
node_name = K8s.Resource.name(node)
%{
"apiVersion" => "v1",
"kind" => "Binding",
"metadata" => %{
"name" => pod_name,
"namespace" => pod_namespace
},
"target" => %{
"apiVersion" => "v1",
"kind" => "Node",
"name" => node_name
}
}
end
@doc """
Creates the pod's /binding subresource through K8s.
"""
@spec create(K8s.Conn.t(), map(), map()) ::
{:ok, HTTPoison.Response.t()} | {:error, HTTPoison.Error.t()}
def create(conn, pod, node) do
binding = new(pod, node)
operation = K8s.Client.create(pod, binding)
metadata = %{operation: operation}
:telemetry.span([:scheduler, :binding], metadata, fn ->
case K8s.Client.run(conn, operation) do
{:ok, body} ->
Logger.debug("Schduler binding succeeded", metadata)
{{:ok, body}, metadata}
{:error, error} ->
metadata = Map.put(metadata, :error, error)
Logger.error("Schduler binding failed", metadata)
{{:error, error}, metadata}
end
end)
end
end
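# Observability sketch: `create/3` wraps the API call in `:telemetry.span/3`,
# so `[:scheduler, :binding, :start | :stop | :exception]` events can be
# consumed with a standard handler (the handler id below is arbitrary).
#
#     :telemetry.attach(
#       "log-binding-duration",
#       [:scheduler, :binding, :stop],
#       fn _event, %{duration: duration}, _metadata, _config ->
#         IO.inspect(duration, label: "binding duration (native units)")
#       end,
#       nil
#     )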
|
lib/bonny/server/scheduler/binding.ex
| 0.830422
| 0.485844
|
binding.ex
|
starcoder
|
defmodule JsonSerde do
@moduledoc """
A Json Serialization/Deserialization library that aims to create json documents from any
nested data structures and deserialize json documents back to same datastructure.
```elixir
iex(1)> map = %{"name" => "Joe", "age" => 21, "birthdate" => Date.new(1970, 1, 1) |> elem(1)}
%{"age" => 21, "birthdate" => ~D[1970-01-01], "name" => "Joe"}
iex(2)> {:ok, serialized} = JsonSerde.serialize(map)
{:ok,
"{\"age\":21,\"birthdate\":{\"__data_type__\":\"date\",\"value\":\"1970-01-01\"},\"name\":\"Joe\"}"}
iex(3)> JsonSerde.deserialize(serialized)
{:ok, %{"age" => 21, "birthdate" => ~D[1970-01-01], "name" => "Joe"}}
```
Custom structs can be marked with an alias, so the resulting json does not appear Elixir-specific
```elixir
iex(2)> defmodule CustomStruct do
...(2)> use JsonSerde, alias: "custom"
...(2)> defstruct [:name, :age]
...(2)> end
{:module, CustomStruct,
<<70, 79, 82, 49, 0, 0, 5, 240, 66, 69, 65, 77, 65, 116, 85, 56, 0, 0, 0, 189,
0, 0, 0, 18, 19, 69, 108, 105, 120, 105, 114, 46, 67, 117, 115, 116, 111,
109, 83, 116, 114, 117, 99, 116, 8, 95, 95, ...>>,
%CustomStruct{age: nil, name: nil}}
iex(3)> c = %CustomStruct{name: "eddie", age: 34}
%CustomStruct{age: 34, name: "eddie"}
iex(4)> {:ok, serialized} = JsonSerde.serialize(c)
{:ok, "{\"age\":34,\"name\":\"eddie\",\"__data_type__\":\"custom\"}"}
iex(5)> JsonSerde.deserialize(serialized)
{:ok, %CustomStruct{age: 34, name: "eddie"}}
```
### Supported Types
* Map
* List
* MapSet
* Tuple
* Atom
* Date
* DateTime
* NaiveDateTime
* Time
* Custom Structs
### Custom Structs
The type key included in all structs is defaulted to "__data_type__" but can be customized by:
config :json_serde, :type_key, "type"
* Note: This configuration is only read at compile time
"""
@typedoc "A Json representation of the given term"
@type serialized_term :: String.t()
defmacro __using__(opts) do
alias = Keyword.get(opts, :alias)
exclusions = Keyword.get(opts, :exclude, [])
module = __CALLER__.module
JsonSerde.Alias.setup_alias(module, alias)
quote do
def __json_serde_exclusions__() do
unquote(exclusions)
end
end
end
defmacro data_type_key() do
data_key = Application.get_env(:json_serde, :type_key, "__data_type__")
quote do
unquote(data_key)
end
end
@spec serialize(term) :: {:ok, serialized_term()} | {:error, term}
def serialize(term) do
with {:ok, output} <- JsonSerde.Serializer.serialize(term) do
Jason.encode(output)
end
end
@spec serialize!(term) :: serialized_term()
def serialize!(term) do
case serialize(term) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
@spec deserialize(serialized_term()) :: {:ok, term} | {:error, term}
def deserialize(serialized_term) when is_binary(serialized_term) do
with {:ok, decoded} <- Jason.decode(serialized_term) do
JsonSerde.Deserializer.deserialize(decoded, decoded)
end
end
@spec deserialize!(serialized_term()) :: term
def deserialize!(serialized_term) do
case deserialize(serialized_term) do
{:ok, value} -> value
{:error, reason} -> raise reason
end
end
end
defprotocol JsonSerde.Serializer do
@fallback_to_any true
@spec serialize(t) :: {:ok, map | list | binary | integer | float} | {:error, term}
def serialize(t)
end
defprotocol JsonSerde.Deserializer do
@fallback_to_any true
@spec deserialize(t, map | list | binary | integer | float) :: {:ok, term} | {:error, term}
def deserialize(t, serialized_term)
end
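# Round-trip sketch: built-in types from the supported list survive
# serialization intact (the exact JSON layout is an implementation detail of
# each type's serializer).
#
#     set = MapSet.new([1, 2, 3])
#     set |> JsonSerde.serialize!() |> JsonSerde.deserialize!() == set
#     #=> true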
|
lib/json_serde.ex
| 0.803637
| 0.67003
|
json_serde.ex
|
starcoder
|
defmodule RDF.Sigils do
@moduledoc """
Sigils for the most common types of RDF nodes.
"""
@doc ~S"""
Handles the sigil `~I` for IRIs.
It returns an `RDF.IRI` from the given string without interpolations and
without escape characters, except for the escaping of the closing sigil
character itself.
## Examples
iex> import RDF.Sigils
iex> ~I<http://example.com>
RDF.iri("http://example.com")
"""
defmacro sigil_I({:<<>>, _, [iri]}, []) when is_binary(iri) do
Macro.escape(RDF.iri!(iri))
end
@doc ~S"""
Handles the sigil `~i` for IRIs.
It returns an `RDF.IRI` from the given string as if it was a double quoted
string, unescaping characters and replacing interpolations.
## Examples
iex> import RDF.Sigils
iex> ~i<http://example.com/#{String.downcase("Foo")}>
RDF.iri("http://example.com/foo")
"""
defmacro sigil_i({:<<>>, _, [iri]}, []) when is_binary(iri) do
Macro.escape(RDF.iri!(iri))
end
defmacro sigil_i({:<<>>, line, pieces}, []) do
quote do
RDF.iri!(unquote({:<<>>, line, unescape_tokens(pieces)}))
end
end
@doc ~S"""
Handles the sigil `~B` for blank nodes.
It returns an `RDF.BlankNode` from the given string without interpolations
and without escape characters, except for the escaping of the closing sigil
character itself.
## Examples
iex> import RDF.Sigils
iex> ~B<foo>
RDF.bnode("foo")
"""
defmacro sigil_B({:<<>>, _, [bnode]}, []) when is_binary(bnode) do
Macro.escape(RDF.BlankNode.new(bnode))
end
@doc ~S"""
Handles the sigil `~b` for blank nodes.
It returns an `RDF.BlankNode` from the given string as if it was a double quoted
string, unescaping characters and replacing interpolations.
## Examples
iex> import RDF.Sigils
iex> ~b<foo#{String.downcase("Bar")}>
RDF.bnode("foobar")
"""
defmacro sigil_b({:<<>>, _, [bnode]}, []) when is_binary(bnode) do
Macro.escape(RDF.BlankNode.new(bnode))
end
defmacro sigil_b({:<<>>, line, pieces}, []) do
quote do
RDF.BlankNode.new(unquote({:<<>>, line, unescape_tokens(pieces)}))
end
end
@doc ~S"""
Handles the sigil `~L` for plain Literals.
It returns an `RDF.Literal` from the given string without interpolations and without escape characters, except for the escaping of the closing sigil character itself.
The sigil modifier can be used to specify a language tag.
Note: Languages with subtags are not supported.
## Examples
iex> import RDF.Sigils
iex> ~L"foo"
RDF.literal("foo")
iex> ~L"foo"en
RDF.literal("foo", language: "en")
"""
defmacro sigil_L(value, language)
defmacro sigil_L({:<<>>, _, [value]}, []) when is_binary(value) do
Macro.escape(RDF.XSD.String.new(value))
end
defmacro sigil_L({:<<>>, _, [value]}, language) when is_binary(value) do
Macro.escape(RDF.LangString.new(value, language: to_string(language)))
end
@doc ~S"""
Handles the sigil `~l` for blank nodes.
It returns an `RDF.Literal` from the given string as if it was a double quoted
string, unescaping characters and replacing interpolations.
## Examples
iex> import RDF.Sigils
iex> ~l"foo #{String.downcase("Bar")}"
RDF.literal("foo bar")
iex> ~l"foo #{String.downcase("Bar")}"en
RDF.literal("foo bar", language: "en")
"""
defmacro sigil_l(value, language)
defmacro sigil_l({:<<>>, _, [value]}, []) when is_binary(value) do
Macro.escape(RDF.XSD.String.new(value))
end
defmacro sigil_l({:<<>>, _, [value]}, language) when is_binary(value) do
Macro.escape(RDF.LangString.new(value, language: to_string(language)))
end
defmacro sigil_l({:<<>>, line, pieces}, []) do
quote do
RDF.XSD.String.new(unquote({:<<>>, line, unescape_tokens(pieces)}))
end
end
defmacro sigil_l({:<<>>, line, pieces}, language) do
quote do
RDF.LangString.new(unquote({:<<>>, line, unescape_tokens(pieces)}),
language: to_string(unquote(language))
)
end
end
defp unescape_tokens(tokens) do
for token <- tokens do
if is_binary(token), do: Macro.unescape_string(token), else: token
end
end
end
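# Combined sketch: importing the module makes all sigils available side by
# side, e.g. to build a simple triple of IRI, blank node, and literal terms.
#
#     import RDF.Sigils
#     {~I<http://example.com/S>, ~B<b1>, ~L"name"en}
#     #=> {RDF.iri("http://example.com/S"), RDF.bnode("b1"),
#     #    RDF.literal("name", language: "en")}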
|
lib/rdf/sigils.ex
| 0.84106
| 0.611643
|
sigils.ex
|
starcoder
|
defmodule Bunch.Math do
@moduledoc """
A bunch of math helper functions.
"""
@doc """
Applies `div/2` and `rem/2` to arguments and returns results as a tuple.
## Example
iex> #{inspect(__MODULE__)}.div_rem(10, 4)
{div(10, 4), rem(10, 4)}
"""
@spec div_rem(dividend :: non_neg_integer, divisor :: pos_integer) ::
{div :: non_neg_integer, rem :: non_neg_integer}
def div_rem(dividend, divisor) do
{div(dividend, divisor), rem(dividend, divisor)}
end
@doc """
Works like `div_rem/2` but allows to accumulate remainder.
Useful when an accumulation of division error is not acceptable, for example
when you need to produce chunks of data every second but need to make sure there
are 9 chunks per 4 seconds on average. You can calculate `div_rem(9, 4)`,
keep the remainder, pass it to subsequent calls and every fourth result will be
bigger than others.
## Example
iex> 1..10 |> Enum.map_reduce(0, fn _, err ->
...> #{inspect(__MODULE__)}.div_rem(9, 4, err)
...> end)
{[2, 2, 2, 3, 2, 2, 2, 3, 2, 2], 2}
"""
@spec div_rem(
dividend :: non_neg_integer,
divisor :: pos_integer,
accumulated_remainder :: non_neg_integer
) :: {div :: non_neg_integer, rem :: non_neg_integer}
def div_rem(dividend, divisor, accumulated_remainder) do
div_rem(accumulated_remainder + dividend, divisor)
end
@doc """
Returns the biggest multiple of `value` that is lower than or equal to `threshold`.
## Examples
iex> #{inspect(__MODULE__)}.max_multiple_lte(4, 10)
8
iex> #{inspect(__MODULE__)}.max_multiple_lte(2, 6)
6
"""
@spec max_multiple_lte(value :: pos_integer, threshold :: non_neg_integer) :: non_neg_integer
def max_multiple_lte(value, threshold) do
remainder = threshold |> rem(value)
threshold - remainder
end
@doc """
Returns the smallest multiple of `value` that is greater than or equal to `threshold`.
## Examples
iex> #{inspect(__MODULE__)}.min_multiple_gte(4, 10)
12
iex> #{inspect(__MODULE__)}.min_multiple_gte(2, 6)
6
"""
@spec min_multiple_gte(value :: pos_integer, threshold :: non_neg_integer) :: non_neg_integer
def min_multiple_gte(value, threshold) do
case threshold |> rem(value) do
0 -> threshold
remainder -> threshold + value - remainder
end
end
end
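# Alignment sketch: a common use for the multiple helpers is rounding a size
# to a block boundary (the 512-byte block and 4000-byte payload are arbitrary
# example values).
#
#     Bunch.Math.min_multiple_gte(512, 4000)   #=> 4096
#     Bunch.Math.max_multiple_lte(512, 4000)   #=> 3584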
|
lib/bunch/math.ex
| 0.90022
| 0.50061
|
math.ex
|
starcoder
|
defmodule Transmog.KeyPairs do
@moduledoc """
`Transmog.KeyPairs` is a struct which holds the information about a list of
`t:key_pair/0` and ensures that they are valid. A key pair is a mapping from
one path to another. For example, `{[:a], [:b]}` indicates that values found
under the key `:a` should be moved under the key `:b`.
You can create a new `Transmog.KeyPairs` struct manually by calling the
`new/1` and `new!/1` functions directly. This struct can be used in most of
the core functionality in this library.
This library uses `parse/1` and `parse!/1` under the hood to coerce your key
paths into a format that can be understood by this struct. These functions use
the `Transmog.Parser` protocol for the type that is provided for the key path.
## Examples
iex> key_pairs = [{[:identity, :first_name], [:user_details, :first_name]}]
iex> {:ok, %Transmog.KeyPairs{} = key_pairs} = Transmog.KeyPairs.new(key_pairs)
iex> key_pairs
%Transmog.KeyPairs{list: [{[:identity, :first_name], [:user_details, :first_name]}]}
iex> key_pairs = [{[:identity, :first_name], [:user_details, :first_name]}]
iex> Transmog.KeyPairs.new!(key_pairs)
%Transmog.KeyPairs{list: [{[:identity, :first_name], [:user_details, :first_name]}]}
If you do not provide correct key pairs when this struct is created then you
will receive an `t:invalid/0` error as a response instead.
## Examples
iex> key_pairs = [{nil, [:identity, :last_name]}]
iex> Transmog.KeyPairs.new(key_pairs)
{:error, :invalid_key_pairs}
iex> key_paths = [{nil, ":identity.:last_name"}]
iex> Transmog.KeyPairs.parse(key_paths)
{:error, :invalid_key_path} #=> Also possible to receive these errors
iex> key_pairs = [{nil, [:identity, :last_name]}]
iex> Transmog.KeyPairs.new!(key_pairs)
** (Transmog.InvalidKeyPairsError) key pairs are not valid ({nil, [:identity, :last_name]}, index 0)
iex> key_paths = [{nil, ":identity.:last_name"}]
iex> Transmog.KeyPairs.parse!(key_paths)
** (Transmog.InvalidKeyPathError) key path is not valid (nil)
If you know the shape of your data structures in advance then you should
pre-compile your `Transmog.KeyPairs` structs by calling `parse!/1` or `new!/1`
and saving the results somewhere where they can be reused.
`Transmog.format/2` and `Transmog.format!/2` optimize for the case where you
provide this struct directly.
"""
alias Transmog.InvalidKeyPairsError
alias Transmog.Parser
defstruct list: []
@typedoc """
`t:error/0` is the type for when creating a new `Transmog.Keypair` struct
fails due to either an invalid path or pair.
"""
@type error :: Parser.error() | invalid
@typedoc """
`t:invalid/0` is the type for when a key pair list is determined to not be
valid the struct is created using `new/1`.
"""
@type invalid :: {:error, :invalid_key_pairs}
@typedoc """
`t:key_pair/0` is the type for a single key pair that is part of the list of
key pairs that this struct holds.
"""
@type key_pair :: {list(term), list(term)}
@typedoc """
`t:t/0` is the type for the `Transmog.KeyPair` struct.
"""
@type t :: %__MODULE__{list: list(key_pair)}
@doc """
`find_match/2` takes a list of key pairs and a key and finds the pair in the
list which matches. If a match is found then the opposite key in the key
pair will be returned as the key that the value should map to.
## Examples
iex> key_pairs = %Transmog.KeyPairs{list: [{[:a], ["a"]}]}
iex> Transmog.KeyPairs.find_match(key_pairs, [:a])
["a"]
iex> key_pairs = %Transmog.KeyPairs{list: [{[:a], ["a"]}]}
iex> Transmog.KeyPairs.find_match(key_pairs, [:b])
[:b]
"""
@spec find_match(key_pairs :: t, key :: list(term)) :: list(term)
def find_match(%__MODULE__{list: list}, key) do
case Enum.find(list, &pair_matches?(key, &1)) do
nil -> key
{_, to} -> to
end
end
@doc """
`new/1` creates a new `Transmog.KeyPairs` struct. It enforces that the key
pairs are valid and have been previously parsed. If the key pairs are not
valid then an error will be returned.
## Examples
iex> key_pairs = [{[:a, :b], [:b, :a]}]
iex> {:ok, key_pairs} = Transmog.KeyPairs.new(key_pairs)
iex> key_pairs
%Transmog.KeyPairs{list: [{[:a, :b], [:b, :a]}]}
iex> key_pairs = [{nil, [:a, :b]}]
iex> Transmog.KeyPairs.new(key_pairs)
{:error, :invalid_key_pairs}
"""
@spec new(list :: list(key_pair)) :: {:ok, t} | invalid
def new(list) when is_list(list) do
key_pairs = %__MODULE__{list: list}
if valid?(key_pairs) do
{:ok, %{key_pairs | list: sort_list(list)}}
else
invalid_key_pairs()
end
end
def new(_), do: invalid_key_pairs()
@doc """
`new!/1` creates a new `Transmog.KeyPairs` struct. It delegates execution to
`new/1`. If the key pairs are not valid then an error is raised otherwise the
result is unwrapped and returned.
## Examples
iex> key_pairs = [{[:a, :b], [:b, :a]}]
iex> Transmog.KeyPairs.new!(key_pairs)
%Transmog.KeyPairs{list: [{[:a, :b], [:b, :a]}]}
iex> key_pairs = [{nil, [:a, :b]}]
iex> Transmog.KeyPairs.new!(key_pairs)
** (Transmog.InvalidKeyPairsError) key pairs are not valid ({nil, [:a, :b]}, index 0)
"""
@spec new!(list :: list(key_pair)) :: t
def new!(list) when is_list(list) do
# Validate the pairs one by one so that we can report on the error case
# specifically in the error message.
for {pair, index} <- Enum.with_index(list) do
if !pair_valid?(pair), do: raise(InvalidKeyPairsError.new(pair, index))
end
# We know the key pairs are valid because we validate them one by one above
list
|> new()
|> elem(1)
end
def new!(_), do: raise(InvalidKeyPairsError)
@doc """
`parse/1` takes a list of key paths and attempts to coerce them into valid
key pairs. If successful then a `Transmog.KeyPair` struct will be returned
with the key paths converted into the list format.
## Examples
iex> key_paths = [{"a.b", ":a.:b"}]
iex> {:ok, key_pairs} = Transmog.KeyPairs.parse(key_paths)
iex> key_pairs
%Transmog.KeyPairs{list: [{["a", "b"], [:a, :b]}]}
iex> key_paths = [{"", ":a"}, {"a.b", ":a.:b"}]
iex> Transmog.KeyPairs.parse(key_paths)
{:error, :invalid_key_path}
iex> key_paths = [{"a", ":a.:b"}]
iex> Transmog.KeyPairs.parse(key_paths)
{:error, :invalid_key_pairs}
"""
@spec parse(list :: list(Transmog.key_paths())) :: {:ok, t} | error
def parse(list) when is_list(list) do
list =
list
|> Enum.reduce({:ok, []}, fn
{_, _} = pair, {:ok, list} when is_list(list) ->
case do_parse(pair) do
{:ok, {left, right} = pair} when is_list(left) and is_list(right) ->
{:ok, list ++ [pair]}
{:error, {reason, _}} ->
{:error, reason}
end
_, error ->
error
end)
with {:ok, list} when is_list(list) <- list, do: new(list)
end
@doc """
`parse!/1` takes a list of key paths are coerces them into valid key pairs.
If the key pairs or any path are not valid then an error will be raised.
Otherwise the result is unwrapped and returned.
## Examples
iex> key_paths = [{"a.b", ":a.:b"}]
iex> Transmog.KeyPairs.parse!(key_paths)
%Transmog.KeyPairs{list: [{["a", "b"], [:a, :b]}]}
iex> key_paths = [{"", ":a"}, {"a.b", ":a.:b"}]
iex> Transmog.KeyPairs.parse!(key_paths)
** (Transmog.InvalidKeyPathError) key path is not valid (\"\")
iex> key_paths = [{"a", ":a.:b"}]
iex> Transmog.KeyPairs.parse!(key_paths)
** (Transmog.InvalidKeyPairsError) key pairs are not valid ({[\"a\"], [:a, :b]}, index 0)
"""
@spec parse!(list :: list(Transmog.key_paths())) :: t
def parse!(list) do
list
|> Enum.reduce([], fn
{_, _} = pair, list when is_list(list) -> list ++ [do_parse!(pair)]
end)
|> new!()
end
@doc """
`reverse/1` takes a `Transmog.KeyPairs` and reverses each individual key pair
in the list of key pairs. This could be useful if you wanted to reverse a
format without having to define two separate key pairs manually.
## Examples
iex> key_paths = [{"a", "b"}]
iex> %Transmog.KeyPairs{} = key_pairs = Transmog.KeyPairs.parse!(key_paths)
iex> Transmog.KeyPairs.reverse(key_pairs)
%Transmog.KeyPairs{list: [{["b"], ["a"]}]}
"""
@spec reverse(key_pairs :: t) :: t
def reverse(%__MODULE__{list: list} = key_pairs) when is_list(list) do
list = Enum.map(list, &{elem(&1, 1), elem(&1, 0)})
%{key_pairs | list: list}
end
# Parses a single key path and returns the path if parsing is successful. If
# parsing fails then the invalid value is returned as well so we can use it
# when raising an error.
@spec do_parse(key_path :: term) ::
{:ok, list(term) | key_pair} | {:error, {:invalid_key_path, term}}
defp do_parse({left, right}) do
with {:ok, left} <- do_parse(left),
{:ok, right} <- do_parse(right) do
{:ok, {left, right}}
end
end
defp do_parse(key_path) do
case Parser.parse(key_path) do
{:ok, _} = result -> result
{:error, reason} -> {:error, {reason, key_path}}
end
end
# Parses both sides of the key pairs and raises an error if there are any
# issues with the input.
@spec do_parse!(key_pairs :: Transmog.key_paths()) :: key_pair
defp do_parse!({left, right}), do: {Parser.parse!(left), Parser.parse!(right)}
# Returns an error to indicate that the key pairs are not valid.
@spec invalid_key_pairs :: invalid
defp invalid_key_pairs, do: {:error, :invalid_key_pairs}
# Given a key pair and a key, returns if the key matches the left side of the
# key pair.
@spec pair_matches?(key :: list(term), pair :: key_pair) :: boolean
defp pair_matches?(key, {from, _}) when is_list(key) and is_list(from), do: from == key
defp pair_matches?(_, _), do: false
# Returns whether or not a single pair is valid. A pair is considered valid if
# they are both lists and have the same length.
@spec pair_valid?(key_pair :: key_pair) :: boolean
defp pair_valid?({left, right}) when is_list(left) and is_list(right) do
length(left) == length(right)
end
defp pair_valid?(_), do: false
# Sorts the list of key pairs in order from shortest to longest.
@spec sort_list(list :: list(key_pair)) :: list(key_pair)
defp sort_list(list) when is_list(list), do: Enum.sort_by(list, &length(elem(&1, 0)))
# Returns whether or not all of the key pairs are valid.
@spec valid?(key_pairs :: t) :: boolean
defp valid?(%__MODULE__{list: list}) when is_list(list), do: Enum.all?(list, &pair_valid?/1)
defp valid?(_), do: false
end
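# Sketch tying the pieces together: parse string key paths once, match a
# parsed key, then flip the whole mapping with `reverse/1`.
#
#     pairs = Transmog.KeyPairs.parse!([{"user.name", ":user.:name"}])
#     Transmog.KeyPairs.find_match(pairs, ["user", "name"])
#     #=> [:user, :name]
#
#     pairs |> Transmog.KeyPairs.reverse() |> Transmog.KeyPairs.find_match([:user, :name])
#     #=> ["user", "name"]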
|
lib/transmog/key_pairs.ex
| 0.884558
| 0.685568
|
key_pairs.ex
|
starcoder
|
defmodule Generator.Maze do
@moduledoc """
Module for handling the overall maze.
Mazes are represented as a list of Cells
"""
alias Generator.Cell
@doc """
Makes a new maze of Cells with the given width (x) and height (y).
## Examples
    iex> Generator.Maze.make(1, 1)
    [%Cell{x: 0, y: 0, north: true, south: true, east: true, west: true}]
"""
def make(x, y) do
maze =
new(x,y)
|> init_borders
maze
|> Enum.map(&set_east(&1, maze))
|> Enum.map(&set_west(&1, maze))
|> Enum.map(&set_north(&1, maze))
|> Enum.map(&set_south(&1, maze))
end
@doc """
Makes a new maze, using randomized bitstrings
"""
def make(x, y, true) do
new(x, y)
|> Enum.map(&Cell.new(rand_bitstring(), &1))
end
@doc """
Converts a maze into a doubly nested list of bitstrings.
"""
def to_json(maze) do
0..max_height(maze)
|> Enum.map(fn v ->
0..max_width(maze)
|> Enum.map(fn h ->
Enum.find(maze, & &1.y == v and &1.x == h)
|> Cell.get_bitstring
end)
end)
end
@doc """
Returns the example maze
"""
def example do
maze = [
[ 2, 14, 10, 14, 8 ],
[ 5, 9, 11, 13, 11 ],
[ 3, 15, 9, 15, 9 ],
[ 7, 15, 13, 15, 11 ],
[ 1, 13, 9, 9, 9 ]
]
maze
end
#### Private functions
defp init_borders(maze) do
## Initialize max's so they aren't calculated every time through loop
height = max_height(maze)
width = max_width(maze)
maze
|> Enum.map(fn cell -> if cell.x == 0, do: Map.put(cell, :north, true), else: cell end)
|> Enum.map(fn cell -> if cell.x == height, do: Map.put(cell, :south, true), else: cell end)
|> Enum.map(fn cell -> if cell.y == 0, do: Map.put(cell, :west, true), else: cell end)
|> Enum.map(fn cell -> if cell.y == width, do: Map.put(cell, :east, true), else: cell end)
end
defp max_width(maze) do
maze
|> Enum.max_by(fn cell -> cell.x end)
|> Map.get(:x)
end
defp max_height(maze) do
maze
|> Enum.max_by(fn cell -> cell.y end)
|> Map.get(:y)
end
defp set_east(cell, maze) do
east = get_east(cell, maze)
cond do
cell.east != nil -> cell
east == nil -> Map.put(cell, :east, rand_bool())
east.west != nil -> Map.put(cell, :east, east.west)
true -> Map.put(cell, :east, rand_bool())
end
end
defp set_west(cell, maze) do
west = get_west(cell, maze)
cond do
cell.west != nil -> cell
west == nil -> Map.put(cell, :west, rand_bool())
west.east != nil -> Map.put(cell, :west, west.east)
true -> Map.put(cell, :west, rand_bool())
end
end
defp set_north(cell, maze) do
north = get_north(cell, maze)
cond do
cell.north != nil -> cell
north == nil -> Map.put(cell, :north, rand_bool())
north.south == nil -> Map.put(cell, :north, rand_bool())
true -> Map.put(cell, :north, north.south)
end
end
defp set_south(cell, maze) do
south = get_south(cell, maze)
cond do
cell.south != nil -> cell
south == nil -> Map.put(cell, :south, rand_bool())
south.north == nil -> Map.put(cell, :south, rand_bool())
true -> Map.put(cell, :south, south.north)
end
end
defp new(x, y) do
for h <- 0..x-1, v <- 0..y-1 do
%Cell{x: h, y: v}
end
end
def get_west(cell, maze), do: Enum.find(maze, fn c -> c.x == cell.x + 1 && cell.y == c.y end)
def get_east(cell, maze), do: Enum.find(maze, fn c -> c.x == cell.x - 1 && cell.y == c.y end)
def get_north(cell, maze), do: Enum.find(maze, fn c -> c.x == cell.x && c.y == cell.y - 1 end)
def get_south(cell, maze), do: Enum.find(maze, fn c -> c.x == cell.x && c.y == cell.y + 1 end)
defp rand_bool, do: [true, false] |> Enum.random
defp rand_bitstring, do: 1..15 |> Enum.random
end
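# Generation sketch: build a maze with consistent shared walls, then flatten
# it to rows of bitstrings for a client. Output varies per run because wall
# placement is randomized, and the bitstring encoding comes from
# `Generator.Cell.get_bitstring/1`.
#
#     Generator.Maze.make(4, 4)
#     |> Generator.Maze.to_json()
#     #=> e.g. [[2, 14, 10, 8], ...]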
|
alex_story+elixir/maze_jam/apps/generator/lib/generator/maze.ex
| 0.852061
| 0.613468
|
maze.ex
|
starcoder
|
defmodule RlStudy.DP.Planner do
alias RlStudy.MDP.Environment
require Logger
require Matrex
@planner_data [env: nil, log: []]
@type t :: %RlStudy.DP.Planner{
env: RlStudy.MDP.Environment.t(),
log: [] | [String.t()]
}
defstruct @planner_data
def planner_data() do
@planner_data
end
@spec initialize(RlStudy.DP.Planner.t()) :: RlStudy.DP.Planner.t()
def initialize(planner) do
%RlStudy.DP.Planner{env: RlStudy.MDP.Environment.reset(planner.env), log: []}
end
defprotocol Plan do
@fallback_to_any true
@spec plan(%{env: RlStudy.MDP.Environment.t()}, float(), float()) :: float()
def plan(planner, gamma, threshold)
end
defimpl Plan, for: Any do
@spec plan(any, any, any) :: none
def plan(_planner, _gamma \\ 0.9, _threshold \\ 0.0001) do
raise "Planner have to implements plan method."
end
end
@spec transitions_at(
RlStudy.DP.Planner.t(),
RlStudy.MDP.State.t(),
RlStudy.MDP.Action.t()
) :: [%{prob: float(), next_state: RlStudy.MDP.State.t(), reward: float()}]
def transitions_at(planner, state, action) do
Logger.debug(
"planner: #{inspect(planner, pretty: true)}, state: #{inspect(state, pretty: true)}, action: #{inspect(action, pretty: true)}"
)
Environment.transit_func(planner.env, state, action)
|> Enum.map(fn {state, prob} ->
%{reward: reward, done: _} = Environment.reward_func(planner.env, state)
%{prob: prob, next_state: state, reward: reward}
end)
end
def dict_to_grid(planner, state_reward_dict) do
Logger.debug("planner: #{inspect(planner, pretty: true)}")
Logger.debug("state_reward_dict: #{inspect(state_reward_dict, pretty: true)}")
row_length = Environment.row_length(planner.env)
column_length = Environment.column_length(planner.env)
zero_grid = Matrex.new(row_length, column_length, fn -> 0 end)
state_reward_dict
|> Enum.reduce(zero_grid, fn {s, reward}, acc ->
Matrex.set(acc, s.row + 1, s.column + 1, reward)
end)
end
end
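# Implementation sketch (the `ValueIterationPlanner` struct is hypothetical):
# a concrete planner implements the nested `Plan` protocol and can lean on
# `transitions_at/3` for the probability-weighted next states and rewards.
#
#     defimpl RlStudy.DP.Planner.Plan, for: ValueIterationPlanner do
#       def plan(planner, gamma, threshold) do
#         # iterate value estimates until the largest update falls below threshold
#         ...
#       end
#     end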
|
lib/dp/planner.ex
| 0.691706
| 0.680826
|
planner.ex
|
starcoder
|
defmodule Calibex do
@moduledoc """
Calibex allows you to handle ICal file format.
In the same way as the `mailibex` library, Calibex allows bijective coding/decoding :
making it possible to modify an ical and to keep all fields and struct of the initial ical.
The ICal elixir term is exactly a representation of the ICal file format : for instance :
[vcalendar: [[
prodid: "-//Google Inc//Google Calendar 70.9054//EN",
version: "2.0",
calscale: "GREGORIAN",
vevent: [[
dtstart: %DateTime{},
dtend: %DateTime{},
organizer: [cn: "<NAME>",value: "mailto:<EMAIL>"],
attendee: [cutype: "INDIVIDUAL",role: "REQ-PARTICIPANT",partstat: "NEEDS-ACTION",rsvp: true, cn: "Moi",
"x-num-guests": 0, value: "mailto:<EMAIL>"],
]]]]]
`encode/1` and `decode/1` parse and format an ICal from these terms: see the
function docs for the encoding rules.
Using these terms makes it possible to handle all types of ICal files and any
field type. But the downside of this approach is that it can be cumbersome
to create and handle this tree of keyword lists. To help you with these tasks,
some helper functions are provided:
- `new/1`
- `new/2`
- `new_root/1`
- `new_root/2`
- `request/1`
- `request/2`
## Example usage: email event request generation
```
Calibex.request(dtstart: Timex.now, dtend: Timex.shift(Timex.now, hours: 1), summary: "My event",
organizer: "<EMAIL>", attendee: "<EMAIL>", attendee: "<EMAIL>")
|> Calibex.encode
```
"""
@doc ~S"""
Encode a tree of keyword lists with ICal rules:
- `KEY: [KW1,KW2]` a list of keyword lists is encoded as multiple
`BEGIN:KEY\nKW1\nEND:KEY\nBEGIN:KEY\nKW2\nEND:KEY`
- `[K1: V1, K2: V2]` a keyword list is encoded as lines `K1:V1\nK2:V2`
- `KEY: [K1: V1, K2: V2, value: VALUE]` a keyword list as leaf value is encoded as a
key value line with props: `KEY;K1=V1;K2=V2:VALUE`
- `%DateTime{}` datetime values are encoded as UTC BasicISO strings
- `:atom1` atom values are encoded as uppercase strings: `ATOM1`
- `:key_1` atom keys are encoded as uppercase strings with `_` replaced by `-`: `KEY-1`
"""
defdelegate encode(props), to: Calibex.Codec
@doc """
Decode an ICal UTF8 binary into a tree of keyword lists with the same rules as
`encode/1`, but inverted, except that:
- leaf values are not decoded: they are kept as strings.
"""
defdelegate decode(bin), to: Calibex.Codec
@doc """
Complete an event keyword list to form a nested ical kwlist:
`fill_attrs` is a list of *key* atoms describing completion rules to be used.
see `all_fill_attrs/0` doc to find allowed completion rules.
"""
defdelegate new(event,fill_attrs), to: Calibex.Helper
@doc "see `new/2`, default fill_attrs are
`[:prodid, :version, :calscale, :organizer, :attendee, :cutype, :role, :partstat, :rsvp, :x_num_guests]`"
defdelegate new(event), to: Calibex.Helper
@doc """
same as `new/2`, but with a `REQUEST` method in order to allow email request
ICS generation.
"""
defdelegate request(event,fill_attrs), to: Calibex.Helper
@doc """
same as `new/1`, but with a `REQUEST` method in order to allow email request
ICS generation.
Default `fill_attrs` contains in addition `[:uid,:last_modified,:sequence,:dtstamp,:created,:status]`
"""
defdelegate request(event), to: Calibex.Helper
@doc """
Complete an ical keyword list :
`fill_attrs` is a list of *key* atoms describing completion rules to be used.
see `all_fill_attrs/0` doc to find allowed completion rules.
"""
defdelegate new_root(cal,fill_attrs), to: Calibex.Helper
defdelegate new_root(cal), to: Calibex.Helper
@doc """
return all available transformation rules. There are 2 types :
- the ones which set a default value if not defined otherwise (DEFAULT)
- the ones which transform a given value if defined (TRANSFORM)
Available rules are :
- `:last_modified`,`:dtstamp`,:`created` : DEFAULT to UTC now
- `:sequence` : DEFAULT to 0
- `:uid` : DEFAULT to all vals hexa sha1 of all event props
- `:status` : DEFAULT to :confirmed
- `:version`,`:calscale`,`:prodid`: DEFAULT to base ical attrs (`2.0`,`GREGORIAN`)
- `:cutype`, `:role`, `:partstat`, `:rsvp`, `:x_num_guests`: DEFAULT to
standard rsvp enabled attendee, waiting for event acceptance
- `:organizer` TRANSFORM an email string into a `[cn: email,value: "mailto:"<>email]` props value.
- `:attendee` TRANSFORM an email string into a `[cn: email,value: "mailto:"<>email]` props value.
"""
defdelegate all_fill_attrs(), to: Calibex.Helper
end
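# Round-trip sketch, relying on the bijective design: `decode/1` reverses
# `encode/1`, returning the same nested keyword tree with leaf values kept
# as strings.
#
#     tree = [vcalendar: [[version: "2.0", calscale: "GREGORIAN"]]]
#     tree |> Calibex.encode() |> Calibex.decode()
#     #=> [vcalendar: [[version: "2.0", calscale: "GREGORIAN"]]]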
|
lib/calibex.ex
| 0.822189
| 0.792544
|
calibex.ex
|
starcoder
|
defmodule ExDoc.Formatter.HTML.Autolink do
import ExDoc.Formatter.HTML.Templates, only: [h: 1, enc_h: 1]
@moduledoc """
Conveniences for autolinking locals, types and more.
"""
@elixir_docs "http://elixir-lang.org/docs/stable/"
@erlang_docs "http://www.erlang.org/doc/man/"
@doc """
Receives a list of module nodes and autolink all docs and typespecs.
"""
def all(modules) do
aliases = Enum.map modules, &(&1.module)
modules
|> Enum.map(&Task.async(fn -> process_module(&1, modules, aliases) end))
|> Enum.map(&Task.await/1)
end
defp process_module(module, modules, aliases) do
module
|> all_docs(modules)
|> all_typespecs(aliases)
end
defp module_to_string(module) do
inspect module.module
end
defp all_docs(module, modules) do
  locals = Enum.map module.docs, &(doc_prefix(&1) <> &1.id)
  moduledoc =
    if module.moduledoc do
      module.moduledoc
      |> local_doc(locals)
      |> project_doc(modules, module.id)
    end
  docs = for node <- module.docs do
    doc =
      if node.doc do
        node.doc
        |> local_doc(locals)
        |> project_doc(modules, module.id)
      end
    %{node | doc: doc}
  end
  typedocs = for node <- module.typespecs do
    doc =
      if node.doc do
        node.doc
        |> local_doc(locals)
        |> project_doc(modules, module.id)
      end
    %{node | doc: doc}
  end
  %{module | moduledoc: moduledoc, docs: docs, typespecs: typedocs}
end
defp all_typespecs(module, aliases) do
locals = Enum.map module.typespecs, fn
%ExDoc.TypeNode{name: name, arity: arity} -> { name, arity }
end
typespecs = for typespec <- module.typespecs do
%{typespec | spec: typespec(typespec.spec, locals, aliases)}
end
docs = for node <- module.docs do
%{node | specs: Enum.map(node.specs, &typespec(&1, locals, aliases))}
end
%{module | typespecs: typespecs, docs: docs}
end
@doc """
Converts the given `ast` to string while linking the locals
given by `typespecs` as HTML.
"""
def typespec({:when, _, [{:::, _, [left, {:|, _, _} = center]}, right]} = ast, typespecs, aliases) do
if short_typespec?(ast) do
typespec_to_string(ast, typespecs, aliases)
else
typespec_to_string(left, typespecs, aliases) <>
" ::\n " <> typespec_with_new_line(center, typespecs, aliases) <>
" when " <> String.slice(typespec_to_string(right, typespecs, aliases), 1..-2)
end
end
def typespec({:::, _, [left, {:|, _, _} = center]} = ast, typespecs, aliases) do
if short_typespec?(ast) do
typespec_to_string(ast, typespecs, aliases)
else
typespec_to_string(left, typespecs, aliases) <>
" ::\n " <> typespec_with_new_line(center, typespecs, aliases)
end
end
def typespec(other, typespecs, aliases) do
typespec_to_string(other, typespecs, aliases)
end
defp typespec_with_new_line({:|, _, [left, right]}, typespecs, aliases) do
typespec_to_string(left, typespecs, aliases) <>
" |\n " <> typespec_with_new_line(right, typespecs, aliases)
end
defp typespec_with_new_line(other, typespecs, aliases) do
typespec_to_string(other, typespecs, aliases)
end
defp typespec_to_string(ast, typespecs, aliases) do
Macro.to_string(ast, fn
{name, _, args}, string when is_atom(name) and is_list(args) ->
string = strip_parens(string, args)
arity = length(args)
if { name, arity } in typespecs do
n = enc_h("#{name}")
~s[<a href="#t:#{n}/#{arity}">#{h(string)}</a>]
else
string
end
{{ :., _, [alias, name] }, _, args}, string when is_atom(name) and is_list(args) ->
string = strip_parens(string, args)
alias = expand_alias(alias)
if source = get_source(alias, aliases) do
n = enc_h("#{name}")
~s[<a href="#{source}#{enc_h(inspect alias)}.html#t:#{n}/#{length(args)}">#{h(string)}</a>]
else
string
end
_, string ->
string
end)
end
defp short_typespec?(ast) do
byte_size(Macro.to_string(ast)) < 60
end
defp strip_parens(string, []) do
if :binary.last(string) == ?) do
:binary.part(string, 0, byte_size(string)-2)
else
string
end
end
defp strip_parens(string, _), do: string
defp expand_alias({:__aliases__, _, [h|t]}) when is_atom(h), do: Module.concat([h|t])
defp expand_alias(atom) when is_atom(atom), do: atom
defp expand_alias(_), do: nil
defp get_source(alias, aliases) do
cond do
is_nil(alias) -> nil
alias in aliases -> ""
dir = from_elixir(alias) -> @elixir_docs <> dir <> "/"
true -> nil
end
end
defp from_elixir(alias) do
alias_ebin = alias_ebin(alias)
if String.starts_with?(alias_ebin, elixir_ebin()) do
alias_ebin
|> Path.dirname()
|> Path.dirname()
|> Path.basename()
end
end
defp alias_ebin(alias) do
case :code.where_is_file('#{alias}.beam') do
:non_existing -> ""
path -> List.to_string(path)
end
end
defp elixir_ebin do
case :code.where_is_file('Elixir.Kernel.beam') do
:non_existing -> [0]
path ->
path
|> Path.dirname()
|> Path.dirname()
|> Path.dirname()
end
end
@doc """
Create links to locally defined functions, specified in `locals`
as a list of `fun/arity` strings.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[test/1](url)`. If the function doesn't touch the leading
or trailing `]`, e.g. `[my link link/1 is here](url)`, the fun/arity
will get translated to the new href of the function.
"""
def local_doc(bin, locals) when is_binary(bin) do
~r{(?<!\[)`\s*(([a-z_!\\?>\\|=&<!~+\\.\\+*^@-]+)/\d+)\s*`(?!\])}
|> Regex.scan(bin)
|> Enum.uniq()
|> List.flatten()
|> Enum.filter(&(&1 in locals))
|> Enum.reduce(bin, fn (x, acc) ->
{prefix, _, function_name, arity} = split_function(x)
escaped = Regex.escape(x)
Regex.replace(~r/(?<!\[)`(\s*#{escaped}\s*)`(?!\])/, acc,
"[`#{function_name}/#{arity}`](##{prefix}#{enc_h function_name}/#{arity})")
end)
end
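# For instance (hypothetical doc string and locals):
#
#     local_doc("Calls `example/1`.", ["example/1"])
#     #=> "Calls [`example/1`](#example/1)."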
@doc """
Creates links to modules and functions defined in the project.
"""
def project_doc(bin, modules, module_id \\ nil) when is_binary(bin) do
project_funs = for m <- modules, d <- m.docs, do: doc_prefix(d) <> m.id <> "." <> d.id
project_modules =
modules
|> Enum.map(&module_to_string/1)
|> Enum.uniq()
bin
|> project_functions(project_funs)
|> project_modules(project_modules, module_id)
|> erlang_functions()
end
defp doc_prefix(%{type: c}) when c in [:callback, :macrocallback], do: "c:"
defp doc_prefix(%{type: _}), do: ""
@doc """
Create links to functions defined in the project, specified in `project_funs`
as a list of `Module.fun/arity` tuples.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[Module.test/1](url)`. If the function doesn't touch the leading
or trailing `]`, e.g. `[my link Module.link/1 is here](url)`, the Module.fun/arity
will get translated to the new href of the function.
"""
def project_functions(bin, project_funs) when is_binary(bin) do
~r{(?<!\[)`\s*((c:)?(([A-Z][A-Za-z]+)\.)+([a-z_!\?>\|=&<!~+\.\+*^@-]+)/\d+)\s*`(?!\])}
|> Regex.scan(bin)
|> Enum.uniq()
|> List.flatten()
|> Enum.filter(&(&1 in project_funs))
|> Enum.reduce(bin, fn (x, acc) ->
{prefix, mod_str, function_name, arity} = split_function(x)
escaped = Regex.escape(x)
Regex.replace(~r/(?<!\[)`(\s*#{escaped}\s*)`(?!\])/, acc,
"[`#{mod_str}.#{function_name}/#{arity}`](#{mod_str}.html##{prefix}#{enc_h function_name}/#{arity})")
end)
end
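# For instance (hypothetical project function list):
#
#     project_functions("Calls `MyMod.run/1`.", ["MyMod.run/1"])
#     #=> "Calls [`MyMod.run/1`](MyMod.html#run/1)."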
@doc """
Create links to modules defined in the project, specified in `modules`
as a list.
Ignores modules which are already wrapped in markdown url syntax,
e.g. `[Module](url)`. If the module name doesn't touch the leading
or trailing `]`, e.g. `[my link Module is here](url)`, the Module
will get translated to the new href of the module.
"""
def project_modules(bin, modules, module_id \\ nil) when is_binary(bin) do
~r{(?<!\[)`\s*(([A-Z][A-Za-z]+\.?)+)\s*`(?!\])}
|> Regex.scan(bin)
|> Enum.uniq()
|> List.flatten()
|> Enum.filter(&(&1 in modules))
|> Enum.reduce(bin, fn (x, acc) ->
escaped = Regex.escape(x)
suffix = if module_id && x == module_id, do: ".html#content", else: ".html"
Regex.replace(~r/(?<!\[)`(\s*#{escaped}\s*)`(?!\])/, acc,
"[`\\1`](\\1" <> suffix <> ")")
end)
end
defp split_function("c:" <> bin) do
{"", mod, fun, arity} = split_function(bin)
{"c:", mod, fun, arity}
end
defp split_function(bin) do
[modules, arity] = String.split(bin, "/")
{mod, name} =
modules
|> String.replace(~r{([^\.])\.}, "\\1 ") # this handles the case of the ".." function
|> String.split(" ")
|> Enum.split(-1)
{"", Enum.join(mod, "."), hd(name), arity}
end
@doc """
Create links to Erlang functions in code blocks.
Only links modules that are in the Erlang distribution `lib_dir`
and only link functions in those modules that export a function of the
same name and arity.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[:module.test/1](url)`. If the function doesn't touch the leading
or trailing `]`, e.g. `[my link :module.link/1 is here](url)`, the :module.fun/arity
will get translated to the new href of the function.
"""
def erlang_functions(bin) when is_binary(bin) do
lib_dir = erlang_lib_dir()
~r{(?<!\[)`\s*:([a-z_]+\.[0-9a-zA-Z_!\\?]+/\d+)\s*`(?!\])}
|> Regex.scan(bin)
|> Enum.uniq()
|> List.flatten()
|> Enum.filter(&valid_erlang_beam?(&1, lib_dir))
|> Enum.filter(&module_exports_function?/1)
|> Enum.reduce(bin, fn (x, acc) ->
{_, mod_str, function_name, arity} = split_function(x)
escaped = Regex.escape(x)
Regex.replace(~r/(?<!\[)`(\s*:#{escaped}\s*)`(?!\])/, acc,
"[`\\1`](#{@erlang_docs}#{mod_str}.html##{function_name}-#{arity})")
end)
end
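# For instance, provided :lists is on the Erlang lib path and exports reverse/1:
#
#     erlang_functions("See `:lists.reverse/1`.")
#     #=> "See [`:lists.reverse/1`](http://www.erlang.org/doc/man/lists.html#reverse-1)."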
defp valid_erlang_beam?(function_str, lib_dir) do
{ _, mod_str, _function_name, _arity } = split_function(function_str)
'#{mod_str}.beam'
|> :code.where_is_file
|> on_lib_path?(lib_dir)
end
defp on_lib_path?(:non_existing, _base_path), do: false
defp on_lib_path?(beam_path, base_path) do
beam_path
|> Path.expand()
|> String.starts_with?(base_path)
end
defp erlang_lib_dir do
:code.lib_dir
|> Path.expand()
end
defp module_exports_function?(function_str) do
{_, mod_str, function_name, arity_str} = split_function(function_str)
module = String.to_atom(mod_str)
function_name = String.to_atom(function_name)
{arity, _} = Integer.parse(arity_str)
exports = module.module_info(:exports)
Enum.member? exports, {function_name, arity}
end
end
|
lib/ex_doc/formatter/html/autolink.ex
| 0.641535
| 0.446857
|
autolink.ex
|
starcoder
|
defmodule Microdata.Strategy.HTMLMicrodata do
@moduledoc """
`Microdata.Strategy.HTMLMicrodata` defines a strategy to extract HTML microdata from a `Meeseeks.Document`, based on the W3C [HTML Microdata format](https://www.w3.org/TR/microdata/).
### Caveats
- `itemref` lookups are not yet supported
"""
@behaviour Microdata.Strategy
import Meeseeks.XPath
alias Microdata.{Helpers, Item, Property}
@tags_src ~w(audio embed iframe img source track video)
@tags_href ~w(a area link)
@tags_data ~w(object)
@tags_value ~w(data meter)
@tags_datetime ~w(datetime)
@impl true
def parse_items(doc, base_uri \\ nil) do
parse_items(doc, 0, base_uri)
end
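# Usage sketch (illustrative HTML; assumes Meeseeks for parsing):
#
#     doc = Meeseeks.parse(~s(<div itemscope itemtype="https://schema.org/Person">
#       <span itemprop="name">Alice</span></div>))
#     Microdata.Strategy.HTMLMicrodata.parse_items(doc)
#     #=> [%Microdata.Item{...}]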
defp parse_items(doc, nest_level, base_uri, items \\ []) do
selector =
if nest_level == 0 do
"/*[@itemscope]|//*[@itemscope and count(ancestor::*[@itemscope]) = 0]"
else
"//*[@itemscope and not(@itemprop) and count(ancestor::*[@itemscope]) = #{nest_level}]"
end
doc
|> Meeseeks.all(xpath(selector))
|> Enum.map(&parse_item(&1, nest_level, base_uri))
|> case do
new_items when new_items != [] ->
parse_items(doc, nest_level + 1, base_uri, new_items ++ items)
_ ->
items
end
end
defp parse_item(item, nest_level, base_uri) do
item_model = %Item{
id: item |> Meeseeks.attr("itemid") |> Helpers.parse_item_id(),
types:
item
|> Meeseeks.attr("itemtype")
|> Helpers.parse_item_types()
|> MapSet.new()
}
%{item_model | properties: parse_properties(item, item_model, nest_level, base_uri)}
end
defp parse_properties(item, item_model, nest_level, base_uri) do
selector = ".//*[@itemprop and count(ancestor::*[@itemscope]) = #{nest_level + 1}]"
item
|> Meeseeks.all(xpath(selector))
|> Enum.map(fn prop -> parse_property(prop, item_model, nest_level, base_uri) end)
end
defp parse_property(property, item, nest_level, base_uri) do
%Property{
names: property |> parse_property_names(item) |> MapSet.new(),
value: parse_property_value(property, nest_level, base_uri),
html: Meeseeks.html(property)
}
end
defp parse_property_names(property, item) do
property
|> Meeseeks.attr("itemprop")
|> Helpers.parse_property_names(item)
end
# credo:disable-for-lines:35 Credo.Check.Refactor.CyclomaticComplexity
defp parse_property_value(property, nest_level, base_uri) do
tag = Meeseeks.tag(property)
itemscope = Meeseeks.attr(property, "itemscope")
content = Meeseeks.attr(property, "content")
cond do
itemscope != nil ->
parse_item(property, nest_level + 1, base_uri)
content != nil ->
content
Enum.member?(@tags_src, tag) ->
Meeseeks.attr(property, "src")
|> parse_property_uri(base_uri)
Enum.member?(@tags_href, tag) ->
Meeseeks.attr(property, "href")
|> parse_property_uri(base_uri)
Enum.member?(@tags_data, tag) ->
Meeseeks.attr(property, "data")
|> parse_property_uri(base_uri)
Enum.member?(@tags_value, tag) ->
value = Meeseeks.attr(property, "value")
if value != nil, do: value, else: Meeseeks.text(property)
Enum.member?(@tags_datetime, tag) ->
value = Meeseeks.attr(property, "datetime")
if value != nil, do: value, else: Meeseeks.text(property)
true ->
Meeseeks.text(property)
end
end
defp parse_property_uri(uri, base) do
cond do
Helpers.absolute_url?(uri) -> uri
Helpers.absolute_url?(base) -> URI.merge(base, uri) |> URI.to_string()
true -> ""
end
end
end
|
lib/microdata/strategy/html_microdata.ex
| 0.798619
| 0.435121
|
html_microdata.ex
|
starcoder
|
defmodule Bowling do
@doc """
Creates a new game of bowling that can be used to store the results of
the game
"""
@spec start() :: any
def start, do: {1, [], []}
@doc """
Records the number of pins knocked down on a single roll. Returns the
updated game unless there is something wrong with the given number of
pins, in which case it returns an error tuple with a helpful message.
"""
@spec roll(any, integer) :: any | {:error, String.t()}
def roll(_, pins) when pins < 0, do: {:error, "Negative roll is invalid"}
def roll(_, pins) when pins > 10, do: {:error, "Pin count exceeds pins on the lane"}
# A strike or spare in frame 10 stays in the frame for fill rolls.
def roll({10, [], rolls}, 10), do: {10, [10], [10 | rolls]}
def roll({10, [10], rolls}, pins), do: {10, [10, pins], [pins | rolls]}
def roll({10, [10, 10], rolls}, pins), do: {10, :done, [pins | rolls]}
def roll({10, [10, n], _}, pins) when pins + n > 10 do
{:error, "Pin count exceeds pins on the lane"}
end
def roll({10, [10, _], rolls}, pins), do: {10, :done, [pins | rolls]}
def roll({10, [n], _}, pins) when pins + n > 10 do
{:error, "Pin count exceeds pins on the lane"}
end
def roll({10, [n], rolls}, pins) when pins + n < 10, do: {10, :done, [pins | rolls]}
def roll({10, [n], rolls}, pins), do: {10, [n, pins], [pins | rolls]}
def roll({10, [m, n], rolls}, pins) when n + m == 10, do: {10, :done, [pins | rolls]}
def roll({frame, [], rolls}, 10) when frame < 10, do: {frame + 1, [], [10 | rolls]}
def roll({frame, [], rolls}, pins), do: {frame, [pins], [pins | rolls]}
def roll({_, [n], _}, pins) when pins + n > 10 do
{:error, "Pin count exceeds pins on the lane"}
end
def roll({frame, [_], rolls}, pins), do: {frame + 1, [], [pins | rolls]}
def roll({10, _, _}, _), do: {:error, "Cannot roll after game is over"}
@doc """
Returns the score of a given game of bowling if the game is complete.
If the game isn't complete, it returns an error tuple with a helpful message.
"""
@spec score(any) :: integer | {:error, String.t()}
def score({10, :done, rolls}) do
do_score(Enum.reverse(rolls), 0)
end
def score(_), do: {:error, "Score cannot be taken until the end of the game"}
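# Usage sketch: a perfect game (twelve strikes) scores 300.
#
#     game = Bowling.start()
#     game = Enum.reduce(1..12, game, fn _, g -> Bowling.roll(g, 10) end)
#     Bowling.score(game)
#     #=> 300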
defp do_score([], acc), do: acc
defp do_score([_], acc), do: acc
defp do_score([10, r1, r2], acc), do: acc + 10 + r1 + r2
defp do_score([10 | rest], acc) do
do_score(rest, acc + 10 + Enum.sum(Enum.take(rest, 2)))
end
defp do_score([r1, r2 | rest], acc) do
case r1 + r2 do
10 -> do_score(rest, acc + 10 + hd(rest))
_ -> do_score(rest, acc + r1 + r2)
end
end
end
|
exercism/bowling/bowling.ex
| 0.842004
| 0.702913
|
bowling.ex
|
starcoder
|
defmodule Commanded.ExampleDomain.BankAccount do
@moduledoc false
@derive Jason.Encoder
defstruct [:account_number, :state, balance: 0]
alias Commanded.ExampleDomain.BankAccount
defmodule Commands do
defmodule OpenAccount do
defstruct [:account_number, :initial_balance]
end
defmodule DepositMoney do
defstruct [:account_number, :transfer_uuid, :amount]
end
defmodule WithdrawMoney do
defstruct [:account_number, :transfer_uuid, :amount]
end
defmodule CloseAccount do
defstruct [:account_number]
end
end
defmodule Events do
defmodule BankAccountOpened do
@derive Jason.Encoder
defstruct [:account_number, :initial_balance]
end
defmodule MoneyDeposited do
@derive Jason.Encoder
defstruct [:account_number, :transfer_uuid, :amount, :balance]
end
defmodule MoneyWithdrawn do
@derive Jason.Encoder
defstruct [:account_number, :transfer_uuid, :amount, :balance]
end
defmodule AccountOverdrawn do
@derive Jason.Encoder
defstruct [:account_number, :balance]
end
defmodule BankAccountClosed do
@derive Jason.Encoder
defstruct [:account_number]
end
end
alias Commands.{CloseAccount, DepositMoney, OpenAccount, WithdrawMoney}
alias Events.{
AccountOverdrawn,
BankAccountClosed,
BankAccountOpened,
MoneyDeposited,
MoneyWithdrawn
}
def open_account(
%BankAccount{state: nil},
%OpenAccount{initial_balance: initial_balance} = command
)
when is_number(initial_balance) and initial_balance > 0 do
%OpenAccount{account_number: account_number} = command
%BankAccountOpened{account_number: account_number, initial_balance: initial_balance}
end
def open_account(%BankAccount{state: nil}, %OpenAccount{}),
do: {:error, :invalid_initial_balance}
def deposit(
%BankAccount{state: :active} = account,
%DepositMoney{amount: amount} = command
)
when is_number(amount) and amount > 0 do
%BankAccount{balance: balance} = account
%DepositMoney{account_number: account_number, transfer_uuid: transfer_uuid} = command
balance = balance + amount
%MoneyDeposited{
account_number: account_number,
transfer_uuid: transfer_uuid,
amount: amount,
balance: balance
}
end
def withdraw(
%BankAccount{state: :active} = account,
%WithdrawMoney{amount: amount} = command
)
when is_number(amount) and amount > 0 do
%BankAccount{balance: balance} = account
%WithdrawMoney{account_number: account_number, transfer_uuid: transfer_uuid} = command
case balance - amount do
balance when balance < 0 ->
[
%MoneyWithdrawn{
account_number: account_number,
transfer_uuid: transfer_uuid,
amount: amount,
balance: balance
},
%AccountOverdrawn{account_number: account_number, balance: balance}
]
balance ->
%MoneyWithdrawn{
account_number: account_number,
transfer_uuid: transfer_uuid,
amount: amount,
balance: balance
}
end
end
def close_account(%BankAccount{state: :closed}, %CloseAccount{}) do
[]
end
def close_account(%BankAccount{state: :active}, %CloseAccount{account_number: account_number}) do
%BankAccountClosed{account_number: account_number}
end
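# Sketch of the command/event round trip (illustrative values):
#
#     account = %BankAccount{}
#     event = BankAccount.open_account(account, %Commands.OpenAccount{
#       account_number: "ACC123", initial_balance: 100})
#     account = BankAccount.apply(account, event)
#     account.balance
#     #=> 100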
# State mutators
def apply(%BankAccount{} = state, %BankAccountOpened{} = event) do
%BankAccountOpened{account_number: account_number, initial_balance: initial_balance} = event
%BankAccount{state | account_number: account_number, balance: initial_balance, state: :active}
end
def apply(%BankAccount{} = state, %MoneyDeposited{balance: balance}),
do: %BankAccount{state | balance: balance}
def apply(%BankAccount{} = state, %MoneyWithdrawn{balance: balance}),
do: %BankAccount{state | balance: balance}
def apply(%BankAccount{} = state, %AccountOverdrawn{}), do: state
def apply(%BankAccount{} = state, %BankAccountClosed{}) do
%BankAccount{state | state: :closed}
end
end
|
test/example_domain/bank_account/bank_account.ex
| 0.737064
| 0.688567
|
bank_account.ex
|
starcoder
|
defmodule AWS.Health do
@moduledoc """
AWS Health
The AWS Health API provides programmatic access to the AWS Health information
that appears in the [AWS Personal Health Dashboard](https://phd.aws.amazon.com/phd/home#/).
You can use the API operations to get information about AWS Health events that
affect your AWS services and resources.
You must have a Business or Enterprise support plan from [AWS Support](http://aws.amazon.com/premiumsupport/) to use the AWS Health API. If
you call the AWS Health API from an AWS account that doesn't have a Business or
Enterprise support plan, you receive a `SubscriptionRequiredException` error.
AWS Health has a single endpoint: health.us-east-1.amazonaws.com (HTTPS). Use
this endpoint to call the AWS Health API operations.
For authentication of requests, AWS Health uses the [Signature Version 4 Signing Process](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
If your AWS account is part of AWS Organizations, you can use the AWS Health
organizational view feature. This feature provides a centralized view of AWS
Health events across all accounts in your organization. You can aggregate AWS
Health events in real time to identify accounts in your organization that are
affected by an operational event or get notified of security vulnerabilities.
Use the organizational view API operations to enable this feature and return
event information. For more information, see [Aggregating AWS Health events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) in
the *AWS Health User Guide*.
When you use the AWS Health API operations to return AWS Health events, see the
following recommendations:
Use the
[eventScopeCode](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode)
parameter to specify whether to return AWS Health events that are public or
account-specific.
Use pagination to view all events from the response. For example,
if you call the `DescribeEventsForOrganization` operation to get all events in
your organization, you might receive several page results. Specify the
`nextToken` in the next request to return more results.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "AWSHealth",
api_version: "2016-08-04",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "health",
global?: false,
protocol: "json",
service_id: "Health",
signature_version: "v4",
signing_name: "health",
target_prefix: "AWSHealth_20160804"
}
end
@doc """
Returns a list of accounts in the organization from AWS Organizations that are
affected by the provided event.
For more information about the different types of AWS Health events, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). Before you can call this operation, you must first enable AWS Health to work
with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master account.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_affected_accounts_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeAffectedAccountsForOrganization",
input,
options
)
end
@doc """
Returns a list of entities that have been affected by the specified events,
based on the specified filter criteria.
Entities can refer to individual customer resources, groups of customer
resources, or any other construct, depending on the AWS service. Events that
have impact beyond that of the affected entities, or where the extent of impact
is unknown, include at least one entity indicating this.
At least one event ARN is required. Results are sorted by the `lastUpdatedTime`
of the entity, starting with the most recent.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_affected_entities(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeAffectedEntities", input, options)
end
@doc """
Returns a list of entities that have been affected by one or more events for one
or more accounts in your organization in AWS Organizations, based on the filter
criteria.
Entities can refer to individual customer resources, groups of customer
resources, or any other construct, depending on the AWS service.
At least one event Amazon Resource Name (ARN) and account ID are required.
Results are sorted by the `lastUpdatedTime` of the entity, starting with the
most recent.
Before you can call this operation, you must first enable AWS Health to work
with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master account.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_affected_entities_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeAffectedEntitiesForOrganization",
input,
options
)
end
@doc """
Returns the number of entities that are affected by each of the specified
events.
If no events are specified, the counts of all affected entities are returned.
"""
def describe_entity_aggregates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEntityAggregates", input, options)
end
@doc """
Returns the number of events of each event type (issue, scheduled change, and
account notification).
If no filter is specified, the counts of all events in each category are
returned.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_event_aggregates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventAggregates", input, options)
end
@doc """
Returns detailed information about one or more specified events.
Information includes standard event data (Region, service, and so on, as
returned by
[DescribeEvents](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEvents.html)), a detailed event description, and possible additional metadata that depends upon
the nature of the event. Affected entities are not included. To retrieve those,
use the
[DescribeAffectedEntities](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html)
operation.
If a specified event cannot be retrieved, an error message is returned for that
event.
"""
def describe_event_details(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventDetails", input, options)
end
@doc """
Returns detailed information about one or more specified events for one or more
accounts in your organization.
Information includes standard event data (Region, service, and so on, as
returned by
[DescribeEventsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventsForOrganization.html)), a detailed event description, and possible additional metadata that depends upon
the nature of the event. Affected entities are not included; to retrieve those,
use the
[DescribeAffectedEntitiesForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html)
operation.
Before you can call this operation, you must first enable AWS Health to work
with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html) operation from your organization's master account.
When you call the `DescribeEventDetailsForOrganization` operation, you specify
the `organizationEventDetailFilters` object in the request. Depending on the AWS
Health event type, note the following differences:
* If the event is public, the `awsAccountId` parameter must be
empty. If you specify an account ID for a public event, then an error message is
returned. That's because the event might apply to all AWS accounts and isn't
specific to an account in your organization.
* If the event is specific to an account, then you must specify the
`awsAccountId` parameter in the request. If you don't specify an account ID, an
error message returns because the event is specific to an AWS account in your
organization.
For more information, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
"""
def describe_event_details_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeEventDetailsForOrganization",
input,
options
)
end
@doc """
Returns the event types that meet the specified filter criteria.
If no filter criteria are specified, all event types are returned, in no
particular order.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_event_types(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventTypes", input, options)
end
@doc """
Returns information about events that meet the specified filter criteria.
Events are returned in a summary form and do not include the detailed
description, any additional metadata that depends on the event type, or any
affected resources. To retrieve that information, use the
[DescribeEventDetails](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventDetails.html) and
[DescribeAffectedEntities](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html)
operations.
If no filter criteria are specified, all events are returned. Results are sorted
by `lastModifiedTime`, starting with the most recent event.
When you call the `DescribeEvents` operation and specify an entity
for the `entityValues` parameter, AWS Health might return public events that
aren't specific to that resource. For example, if you call `DescribeEvents` and
specify an ID for an Amazon Elastic Compute Cloud (Amazon EC2) instance, AWS
Health might return events that aren't specific to that resource or service. To
get events that are specific to a service, use the `services` parameter in the
`filter` object. For more information, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
This API operation uses pagination. Specify the `nextToken`
parameter in the next request to return more results.
"""
def describe_events(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEvents", input, options)
end
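# Usage sketch (credentials and filter values are illustrative):
#
#     client = AWS.Client.create("AKIA...", "secret", "us-east-1")
#     {:ok, result, _http_response} =
#       AWS.Health.describe_events(client, %{"filter" => %{"services" => ["EC2"]}})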
@doc """
Returns information about events across your organization in AWS Organizations.
You can use the `filters` parameter to specify the events that you want to
return. Events are returned in a summary form and don't include the affected
accounts, detailed description, any additional metadata that depends on the
event type, or any affected resources. To retrieve that information, use the
following operations:
*
[DescribeAffectedAccountsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedAccountsForOrganization.html) *
[DescribeEventDetailsForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventDetailsForOrganization.html)
*
[DescribeAffectedEntitiesForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html)
If you don't specify a `filter`, the `DescribeEventsForOrganization` operation returns
all events across your organization. Results are sorted by `lastModifiedTime`,
starting with the most recent event.
For more information about the different types of AWS Health events, see
[Event](https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html).
Before you can call this operation, you must first enable AWS Health to work
with AWS Organizations. To do this, call the
[EnableHealthServiceAccessForOrganization](https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html)
operation from your organization's master AWS account.
This API operation uses pagination. Specify the `nextToken` parameter in the
next request to return more results.
"""
def describe_events_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEventsForOrganization", input, options)
end
@doc """
This operation provides status information on enabling or disabling AWS Health
to work with your organization.
To call this operation, you must sign in as an IAM user, assume an IAM role, or
sign in as the root user (not recommended) in the organization's master account.
"""
def describe_health_service_status_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DescribeHealthServiceStatusForOrganization",
input,
options
)
end
@doc """
Disables AWS Health from working with AWS Organizations.
To call this operation, you must sign in as an AWS Identity and Access
Management (IAM) user, assume an IAM role, or sign in as the root user (not
recommended) in the organization's master AWS account. For more information, see
[Aggregating AWS Health events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) in
the *AWS Health User Guide*.
This operation doesn't remove the service-linked role (SLR) from the AWS master
account in your organization. You must use the IAM console, API, or AWS Command
Line Interface (AWS CLI) to remove the SLR. For more information, see [Deleting a Service-Linked
Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#delete-service-linked-role)
in the *IAM User Guide*.
You can also disable the organizational feature by using the Organizations
[DisableAWSServiceAccess](https://docs.aws.amazon.com/organizations/latest/APIReference/API_DisableAWSServiceAccess.html)
API operation. After you call this operation, AWS Health stops aggregating
events for all other AWS accounts in your organization. If you call the AWS
Health API operations for organizational view, AWS Health returns an error. AWS
Health continues to aggregate health events for your AWS account.
"""
def disable_health_service_access_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"DisableHealthServiceAccessForOrganization",
input,
options
)
end
@doc """
Calling this operation enables AWS Health to work with AWS Organizations.
This applies a service-linked role (SLR) to the master account in the
organization. To call this operation, you must sign in as an IAM user, assume an
IAM role, or sign in as the root user (not recommended) in the organization's
master account.
For more information, see [Aggregating AWS Health events](https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) in
the *AWS Health User Guide*.
"""
def enable_health_service_access_for_organization(%Client{} = client, input, options \\ []) do
Request.request_post(
client,
metadata(),
"EnableHealthServiceAccessForOrganization",
input,
options
)
end
end
|
lib/aws/generated/health.ex
| 0.905646
| 0.573619
|
health.ex
|
starcoder
|
defmodule TaxationDoubleAuction do
use XeeThemeScript
require Logger
use Timex
@modes ["wait", "description", "auction", "result"]
@tax_type ["lump_sum", "proportional", "regressive", "progressive"]
@tax_target ["buyer", "seller", "both"]
# Callbacks
def script_type do
:message
end
def install, do: nil
def init do
{:ok, %{data: %{
participants_num: 0,
mode: "wait",
tax_target: "both",
tax_type: "proportional",
lump_sum_tax: 200,
proportional_ratio: 10,
regressive_ratio: 20,
progressive_ratio: 50,
participants: %{},
buyer_bids: [],
highest_bid: nil,
seller_bids: [],
lowest_bid: nil,
deals: [],
started: false,
price_base: 100
}}}
end
def filter_data(data) do
rule = %{
_default: true,
price_base: false,
highest_bid: false,
lowest_bid: false,
participants: "users",
participants_num: "usersCount",
buyer_bids: "buyerBids",
seller_bids: "sellerBids",
tax_target: "taxTarget",
tax_type: "taxType",
lump_sum_tax: "lumpSumTax",
proportional_ratio: "proportionalRatio",
regressive_ratio: "regressiveRatio",
progressive_ratio: "progressiveRatio",
}
data
|> Map.update!(:buyer_bids, &mapelem(&1, 1))
|> Map.update!(:seller_bids, &mapelem(&1, 1))
|> Map.update!(:deals, &mapelem(&1, 0))
|> Transmap.transform(rule)
end
def filter_data(data, id) do
rule = %{
_default: true,
price_base: false,
highest_bid: false,
lowest_bid: false,
buyer_bids: "buyerBids",
seller_bids: "sellerBids",
participants_num: "usersCount",
participants: {"personal", %{
id => true,
:_spread => [[id]]
}},
tax_target: "taxTarget",
tax_type: "taxType",
lump_sum_tax: "lumpSumTax",
proportional_ratio: "proportionalRatio",
regressive_ratio: "regressiveRatio",
progressive_ratio: "progressiveRatio",
}
data
|> Map.update!(:buyer_bids, &mapelem(&1, 1))
|> Map.update!(:seller_bids, &mapelem(&1, 1))
|> Map.update!(:deals, &mapelem(&1, 0))
|> Transmap.transform(rule)
end
def join(%{participants: participants, participants_num: participants_num} = data, id) do
if not Map.has_key?(participants, id) do
participant = %{role: nil, bidded: false, money: nil, bid: nil, dealt: false, deal: nil}
participants = Map.put(participants, id, participant)
new = %{data | participants: participants, participants_num: participants_num + 1}
wrap_result(data, new)
else
wrap_result(data, data)
end
end
def dealt(data, id1, id2, money) do
data
|> update_in([:participants, id1], fn participant ->
%{participant | bidded: false, bid: money, dealt: true, deal: money}
end)
|> update_in([:participants, id2], fn participant ->
%{participant | bidded: false, dealt: true, deal: money}
end)
end
def set_highest_bid(%{buyer_bids: []} = data) do
%{ data | highest_bid: nil }
end
def set_highest_bid(%{buyer_bids: bids} = data) do
%{ data | highest_bid: Enum.max_by(bids, &elem(&1, 1)) }
end
def set_lowest_bid(%{seller_bids: []} = data) do
%{ data | lowest_bid: nil }
end
def set_lowest_bid(%{seller_bids: bids} = data) do
%{ data | lowest_bid: Enum.min_by(bids, &elem(&1, 1)) }
end
def handle_received(data, %{"action" => "start"}) do
wrap_result(data, %{data | started: true})
end
def handle_received(data, %{"action" => "stop"}) do
wrap_result(data, %{data | started: false})
end
def handle_received(data, %{"action" => "change_mode", "params" => mode}) do
wrap_result(data, %{data | mode: mode})
end
def handle_received(data, %{"action" => "change_tax_type", "params" => tax_type}) do
wrap_result(data, %{data | tax_type: tax_type})
end
def handle_received(data, %{"action" => "change_tax_target", "params" => tax_target}) do
wrap_result(data, %{data | tax_target: tax_target})
end
def handle_received(data, %{"action" => "change_lump_sum_tax", "params" => lump_sum_tax}) do
wrap_result(data, %{data | lump_sum_tax: lump_sum_tax})
end
def handle_received(data, %{"action" => "change_proportional_ratio", "params" => proportional_ratio}) do
wrap_result(data, %{data | proportional_ratio: proportional_ratio})
end
def handle_received(data, %{"action" => "change_regressive_ratio", "params" => regressive_ratio}) do
wrap_result(data, %{data | regressive_ratio: regressive_ratio})
end
def handle_received(data, %{"action" => "change_progressive_ratio", "params" => progressive_ratio}) do
wrap_result(data, %{data | progressive_ratio: progressive_ratio})
end
def handle_received(data, %{"action" => "match"}) do
participants = Enum.shuffle(data.participants) |> Enum.map_reduce(1, fn {id, participant}, acc ->
if rem(acc, 2) == 0 do
new_participant = %{
role: "buyer",
money: acc * data.price_base,
bidded: false,
bid: nil,
dealt: false,
deal: nil
}
else
new_participant = %{
role: "seller",
money: acc * data.price_base,
bidded: false,
bid: nil,
dealt: false,
deal: nil
}
end
{{id, new_participant}, acc + 1}
end) |> elem(0) |> Enum.into(%{})
new = %{data | participants: participants,
buyer_bids: [], seller_bids: [], deals: [],
highest_bid: nil, lowest_bid: nil }
wrap_result(data, new)
end
def handle_received(data, %{"action" => "fetch_contents"}) do
action = %{
type: "RECEIVE_CONTENTS",
payload: filter_data(data)
}
{:ok, %{data: data, host: %{action: action}}}
end
def handle_received(data, %{"action" => "fetch_contents"}, id) do
action = %{
type: "RECEIVE_CONTENTS",
payload: filter_data(data, id)
}
{:ok, %{data: data, participant: %{id => %{action: action}}}}
end
def handle_received(data, %{"action" => "bid", "params" => bid}, id) do
old = data
participant = Map.get(data.participants, id)
data = case participant do
# Seller
%{role: "seller", bidded: bidded, bid: previous_bid, money: money, dealt: false} when not is_nil(money) and bid >= money ->
data = remove_first(data, id, previous_bid, :lowest_bid, :seller_bids, &set_lowest_bid/1)
if not is_nil(data.highest_bid) and bid <= elem(data.highest_bid, 1) do
deal(data, :highest_bid, :buyer_bids, id, bid, previous_bid, &set_highest_bid/1)
else
bid(data, :lowest_bid, :seller_bids, id, bid, previous_bid, "NEW_SELLER_BIDS", fn most_bid, bid ->
bid < most_bid
end)
end
# Buyer
%{role: "buyer", bidded: bidded, bid: previous_bid, money: money, dealt: false} when not is_nil(money) and bid <= money ->
data = remove_first(data, id, previous_bid, :highest_bid, :buyer_bids, &set_highest_bid/1)
if not is_nil(data.lowest_bid) and bid >= elem(data.lowest_bid, 1) do
deal(data, :lowest_bid, :seller_bids, id, bid, previous_bid, &set_lowest_bid/1)
else
bid(data, :highest_bid, :buyer_bids, id, bid, previous_bid, "NEW_BUYER_BIDS", fn most_bid, bid ->
bid > most_bid
end)
end
end
wrap_result(old, data)
end
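# Matching sketch (illustrative numbers): with an outstanding lowest ask of
# 120, an incoming buyer bid of 130 crosses it, so a deal is recorded at 130
# (the incoming bid's value) and both participants are marked dealt.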
def remove_first(data, id, previous_bid, bid_key, key, set) do
  if previous_bid != nil do
    data = %{data | key => List.delete(data[key], {id, previous_bid})}
    if not is_nil(data[bid_key]) and elem(data[bid_key], 0) == id do
      set.(data)
    else
      data
    end
  else
    data
  end
end
def bid(data, bid_key, key, id, bid, _previous_bid, _action, func) do
bids = [{id, bid} | data[key]]
most_bid = if is_nil(data[bid_key]) or func.(elem(data[bid_key], 1), bid) do
{id, bid}
else
data[bid_key]
end
data = %{data | key => bids, bid_key => most_bid}
data = update_in(data, [:participants, id], fn participant ->
%{participant | bidded: true, bid: bid}
end)
data
end
def deal(data, bid_key, partner_key, id, bid, _previous_bid, set) do
now = DateTime.utc_now()
id2 = elem(data[bid_key], 0)
deals = [new_deal(bid, id, id2, now) | data.deals]
bids = List.delete(data[partner_key], data[bid_key])
data = %{data | :deals => deals, partner_key => bids}
data = dealt(data, id, id2, bid)
data = set.(data)
data
end
def new_deal(bid, id, id2, now) do
{bid, now, {id, id2}}
end
def compute_diff(old, %{data: new} = result) do
host = Map.get(result, :host, %{})
participant = Map.get(result, :participant, %{})
participant_tasks = Enum.map(old.participants, fn {id, _} ->
{id, Task.async(fn -> JsonDiffEx.diff(filter_data(old, id), filter_data(new, id)) end)}
end)
host_task = Task.async(fn -> JsonDiffEx.diff(filter_data(old), filter_data(new)) end)
host_diff = Task.await(host_task)
participant_diff = Enum.map(participant_tasks, fn {id, task} -> {id, %{diff: Task.await(task)}} end)
|> Enum.filter(fn {_, map} -> map_size(map.diff) != 0 end)
|> Enum.into(%{})
host = Map.merge(host, %{diff: host_diff})
host = if map_size(host.diff) == 0 do
Map.delete(host, :diff)
else
host
end
host = if map_size(host) == 0 do
nil
else
host
end
participant = Map.merge(participant, participant_diff, fn _k, v1, v2 ->
Map.merge(v1, v2)
end)
%{data: new, host: host, participant: participant}
end
def mapelem(list, i) do
Enum.map(list, &(elem(&1, i)))
end
def wrap_result(old, {:ok, result}) do
{:ok, compute_diff(old, result)}
end
def wrap_result(old, new) do
{:ok, compute_diff(old, %{data: new})}
end
end
|
lib/xee_double_auction.ex
| 0.562777
| 0.434461
|
xee_double_auction.ex
|
starcoder
|
defmodule GrovePi.Sound.HysteresisTrigger do
@behaviour GrovePi.Trigger
@default_high_threshold 510
@default_low_threshold 490
@moduledoc """
This is the default triggering mechanism for Sound events. Events
are either `loud` or `quiet` and include the trigger state. It
contains two thresholds, a `low_threshold` and a `high_threshold`,
for triggering `quiet` and `loud` events respectively.
This trigger will not fire an event unless it has fired the opposite
event or it is the first event fired. If a `loud` event fires, it
cannot fire again until a `quiet` event has fired. This keeps a
reading that floats near a threshold from firing excessively noisy
events.
## Examples
iex> GrovePi.Sound.HysteresisTrigger.init([])
{:ok, %GrovePi.Sound.HysteresisTrigger.State{value: 500, fireable: :any, low_threshold: 490, high_threshold: 510}}
iex> GrovePi.Sound.HysteresisTrigger.init(low_threshold: 10, high_threshold: 200)
{:ok, %GrovePi.Sound.HysteresisTrigger.State{value: 500, fireable: :any, low_threshold: 10, high_threshold: 200}}
### When there has been no event
iex> GrovePi.Sound.HysteresisTrigger.update(499, %{value: 500, fireable: :any, low_threshold: 490, high_threshold: 500})
{:ok, %{value: 499, fireable: :any, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(489, %{value: 500, fireable: :any, low_threshold: 490, high_threshold: 500})
{:quiet, %{value: 489, fireable: :loud, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(511, %{value: 500, fireable: :any, low_threshold: 490, high_threshold: 500})
{:loud, %{value: 511, fireable: :quiet, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(21, %{value: 500, fireable: :any, low_threshold: 10, high_threshold: 20})
{:loud, %{value: 21, fireable: :quiet, low_threshold: 10, high_threshold: 20}}
### When the last event was loud
iex> GrovePi.Sound.HysteresisTrigger.update(511, %{value: 500, fireable: :quiet, low_threshold: 490, high_threshold: 500})
{:ok, %{value: 511, fireable: :quiet, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(501, %{value: 500, fireable: :quiet, low_threshold: 490, high_threshold: 500})
{:ok, %{value: 501, fireable: :quiet, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(489, %{value: 500, fireable: :quiet, low_threshold: 490, high_threshold: 500})
{:quiet, %{value: 489, fireable: :loud, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(9, %{value: 500, fireable: :quiet, low_threshold: 10, high_threshold: 20})
{:quiet, %{value: 9, fireable: :loud, low_threshold: 10, high_threshold: 20}}
### When the last event was quiet
iex> GrovePi.Sound.HysteresisTrigger.update(470, %{value: 500, fireable: :loud, low_threshold: 490, high_threshold: 500})
{:ok, %{value: 470, fireable: :loud, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(491, %{value: 500, fireable: :loud, low_threshold: 490, high_threshold: 500})
{:ok, %{value: 491, fireable: :loud, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(521, %{value: 500, fireable: :loud, low_threshold: 490, high_threshold: 500})
{:loud, %{value: 521, fireable: :quiet, low_threshold: 490, high_threshold: 500}}
iex> GrovePi.Sound.HysteresisTrigger.update(21, %{value: 500, fireable: :loud, low_threshold: 10, high_threshold: 20})
{:loud, %{value: 21, fireable: :quiet, low_threshold: 10, high_threshold: 20}}
"""
defmodule State do
@moduledoc false
@enforce_keys [:high_threshold, :low_threshold]
defstruct [
value: 500,
fireable: :any,
high_threshold: nil,
low_threshold: nil,
]
end
@doc """
# Options
* `:high_threshold` - The level that the reading must exceed to fire a loud event. The default is `510`.
* `:low_threshold` - The level that the reading must recede below to fire a quiet event. The default is `490`.
"""
def init(opts) do
high_threshold = Keyword.get(opts, :high_threshold, @default_high_threshold)
low_threshold = Keyword.get(opts, :low_threshold, @default_low_threshold)
{:ok, %State{high_threshold: high_threshold, low_threshold: low_threshold}}
end
def update(new_value, %{fireable: fireable, low_threshold: low_threshold} = state) when new_value < low_threshold and fireable != :loud do
{:quiet, %{state | value: new_value, fireable: :loud}}
end
def update(new_value, %{fireable: fireable, high_threshold: high_threshold} = state) when new_value > high_threshold and fireable != :quiet do
{:loud, %{state | value: new_value, fireable: :quiet}}
end
def update(new_value, state) do
{:ok, %{state | value: new_value}}
end
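# Sketch of driving the trigger directly (values illustrative):
#
#     {:ok, state} = GrovePi.Sound.HysteresisTrigger.init(low_threshold: 480, high_threshold: 520)
#     GrovePi.Sound.HysteresisTrigger.update(530, state)
#     #=> {:loud, %GrovePi.Sound.HysteresisTrigger.State{value: 530, fireable: :quiet,
#     #      low_threshold: 480, high_threshold: 520}}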
end
|
lib/grovepi/sound/hysteresis_trigger.ex
| 0.896475
| 0.740714
|
hysteresis_trigger.ex
|
starcoder
|
defmodule EDS.Fixtures.Guards do
import Bitwise
def addition(a, b, c) when a + b === c, do: true
def addition(_, _, _), do: false
def binary_and(a, b, c) when (a &&& b) === c, do: true
def binary_and(_, _, _), do: false
def binary_band(a, b, c) when band(a, b) === c, do: true
def binary_band(_, _, _), do: false
def binary_or(a, b, c) when (a ||| b) === c, do: true
def binary_or(_, _, _), do: false
def binary_bor(a, b, c) when bor(a, b) === c, do: true
def binary_bor(_, _, _), do: false
def binary_left_bit_shift(a, b, c) when a <<< b === c, do: true
def binary_left_bit_shift(_, _, _), do: false
def binary_bsl(a, b, c) when bsl(a, b) === c, do: true
def binary_bsl(_, _, _), do: false
def binary_right_bit_shift(a, b, c) when a >>> b === c, do: true
def binary_right_bit_shift(_, _, _), do: false
def binary_bsr(a, b, c) when bsr(a, b) === c, do: true
def binary_bsr(_, _, _), do: false
def binary_not(a, b) when ~~~a === b, do: true
def binary_not(_, _), do: false
def binary_bnot(a, b) when bnot(a) === b, do: true
def binary_bnot(_, _), do: false
def binary_bxor(a, b, c) when bxor(a, b) === c, do: true
def binary_bxor(_, _, _), do: false
def is_atom?(a) when is_atom(a), do: true
def is_atom?(_), do: false
def is_binary?(a) when is_binary(a), do: true
def is_binary?(_), do: false
def is_bitstring?(a) when is_bitstring(a), do: true
def is_bitstring?(_), do: false
def is_boolean?(a) when is_boolean(a), do: true
def is_boolean?(_), do: false
def is_float?(a) when is_float(a), do: true
def is_float?(_), do: false
def is_function?(func) when is_function(func), do: true
def is_function?(_), do: false
def is_function?(func, arity) when is_function(func, arity), do: true
def is_function?(_, _), do: false
def is_integer?(a) when is_integer(a), do: true
def is_integer?(_), do: false
def is_list?(a) when is_list(a), do: true
def is_list?(_), do: false
def is_map?(map) when is_map(map), do: true
def is_map?(_), do: false
def is_nil?(a) when is_nil(a), do: true
def is_nil?(_), do: false
def is_number?(a) when is_number(a), do: true
def is_number?(_), do: false
def is_pid?(a) when is_pid(a), do: true
def is_pid?(_), do: false
def is_port?(a) when is_port(a), do: true
def is_port?(_), do: false
def is_reference?(a) when is_reference(a), do: true
def is_reference?(_), do: false
def is_tuple?(a) when is_tuple(a), do: true
def is_tuple?(_), do: false
def equal(a, b) when a == b, do: true
def equal(_, _), do: false
def strict_equal(a, b) when a === b, do: true
def strict_equal(_, _), do: false
def not_equal(a, b) when a != b, do: true
def not_equal(_, _), do: false
def strict_not_equal(a, b) when a !== b, do: true
def strict_not_equal(_, _), do: false
def multiply(a, b, c) when a * b === c, do: true
def multiply(_, _, _), do: false
def positive(a) when +a > 0, do: true
def positive(_), do: false
def negative(a) when -a > 0, do: true
def negative(_), do: false
def subtraction(a, b, c) when a - b === c, do: true
def subtraction(_, _, _), do: false
def division(a, b, c) when a / b === c, do: true
def division(_, _, _), do: false
def less(a, b) when a < b, do: true
def less(_, _), do: false
def less_or_equal(a, b) when a <= b, do: true
def less_or_equal(_, _), do: false
def greater(a, b) when a > b, do: true
def greater(_, _), do: false
def greater_or_equal(a, b) when a >= b, do: true
def greater_or_equal(_, _), do: false
def absolute(a) when abs(a) >= 0, do: true
def absolute(_), do: false
def boolean_and(a, b) when a and b, do: true
def boolean_and(_, _), do: false
def boolean_or(a, b) when a or b, do: true
def boolean_or(_, _), do: false
def binary_part(a, b) when binary_part(a, 0, 1) === b, do: true
def binary_part(_, _), do: false
def bit_size(a, b) when bit_size(a) === b, do: true
def bit_size(_, _), do: false
def byte_size(a, b) when byte_size(a) === b, do: true
def byte_size(_, _), do: false
def ceil(a, b) when ceil(a) === b, do: true
def ceil(_, _), do: false
def floor(a, b) when floor(a) === b, do: true
def floor(_, _), do: false
def div(a, b, c) when div(a, b) === c, do: true
def div(_, _, _), do: false
def elem(a, b, c) when elem(a, b) === c, do: true
def elem(_, _, _), do: false
def head(a, b) when hd(a) === b, do: true
def head(_, _), do: false
def in_list(a) when a in [:a], do: true
def in_list(_), do: false
def in_list_not(a) when a not in [:a], do: true
def in_list_not(_), do: false
def length(a, b) when length(a) === b, do: true
def length(_, _), do: false
def map_size(a, b) when map_size(a) === b, do: true
def map_size(_, _), do: false
def node_guard(node) when node() === node, do: true
def node_guard(_), do: false
def node_guard(pid, node) when node(pid) === node, do: true
def node_guard(_, _), do: false
def not_guard(a) when not a, do: true
def not_guard(_), do: false
def rem(a, b, c) when rem(a, b) === c, do: true
def rem(_, _, _), do: false
def round(a, b) when round(a) === b, do: true
def round(_, _), do: false
def self(pid) when self() === pid, do: true
def self(_), do: false
def tail(a, b) when tl(a) === b, do: true
def tail(_, _), do: false
def trunc(a, b) when trunc(a) === b, do: true
def trunc(_, _), do: false
def tuple_size(tuple, size) when tuple_size(tuple) === size, do: true
def tuple_size(_, _), do: false
end
|
test/fixtures/guards.ex
| 0.672439
| 0.475179
|
guards.ex
|
starcoder
|
defmodule Finitomata.Transition do
@moduledoc false
alias Finitomata.Transition
@type state :: atom()
@type event :: atom()
@typedoc """
The transition is represented by `from` and `to` states _and_ the `event`.
"""
@type t :: %{
__struct__: Transition,
from: state(),
to: state(),
event: event()
}
defstruct [:from, :to, :event]
@doc false
@spec from_parsed([binary()]) :: t()
def from_parsed([from, to, event])
when is_binary(from) and is_binary(to) and is_binary(event) do
[from, to, event] =
Enum.map(
[from, to, event],
&(&1 |> String.trim_leading("[") |> String.trim_trailing("]") |> String.to_atom())
)
%Transition{from: from, to: to, event: event}
end
@doc ~S"""
iex> {:ok, transitions} = Finitomata.PlantUML.parse("[*] --> s1 : foo\ns1 --> s2 : ok\ns2 --> [*] : ko")
...> Finitomata.Transition.entry(transitions)
:s1
"""
@spec entry([Transition.t()]) :: state()
def entry(transitions) do
transition = Enum.find(transitions, &match?(%Transition{from: :*}, &1))
transition.to
end
@doc ~S"""
iex> {:ok, transitions} = Finitomata.PlantUML.parse("[*] --> s1 : foo\ns1 --> s2 : ok\ns2 --> [*] : ko")
...> Finitomata.Transition.allowed?(transitions, :s1, :s2)
true
iex> Finitomata.Transition.allowed?(transitions, :s1, :*)
false
"""
@spec allowed?([Transition.t()], state(), state()) :: boolean()
def allowed?(transitions, from, to) do
not is_nil(Enum.find(transitions, &match?(%Transition{from: ^from, to: ^to}, &1)))
end
@doc ~S"""
iex> {:ok, transitions} = Finitomata.PlantUML.parse("[*] --> s1 : foo\ns1 --> s2 : ok\ns2 --> [*] : ko")
...> Finitomata.Transition.responds?(transitions, :s1, :ok)
true
iex> Finitomata.Transition.responds?(transitions, :s1, :ko)
false
"""
@spec responds?([Transition.t()], state(), event()) :: boolean()
def responds?(transitions, from, event) do
not is_nil(Enum.find(transitions, &match?(%Transition{from: ^from, event: ^event}, &1)))
end
@doc ~S"""
iex> {:ok, transitions} = Finitomata.PlantUML.parse("[*] --> s1 : foo\ns1 --> s2 : ok\ns2 --> [*] : ko")
...> Finitomata.Transition.allowed(transitions, :s1, :foo)
[:s2]
iex> Finitomata.Transition.allowed(transitions, :s1, :*)
[]
"""
@spec allowed([Transition.t()], state(), state()) :: [state()]
def allowed(transitions, from, event) do
for %Transition{from: ^from, to: to, event: ^event} <- transitions, do: to
end
@doc ~S"""
iex> {:ok, transitions} = Finitomata.PlantUML.parse("[*] --> s1 : foo\ns1 --> s2 : ok\ns2 --> [*] : ko")
...> Finitomata.Transition.states(transitions)
[:s1, :s2]
"""
@spec states([Transition.t()]) :: [state()]
def states(transitions) do
transitions
|> Enum.flat_map(fn %Transition{from: from, to: to} -> [from, to] end)
|> Enum.uniq()
|> Enum.reject(&(&1 == :*))
end
end
|
lib/finitomata/transition.ex
| 0.831793
| 0.52409
|
transition.ex
|
starcoder
|
defmodule Tanga do
@moduledoc """
Tanga is a collection of string helpers similar to those that can be found in Ruby.
"""
@type t :: binary
@digits hd('0')..hd('9') |> Enum.to_list
@uppercase_characters hd('A')..hd('Z') |> Enum.to_list
@lowercase_characters hd('a')..hd('z') |> Enum.to_list
@doc """
Builds a set of characters from the other_str parameter(s) using the procedure described for String#count. Returns a new string where runs of the same character that occur in this set are replaced by a single character. If no arguments are given, all runs of identical characters are replaced by a single character.
## Examples
iex> Tanga.squeeze("aaa")
"a"
iex> Tanga.squeeze("baaabaaa", "a")
"baba"
iex> Tanga.squeeze("bbaabbaa", ["a", "b"])
"baba"
"""
@spec squeeze(t) :: t
@spec squeeze(t, t) :: t
@spec squeeze(t, list) :: t
def squeeze(string) do
Regex.replace(~r/(.)\1+/, string, "\\1")
end
def squeeze(string, chars) when is_binary(chars) do
  Regex.replace(~r/([#{chars}])\1+/, string, "\\1")
end
def squeeze(string, chars) when is_list(chars) do
  chars_str = Enum.join(chars)
  Regex.replace(~r/([#{chars_str}])\1+/, string, "\\1")
end
@doc """
Returns the successor to str. The successor is calculated by incrementing characters starting from the rightmost alphanumeric (or the rightmost character if there are no alphanumerics) in the string. Incrementing a digit always results in another digit, and incrementing a letter results in another letter of the same case. Incrementing nonalphanumerics uses the underlying character set’s collating sequence.
If the increment generates a “carry,” the character to the left of it is incremented. This process repeats until there is no carry, adding an additional character if necessary.
## Examples
iex> Tanga.next("aaa")
"aab"
iex> Tanga.next("12")
"13"
"""
@spec next(t) :: t
def next(string) when string == "", do: ""
def next(string) do
String.reverse(string)
|> next_string
|> String.reverse
end
# A carry escaping the last character stops here; note that Ruby's String#next
# would instead grow the string ("zz".next == "aaa").
defp next_string(<<>>), do: <<>>
defp next_string(<<byte, rest::bitstring>>) do
case next_character(byte) do
{c, true} -> <<c>> <> next_string(rest)
{c, false} -> <<c>> <> rest
end
end
defp next_character(c) when c in @digits do
if (c + 1) in @digits do
{c + 1, false}
else
{hd(@digits), true}
end
end
defp next_character(c) when c in @uppercase_characters do
if (c + 1) in @uppercase_characters do
{c + 1, false}
else
index = Enum.find_index(@uppercase_characters, &(&1 == c))
if c = Enum.at(@uppercase_characters, index + 1) do
{c, false}
else
{hd(@uppercase_characters), true}
end
end
end
defp next_character(c) when c in @lowercase_characters do
if (c + 1) in @lowercase_characters do
{c + 1, false}
else
index = Enum.find_index(@lowercase_characters, &(&1 == c))
if c = Enum.at(@lowercase_characters, index + 1) do
{c, false}
else
{hd(@lowercase_characters), true}
end
end
end
defp next_character(c), do: {c, true}
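# Illustration of the carry behaviour implemented above (hypothetical session):
#
#     Tanga.next("az")  #=> "ba"   "z" wraps to "a" and carries into the "a"
#     Tanga.next("a9")  #=> "b0"   "9" wraps to "0" and the letter absorbs the carry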
@doc """
Centers str in width. If width is greater than the length of str, returns a new String of length width with str centered and padded with padstr; otherwise, returns str.
## Examples
iex> Tanga.center("fin", 9, "~")
"~~~fin~~~"
"""
@spec center(t, t, t) :: t
@spec center(t, float, t) :: t
@spec center(t, integer, t) :: t
def center(string, char_count, chars \\ " ")
def center(string, char_count, chars) when is_binary(char_count) do
int_char_count = String.to_integer(char_count)
do_center(string, int_char_count, chars)
end
def center(string, char_count, chars) when is_float(char_count) do
int_char_count = trunc(char_count)
do_center(string, int_char_count, chars)
end
def center(string, char_count, chars) when is_integer(char_count) do
do_center(string, char_count, chars)
end
defp do_center(string, char_count, chars) do
string_length = String.length(string)
if char_count <= string_length do
string
else
space = char_count - string_length
lpad = round(Float.floor(space / 2))
rpad = round(Float.ceil(space / 2))
String.pad_trailing(string, max(0, rpad) + string_length, chars)
|> String.pad_leading(char_count, chars)
end
end
@doc """
Inverts all characters in the given string to uppercase/lowercase accordingly
## Examples
iex> Tanga.swapcase("aBcD")
"AbCd"
iex> Tanga.swapcase("aB 123 xPTo")
"Ab 123 XptO"
iex> Tanga.swapcase("oLá")
"OlÁ"
"""
@spec swapcase(t) :: t
def swapcase(string) when is_binary(string) do
string
|> String.graphemes
|> Enum.map(fn(char) ->
if char =~ ~r/^\p{Lu}$/u,
do: String.downcase(char),
else: String.upcase(char)
end)
|> Enum.join
end
end
|
lib/tanga.ex
| 0.83901
| 0.423398
|
tanga.ex
|
starcoder
|
defmodule OMG.State.Transaction.Payment do
@moduledoc """
Internal representation of a payment transaction done on Plasma chain.
This module holds the representation of a "raw" transaction, i.e. without signatures or recovered input spenders
"""
alias OMG.Crypto
alias OMG.State.Transaction
alias OMG.Utxo
require Transaction
require Utxo
@zero_metadata <<0::256>>
defstruct [:inputs, :outputs, metadata: @zero_metadata]
@type t() :: %__MODULE__{
inputs: list(input()),
outputs: list(output()),
metadata: Transaction.metadata()
}
@type input() :: %{
blknum: non_neg_integer(),
txindex: non_neg_integer(),
oindex: non_neg_integer()
}
@type output() :: %{
owner: Crypto.address_t(),
currency: currency(),
amount: non_neg_integer()
}
@type currency() :: Crypto.address_t()
@max_inputs 4
@max_outputs 4
defmacro max_inputs do
quote do
unquote(@max_inputs)
end
end
defmacro max_outputs do
quote do
unquote(@max_outputs)
end
end
@doc """
Creates a new transaction from a list of inputs and a list of outputs.
Adds empty (zeroes) inputs and/or outputs to reach the expected size
of `@max_inputs` inputs and `@max_outputs` outputs.
assumptions:
```
length(inputs) <= @max_inputs
length(outputs) <= @max_outputs
```
"""
@spec new(
list({pos_integer, pos_integer, 0..3}),
list({Crypto.address_t(), currency(), pos_integer}),
Transaction.metadata()
) :: t()
def new(inputs, outputs, metadata \\ @zero_metadata)
def new(inputs, outputs, metadata)
when Transaction.is_metadata(metadata) and length(inputs) <= @max_inputs and length(outputs) <= @max_outputs do
inputs =
inputs
|> Enum.map(fn {blknum, txindex, oindex} -> %{blknum: blknum, txindex: txindex, oindex: oindex} end)
outputs =
outputs
|> Enum.map(fn {owner, currency, amount} -> %{owner: owner, currency: currency, amount: amount} end)
%__MODULE__{inputs: inputs, outputs: outputs, metadata: metadata}
end
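# A hedged usage sketch (the 20-byte addresses below are made up):
#
#     alice = <<1::160>>
#     eth   = <<0::160>>
#     Transaction.Payment.new([{1, 0, 0}], [{alice, eth, 100}])
#
# builds a payment spending output 0 of the first transaction in block 1,
# paying 100 units of the given currency to `alice`.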
@doc """
Transforms the structure of RLP items, after a successful RLP decode of a raw transaction, into a structure instance
"""
def reconstruct([inputs_rlp, outputs_rlp | rest_rlp])
when rest_rlp == [] or length(rest_rlp) == 1 do
with {:ok, inputs} <- reconstruct_inputs(inputs_rlp),
{:ok, outputs} <- reconstruct_outputs(outputs_rlp),
{:ok, metadata} <- reconstruct_metadata(rest_rlp),
do: {:ok, %__MODULE__{inputs: inputs, outputs: outputs, metadata: metadata}}
end
def reconstruct(_), do: {:error, :malformed_transaction}
defp reconstruct_inputs(inputs_rlp) do
{:ok, Enum.map(inputs_rlp, &from_new_rlp_input/1)}
rescue
_ -> {:error, :malformed_inputs}
end
# messy, see comments on the abstract output/input fixing this properly
defp from_new_rlp_input(binary_input) when is_binary(binary_input) do
Utxo.position(blknum, txindex, oindex) =
binary_input
|> :binary.decode_unsigned(:big)
|> Utxo.Position.decode!()
%{blknum: blknum, txindex: txindex, oindex: oindex}
end
defp reconstruct_outputs(outputs_rlp) do
outputs =
Enum.map(outputs_rlp, fn [owner, currency, amount] ->
with {:ok, cur12} <- parse_address(currency),
{:ok, owner} <- parse_address(owner) do
%{owner: owner, currency: cur12, amount: parse_int(amount)}
end
end)
if error = Enum.find(outputs, &match?({:error, _}, &1)), do: error, else: {:ok, outputs}
rescue
_ -> {:error, :malformed_outputs}
end
defp reconstruct_metadata([]), do: {:ok, @zero_metadata}
defp reconstruct_metadata([metadata]) when Transaction.is_metadata(metadata), do: {:ok, metadata}
defp reconstruct_metadata([_]), do: {:error, :malformed_metadata}
defp parse_int(binary), do: :binary.decode_unsigned(binary, :big)
# necessary, because RLP handles empty string equally to integer 0
@spec parse_address(<<>> | Crypto.address_t()) :: {:ok, Crypto.address_t()} | {:error, :malformed_address}
defp parse_address(binary)
defp parse_address(""), do: {:ok, <<0::160>>}
defp parse_address(<<_::160>> = address_bytes), do: {:ok, address_bytes}
defp parse_address(_), do: {:error, :malformed_address}
end
defimpl OMG.State.Transaction.Protocol, for: OMG.State.Transaction.Payment do
alias OMG.State.Transaction
alias OMG.Utxo
require Transaction
require Utxo
require Transaction.Payment
@empty_signature <<0::size(520)>>
# TODO: note this is fixed and improved in the abstract outputs/inputs PR
@payment_marker Transaction.Markers.payment()
@doc """
Turns a structure instance into a structure of RLP items, ready to be RLP encoded, for a raw transaction
"""
def get_data_for_rlp(%Transaction.Payment{inputs: inputs, outputs: outputs, metadata: metadata})
when Transaction.is_metadata(metadata),
do: [
@payment_marker,
Enum.map(inputs, &to_new_rlp_input/1),
Enum.map(outputs, fn %{owner: owner, currency: currency, amount: amount} -> [owner, currency, amount] end),
# used to be optional and as such was `if`-appended if not null here
# When it is not optional, and there's the if, dialyzer complains about the if
metadata
]
def get_outputs(%Transaction.Payment{outputs: outputs}) do
outputs
end
def get_inputs(%Transaction.Payment{inputs: inputs}) do
inputs
|> Enum.map(fn %{blknum: blknum, txindex: txindex, oindex: oindex} -> Utxo.position(blknum, txindex, oindex) end)
end
defp to_new_rlp_input(%{blknum: blknum, txindex: txindex, oindex: oindex}),
do: Utxo.position(blknum, txindex, oindex) |> Utxo.Position.encode() |> :binary.encode_unsigned(:big)
@doc """
True if the witnesses provided follow some extra custom validation.
Currently this covers the requirement for all the inputs to be signed on predetermined positions
"""
def valid?(%Transaction.Payment{}, %Transaction.Signed{sigs: sigs} = tx) do
tx
|> Transaction.get_inputs()
|> all_inputs_signed?(sigs)
end
@doc """
True if a payment can be applied, given a set of input UTXOs is present in the ledger.
Involves the checking of balancing of inputs and outputs for currencies
Returns the fees that this transaction is paying, mapped by currency
"""
@spec can_apply?(Transaction.Payment.t(), list(Utxo.t())) :: {:ok, map()} | {:error, :amounts_do_not_add_up}
def can_apply?(%Transaction.Payment{} = tx, input_utxos) do
outputs = Transaction.get_outputs(tx)
input_amounts_by_currency = get_amounts_by_currency(input_utxos)
output_amounts_by_currency = get_amounts_by_currency(outputs)
with :ok <- amounts_add_up?(input_amounts_by_currency, output_amounts_by_currency),
do: {:ok, fees_paid(input_amounts_by_currency, output_amounts_by_currency)}
end
@doc """
Effects of a payment transaction - spends all inputs and creates all outputs
"""
def get_effects(%Transaction.Payment{} = tx, blknum, tx_index) do
new_utxos_map = tx |> non_zero_utxos_from(blknum, tx_index) |> Map.new()
spent_input_pointers = Transaction.get_inputs(tx)
{spent_input_pointers, new_utxos_map}
end
defp all_inputs_signed?(non_zero_inputs, sigs) do
count_non_zero_signatures = Enum.count(sigs, &(&1 != @empty_signature))
count_non_zero_inputs = length(non_zero_inputs)
cond do
count_non_zero_signatures > count_non_zero_inputs -> {:error, :superfluous_signature}
count_non_zero_signatures < count_non_zero_inputs -> {:error, :missing_signature}
true -> true
end
end
defp non_zero_utxos_from(tx, blknum, tx_index) do
tx
|> utxos_from(blknum, tx_index)
|> Enum.filter(fn {_key, value} -> is_non_zero_amount?(value) end)
end
defp utxos_from(tx, blknum, tx_index) do
hash = Transaction.raw_txhash(tx)
tx
|> Transaction.get_outputs()
|> Enum.with_index()
|> Enum.map(fn {%{owner: owner, currency: currency, amount: amount}, oindex} ->
{Utxo.position(blknum, tx_index, oindex),
%Utxo{owner: owner, currency: currency, amount: amount, creating_txhash: hash}}
end)
end
defp is_non_zero_amount?(%{amount: 0}), do: false
defp is_non_zero_amount?(%{amount: _}), do: true
defp fees_paid(input_amounts_by_currency, output_amounts_by_currency) do
input_amounts_by_currency
|> Enum.into(%{}, fn {input_currency, input_amount} ->
# fee is implicit - it's the difference between funds owned and spend
implicit_paid_fee = input_amount - Map.get(output_amounts_by_currency, input_currency, 0)
{input_currency, implicit_paid_fee}
end)
end
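# For example (a sketch with a made-up currency address `eth`): inputs worth
# %{eth => 100} and outputs worth %{eth => 90} yield fees_paid(...) == %{eth => 10};
# the unspent remainder is the implicit fee.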
defp get_amounts_by_currency(utxos) do
utxos
|> Enum.group_by(fn %{currency: currency} -> currency end, fn %{amount: amount} -> amount end)
|> Enum.map(fn {currency, amounts} -> {currency, Enum.sum(amounts)} end)
|> Map.new()
end
defp amounts_add_up?(input_amounts, output_amounts) do
for {output_currency, output_amount} <- Map.to_list(output_amounts) do
input_amount = Map.get(input_amounts, output_currency, 0)
input_amount >= output_amount
end
|> Enum.all?()
|> if(do: :ok, else: {:error, :amounts_do_not_add_up})
end
end
|
apps/omg/lib/omg/state/transaction/payment.ex
| 0.859133
| 0.718928
|
payment.ex
|
starcoder
|
defmodule Day21Old do
@start [[".", "#", "."],
[".", ".", "#"],
["#", "#", "#"]]
def solveA(filename), do: solve filename, 5
def solveB(filename), do: solve filename, 18
def solve(filename, niter) do
book = parse filename
grid = rec_solveA @start, niter, book
grid
|> List.flatten
|> Enum.count(fn x -> x == "#" end)
end
def parse(filename) do
filename
|> File.stream!([:utf8], :line)
|> Enum.map(&String.trim/1)
|> Enum.reduce(%{}, fn line, acc ->
{k, v} = get_rule line
Map.put acc, k, v
end)
end
def get_rule(line) do
[left, right] = String.split(line, " => ")
{to_square(left), to_square(right)}
end
def to_square(pattern) do
pattern
|> String.split("/")
|> Enum.map(fn line -> String.split(line, "", trim: true) end)
end
def patterns(pixels) do
[pixels | rotations(pixels)]
|> Enum.reduce(MapSet.new, fn block, acc ->
MapSet.union acc, MapSet.new([fliph(block), flipv(block)])
end)
end
def fliph(pixels), do: Enum.reverse pixels
def flipv(pixels), do: Enum.map pixels, &Enum.reverse/1
def rotate(pixels) do
size = pixels |> length
empty = List.duplicate [], size
rec_rotate(pixels, empty)
end
def rec_rotate([], output), do: output
def rec_rotate(input, output) do
[line | tail] = input
output =
Enum.zip(line, output)
|> Enum.map(fn {pixel, rline} -> [pixel | rline] end)
rec_rotate(tail, output)
end
def rotations(pixels) do
r1 = rotate(pixels)
r2 = rotate(r1)
r3 = rotate(r2)
[r1, r2, r3]
end
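# Illustration: one clockwise rotation of a 2x2 block (values arbitrary):
#
#     rotate([["a", "b"],
#             ["c", "d"]])
#     #=> [["c", "a"],
#     #    ["d", "b"]]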
def print(pixels) do
pixels
|> Enum.map(fn line -> Enum.join line, "" end)
|> Enum.join("\n")
|> IO.write
IO.write("\n")
end
def rec_solveA(grid, 0, _book), do: grid
def rec_solveA(grid, iter, book) do
width = length grid
blocks =
if rem(width, 2) == 0 do
divide(grid, 2, width)
else
divide(grid, 3, width)
end
converted = Enum.map(blocks, fn b -> convert b, book end)
nblocks = length converted
grid =
if rem(width, 2) == 0 do
assemble(converted, 0, 0, [], 3, nblocks)
else
assemble(converted, 0, 0, [], 4, nblocks)
end
IO.puts "iter: #{iter}"
IO.puts "grid: #{inspect grid}"
rec_solveA grid, iter - 1, book
end
def divide(grid, chunk_size, width) do
grid
|> Enum.map(fn line -> Enum.chunk_every line, chunk_size end)
|> rec_divide(0, 0, [], chunk_size, width)
end
def rec_divide(grid, x, y, res, chunk_size, width) do
cond do
y == width ->
res
x == width / chunk_size ->
rec_divide(grid, 0, y + chunk_size, res, chunk_size, width)
true ->
res =
res ++ [Enum.map(0 .. chunk_size - 1, fn n ->
grid |> Enum.at(y + n) |> Enum.at(x)
end)]
rec_divide(grid, x + 1, y, res, chunk_size, width)
end
end
def convert(block, book) do
match =
patterns(block)
|> Enum.find(fn p -> Map.has_key? book, p end)
Map.get book, match
end
def assemble(grid, x, y, res, chunk_size, nblocks) do
cond do
y == nblocks ->
res
x == chunk_size ->
assemble(grid, 0, y + round(:math.sqrt(nblocks)), res, chunk_size, nblocks)
true ->
block =
Enum.map(0 .. round(:math.sqrt(nblocks) - 1), fn n ->
grid |> Enum.at(y + n) |> Enum.at(x)
end)
|> Enum.concat
res = res ++ [block]
assemble(grid, x + 1, y, res, chunk_size, nblocks)
end
end
end
|
2017/elixir/day21/lib/day21_old.ex
| 0.506103
| 0.622373
|
day21_old.ex
|
starcoder
|
defmodule ComplexNum.Cartesian do
import Kernel, except: [div: 2]
alias ComplexNum.{Cartesian}
@moduledoc """
A simple Complex Number in the form of `a + b*i`.
`a` and `b` are allowed to be any type that implements the `Numeric` behaviour.
This means Integer and Float, as well as custom-built data types like Decimal and Ratio.
Do note that certain kinds of operations (especially the conversion of Cartesian <-> Polar) require the calculation of square roots.
Computers are not able to calculate any square root with infinite precision in finite time.
This is exactly the reason that e.g. Decimal and Ratio do _not_ support `sqrt`.
Therefore, the only way to manage this, is to _explicitly_ convert a (high- or infinite-precision) data type that does not support square roots
to a data type that _does_ support it (like Floats), in which case precision will be lost.
"""
defstruct [:real, :imaginary]
alias Numbers, as: N
@doc """
Creates a new Cartesian Complex Number.
`real` and `imaginary` can be Integer, Float or any custom struct that implements the Numeric behaviour. (defined by The [Numbers](https://hex.pm/packages/numbers) package)
If a custom Numeric type is used, the other argument is converted to that type automatically.
"""
def new(real, imaginary \\ 0)
def new(real, imaginary) when is_number(real) and is_number(imaginary) do
%ComplexNum{mode: Cartesian, real: real, imaginary: imaginary}
end
def new(real = %numeric{}, imaginary = %numeric{}) do
%ComplexNum{mode: Cartesian, real: real, imaginary: imaginary}
end
def new(real = %numeric{}, imaginary) when is_number(imaginary) do
%ComplexNum{mode: Cartesian, real: real, imaginary: numeric.new(imaginary)}
end
def new(real, imaginary = %numeric{}) when is_number(real) do
%ComplexNum{mode: Cartesian, real: numeric.new(real), imaginary: imaginary}
end
@doc """
Extracts the 'real' part from a Complex number.
For a number in the Cartesian form `a + bi`, this is `a`.
"""
def real(ca = %ComplexNum{mode: Cartesian}), do: ca.real
def real(a), do: a
@doc """
Extracts the 'imaginary' part from a Complex number.
For a number in the Cartesian form `a + bi`, this is `b`.
"""
def imaginary(ca = %ComplexNum{mode: Cartesian}), do: ca.imaginary
def imaginary(_a), do: 0
@doc """
Adds two Complex Numbers in Cartesian form together.
This is a precise operation.
Note that this function expects both arguments to be Complex numbers in Cartesian form.
(optionally, one of the arguments might be an Integer or Float).
If you want to be able to add Complex numbers in Cartesian form and Polar form together,
use `ComplexNum.add/2` instead.
"""
def add(ca = %ComplexNum{mode: Cartesian}, cb = %ComplexNum{mode: Cartesian}) do
new(N.add(ca.real, cb.real), N.add(ca.imaginary, cb.imaginary))
end
def add(a, cb = %ComplexNum{mode: Cartesian}), do: add(new(a), cb)
def add(ca = %ComplexNum{mode: Cartesian}, b), do: add(ca, new(b))
@doc """
Subtracts one Complex Number in Cartesian form from another.
This is a precise operation.
Note that this function expects both arguments to be Complex numbers in Cartesian form.
(optionally, one of the arguments might be an Integer or Float).
If you want to be able to subtract Complex numbers in Cartesian form and Polar form together,
use `ComplexNum.sub/2` instead.
"""
def sub(ca = %ComplexNum{mode: Cartesian}, cb = %ComplexNum{mode: Cartesian}) do
new(N.sub(ca.real, cb.real), N.sub(ca.imaginary, cb.imaginary))
end
def sub(a, cb = %ComplexNum{mode: Cartesian}), do: sub(new(a), cb)
def sub(ca = %ComplexNum{mode: Cartesian}, b), do: sub(ca, new(b))
@doc """
Multiplies two Complex Numbers in Cartesian form.
This is a precise operation (but slower than multiplication of numbers in Polar form).
Note that this function expects both arguments to be Complex numbers in Cartesian form.
(optionally, one of the arguments might be an Integer or Float).
If you want to be able to multiply Complex numbers in Cartesian form and Polar form together,
use `ComplexNum.mult/2` instead.
"""
# (a + bi) * (a - bi) == a² + b²
def mult(
ca = %ComplexNum{mode: Cartesian, real: real, imaginary: imaginary},
_ca_conjugate = %ComplexNum{mode: Cartesian, real: real, imaginary: neg_imaginary}) when -imaginary == neg_imaginary do
magnitude_squared(ca)
end
# (a + bi) * (c + di)
def mult(ca = %ComplexNum{mode: Cartesian}, cb = %ComplexNum{mode: Cartesian}) do
# (a * c) - (b * d)
real = N.sub(N.mult(ca.real, cb.real), N.mult(ca.imaginary, cb.imaginary))
# (a * d + b * c)
imaginary = N.add(N.mult(ca.real, cb.imaginary), N.mult(ca.imaginary, cb.real))
new(real, imaginary)
end
def mult(a, cb = %ComplexNum{mode: Cartesian}), do: mult(new(a), cb)
def mult(ca = %ComplexNum{mode: Cartesian}, b), do: mult(ca, new(b))
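# A worked example of the general clause above, (3 + 4i)(1 - 2i)
# (a sketch; `new/2` is the constructor defined earlier in this module):
#
#     a = ComplexNum.Cartesian.new(3, 4)
#     b = ComplexNum.Cartesian.new(1, -2)
#     ComplexNum.Cartesian.mult(a, b)
#     #=> 11 - 2i, since real = 3*1 - 4*(-2) = 11 and imaginary = 3*(-2) + 4*1 = -2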
@doc """
Returns the *Complex Conjugate* of a Complex number in Cartesian form.
For `a + bi`, this is `a - bi`.
This is a precise operation.
"""
def conjugate(ca = %ComplexNum{mode: Cartesian}) do
new(ca.real, N.minus(ca.imaginary))
end
def conjugate(a), do: new(a) # as -0 === 0
@doc """
Divides a Complex Number in Cartesian form by another.
This is a precise operation (but slower than division of numbers in Polar form).
Note that this function expects both arguments to be Complex numbers in Cartesian form.
(optionally, one of the arguments might be an Integer or Float).
If you want to be able to multiply Complex numbers in Cartesian form and Polar form together,
use `ComplexNum.div/2` instead.
"""
# 1 / (a + bi)
def div(%ComplexNum{mode: Cartesian, real: 1, imaginary: 0}, cb = %ComplexNum{mode: Cartesian}), do: reciprocal(cb)
def div(ca = %ComplexNum{mode: Cartesian}, cb = %ComplexNum{mode: Cartesian}) do
# (a + bi)/(c + di)
# denom = c^2 + d^2
denom = N.add(N.mult(cb.real, cb.real), N.mult(cb.imaginary, cb.imaginary))
# (ac + bd)/denom
real = N.div(N.add(N.mult(ca.real, cb.real), N.mult(ca.imaginary, cb.imaginary)), denom)
# (bc - ad)/denom
imaginary = N.div(N.sub(N.mult(ca.imaginary, cb.real), N.mult(ca.real, cb.imaginary)), denom)
new(real, imaginary)
end
# 1 / (a + bi)
def div(1, cb = %ComplexNum{mode: Cartesian}), do: reciprocal(cb)
def div(a, cb = %ComplexNum{mode: Cartesian}), do: div(new(a), cb)
def div(ca = %ComplexNum{mode: Cartesian}, b), do: div(ca, new(b))
@doc """
The reciprocal of a Complex number (a + bi) is identical to 1 / (a + bi).
"""
def reciprocal(ca = %ComplexNum{mode: Cartesian}) do
denom = N.add(N.mult(ca.real, ca.real), N.mult(ca.imaginary, ca.imaginary))
real = N.div(ca.real, denom)
imaginary = N.div(N.minus(ca.imaginary), denom)
new(real, imaginary)
end
@doc """
Calculates the magnitude of the Cartesian Complex Number.
As this is done using Pythagoras, i.e. c = sqrt(a² + b²),
this is a lossy operation where (a²+b²) is converted to a float,
so the square root can be calculated.
"""
# |a + bi| = sqrt(a² + b²)
def magnitude(ca = %ComplexNum{mode: Cartesian, real: 0}), do: ca.imaginary
def magnitude(ca = %ComplexNum{mode: Cartesian, imaginary: 0}), do: ca.real
def magnitude(ca = %ComplexNum{mode: Cartesian}) do
:math.sqrt(N.to_float(magnitude_squared(ca)))
end
@doc """
Returns the square of the magnitude of the Cartesian Complex Number.
Because it is not necessary to calculate a square root, this is a precise operation.
"""
# |a + bi|² = a² + b²
def magnitude_squared(ca = %ComplexNum{mode: Cartesian}) do
N.add(N.mult(ca.real, ca.real),N.mult(ca.imaginary, ca.imaginary))
end
@doc """
The absolute value of a Cartesian Complex Number is a real part containing the magnitude of the number,
and an imaginary part of 0.
This is a lossy operation, as calculating the magnitude (see `magnitude/1` ) of a Cartesian Complex number is a lossy operation.
"""
def abs(ca = %ComplexNum{mode: Cartesian}) do
new(magnitude(ca), 0)
end
@doc """
Negates the complex number.
This means that both the real and the imaginary part are negated.
"""
def minus(ca = %ComplexNum{mode: Cartesian}) do
new(N.minus(ca.real), N.minus(ca.imaginary))
end
@doc """
Integer power function.
The result is calculated using the Exponentiation by Squaring algorithm,
which means that it performs log(n) multiplications to calculate (a + bi)^n.
Note that only integers are accepted as exponent, so you cannot calculate roots
using this function.
"""
def pow(base = %ComplexNum{mode: Cartesian}, exponent) when is_integer(exponent) do
pow_by_sq(base, exponent)
end
# Small powers
defp pow_by_sq(x, 1), do: x
defp pow_by_sq(x, 2), do: mult(x, x)
defp pow_by_sq(x, 3), do: mult(mult(x, x), x)
defp pow_by_sq(x, n) when is_integer(n), do: do_pow_by_sq(x, n)
# Exponentiation By Squaring.
defp do_pow_by_sq(x, n, y \\ 1)
defp do_pow_by_sq(_x, 0, y), do: y
defp do_pow_by_sq(x, 1, y), do: mult(x, y)
defp do_pow_by_sq(x, n, y) when n < 0, do: do_pow_by_sq(div(1, x), Kernel.-(n), y)
defp do_pow_by_sq(x, n, y) when rem(n, 2) == 0, do: do_pow_by_sq(mult(x, x), Kernel.div(n, 2), y)
defp do_pow_by_sq(x, n, y), do: do_pow_by_sq(mult(x, x), Kernel.div((n - 1), 2), mult(x, y))
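# Trace of the recursion above for pow(x, 5), a sketch with `x²` meaning mult(x, x):
#
#     pow(x, 5) -> do_pow_by_sq(x, 5, 1)
#               -> do_pow_by_sq(x², 2, x)    # n odd: fold one x into the accumulator
#               -> do_pow_by_sq(x⁴, 1, x)    # n even: square the base, halve n
#               -> mult(x⁴, x) = x⁵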
@doc """
Returns the angle (counter-clockwise in relation to the 'Real' axis of the Argand plane)
of a Complex number in cartesian form.
Note that this is a lossy operation, as the trigonometric function `atan2(b, a)` is used,
which is only available for built-in Floats.
Note that when called with `0 + 0i` there are infinitely many solutions,
and thus the result is formally _undefined_.
By keeping with the convention most practical implementations follow however,
instead of creating an exceptional situation,
the solution `angle(0 + 0i) = 0` is returned.
"""
def angle(ca = %ComplexNum{mode: Cartesian}) do
:math.atan2(N.to_float(ca.imaginary), N.to_float(ca.real))
end
@doc """
Converts a Complex Number in Cartesian form
to a Complex Number in Polar form.
This is a lossy operation, as both `magnitude/1` and `angle/1` need to be called,
which are both lossy operations (requiring the use of a square root and atan2, respectively).
"""
def to_polar(ca = %ComplexNum{mode: Cartesian, real: %numericType{}}) do
float_conversion = do_to_polar(ca)
converted_magnitude = numericType.new(float_conversion.real)
converted_angle = numericType.new(float_conversion.imaginary)
ComplexNum.Polar.new(converted_magnitude, converted_angle)
end
def to_polar(ca = %ComplexNum{mode: Cartesian, imaginary: %numericType{}}) do
float_conversion = do_to_polar(ca)
converted_magnitude = numericType.new(float_conversion.real)
converted_angle = numericType.new(float_conversion.imaginary)
ComplexNum.Polar.new(converted_magnitude, converted_angle)
end
def to_polar(ca = %ComplexNum{mode: Cartesian}), do: do_to_polar(ca)
defp do_to_polar(ca = %ComplexNum{mode: Cartesian}) do
ComplexNum.Polar.new(magnitude(ca), angle(ca))
end
end
|
lib/complex_num/cartesian.ex
| 0.925533
| 0.810554
|
cartesian.ex
|
starcoder
|
defmodule OMG.State.UtxoSet do
@moduledoc """
Handles all the operations done on the UTXOs held in the ledger
It provides the UTXOs requested by a collection of inputs and applies transaction effects (new UTXOs to create, spent UTXOs to delete).
It also translates those modifications into DB updates, and is able to interpret the UTXO query result from the DB
"""
alias OMG.Utxo
require Utxo
def init(utxos_query_result) do
Enum.into(utxos_query_result, %{}, fn {db_position, db_utxo} ->
{Utxo.Position.from_db_key(db_position), Utxo.from_db_value(db_utxo)}
end)
end
@doc """
Provides the outputs that are pointed by `inputs` provided
"""
def get_by_inputs(utxos, inputs) do
inputs
|> Enum.reduce_while({:ok, []}, fn input, acc -> get_utxo(utxos, input, acc) end)
|> reverse()
end
@doc """
Updates itself given a list of spent input pointers and a map of UTXOs created upon a transaction
"""
def apply_effects(utxos, spent_input_pointers, new_utxos_map) do
utxos |> Map.drop(spent_input_pointers) |> Map.merge(new_utxos_map)
end
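# Sketch (positions and UTXO below are placeholders): applying a transaction
# that spends `pos1` and creates `pos2`
#
#     UtxoSet.apply_effects(utxos, [pos1], %{pos2 => new_utxo})
#
# drops the spent entry from the map and merges in the created one.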
@doc """
Returns the DB updates required given a list of spent input pointers and a map of UTXOs created upon a transaction
"""
@spec db_updates(list(Utxo.Position.t()), %{Utxo.Position.t() => Utxo.t()}) ::
list({:put, :utxo, {Utxo.Position.db_t(), Utxo.t()}} | {:delete, :utxo, Utxo.Position.db_t()})
def db_updates(spent_input_pointers, new_utxos_map) do
db_updates_new_utxos = new_utxos_map |> Enum.map(&utxo_to_db_put/1)
db_updates_spent_utxos = spent_input_pointers |> Enum.map(&utxo_to_db_delete/1)
Enum.concat(db_updates_new_utxos, db_updates_spent_utxos)
end
def exists?(utxos, input_pointer),
do: Map.has_key?(utxos, input_pointer)
@doc """
Searches the UTXO set for a particular UTXO created with a `tx_hash` on `oindex` position.
Current implementation is **expensive**
"""
def scan_for_matching_utxo(utxos, tx_hash, oindex) do
Enum.find(utxos, &match?({Utxo.position(_, _, ^oindex), %Utxo{creating_txhash: ^tx_hash}}, &1))
end
defp get_utxo(utxos, position, {:ok, acc}) do
case Map.get(utxos, position) do
nil -> {:halt, {:error, :utxo_not_found}}
found -> {:cont, {:ok, [found | acc]}}
end
end
defp utxo_to_db_put({utxo_pos, utxo}),
do: {:put, :utxo, {Utxo.Position.to_db_key(utxo_pos), Utxo.to_db_value(utxo)}}
defp utxo_to_db_delete(utxo_pos),
do: {:delete, :utxo, Utxo.Position.to_db_key(utxo_pos)}
@spec reverse({:ok, any()} | {:error, :utxo_not_found}) :: {:ok, list(any())} | {:error, :utxo_not_found}
defp reverse({:ok, input_utxos}), do: {:ok, Enum.reverse(input_utxos)}
defp reverse({:error, :utxo_not_found} = result), do: result
end
|
apps/omg/lib/omg/state/utxo_set.ex
| 0.802903
| 0.4081
|
utxo_set.ex
|
starcoder
|
defmodule Binary.Dict do
@moduledoc """
This module implements a dictionary that forces the keys to
be converted to binaries on insertion. Currently it is
implemented using a `List.Dict` underneath, but this may
change in the future.
Check the `Dict` module for examples and documentation.
"""
import Kernel, except: [to_binary: 1]
@compile { :inline, to_binary: 1 }
defp to_binary(key) do
if is_binary(key), do: key, else: to_string(key)
end
defmacrop dict(data) do
quote do
{ Binary.Dict, unquote(data) }
end
end
def new, do: dict([])
def new(pairs) do
dict Enum.map pairs, fn({ k, v }) -> { to_binary(k), v } end
end
def new(pairs, transform) when is_function(transform) do
dict Enum.map pairs, fn(entry) ->
{ k, v } = transform.(entry)
{ to_binary(k), v }
end
end
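# Hypothetical session showing the key coercion:
#
#     d = Binary.Dict.new([foo: 1])
#     Binary.Dict.get(d, :foo)   #=> 1  (the key is stored and looked up as "foo")
#     Binary.Dict.get(d, "foo")  #=> 1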
@doc false
def keys(dict(data)) do
for { k, _ } <- data, do: k
end
@doc false
def values(dict(data)) do
for { _, v } <- data, do: v
end
@doc false
def size(dict(data)) do
length(data)
end
@doc false
def has_key?(dict(data), key) do
:lists.keymember(to_binary(key), 1, data)
end
@doc false
def get(dict(data), key, default \\ nil) do
case :lists.keyfind(to_binary(key), 1, data) do
{ _, value } -> value
false -> default
end
end
@doc false
def get!(dict(data), key) do
case :lists.keyfind(to_binary(key), 1, data) do
{ _, value } -> value
false -> raise(KeyError, key: key)
end
end
@doc false
def fetch(dict(data), key) do
case :lists.keyfind(to_binary(key), 1, data) do
{ _, value } -> { :ok, value }
false -> :error
end
end
@doc false
def put(dict(data), key, value) do
key = to_binary(key)
dict [{key, value}|keydelete(data, key)]
end
@doc false
def put_new(dict, key, value) do
update(dict, key, value, fn(v) -> v end)
end
@doc false
def delete(dict(data), key) do
dict keydelete(data, to_binary(key))
end
@doc false
def merge(dict, enum, fun \\ fn(_k, _v1, v2) -> v2 end) do
Enum.reduce enum, dict, fn({ k, v2 }, acc) ->
k = to_binary(k)
update(acc, k, v2, fn(v1) -> fun.(k, v1, v2) end)
end
end
@doc false
def update(dict(data), key, fun) do
dict keyupdate(data, to_binary(key), fun)
end
@doc false
def update(dict(data), key, initial, fun) do
dict keyupdate(data, to_binary(key), initial, fun)
end
@doc false
def empty(_) do
dict([])
end
@doc false
def to_list(dict(data)) do
data
end
defp keydelete(data, key) do
for { k, _ } = tuple <- data, key != k, do: tuple
end
defp keyupdate([{key, value}|dict], key, fun) do
[{key, fun.(value)}|keydelete(dict, key)]
end
defp keyupdate([{_, _} = e|dict], key, fun) do
[e|keyupdate(dict, key, fun)]
end
defp keyupdate([], key, _fun) do
raise(KeyError, key: key)
end
defp keyupdate([{key, value}|dict], key, _initial, fun) do
[{key, fun.(value)}|keydelete(dict, key)]
end
defp keyupdate([{_, _} = e|dict], key, initial, fun) do
[e|keyupdate(dict, key, initial, fun)]
end
defp keyupdate([], key, initial, _fun) do
[{key, initial}]
end
end
defimpl Enumerable, for: Binary.Dict do
def reduce({ Binary.Dict, data }, acc, fun), do: :lists.foldl(fun, acc, data)
def count({ Binary.Dict, data }), do: length(data)
def member?({ Binary.Dict, data }, v), do: :lists.member(v, data)
end
defimpl Access, for: Binary.Dict do
def access({ Binary.Dict, data }, key) do
case :lists.keyfind(to_string(key), 1, data) do
{ _, value } -> value
false -> nil
end
end
end
defimpl Inspect, for: Binary.Dict do
import Inspect.Algebra
def inspect({ Binary.Dict, data }, opts) do
concat ["#Binary.Dict<", Inspect.List.inspect(data, opts), ">"]
end
end
|
lib/binary/dict.ex
| 0.837952
| 0.65388
|
dict.ex
|
starcoder
|
defmodule Ecto.Adapters.SQL do
@moduledoc """
Behaviour and implementation for SQL adapters.
The implementation for SQL adapter provides a
pooled based implementation of SQL and also expose
a query function to developers.
Developers that use `Ecto.Adapters.SQL` should implement
a connection module with specifics on how to connect
to the database and also how to translate the queries
to SQL. See `Ecto.Adapters.SQL.Connection` for more info.
"""
@doc false
defmacro __using__(adapter) do
quote do
@behaviour Ecto.Adapter
@behaviour Ecto.Adapter.Migration
@behaviour Ecto.Adapter.Transaction
@conn __MODULE__.Connection
@adapter unquote(adapter)
## Worker
@doc false
defmacro __before_compile__(_env) do
:ok
end
@doc false
def start_link(repo, opts) do
{:ok, _} = Application.ensure_all_started(@adapter)
Ecto.Adapters.SQL.start_link(@conn, @adapter, repo, opts)
end
## Types
@doc false
def autogenerate(:id), do: nil
def autogenerate(:embed_id), do: Ecto.UUID.autogenerate()
def autogenerate(:binary_id), do: Ecto.UUID.autogenerate()
@doc false
def loaders({:embed, _} = type, _), do: [&Ecto.Adapters.SQL.load_embed(type, &1)]
def loaders(:binary_id, type), do: [Ecto.UUID, type]
def loaders(_, type), do: [type]
@doc false
def dumpers({:embed, _} = type, _), do: [&Ecto.Adapters.SQL.dump_embed(type, &1)]
def dumpers(:binary_id, type), do: [type, Ecto.UUID]
def dumpers(_, type), do: [type]
## Query
@doc false
def prepare(:all, query), do: {:cache, @conn.all(query)}
def prepare(:update_all, query), do: {:cache, @conn.update_all(query)}
def prepare(:delete_all, query), do: {:cache, @conn.delete_all(query)}
@doc false
def execute(repo, meta, prepared, params, preprocess, opts) do
Ecto.Adapters.SQL.execute(repo, meta, prepared, params, preprocess, opts)
end
@doc false
def insert_all(repo, %{source: {prefix, source}}, header, rows, returning, opts) do
Ecto.Adapters.SQL.insert_all(repo, @conn, prefix, source, header, rows, returning, opts)
end
@doc false
def insert(repo, %{source: {prefix, source}}, params, returning, opts) do
{fields, values} = :lists.unzip(params)
sql = @conn.insert(prefix, source, fields, [fields], returning)
Ecto.Adapters.SQL.struct(repo, @conn, sql, values, returning, opts)
end
@doc false
def update(repo, %{source: {prefix, source}}, fields, filter, returning, opts) do
{fields, values1} = :lists.unzip(fields)
{filter, values2} = :lists.unzip(filter)
sql = @conn.update(prefix, source, fields, filter, returning)
Ecto.Adapters.SQL.struct(repo, @conn, sql, values1 ++ values2, returning, opts)
end
@doc false
def delete(repo, %{source: {prefix, source}}, filter, opts) do
{filter, values} = :lists.unzip(filter)
sql = @conn.delete(prefix, source, filter, [])
Ecto.Adapters.SQL.struct(repo, @conn, sql, values, [], opts)
end
## Transaction
@doc false
def transaction(repo, opts, fun) do
Ecto.Adapters.SQL.transaction(repo, opts, fun)
end
@doc false
def rollback(repo, value) do
Ecto.Adapters.SQL.rollback(repo, value)
end
## Migration
@doc false
def execute_ddl(repo, definition, opts) do
sqls = @conn.execute_ddl(definition)
for sql <- List.wrap(sqls) do
Ecto.Adapters.SQL.query!(repo, sql, [], opts)
end
:ok
end
defoverridable [prepare: 2, execute: 6, insert: 5, update: 6, delete: 4, insert_all: 6,
execute_ddl: 3, loaders: 2, dumpers: 2, autogenerate: 1]
end
end
alias Ecto.LogProxy
alias Ecto.LogQuery
alias Ecto.Adapters.SQL.Sandbox
@doc """
Converts the given query to SQL according to its kind and the
adapter in the given repository.
## Examples
The examples below are meant for reference. Each adapter will
return a different result:
Ecto.Adapters.SQL.to_sql(:all, repo, Post)
{"SELECT p.id, p.title, p.inserted_at, p.created_at FROM posts as p", []}
Ecto.Adapters.SQL.to_sql(:update_all, repo,
from(p in Post, update: [set: [title: ^"hello"]]))
{"UPDATE posts AS p SET title = $1", ["hello"]}
"""
@spec to_sql(:all | :update_all | :delete_all, Ecto.Repo.t, Ecto.Queryable.t) ::
{String.t, [term]}
def to_sql(kind, repo, queryable) do
adapter = repo.__adapter__
{_meta, prepared, params} =
Ecto.Queryable.to_query(queryable)
|> Ecto.Query.Planner.query(kind, repo, adapter)
{prepared, params}
end
@doc """
Same as `query/4` but raises on invalid queries.
"""
@spec query!(Ecto.Repo.t, String.t, [term], Keyword.t) ::
%{rows: nil | [tuple], num_rows: non_neg_integer} | no_return
def query!(repo, sql, params, opts \\ []) do
query!(repo, sql, params, fn x -> x end, opts)
end
defp query!(repo, sql, params, mapper, opts) do
case query(repo, sql, params, mapper, opts) do
{:ok, result} -> result
{:error, err} -> raise err
end
end
@doc """
Runs custom SQL query on given repo.
In case of success, it must return an `:ok` tuple containing
a map with at least two keys:
* `:num_rows` - the number of rows affected
* `:rows` - the result set as a list. `nil` may be returned
instead of the list if the command does not yield any row
as result (but still yields the number of affected rows,
like a `delete` command without returning would)
## Options
* `:timeout` - The time in milliseconds to wait for a query to finish,
`:infinity` will wait indefinitely. (default: 15_000)
* `:pool_timeout` - The time in milliseconds to wait for a call to the pool
to finish, `:infinity` will wait indefinitely. (default: 5_000)
* `:log` - When false, does not log the query
## Examples
iex> Ecto.Adapters.SQL.query(MyRepo, "SELECT $1::integer + $2", [40, 2])
{:ok, %{rows: [{42}], num_rows: 1}}
"""
@spec query(Ecto.Repo.t, String.t, [term], Keyword.t) ::
{:ok, %{rows: nil | [tuple], num_rows: non_neg_integer}} | {:error, Exception.t}
def query(repo, sql, params, opts \\ []) do
query(repo, sql, params, fn x -> x end, opts)
end
defp query(repo, sql, params, mapper, opts) do
{pool, default_opts} = repo.__pool__
conn = get_conn(pool) || pool
connection = Module.concat(repo.__adapter__, Connection)
query = connection.query(sql)
opts =
opts ++ default_opts
|> Keyword.put(:logger, &repo.log/1)
|> Keyword.put(:encode_mapper, &connection.encode_mapper/1)
|> Keyword.put(:decode_mapper, mapper)
do_query(conn, query, params, opts)
end
defp do_query(%DBConnection{proxy_mod: proxy} = conn, query, params, opts) do
do_query(proxy, conn, query, params, opts)
end
defp do_query(pool, query, params, opts) do
proxy = Keyword.get(opts, :proxy)
do_query(proxy, pool, query, params, opts)
end
defp do_query(LogProxy, conn, query, params, opts) do
log_query = %LogQuery{query: query, params: params}
DBConnection.query(conn, log_query, params, opts)
end
defp do_query(_, conn, query, params, opts) do
DBConnection.query(conn, query, params, opts)
end
@doc ~S"""
Starts a transaction for test.
This function work by starting a transaction and storing the connection
back in the pool with an open transaction. On every test, we restart
the test transaction rolling back to the appropriate savepoint.
**IMPORTANT:** Test transactions only work if the connection pool is
`Ecto.Adapters.SQL.Sandbox`
## Example
The first step is to configure your database to use the
`Ecto.Adapters.SQL.Sandbox` pool. You set those options in your
`config/config.exs`:
config :my_app, Repo,
pool: Ecto.Adapters.SQL.Sandbox
Since you don't want those options in your production database, we
typically recommend creating a `config/test.exs` and adding the
following to the bottom of your `config/config.exs` file:
import_config "config/#{Mix.env}.exs"
Now with the test database properly configured, you can write
transactional tests:
# At the end of your test_helper.exs
# From now, all tests happen inside a transaction
Ecto.Adapters.SQL.begin_test_transaction(TestRepo)
defmodule PostTest do
# Tests that use the shared repository cannot be async
use ExUnit.Case
setup do
# Go back to a clean slate at the beginning of every test
Ecto.Adapters.SQL.restart_test_transaction(TestRepo)
:ok
end
test "create comment" do
assert %Post{} = TestRepo.insert!(%Post{})
end
end
In some cases, you may want to start the test transaction only
for specific tests and then roll it back. You can do it as:
defmodule PostTest do
# Tests that use the shared repository cannot be async
use ExUnit.Case
setup_all do
# Wrap this case in a transaction
Ecto.Adapters.SQL.begin_test_transaction(TestRepo)
# Roll it back once we are done
on_exit fn ->
Ecto.Adapters.SQL.rollback_test_transaction(TestRepo)
end
:ok
end
setup do
# Go back to a clean slate at the beginning of every test
Ecto.Adapters.SQL.restart_test_transaction(TestRepo)
:ok
end
test "create comment" do
assert %Post{} = TestRepo.insert!(%Post{})
end
end
"""
@spec begin_test_transaction(Ecto.Repo.t, Keyword.t) :: :ok
def begin_test_transaction(repo, opts \\ []) do
test_transaction(:begin, repo, opts)
end
@doc """
Restarts a test transaction, see `begin_test_transaction/2`.
"""
@spec restart_test_transaction(Ecto.Repo.t, Keyword.t) :: :ok
def restart_test_transaction(repo, opts \\ []) do
test_transaction(:restart, repo, opts)
end
@spec rollback_test_transaction(Ecto.Repo.t, Keyword.t) :: :ok
def rollback_test_transaction(repo, opts \\ []) do
test_transaction(:rollback, repo, opts)
end
defp test_transaction(req, repo, opts) do
{pool, defaults} = repo.__pool__
opts = opts ++ defaults
case Keyword.fetch!(opts, :pool) do
Sandbox ->
query = %Sandbox.Query{request: req}
DBConnection.execute!(pool, query, [], opts)
pool_mod ->
raise """
cannot #{req} test transaction with pool #{inspect pool_mod}.
In order to use test transactions with Ecto SQL, you need to
configure your repository to use #{inspect Sandbox}:
pool: #{inspect Sandbox}
"""
end
end
## Worker
@doc false
def start_link(connection, adapter, repo, opts) do
unless Code.ensure_loaded?(connection) do
raise """
could not find #{inspect connection}.
Please verify you have added #{inspect adapter} as a dependency:
{#{inspect adapter}, ">= 0.0.0"}
And remember to recompile Ecto afterwards by cleaning the current build:
mix deps.clean ecto
"""
end
{mod, opts} = connection.connection(opts)
if function_exported?(repo, :after_connect, 1) do
IO.puts :stderr, "warning: #{inspect repo}.after_connect/1 is deprecated. If you want to " <>
"perform some action after connecting, please set after_connect: {module, fun, args}" <>
"in your repository configuration"
opts = Keyword.put(opts, :after_connect, {repo, :after_connect, []})
end
DBConnection.start_link(mod, opts)
end
## Types
@doc false
def load_embed(type, value) do
Ecto.Type.load(type, value, fn
{:embed, _} = type, value -> load_embed(type, value)
type, value -> Ecto.Type.cast(type, value)
end)
end
@doc false
def dump_embed(type, value) do
Ecto.Type.dump(type, value, fn
{:embed, _} = type, value -> dump_embed(type, value)
_type, value -> {:ok, value}
end)
end
## Query
@doc false
def insert_all(repo, conn, prefix, source, header, rows, returning, opts) do
{rows, params} = unzip_inserts(header, rows)
sql = conn.insert(prefix, source, header, rows, returning)
%{rows: rows, num_rows: num} = query!(repo, sql, Enum.reverse(params), nil, opts)
{num, rows}
end
defp unzip_inserts(header, rows) do
Enum.map_reduce rows, [], fn fields, params ->
Enum.map_reduce header, params, fn key, acc ->
case :lists.keyfind(key, 1, fields) do
{^key, value} -> {key, [value|acc]}
false -> {nil, acc}
end
end
end
end
@doc false
def execute(repo, _meta, prepared, params, nil, opts) do
%{rows: rows, num_rows: num} = query!(repo, prepared, params, nil, opts)
{num, rows}
end
def execute(repo, %{select: %{fields: fields}}, prepared, params, preprocess, opts) do
mapper = &process_row(&1, preprocess, fields)
%{rows: rows, num_rows: num} = query!(repo, prepared, params, mapper, opts)
{num, rows}
end
@doc false
def struct(repo, conn, sql, values, returning, opts) do
case query(repo, sql, values, fn x -> x end, opts) do
{:ok, %{rows: nil, num_rows: 1}} ->
{:ok, []}
{:ok, %{rows: [values], num_rows: 1}} ->
{:ok, Enum.zip(returning, values)}
{:ok, %{num_rows: 0}} ->
{:error, :stale}
{:error, err} ->
case conn.to_constraints(err) do
[] -> raise err
constraints -> {:invalid, constraints}
end
end
end
defp process_row(row, preprocess, fields) do
Enum.map_reduce(fields, row, fn
{:&, _, [_, fields]} = field, acc ->
case split_and_not_nil(acc, length(fields), true, []) do
{nil, rest} -> {nil, rest}
{val, rest} -> {preprocess.(field, val, nil), rest}
end
field, [h|t] ->
{preprocess.(field, h, nil), t}
end) |> elem(0)
end
defp split_and_not_nil(rest, 0, true, _acc), do: {nil, rest}
defp split_and_not_nil(rest, 0, false, acc), do: {:lists.reverse(acc), rest}
defp split_and_not_nil([nil|t], count, all_nil?, acc) do
split_and_not_nil(t, count - 1, all_nil?, [nil|acc])
end
defp split_and_not_nil([h|t], count, _all_nil?, acc) do
split_and_not_nil(t, count - 1, false, [h|acc])
end
## Transactions
@doc false
def transaction(repo, opts, fun) do
{pool, default_opts} = repo.__pool__
opts = opts ++ default_opts
case get_conn(pool) do
nil ->
do_transaction(pool, opts, fun)
conn ->
DBConnection.transaction(conn, fn(_) -> fun.() end, opts)
end
end
defp do_transaction(pool, opts, fun) do
run = fn(conn) ->
try do
put_conn(pool, conn)
fun.()
after
delete_conn(pool)
end
end
DBConnection.transaction(pool, run, opts)
end
@doc false
def rollback(repo, value) do
{pool, _} = repo.__pool__
case get_conn(pool) do
nil -> raise "cannot call rollback outside of transaction"
conn -> DBConnection.rollback(conn, value)
end
end
## Connection helpers
defp put_conn(pool, conn) do
_ = Process.put(key(pool), conn)
:ok
end
defp get_conn(pool) do
Process.get(key(pool))
end
defp delete_conn(pool) do
_ = Process.delete(key(pool))
:ok
end
defp key(pool), do: {__MODULE__, pool}
end
|
deps/ecto/lib/ecto/adapters/sql.ex
| 0.866062
| 0.533762
|
sql.ex
|
starcoder
|
defmodule BattleCity.Display do
@moduledoc false
@spec columns(ComplexDisplay.t(), keyword()) :: keyword
def columns(o, opts \\ []) do
SimpleDisplay.columns(o) ++ ComplexDisplay.columns(o, Map.new(opts))
end
end
# defprotocol Grid do
# @spec grid(t) :: BattleCity.Context.grid()
# def grid(struct)
# end
# defimpl Grid, for: Any do
# defmacro __deriving__(module, _, color: color) do
# quote do
# defimpl Grid, for: unquote(module) do
# def grid(%{position: %{x: x, y: y}}) do
# {x, y, 1.9, 1.9, unquote(color)}
# end
# def grid(%{x: x, y: y}) do
# {x, y, 1.9, 1.9, unquote(color)}
# end
# end
# end
# end
# def grid(_), do: nil
# end
# defprotocol Size do
# @spec width(t) :: BattleCity.Position.width()
# def width(struct)
# @spec height(t) :: BattleCity.Position.height()
# def height(struct)
# end
# defimpl Size, for: Any do
# defmacro __deriving__(module, _, width: width, height: height) do
# quote do
# defimpl Size, for: unquote(module) do
# def width(_), do: unquote(width)
# def height(_), do: unquote(height)
# end
# end
# end
# def width(_), do: nil
# def height(_), do: nil
# end
defprotocol SimpleDisplay do
@fallback_to_any true
@spec columns(t) :: keyword
def columns(struct)
end
defimpl SimpleDisplay, for: Any do
defmacro __deriving__(module, _, only: [_ | _] = only) do
quote do
defimpl SimpleDisplay, for: unquote(module) do
def columns(arg) do
for i <- unquote(only), do: {i, Map.fetch!(arg, i)}
end
end
end
end
def columns(_), do: []
end
defprotocol ComplexDisplay do
@spec columns(t, map) :: keyword
def columns(struct, opts)
end
defimpl ComplexDisplay, for: BattleCity.Context do
def columns(%{} = o, %{stage_fn: stage_fn} = m) when is_function(stage_fn) do
columns(o, %{m | stage_fn: nil}) ++ [stage: stage_fn.(o.stage.name)]
end
def columns(%{} = o, %{tank_fn: tank_fn} = m) when is_function(tank_fn) do
tanks =
o.tanks
|> Enum.map(fn {id, %{position: p, __module__: module}} ->
x = String.pad_leading(to_string(p.x), 2)
y = String.pad_leading(to_string(p.y), 2)
name = String.pad_leading(to_string(module.name()), 6)
tank_fn.("{#{x} , #{y}} [#{name}] -> #{id}", id)
end)
|> Enum.intersperse({:safe, "<br />"})
columns(o, %{m | tank_fn: nil}) ++ [tanks: tanks]
end
def columns(%{} = o, _) do
[
objects: o.objects |> Map.values() |> Enum.map(&map_size/1) |> Enum.sum(),
power_ups: Enum.count(o.power_ups),
bullets: Enum.count(o.bullets)
]
end
end
defimpl ComplexDisplay, for: BattleCity.Stage do
## TODO environment_fn click
def columns(%{__module__: module} = o, %{environment_fn: environment_fn} = m)
when is_function(environment_fn) do
raw =
module.__raw__()
|> Enum.map(fn x ->
Enum.map_join(x, " ", fn a ->
String.pad_leading(a.raw, 2)
end)
end)
|> Enum.intersperse({:safe, "<br />"})
columns(o, %{m | environment_fn: nil}) |> Keyword.put(:raw, raw)
end
## TODO display from module
def columns(%{} = o, _) do
[
bots: o.bots |> Enum.map_join(", ", fn {m, c} -> "#{m.name()} -> #{c}" end),
raw: "raw"
]
end
end
defimpl ComplexDisplay, for: BattleCity.Tank do
def columns(%{}, _) do
[]
end
end
|
lib/battle_city/protocols/display.ex
| 0.563258
| 0.460713
|
display.ex
|
starcoder
|
defmodule TypedEctoSchema.SyntaxSugar do
@moduledoc false
# Defines the syntax sugar we apply on top of Ecto's DSL
# This works by transforming calls to Ecto's own macros to also call
# our TypeBuilder
alias TypedEctoSchema.SyntaxSugar
alias TypedEctoSchema.TypeBuilder
@schema_function_names [
:field,
:embeds_one,
:embeds_many,
:has_one,
:has_many,
:belongs_to,
:many_to_many
]
@embeds_function_names [:embeds_one, :embeds_many]
@spec apply_to_block(Macro.t()) :: Macro.t()
def apply_to_block(block) do
calls =
case block do
{:__block__, _, calls} ->
calls
call ->
[call]
end
new_calls = Enum.map(calls, &transform_expression/1)
{:__block__, [], new_calls}
end
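# Rough before/after sketch of the rewrite (not the exact generated AST):
#
#     field :name, :string
#
# is expanded by `transform_expression/1` below into, roughly:
#
#     field(:name, :string)
#     TypedEctoSchema.TypeBuilder.add_field(__MODULE__, :field, :name, :string, [])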
@spec transform_expression(Macro.t()) :: Macro.t()
defp transform_expression({function_name, _, [name, type, opts]})
when function_name in @schema_function_names do
ecto_opts = Keyword.drop(opts, [:__typed_ecto_type__, :enforce])
quote do
unquote(function_name)(unquote(name), unquote(type), unquote(ecto_opts))
unquote(TypeBuilder).add_field(
__MODULE__,
unquote(function_name),
unquote(name),
unquote(type),
unquote(opts)
)
end
end
defp transform_expression({function_name, _, [name, type]})
when function_name in @schema_function_names do
quote do
unquote(function_name)(unquote(name), unquote(type))
unquote(TypeBuilder).add_field(
__MODULE__,
unquote(function_name),
unquote(name),
unquote(type),
[]
)
end
end
defp transform_expression({:field, _, [name]}) do
quote do
field(unquote(name))
unquote(TypeBuilder).add_field(
__MODULE__,
:field,
unquote(name),
:string,
[]
)
end
end
defp transform_expression({:timestamps, _, [opts]} = call) do
quote do
unquote(call)
unquote(TypeBuilder).add_timestamps(
__MODULE__,
Keyword.merge(@timestamps_opts, unquote(opts))
)
end
end
defp transform_expression({function_name, _, [name, schema, opts, [do: block]]})
when function_name in @embeds_function_names do
quote do
{schema, opts} =
unquote(SyntaxSugar).__embeds_module__(
__ENV__,
unquote(schema),
unquote(opts),
unquote(Macro.escape(block))
)
unquote(function_name)(unquote(name), schema, opts)
unquote(TypeBuilder).add_field(
__MODULE__,
unquote(function_name),
unquote(name),
schema,
opts
)
end
end
defp transform_expression({:timestamps, ctx, []}) do
transform_expression({:timestamps, ctx, [[]]})
end
defp transform_expression({:::, _, [{function_name, _, [name, ecto_type, opts]}, type]})
when function_name in @schema_function_names do
transform_expression(
{function_name, [], [name, ecto_type, [{:__typed_ecto_type__, Macro.escape(type)} | opts]]}
)
end
defp transform_expression({:::, _, [{function_name, _, [name, ecto_type]}, type]})
when function_name in @schema_function_names do
transform_expression(
{function_name, [], [name, ecto_type, [__typed_ecto_type__: Macro.escape(type)]]}
)
end
defp transform_expression({:::, _, [{:field, _, [name]}, type]}) do
transform_expression({:field, [], [name, :string, [__typed_ecto_type__: Macro.escape(type)]]})
end
defp transform_expression(other), do: other
@doc false
def __embeds_module__(env, name, opts, block) do
{pk, opts} = Keyword.pop(opts, :primary_key, {:id, :binary_id, autogenerate: true})
block =
quote do
use TypedEctoSchema
@primary_key unquote(Macro.escape(pk))
typed_embedded_schema do
unquote(block)
end
end
module = Module.concat(env.module, name)
Module.create(module, block, env)
{module, opts}
end
end
|
lib/typed_ecto_schema/syntax_sugar.ex
| 0.591369
| 0.432003
|
syntax_sugar.ex
|
starcoder
|
defmodule ChallengeGov.Reports.DapReports do
@moduledoc """
Context for creating a report
"""
import Ecto.Query
alias ChallengeGov.Repo
alias ChallengeGov.Reports.DapReport
alias Stein.Storage
@doc """
Get a dap report
"""
def get_dap_report(id) do
case Repo.get(DapReport, id) do
nil ->
{:error, :not_found}
document ->
{:ok, document}
end
end
def all_last_six_months() do
last_six_months = Timex.shift(DateTime.utc_now(), months: -6)
DapReport
|> where([r], r.inserted_at > ^last_six_months)
|> order_by([r], desc: r.inserted_at, desc: r.id)
|> Repo.all()
end
@doc """
Upload a new DAP report
"""
def upload_dap_report(user, %{"file" => file, "name" => name}) do
file = Storage.prep_file(file)
key = <KEY>
path = dap_report_path(key, file.extension)
meta = [
{:content_disposition, ~s{attachment; filename="#{file.filename}"}}
]
allowed_extensions = [".pdf", ".txt", ".csv", ".jpg", ".png", ".tiff"]
case Storage.upload(file, path, meta: meta, extensions: allowed_extensions) do
:ok ->
%DapReport{}
|> DapReport.create_changeset(file, key, name)
|> Repo.insert()
{:error, _reason} ->
user
|> Ecto.Changeset.change()
|> Ecto.Changeset.add_error(:file, "had an issue uploading")
|> Ecto.Changeset.apply_action(:insert)
end
end
@doc """
Delete a DAP report
Also removes the file from remote storage
"""
def delete_report(file) do
case Storage.delete(dap_report_path(file)) do
:ok ->
Repo.delete(file)
{:error, error} ->
{:error, error}
end
end
@doc """
Get a signed URL to view the report
"""
def download_report_url(file) do
Storage.url(dap_report_path(file.key, file.extension), signed: [expires_in: 3600])
end
@doc """
Get the storage path for a report
"""
def dap_report_path(key, extension), do: "/dap_reports/#{key}#{extension}"
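# For example (with a hypothetical key):
#
#     dap_report_path("abc123", ".pdf") #=> "/dap_reports/abc123.pdf"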
def dap_report_path(file = %DapReport{}) do
dap_report_path(file.key, file.extension)
end
end
|
lib/challenge_gov/dap_reports.ex
| 0.621656
| 0.408041
|
dap_reports.ex
|
starcoder
|
defmodule Grizzly.CommandClass.Powerlevel do
@type power_level_report :: %{power_level: power_level_description, timeout: non_neg_integer}
@type test_node_report :: %{
test_node_id: non_neg_integer,
status_of_operation: status_of_operation_description,
test_frame_acknowledged_count: non_neg_integer
}
@type power_level_value :: 0x00 | 0x01 | 0x02 | 0x03 | 0x04 | 0x05 | 0x06 | 0x07 | 0x08 | 0x09
@type power_level_description ::
:normal_power
| :minus1dBm
| :minus2dBm
| :minus3dBm
| :minus4dBm
| :minus5dBm
| :minus6dBm
| :minus7dBm
| :minus8dBm
| :minus9dBm
@type status_of_operation_description :: :test_failed | :test_success | :test_in_progress
@type status_of_operation_value :: 0x00 | 0x01 | 0x02
require Logger
@spec decode_power_level(power_level_value) :: power_level_description
def decode_power_level(0x00), do: :normal_power
def decode_power_level(0x01), do: :minus1dBm
def decode_power_level(0x02), do: :minus2dBm
def decode_power_level(0x03), do: :minus3dBm
def decode_power_level(0x04), do: :minus4dBm
def decode_power_level(0x05), do: :minus5dBm
def decode_power_level(0x06), do: :minus6dBm
def decode_power_level(0x07), do: :minus7dBm
def decode_power_level(0x08), do: :minus8dBm
def decode_power_level(0x09), do: :minus9dBm
@spec encode_power_level(power_level_description) ::
{:ok, power_level_value} | {:error, :invalid_arg, any()}
def encode_power_level(:normal_power), do: {:ok, 0x00}
def encode_power_level(:minus1dBm), do: {:ok, 0x01}
def encode_power_level(:minus2dBm), do: {:ok, 0x02}
def encode_power_level(:minus3dBm), do: {:ok, 0x03}
def encode_power_level(:minus4dBm), do: {:ok, 0x04}
def encode_power_level(:minus5dBm), do: {:ok, 0x05}
def encode_power_level(:minus6dBm), do: {:ok, 0x06}
def encode_power_level(:minus7dBm), do: {:ok, 0x07}
def encode_power_level(:minus8dBm), do: {:ok, 0x08}
def encode_power_level(:minus9dBm), do: {:ok, 0x09}
def encode_power_level(other), do: {:error, :invalid_arg, other}
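# Example round trip between description and wire value (per the clauses above):
#
#     {:ok, 0x05} = encode_power_level(:minus5dBm)
#     :minus5dBm  = decode_power_level(0x05)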
@spec decode_status_of_operation(status_of_operation_value) :: status_of_operation_description
def decode_status_of_operation(0x00), do: :test_failed
def decode_status_of_operation(0x01), do: :test_success
def decode_status_of_operation(0x02), do: :test_in_progress
end
|
lib/grizzly/command_class/powerlevel.ex
| 0.81372
| 0.483344
|
powerlevel.ex
|
starcoder
|
defmodule Lettuce do
@moduledoc """
Lettuce is a generic server process that checks the files within an Elixir
project that has lettuce as a dependency and then runs `iex -S mix`. It
initialises the state of the generic server with the `.ex` files inside `lib`
and their last modified time. By default `lib` is used, but you may specify
which folders you want to be watched.
## Configuration example
use Mix.Config
config :lettuce, folders_to_watch: ["apps"]
..
You can also change the refresh time to control how often the project files
will be checked.
use Mix.Config
config :lettuce, refresh_time: 1500
..
Even though the `start_link` will throw an error if the `Mix.env` equals
`:dev`, it is recommended to explicitly select the extra applications by
environment in the mix file.
def application do
[
extra_applications: extra_applications(Mix.env(), [:logger, ...])
]
end
defp extra_applications(:dev, default), do: default ++ [:lettuce]
defp extra_applications(_, default), do: default
"""
use GenServer
require Logger
alias Lettuce.Config
alias Mix.Tasks.Compile.Elixir, as: Compiler
@refresh_time Config.refresh_time()
@opts Config.Compiler.options()
@validations Config.Compiler.validations()
@doc false
@spec start_link(list) :: {:ok, pid} | {:error, term}
def start_link(_) do
GenServer.start_link(__MODULE__, nil, name: __MODULE__)
end
# Server (callbacks)
@impl true
def init(_) do
unless Config.silent?() do
Logger.info("current directory: #{File.cwd!()}")
Logger.info("watching folders: #{inspect(Config.folders_to_watch())}")
end
schedule_check()
{:ok, project_files()}
end
@impl true
def handle_info(:project_review, state) do
new_state =
state
|> List.myers_difference(project_files())
|> length()
|> recompile()
schedule_check()
{:noreply, new_state}
end
@spec schedule_check :: reference
defp schedule_check(), do: Process.send_after(self(), :project_review, @refresh_time)
@spec recompile(integer) :: [[file_last_modified()]]
defp recompile(len) when len != 1 do
unless Config.silent?() do
Logger.info("recompiling...")
end
OptionParser.parse!(@opts, strict: @validations)
Compiler.run(@opts)
project_files()
end
defp recompile(_), do: project_files()
@spec project_files() :: [[file_last_modified()]]
defp project_files() do
Enum.map(Config.folders_to_watch(), &folder_files/1)
end
@type file_last_modified :: {String.t(), File.erlang_time()}
@spec folder_files(String.t()) :: [file_last_modified]
defp folder_files(folder) do
"#{File.cwd!()}/#{folder}/**/*.ex"
|> Path.wildcard()
|> Enum.map(&put_mtime/1)
end
@spec put_mtime(String.t()) :: file_last_modified
defp put_mtime(file) do
%File.Stat{mtime: mtime} = File.lstat!(file)
{file, mtime}
end
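  # Shape of the server state built from the functions above (paths and
  # timestamps are hypothetical): one inner list per watched folder, each
  # entry a {path, mtime} tuple. A changed mtime makes the Myers diff against
  # the previous state longer than a single :eq segment, which triggers
  # recompile/1.
  #
  #     [
  #       [
  #         {"/app/lib/foo.ex", {{2021, 6, 1}, {12, 0, 0}}},
  #         {"/app/lib/bar.ex", {{2021, 6, 1}, {12, 0, 5}}}
  #       ]
  #     ]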
end
|
lib/lettuce.ex
| 0.83128
| 0.424144
|
lettuce.ex
|
starcoder
|
defmodule Argon2 do
@moduledoc """
Elixir wrapper for the Argon2 password hashing function.
Most applications will just need to use the `add_hash/2` and `check_pass/3`
convenience functions in this module.
For a lower-level API, see `Argon2.Base`.
## Configuration
See the documentation for `Argon2.Stats` for information about configuration.
## Argon2
Argon2 is the winner of the [Password Hashing Competition (PHC)](https://password-hashing.net).
Argon2 is a memory-hard password hashing function which can be used to hash
passwords for credential storage, key derivation, or other applications.
Argon2 has the following three variants (Argon2id is the default):
* Argon2d - suitable for applications with no threats from side-channel
timing attacks (eg. cryptocurrencies)
* Argon2i - suitable for password hashing and password-based key derivation
* Argon2id - a hybrid of Argon2d and Argon2i
Argon2i, Argon2d, and Argon2id are parametrized by:
* A **time** cost, which defines the amount of computation realized and
therefore the execution time, given in number of iterations
* A **memory** cost, which defines the memory usage, given in kibibytes
* A **parallelism** degree, which defines the number of parallel threads
More information can be found in the documentation for the `Argon2.Stats`
module and at the [Argon2 reference C implementation
repository](https://github.com/P-H-C/phc-winner-argon2).
## Comparison with Bcrypt / Pbkdf2
Argon2 has better password cracking resistance than Bcrypt and Pbkdf2.
Its main advantage is that, as it is a memory-hard function, it is designed
to withstand parallel attacks that use GPUs or other dedicated hardware.
"""
use Comeonin
alias Argon2.Base
@doc """
Generate a random salt.
The default length for the salt is 16 bytes. We do not recommend using
a salt shorter than the default.
"""
def gen_salt(salt_len \\ 16), do: :crypto.strong_rand_bytes(salt_len)
@doc """
Hashes a password with a randomly generated salt.
## Options
In addition to the `:salt_len` option shown below, this function also takes
options that are then passed on to the `hash_password` function in the
`Argon2.Base` module.
See the documentation for `Argon2.Base.hash_password/3` for further details.
* `:salt_len` - the length of the random salt
* the default is 16 (the minimum is 8) bytes
## Examples
The following examples show how to hash a password with a randomly-generated
salt and then verify a password:
iex> hash = Argon2.hash_pwd_salt("password")
...> Argon2.verify_pass("password", hash)
true
iex> hash = Argon2.hash_pwd_salt("password")
...> Argon2.verify_pass("incorrect", hash)
false
"""
@impl true
def hash_pwd_salt(password, opts \\ []) do
Base.hash_password(password, Keyword.get(opts, :salt_len, 16) |> gen_salt, opts)
end
@doc """
Verifies a password by hashing the password and comparing the hashed value
with a stored hash.
See the documentation for `hash_pwd_salt/2` for examples of using this function.
"""
@impl true
def verify_pass(password, stored_hash) do
hash = :binary.bin_to_list(stored_hash)
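    # The NIF mirrors the reference C API: 0 is ARGON2_OK and -35 is
    # ARGON2_VERIFY_MISMATCH (constant names taken from the reference
    # implementation's argon2.h; stated here as an assumption about the
    # NIF contract rather than verified against this library's C source).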
case Base.verify_nif(hash, password, argon2_type(stored_hash)) do
0 -> true
-35 -> false
error -> raise ArgumentError, Base.handle_error(error)
end
end
defp argon2_type("$argon2id" <> _), do: 2
defp argon2_type("$argon2i" <> _), do: 1
defp argon2_type("$argon2d" <> _), do: 0
defp argon2_type(_) do
raise ArgumentError,
"Invalid Argon2 hash. " <> "Please check the 'stored_hash' input to verify_pass."
end
end
|
lib/argon2.ex
| 0.865153
| 0.626995
|
argon2.ex
|
starcoder
|
defmodule FakeServer.ResponseFactory do
@moduledoc """
Create reusable and customizable answers for your servers.
With response factories it is possible to create a default format of a given response and identify it with a name so that it can be shared across several test cases.
They are inspired by the [ExMachina's factories](https://github.com/thoughtbot/ex_machina), and were created to suit the use case where it is necessary to modify both the body or headers of a given response while testing different scenarios.
## What is a factory?
A factory is just a function with no arguments inside a module that `use FakeServer.ResponseFactory`.
The function name must end in `_response` and it must return a `FakeServer.HTTP.Response` structure. The factory name is everything before `_response`.
```elixir
# test/support/my_response_factory.ex
defmodule MyResponseFactory do
use FakeServer.ResponseFactory
def person_response do
ok(%{
name: Faker.Name.name,
email: Faker.Internet.free_email,
company: %{name: Faker.Company.name, county: Faker.Address.country}
}, %{"Content-Type" => "application/json"})
end
end
```
## Using a factory
To use a factory, just call `ResponseFactory.build(:factory_name)`. This function accepts two optional arguments:
- `body_opts`: This is a list with keys whose values should be overwritten in the body of factory's response. If any of the keys is set to `nil`, it will be deleted from the original body. If a key that does not exist on the original body is set here, **it will be ignored**.
- `header_opts`: This is a map with the headers whose values should be overwritten. If any of the headers is set to `nil`, it will be deleted from the original header list. If a key that does not exist on the original header list is set here, **it will be included on the headers list**.
You can also create a list of responses with `MyResponseFactory.build_list(list_size, :factory_name)`.
## Example
```elixir
# test/my_app/some_test.exs
defmodule MyApp.SomeTest do
use ExUnit.Case, async: false
import FakeServer
test_with_server "basic factory usage" do
customized_response = %{body: person} = MyResponseFactory.build(:person)
route "/person", do: customized_response
response = HTTPoison.get! FakeServer.address <> "/person"
body = Poison.decode!(response.body)
assert response.status_code == 200
assert person[:name] == body["name"]
assert person[:email] == body["email"]
assert person[:company][:name] == body["company"]["name"]
assert person[:company][:country] == body["company"]["country"]
end
test_with_server "setting custom attributes" do
route "/person", do: MyResponseFactory.build(:person, name: "John", email: "<EMAIL>")
response = HTTPoison.get! FakeServer.address <> "/person"
body = Poison.decode!(response.body)
assert response.status_code == 200
assert body["name"] == "John"
assert body["email"] == "<EMAIL>"
end
test_with_server "deleting an attribute" do
route "/person", do: MyResponseFactory.build(:person, name: nil)
response = HTTPoison.get! FakeServer.address <> "/person"
body = Poison.decode!(response.body)
assert response.status_code == 200
assert body["name"] == nil
end
test_with_server "overriding a header" do
route "/person", do: MyResponseFactory.build(:person, %{"Content-Type" => "application/x-www-form-urlencoded"})
response = HTTPoison.get! FakeServer.address <> "/person"
assert response.status_code == 200
assert Enum.any?(response.headers, fn(header) -> header == {"Content-Type", "application/x-www-form-urlencoded"} end)
end
test_with_server "deleting a header" do
route "/person", do: MyResponseFactory.build(:person, %{"Content-Type" => nil})
response = HTTPoison.get! FakeServer.address <> "/person"
assert response.status_code == 200
refute Enum.any?(response.headers, fn({key, _}) -> key == "Content-Type" end)
end
test_with_server "create a list of responses" do
person_list = MyResponseFactory.build_list(3, :person)
route "/person", do: person_list
Enum.each(person_list, fn(person) ->
response = HTTPoison.get! FakeServer.address <> "/person"
body = Poison.decode!(response.body)
assert response.status_code == 200
assert person.body[:name] == body["name"]
assert person.body[:email] == body["email"]
assert person.body[:company][:name] == body["company"]["name"]
assert person.body[:company][:country] == body["company"]["country"]
end)
end
end
```
"""
defmacro __using__(_) do
quote do
import FakeServer.HTTP.Response
      # A bodiless head carries the defaults; the clauses below stay
      # default-free, avoiding the "def build/2 conflicts with defaults
      # from build/3" compile error.
      def build(name, body_or_header_opts \\ [], header_opts \\ %{})

      def build(name, header_opts, _header_opts) when is_map(header_opts) do
        response = get_response(name)
        headers = override_headers(response.headers, header_opts)
        new(response.code, response.body, headers)
      end

      def build(name, body_opts, header_opts) when is_list(body_opts) do
        response = get_response(name)
        body = override_body_keys(response.body, body_opts)
        headers = override_headers(response.headers, header_opts)
        new(response.code, body, headers)
      end
def build_list(list_size, name) when is_integer(list_size) do
Enum.map(1..list_size, fn(_) -> __MODULE__.build(name) end)
end
def build_list(names_list) do
Enum.map(names_list, fn(name) -> __MODULE__.build(name) end)
end
defp get_response(name) do
function_name = "#{to_string(name)}_response" |> String.to_atom
apply(__MODULE__, function_name, [])
end
defp override_body_keys(original_body, keys) do
keys
|> Enum.reduce(original_body, fn({key, value}, body) ->
override_body_key(body, key, value)
end)
end
defp override_body_key(body, key, value) when is_nil(value), do: Map.delete(body, key)
defp override_body_key(body, key, value) do
if Map.has_key?(body, key), do: Map.put(body, key, value),
else: body
end
defp override_headers(original_headers, new_headers) do
new_headers
|> Enum.reduce(original_headers, fn({header, header_value}, result_headers) ->
if is_nil(header_value) do
Map.delete(result_headers, header)
else
Map.put(result_headers, header, header_value)
end
end)
end
end
end
end
|
lib/fake_server/response_factory.ex
| 0.813757
| 0.738315
|
response_factory.ex
|
starcoder
|
defmodule Sanbase.Signal.OperationEvaluation do
@moduledoc ~s"""
Module providing a single function, `operation_triggered?/2`, which, given a
value and an operation, returns `true` or `false`.
"""
def operation_triggered?(nil, _), do: false
def operation_triggered?(value, %{some_of: operations}) when is_list(operations) do
Enum.map(operations, fn op -> operation_triggered?(value, op) end)
|> Enum.member?(true)
end
def operation_triggered?(value, %{all_of: operations}) when is_list(operations) do
Enum.map(operations, fn op -> operation_triggered?(value, op) end)
|> Enum.all?(&(&1 == true))
end
def operation_triggered?(value, %{none_of: operations}) when is_list(operations) do
Enum.map(operations, fn op -> operation_triggered?(value, op) end)
|> Enum.all?(&(&1 == false))
end
# Above
def operation_triggered?(%{current: nil}, %{above: _}), do: false
def operation_triggered?(%{current: value}, %{above: above}), do: value >= above
def operation_triggered?(value, %{above: above}), do: value >= above
# Below
def operation_triggered?(%{current: nil}, %{below: _}), do: false
def operation_triggered?(%{current: value}, %{below: below}), do: value <= below
def operation_triggered?(value, %{below: below}), do: value <= below
# Inside channel
def operation_triggered?(%{current: nil}, %{inside_channel: _}), do: false
def operation_triggered?(%{current: value}, %{inside_channel: [lower, upper]})
when lower < upper,
do: value >= lower and value <= upper
def operation_triggered?(value, %{inside_channel: [lower, upper]})
when lower < upper,
do: value >= lower and value <= upper
# Outside channel
def operation_triggered?(%{current: nil}, %{outside_channel: _}), do: false
def operation_triggered?(%{current: value}, %{outside_channel: [lower, upper]})
when lower < upper,
do: value <= lower or value >= upper
def operation_triggered?(value, %{outside_channel: [lower, upper]})
when lower < upper,
do: value <= lower or value >= upper
# Percent up
def operation_triggered?(%{percent_change: nil}, %{percent_up: _}), do: false
def operation_triggered?(%{percent_change: percent_change}, %{percent_up: percent}),
do: percent_change > 0 and percent_change >= percent
def operation_triggered?(percent_change, %{percent_up: percent}),
do: percent_change > 0 and percent_change >= percent
# Percent down
def operation_triggered?(%{percent_change: nil}, %{percent_down: _}), do: false
def operation_triggered?(%{percent_change: percent_change}, %{percent_down: percent}),
do: percent_change < 0 and abs(percent_change) >= percent
def operation_triggered?(percent_change, %{percent_down: percent}),
do: percent_change < 0 and abs(percent_change) >= percent
# Amount up
def operation_triggered?(%{absolute_change: nil}, %{amount_up: _}), do: false
def operation_triggered?(%{absolute_change: amount_changed}, %{amount_up: amount}),
do: amount_changed > 0 and amount_changed >= amount
def operation_triggered?(amount_changed, %{amount_up: amount}),
do: amount_changed > 0 and amount_changed >= amount
# Amount down
def operation_triggered?(%{absolute_change: nil}, %{amount_down: _}), do: false
def operation_triggered?(%{absolute_change: amount_changed}, %{amount_down: amount}),
do: amount_changed < 0 and abs(amount_changed) >= amount
def operation_triggered?(amount_changed, %{amount_down: amount}),
do: amount_changed < 0 and abs(amount_changed) >= amount
def operation_triggered?(_, _), do: false
end
|
lib/sanbase/signals/operation/operation_evaluation.ex
| 0.761272
| 0.558026
|
operation_evaluation.ex
|
starcoder
|
defmodule Discovery.Data.Provider do
defstruct [
:id,
:name,
:description
]
use Accessible
def provider(id, name, description) do
%Discovery.Data.Provider{id: id, name: name, description: description}
end
def list() do
data = [
provider("1", "Cloud Storage", "Cloud Blob Storage"),
provider("2", "Cloud SQL Database", "Cloud SQL Database"),
provider("3", "IOT Broker", "IOT Broker"),
provider("4", "GitHub", "Events from github.com repositories"),
provider("5", "Other Cloud Storage", "A differe cloud blob storage"),
provider("6", "Cloud Pub/Sub System", "Messages from your pub/sub system")
]
data
end
def providers_for_type(type, _, _) do
type_providers = Map.get(Discovery.Data.Type.type_map(), type[:name])[:providers]
IO.puts("TYPE providers: #{inspect(type_providers)}")
providers = list()
|> Enum.filter(fn p -> Enum.member?(type_providers, p[:id]) end)
IO.puts("providers_for_type: #{inspect(type)} are #{inspect(providers)}")
{:ok, providers}
end
def types_for_provider(provider, _, _) do
IO.puts("types_for_provider: '#{inspect(provider)}'")
types = Discovery.Data.Type.types()
|> Enum.filter(fn a -> Enum.member?(a[:providers], provider[:id]) end)
|> Enum.map(fn a -> Map.delete(a, :providers) end)
IO.puts("types: #{inspect(types)}")
{:ok, types}
end
def by_name(_, %{name: term}, _) do
d_term = String.downcase(term)
# Search through matching providers by name.
results = Enum.filter(list(),
fn(x) -> String.downcase(x[:name]) == d_term end)
case results do
[r] -> {:ok, r}
[] -> {:error, "Provider named '#{inspect(term)}' was not found."}
end
end
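  # Illustrative resolver call (the 3-arity, GraphQL-resolver-style signature
  # is an assumption drawn from the arity; unused arguments passed as nil):
  #
  #     by_name(nil, %{name: "github"}, nil)
  #     #=> {:ok, %Discovery.Data.Provider{id: "4", name: "GitHub",
  #     #=>        description: "Events from github.com repositories"}}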
defp common_search_filter(items, term, order) do
d_term = String.downcase(term)
# Search through matching providers by name.
results = Enum.filter(items,
fn(x) ->
String.contains?(String.downcase(x[:name]), d_term)
end)
|> Enum.sort(fn(a,b) ->
String.downcase(a[:name]) <= String.downcase(b[:name])
end)
results =
case order do
:asc -> results
:desc -> Enum.reverse(results)
end
results
end
def resolve_types(_, %{matching: term, order: order}, _) do
results = common_search_filter(Discovery.Data.Type.types(), term, order)
{:ok, results}
end
def resolve_types(a, m = %{matching: _term}, b) do
resolve_types(a, Map.put(m, :order, :asc), b)
end
def resolve_types(a, m = %{order: _order}, b) do
resolve_types(a, Map.put(m, :matching, ""), b)
end
def resolve_types(_, _, _) do
{:ok, Discovery.Data.Type.types()}
end
def resolve_providers(_, %{matching: term, order: order}, _) do
results = common_search_filter(list(), term, order)
{:ok, results}
end
def resolve_providers(a, m = %{matching: _term}, b) do
resolve_providers(a, Map.put(m, :order, :asc), b)
end
def resolve_providers(a, m = %{order: _order}, b) do
resolve_providers(a, Map.put(m, :matching, ""), b)
end
def resolve_providers(_, _, _) do
{:ok, list()}
end
def resolve(_, %{matching: term, order: order}, _) do
results = common_search_filter(list() ++ Discovery.Data.Type.types(), term, order)
{:ok, results}
end
def resolve(a, m = %{matching: _term}, b) do
resolve(a, Map.put(m, :order, :asc), b)
end
def resolve(a, m = %{order: _order}, b) do
resolve(a, Map.put(m, :matching, ""), b)
end
def resolve(_, _, _) do
{:ok, list()}
end
def sources(_, _, _), do: []
def extensions(_, _, _) do
{:ok, []}
end
end
|
lib/discovery/data/provider.ex
| 0.57678
| 0.448728
|
provider.ex
|
starcoder
|
defmodule Guss.Canonical.Extensions do
@moduledoc false
@doc """
Converts resource extensions into canonical extension headers.
For more information, see:
https://cloud.google.com/storage/docs/access-control/signed-urls-v2
## Examples
iex> to_string(Guss.Canonical.Extensions.to_iodata(acl: :public_read, meta: [project: [name: "guss"]]))
"x-goog-acl:public-read\\nx-goog-meta-project-name:guss\\n"
"""
@spec to_iodata([{any(), any()}]) :: nil | [any()]
def to_iodata([]), do: nil
def to_iodata(extensions) when is_list(extensions), do: build_attrs(extensions)
defp headerize_attrs([]), do: nil
defp headerize_attrs(attrs) do
for {k, v} <- Enum.group_by(attrs, &elem(&1, 0), &elem(&1, 1)),
filter_header({k, v}) do
[hdr_prefix(), k, hdr_sep(), sanitize(v), hdr_delim()]
end
|> case do
[] -> nil
tags -> tags
end
end
defp hdr_prefix, do: [?x, ?-, ?g, ?o, ?o, ?g, ?-]
defp hdr_sep, do: ?:
defp hdr_delim, do: ?\n
defp nested_attrs(attr, dict, acc) do
Enum.reduce(dict, acc, fn {k, v}, acc ->
attr_name = "#{attr}-#{dasherize(k)}"
case is_list(v) do
true -> nested_attrs(attr_name, v, acc)
false -> [{attr_name, v} | acc]
end
end)
end
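  # For example, `meta: [project: [name: "guss"]]` flattens here to
  # {"meta-project-name", "guss"}; headerize_attrs/1 later adds the "x-goog-"
  # prefix, yielding "x-goog-meta-project-name:guss\n".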
defp build_attrs([]), do: []
defp build_attrs(attrs), do: build_attrs(attrs, [])
defp build_attrs([], acc), do: acc |> Enum.sort() |> headerize_attrs()
# Builds nested :meta values
defp build_attrs([{:meta, v} | t], acc) when is_list(v) do
build_attrs(t, nested_attrs(dasherize(:meta), v, acc))
end
# Ignores default ACL policy
defp build_attrs([{:acl, :private} | t], acc) do
build_attrs(t, acc)
end
# Dasherizes ACL values
defp build_attrs([{:acl, v} | t], acc) do
build_attrs(t, [{dasherize(:acl), dasherize(v)} | acc])
end
# Ignores empty values
defp build_attrs([{_, v} | t], acc) when is_nil(v) or v == "" do
build_attrs(t, acc)
end
# Converts atom values to strings
defp build_attrs([{k, v} | t], acc) when is_atom(v) do
build_attrs(t, [{dasherize(k), Atom.to_string(v)} | acc])
end
defp build_attrs([{k, v} | t], acc) do
build_attrs(t, [{dasherize(k), v} | acc])
end
defp dasherize(value) when is_atom(value), do: dasherize(Atom.to_string(value))
defp dasherize(value) when is_binary(value),
do: value |> String.trim() |> String.downcase() |> String.replace("_", "-")
defp sanitize([]), do: []
defp sanitize(values) when is_list(values), do: sanitize(values, [])
defp sanitize(value) when is_binary(value) do
value |> String.trim() |> String.replace(~r/[\r\n]+[\t\s]+/, " ")
end
defp sanitize([], acc), do: acc |> Enum.reverse() |> Enum.join(",")
defp sanitize([value | t], acc) do
sanitize(t, [sanitize(value) | acc])
end
defp filter_header({"encryption-key", _}), do: false
defp filter_header({"encryption-key-sha256", _}), do: false
defp filter_header(_kv), do: true
end
|
lib/guss/canonical.extensions.ex
| 0.703855
| 0.426142
|
canonical.extensions.ex
|
starcoder
|
defmodule Vote do
@moduledoc """
Provides Ranked (STV, AV) and Unranked (FPTP) ballot evaluation.
* STV uses a quota to determine when a candidate is elected in rounds.
Droop, Hare, Imperiali, and Hagenbach-Bischoff quotas are available.
* IRV is a degenerate case of STV where only one seat is elected,
and all rounds are evaluated until the candidate with the majority is elected
or the last candidate standing with the most votes is elected.
* FPTP is a degenerate case of AV where ballots have no rankings and thus
no distribution can be performed.
"""
@doc """
Evaluates an election.
* `ballots` a list of ballots;
with ranked votes for STV and AV, or unranked votes for FPTP.
* `seats` the number of seats to elect; 1 for AV and FPTP, or > 1 for STV
* Undervoting is handled by always choosing the candidate with least rank
(i.e. absolute rank isn't important, only relative rank is)
* Overvoting is handled by choosing one of the candidates (in ballot order)
and deferring the other(s) into the next round
## Ballots
Ballots are in the form of a list of maps where each map key is the
candidate and each map value is the ranking.
A ballot for FPTP should have only one key and a rank of 1.
The key may be either a string or a number.
```
[
%{"a" => 1, "b" => 2, ...},
%{"c" => 1, "d" => 2, ...},
...
]
```
## Results
Results are in the form of a map with an entry for each candidate.
Each candidate is represented with a map of the following values:
* `round` is the round that a candidate was :elected or :excluded in,
or not present for candidates that weren't considered in any round
* `votes` is the number of votes they obtained
(which may not be an integer if there was fractional distribution)
* `surplus` is the number of votes that they obtained beyond the quota which
may be transferred to next choice candidates. There will not be a surplus
for excluded candidates
* `exhausted` is the number of votes that could not be transferred because
there were no next choice candidates to choose from.
* `status` is `:elected`, `:excluded`,
or not present for candidates that weren't considered in any round
```
%{
"a" => %{round: 1, status: :elected, votes: 40.0, surplus: 20.0, exhausted: 0},
"b" => %{round: 2, status: :excluded, votes: 8.0, exhausted: 0},
"c" => %{round: 3, status: :elected, votes: 20.0, surplus: 0.0, exhausted: 0},
"d" => %{votes: 17.0}
}
```
## Options
* `:quota` - the quota will be calculated according to
`:imperali`, `:hare`, `:hagenbach_bischoff`, or `:droop` formulas; defaults to `:droop`
* `:callback` - a function that will receive the intermediate results for each round
"""
def eval(ballots, seats, options \\ []) do
# find the unique list of candidates from all the ballots
candidates =
ballots
|> Stream.flat_map(fn b -> Map.keys(b) end)
|> Stream.uniq()
# create a result that has an empty entry for every candidate
result =
candidates
|> Enum.reduce(%{}, fn c, acc -> Map.put(acc, c, %{votes: 0}) end)
# perform the initial vote distribution
result = distribute(ranked_votes(ballots), result)
# IO.inspect result
quota =
case seats do
1 ->
# make the quota a pure majority (equivalent to hagenbach_bischoff)
Enum.count(ballots) / 2
_ ->
# calculate the number of votes it takes to be elected
case Keyword.get(options, :quota, :droop) do
:imperali -> Float.floor(Enum.count(ballots) / (seats + 2))
:hare -> Float.floor(Enum.count(ballots) / seats)
:hagenbach_bischoff -> Float.floor(Enum.count(ballots) / (seats + 1))
_ -> Float.floor(Enum.count(ballots) / (seats + 1) + 1)
end
end
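    # Worked example for the default Droop quota (illustrative numbers):
    # 100 ballots and 2 seats give Float.floor(100 / (2 + 1) + 1) = 34.0,
    # so a candidate needs 34 votes to be elected in a round.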
eval(result, ballots, 1, 0, seats, quota, options)
end
# Recursively evaluate the subsequent rounds of the ranked election.
# Returns updated results.
defp eval(result, ballots, round, elected, seats, quota, options) do
callback = Keyword.get(options, :callback)
unless is_nil(callback) do callback.(round, result) end
# IO.puts "round #{round}"
# IO.inspect result
cond do
seats == elected ->
result
Enum.count(result, fn {_, v} -> !Map.has_key?(v, :status) end) == 1 ->
# nobody has satisfied the quota and only one candidate standing
# so they win by default even without satisfying the quota
{elected_candidate, elected_result} =
result
|> Enum.find(fn {_, v} -> !Map.has_key?(v, :status) end)
elected_result =
elected_result
|> Map.put(:surplus, elected_result.votes - quota)
|> Map.put(:status, :elected)
|> Map.put(:round, round)
Map.put(result, elected_candidate, elected_result)
true ->
# IO.inspect result
# find the candidate with the most votes
{elected_candidate, elected_result} =
result
|> Stream.filter(fn {_, v} -> !Map.has_key?(v, :status) end)
|> Enum.max_by(fn {_, v} -> v.votes end)
if elected_result.votes >= quota do
# candidate has enough votes to be elected
# IO.puts "electing #{elected_candidate}"
# determine how many votes need redistribution
surplus = elected_result.votes - quota
# update the result for the elected candidate
elected_result =
elected_result
|> Map.put(:surplus, surplus)
|> Map.put(:status, :elected)
|> Map.put(:round, round)
result = Map.put(result, elected_candidate, elected_result)
# distribute all the second choice votes from the ballots that elected this candidate
electing_ballots = used(ballots, elected_candidate)
# IO.puts "weight = #{surplus} / #{Enum.count(electing_ballots)}"
# IO.inspect electing_ballots
weight = surplus / Enum.count(electing_ballots)
result = distribute(electing_ballots, result, elected_candidate, weight)
# perform the next round using ballots that exclude the elected candidate
next_ballots = filter_candidates(ballots, [elected_candidate])
eval(result, next_ballots, round + 1, elected + 1, seats, quota, options)
else
# a candidate must be excluded
# find the candidate with the least votes
{excluded_candidate, excluded_result} =
result
|> Stream.filter(fn {_, v} -> !Map.has_key?(v, :status) end)
|> Enum.min_by(fn {_, v} -> v.votes end)
# IO.puts "excluding #{excluded_candidate}"
# update the result for the excluded candidate
excluded_result =
excluded_result
|> Map.put(:status, :excluded)
|> Map.put(:round, round)
result = Map.put(result, excluded_candidate, excluded_result)
# distribute all the second choice votes from the ballots that excluded this candidate
excluding_ballots = used(ballots, excluded_candidate)
# IO.puts "weight = #{excluded_result.votes} / #{Enum.count(excluding_ballots)}"
# IO.inspect excluding_ballots
weight = excluded_result.votes / Enum.count(excluding_ballots)
result = distribute(excluding_ballots, result, excluded_candidate, weight)
# perform the next round using ballots that exclude the excluded candidate
next_ballots = filter_candidates(ballots, [excluded_candidate])
eval(result, next_ballots, round + 1, elected, seats, quota, options)
end
end
end
@doc """
Returns a list of `ballots` that exclude all votes for `candidates`.
This should be called to remove all withdrawn candidates prior to calling `eval/3`.
"""
def filter_candidates(ballots, candidates) do
ballots
|> Stream.map(fn b -> Map.drop(b, candidates) end)
end
@doc """
Filters spoiled ballots for FPTP method
Ballots must have exactly one vote
"""
def spoil_plurality(ballots) do
ballots
|> Stream.filter(fn b -> Enum.count(b) == 1 end)
end
def approval(ballots, seats) do
candidates =
ballots
|> Stream.flat_map(fn b -> Map.keys(b) end)
|> Stream.uniq()
# create a result that has an empty entry for every candidate
result =
candidates
|> Enum.reduce(%{}, fn c, acc -> Map.put(acc, c, %{votes: 0}) end)
result = distribute(approval_votes(ballots), result)
1..seats
|> Enum.reduce(
result,
fn _, a ->
{elected_candidate, elected_result} =
a
|> Stream.filter(fn {_, v} -> !Map.has_key?(v, :status) end)
|> Enum.max_by(fn {_, v} -> v.votes end)
elected_result =
elected_result
|> Map.put(:status, :elected)
Map.put(a, elected_candidate, elected_result)
end
)
end
# returns a map of how many approvals a candidate has obtained
defp approval_votes(ballots) do
# count the number of votes for each candidate
ballots
|> Stream.flat_map(fn b -> Map.keys(b) end)
|> Enum.reduce(%{}, fn c, a -> Map.update(a, c, 1, &(&1 + 1)) end)
end
@doc """
Converts ranked ballots into unranked ballots.
This is useful for conducting a simulated plurality election from ranked ballots.
"""
def unranked(ballots) do
ballots
|> Stream.map(
fn b ->
{candidate, _} = Enum.min_by(b, fn {_, v} -> v end, fn -> {:nobody, 0} end)
%{candidate => 1}
end
)
end
# Returns a list of ballots that contributed to a candidates election or exclusion
defp used(ballots, candidate) do
ballots
|> Stream.filter(
fn b ->
b
|> Enum.min_by(fn {_, v} -> v end, fn -> {:exhausted, 0} end)
|> Tuple.to_list()
|> Enum.member?(candidate)
end
)
end
# Returns a map of how many votes a candidates has obtained
defp ranked_votes(ballots) do
# count the number of votes for each candidate
ballots
|> Stream.map(
fn b ->
# vote(s) with the lowest rank
# candidate from the vote
b
|> Enum.min_by(fn {_, v} -> v end, fn -> {:exhausted, 0} end)
|> Tuple.to_list()
|> List.first()
end
)
|> Enum.reduce(%{}, fn c, a -> Map.update(a, c, 1, &(&1 + 1)) end)
end
# Applies initial vote distribution to result for all candidates.
# Returns updated results.
defp distribute(counts, result) do
Enum.reduce(
result,
%{},
fn {rk, rv}, a ->
# vote count for the current candidate
cv = Map.get(counts, rk, 0)
# update result row for candidate
Map.put(a, rk, Map.update(rv, :votes, 0, &(&1 + cv)))
end
)
end
# Applies subsequent vote distribution to result for the elected or excluded candidate
# Returns updated results.
defp distribute(ballots, result, candidate, weight) do
counts = ranked_votes(filter_candidates(ballots, [candidate]))
result =
Enum.reduce(
result,
%{},
fn {rk, rv}, a ->
# vote count for the current candidate
count = Map.get(counts, rk, 0)
# update result row for candidate
Map.put(a, rk, Map.update(rv, :votes, 0, &(&1 + Float.round(weight * count, 5))))
end
)
# exhausted count
ev = Map.get(counts, :exhausted, 0)
# result row for the current candidate
rv = Map.get(result, candidate, 0)
Map.put(result, candidate, Map.put(rv, :exhausted, Float.round(weight * ev, 5)))
end
@doc """
Parses a BLT file `stream`.
The BLT file format is described here: https://www.opavote.com/help/overview#blt-file-format
Returns a map containing:
* `seats`: the number of seats to be elected
* `ballots`: a list of ballots that can be passed to `eval/3`
* `candidates`: a list of candidate names
* `withdrawn`: a list of candidate ids that should be filtered from the ballots (optional)
"""
def parse_blt(stream) do
# file consists of the following lines
# :initial 1 line <number of candidates c> <number of seats s>
# :ballot 0~1 line <the candidates that have withdrawn>+
# :ballot 1~n lines a ballot (see format below)
# :ballot 1 line 0 (end of ballots marker)
# :candidate c lines "<name of candidate>"
# :candidate 1 line "<name of election>"
# each ballot has the format
# <weight> <candidate> <candidate> ...0
# weight can be used to group identical ballots
# candidate is the integer id of the candidate (i.e. 1,2,3)
# candidate may be a - to indicate a skipped vote
# two candidates may be joined with = to indicate they have equal rank
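    # Illustrative BLT input (hypothetical election, 3 candidates, 1 seat);
    # note the parser strips everything after "#", so inline comments are fine:
    #
    #     3 1
    #     2 1 2 3 0    # weight 2: two ballots ranking candidate 1 > 2 > 3
    #     1 3 - 2 0    # one ballot: 3 first, second rank skipped, 2 third
    #     0
    #     "Alice"
    #     "Bob"
    #     "Carol"
    #     "Hypothetical Election"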
Enum.reduce(
stream,
%{state: :initial},
fn line, a ->
[data | _] = String.split(line, "#", parts: 2)
data = String.trim(data)
cond do
# comment only line
data == "" ->
a
# first line
a.state == :initial ->
[c, s] = String.split(data, " ")
{candidates, _} = Integer.parse(c)
{seats, _} = Integer.parse(s)
a
|> Map.put(:remaining, candidates)
|> Map.put(:seats, seats)
|> Map.put(:state, :ballot)
|> Map.put(:ballots, [])
|> Map.put(:candidates, [])
# end of ballots marker line
a.state == :ballot && data == "0" ->
Map.put(a, :state, :candidate)
# withdrawn candidates line
a.state == :ballot && String.starts_with?(data, "-") ->
withdrawn =
Regex.scan(~r/(-\d+)+/, data)
|> Enum.map(
fn [match, _] ->
{c, _} = Integer.parse(match)
-c
end
)
Map.put(a, :withdrawn, withdrawn)
# ballot line
a.state == :ballot ->
[weight | candidates] = String.split(data, " ")
{weight, _} = Integer.parse(weight)
ballot =
Enum.reduce(
candidates,
{1, %{}},
fn term, {rank, ballot} ->
case term do
"0" ->
# end of ballot marker
ballot
"-" ->
# undervote marker
{rank + 1, ballot}
_ ->
{
rank + 1,
Enum.reduce(
String.split(term, "="),
ballot,
fn c, a ->
{c, _} = Integer.parse(c)
Map.put(a, c, rank)
end
)
}
end
end
)
Map.update!(
a,
:ballots,
fn ballots ->
Enum.reduce(
1..weight,
ballots,
fn _, a ->
[ballot] ++ a
end
)
end
)
a.state == :candidate && a.remaining == 0 ->
a
|> Map.put(:title, String.replace(String.trim(data, "\""), "\\", ""))
|> Map.delete(:remaining)
|> Map.delete(:state)
a.state == :candidate ->
a
|> Map.update(
:candidates,
[],
fn candidates ->
candidates ++ [String.replace(String.trim(data, "\""), "\\", "")]
end
)
|> Map.update!(:remaining, &(&1 - 1))
true ->
a
end
# cond
end
)
# reduce
end
@doc """
Takes `results` with numeric candidate keys and returns results
with the candidate keys from `candidates`.
"""
def rekey(result, candidates) do
Enum.reduce(result, %{}, fn {i, v}, a -> Map.put(a, Enum.at(candidates, i - 1), v) end)
end
end
|
lib/vote.ex
| 0.847542
| 0.870046
|
vote.ex
|
starcoder
|
defmodule ExUnit do
defrecord Test, [:name, :case, :failure, :time] do
@moduledoc """
A record that keeps information about the test.
It is received by formatters and also accessible
in the metadata under the key `:test`.
"""
end
defrecord TestCase, [:name, :failure] do
@moduledoc """
A record that keeps information about the test case.
It is received by formatters and also accessible
in the metadata under the key `:case`.
"""
end
@moduledoc """
Basic unit testing framework for Elixir.
## Example
A basic setup for ExUnit is shown below:
# File: assertion_test.exs
# 1) Start ExUnit. You can pass some options
# (see `configure/1` for the list of options)
ExUnit.start
# 2) Create a new test module (test case) and use ExUnit.Case
defmodule AssertionTest do
# 3) Notice we pass async: true, this runs the test case
# concurrently with other test cases
use ExUnit.Case, async: true
# 4) A test is a function whose name starts with
# test and receives a context
def test_always_pass(_) do
assert true
end
# 5) It is recommended to use the "test" macro instead of def
test "the truth" do
assert true
end
end
To run the test above, all you need to do is to run the file
using `elixir` from the command line. Assuming you named your file
`assertion_test.exs`, you can run it as:
bin/elixir assertion_test.exs
## Case, callbacks and assertions
See `ExUnit.Case` and `ExUnit.Callbacks` for more information about
defining test cases.
The `ExUnit.Assertions` module contains a set of macros to easily
generate assertions with appropriate error messages.
## Integration with Mix
Mix is the project management and build tool for Elixir. Invoking `mix test`
from the command line will run tests in each file matching the pattern
`*_test.exs` found in the `test` directory of your project.
By convention, you could also create a `test_helper.exs` file inside the
`test` directory and put the code common to all tests there.
The minimum example of a `test_helper.exs` file would be:
# test/test_helper.exs
ExUnit.start
Then, in each test file, require `test_helper.exs` before defining test modules
(or cases):
# test/myproject_test.exs
Code.require_file "test_helper.exs", __DIR__
# ... test cases follow
"""
use Application.Behaviour
@doc false
def start(_type, []) do
pid = ExUnit.Sup.start_link
ExUnit.Server.start_load
pid
end
@doc """
Starts up ExUnit and automatically runs tests right before the
VM terminates. It accepts a set of options to configure `ExUnit`
(the same ones accepted by `configure/1`).
If you want to run tests manually, you can set :autorun to false.
"""
def start(options // []) do
:application.start(:elixir)
:application.start(:ex_unit)
configure(options)
if :application.get_env(:ex_unit, :autorun) != { :ok, false } do
:application.set_env(:ex_unit, :autorun, false)
System.at_exit fn
0 ->
failures = ExUnit.run
System.at_exit fn _ ->
if failures > 0, do: System.halt(1), else: System.halt(0)
end
_ ->
:ok
end
end
end
@doc """
Configures ExUnit.
## Options
ExUnit supports the following options:
* `:color` - When color should be used by specific formatters.
Defaults to the result of `IO.ANSI.terminal?`;
* `:formatter` - The formatter that will print results.
Defaults to `ExUnit.CLIFormatter`;
* `:max_cases` - Maximum number of cases to run in parallel.
Defaults to `:erlang.system_info(:schedulers_online)`;
* `:trace` - Set ExUnit into trace mode, this sets `:max_cases` to 1
and prints each test case and test while running;
* `:autorun` - If ExUnit should run by default on exit, defaults to true;
"""
def configure(options) do
Enum.each options, fn { k, v } ->
:application.set_env(:ex_unit, k, v)
end
end
@doc """
Returns ExUnit configuration.
"""
def configuration do
:application.get_all_env(:ex_unit)
end
@doc """
API used to run the tests. It is invoked automatically
if ExUnit is started via `ExUnit.start`.
Returns the number of failures.
"""
def run do
{ async, sync, load_us } = ExUnit.Server.start_run
opts = Keyword.put_new(configuration, :color, IO.ANSI.terminal?)
ExUnit.Runner.run async, sync, opts, load_us
end
end
|
lib/ex_unit/lib/ex_unit.ex
| 0.834643
| 0.829837
|
ex_unit.ex
|
starcoder
|
defmodule Chronos.Timezones do
@zones [
{ "A", "Alpha Time Zone", "+1:00" },
{ "ACDT", "Australian Central Daylight Time", "+10:30" },
{ "ACST", "Australian Central Standard Time", "+9:30" },
{ "ACT", "Australian Central Time", "+10:30" },
{ "ACWST", "Australian Central Western Standard Time", "+8:45" },
{ "ADT", "Arabia Daylight Time", "+3:00" },
{ "ADT", "Atlantic Daylight Time", "-3:00" },
{ "AEDT", "Australian Eastern Daylight Time", "+11:00" },
{ "AEST", "Australian Eastern Standard Time", "+10:00" },
{ "AET", "Australian Eastern Time", "+10:00" },
{ "AFT", "Afghanistan Time", "+4:30" },
{ "AKDT", "Alaska Daylight Time", "-8:00" },
{ "AKST", "Alaska Standard Time", "-9:00" },
{ "CET", "Central European Time", "+1:00" },
{ "CST", "Central Standard Time", "-6:00" },
{ "EET", "Eastern European Time", "+2:00" },
{ "EST", "Eastern Standard Time", "-5:00" },
{ "IRDT", "Iran Daylight Time", "+4:30" },
{ "IRST", "Iran Standard Time", "+3:30" },
{ "IST", "India Standard Time", "+5:30" },
{ "GMT", "Greenwich Mean Time", "+0:00" },
{ "JST", "Japan Standard Time", "+9:00" },
{ "MSD", "Moscow Daylight Time", "+4:00" },
{ "MSK", "Moscow Standard Time", "+3:00" },
{ "MST", "Mountain Standard Time", "-7:00" },
{ "PST", "Pacific Standard Time", "-8:00" },
{ "SGT", "Singapore Time", "+8:00" },
{ "WET", "Western European Time", "+0:00" },
{ "WEST", "Western European Summer Time", "+1:00" },
]
@doc """
Retrieve a timezone's offset
"""
def offset(nil), do: ""
def offset({_, _, offset}), do: offset
def offset(zone) when is_binary(zone) do
@zones |> Enum.find(fn({abbr, name, _}) -> zone == abbr || zone == name end) |> offset
end
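  # Illustrative lookups; ambiguous abbreviations (e.g. "ADT") resolve to
  # their first match in @zones:
  #
  #     offset("EST")                    #=> "-5:00"
  #     offset("Greenwich Mean Time")    #=> "+0:00"
  #     offset("ADT")                    #=> "+3:00" (Arabia, listed first)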
@doc """
Retrieve a timezone's abbreviation
"""
def abbreviation(nil), do: ""
def abbreviation({abbr, _, _}), do: abbr
def abbreviation(zone) when is_binary(zone) do
@zones |> Enum.find(fn({_, name, offset}) -> zone == offset || zone == name end) |> abbreviation
end
@doc """
Retrieve a timezone's name
"""
def name(nil), do: ""
def name({_, name, _}), do: name
def name(zone) when is_binary(zone) do
@zones |> Enum.find(fn({abbr, _, offset}) -> zone == offset || zone == abbr end) |> name
end
end
|
lib/chronos/timezones.ex
| 0.553747
| 0.53127
|
timezones.ex
|
starcoder
|
defmodule Traverse.Walker do
use Traverse.Types
alias Traverse.Cut
import Traverse.Enum, only: [reduce: 3]
@moduledoc """
Implements traversal functions; structure is not maintained unless the traversal functions maintain it themselves.
"""
@spec postwalk(any, any, t_simple_walker_fn) :: any
def postwalk(coll, acc, collector)
def postwalk(ele, acc, collector) when is_tuple(ele) do
acc =
ele
|> Tuple.to_list()
|> Enum.reduce(acc, &postwalk(&1, &2, collector))
collector.(ele, acc)
end
def postwalk(ele, acc, collector) when is_list(ele) or is_map(ele) do
acc =
ele
|> reduce(acc, &postwalk(&1, &2, collector))
collector.(ele, acc)
end
def postwalk(ele, acc, collector) do
collector.(ele, acc)
end
@doc """
Like `walk!`, implements collector augmentation for partial collector functions.
iex(1)> Traverse.Walker.postwalk!( {1, [2, %{a: 3}, 4], 5}, 0,
...(1)> fn (n, acc) when is_number(n) -> acc + n end)
15
"""
@spec postwalk!(any, any, t_simple_walker_fn) :: any
def postwalk!(ele, acc, collector), do: postwalk(ele, acc, wrapped(collector))
@doc """
`walk` implements a top-down recursive pre-order traversal of an arbitrary Elixir data structure.
iex(2)> Traverse.Walker.walk( {1, [2, %{a: 3}, 4], 5}, 0,
...(2)> fn (n, acc) when is_number(n) -> acc + n
...(2)> _, acc -> acc end )
15
The traversal function can avoid recursive descent by returning its accumulator value boxed in a `%Cut{acc: acc}` struct.
"""
@spec walk(any, any, t_simple_walker_fn) :: any
def walk(coll, acc, collector)
# def walk(%{__struct__: type}=struct, acc, collector) do
# walk(Map.delete(struct, :__struct__), acc, collector)
# end
def walk(ele, acc, collector) when is_map(ele) or is_list(ele) do
case collector.(ele, acc) do
%Cut{acc: acc} ->
acc
acc ->
ele
|> reduce(acc, &walk(&1, &2, collector))
end
end
def walk(ele, acc, collector) when is_tuple(ele) do
case collector.(ele, acc) do
%Cut{acc: acc} ->
acc
acc ->
ele
|> Tuple.to_list()
|> Enum.reduce(acc, &walk(&1, &2, collector))
end
end
def walk(ele, acc, collector) do
collector.(ele, acc)
end
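  # Illustrative cut (a sketch): skip descending into maps while summing
  # numbers. The map contributes nothing because the collector boxes the
  # accumulator in %Traverse.Cut{} before the walk can recurse into it.
  #
  #     iex> Traverse.Walker.walk({1, %{a: 2}, 3}, 0, fn
  #     ...>   m, acc when is_map(m) -> %Traverse.Cut{acc: acc}
  #     ...>   n, acc when is_number(n) -> acc + n
  #     ...>   _, acc -> acc
  #     ...> end)
  #     4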
@doc """
`walk!` implements a top-down recursive pre-order traversal of an arbitrary Elixir data structure.
In contrast to `walk`, it augments partial collector functions with a catch-all clause that acts as the identity on the accumulator:
_, acc -> acc
iex(3)> Traverse.Walker.walk!( {1, [2, %{a: 3}, 4], 5}, 0,
...(3)> fn (n, acc) when is_number(n) -> acc + n end)
15
"""
@spec walk!(any, any, t_simple_walker_fn) :: any
def walk!(coll, acc, collector), do: walk(coll, acc, wrapped(collector))
defp wrapped(fun) do
fn ele, acc ->
try do
fun.(ele, acc)
rescue
FunctionClauseError -> acc
end
end
end
end
|
lib/traverse/walker.ex
| 0.783202
| 0.463626
|
walker.ex
|
starcoder
|
import ExType.Typespec, only: [deftypespec: 2]
deftypespec Map do
@type t(key, value) :: %{optional(key) => value}
@spec delete(t(k, v), k) :: t(k, v) when k: any(), v: any()
@spec drop(t(k, v), T.p(Enumerable, k)) :: t(k, v) when k: any(), v: any()
@spec equal?(map(), map()) :: boolean()
@spec fetch(t(k, v), k) :: {:ok, v} | :error when k: any(), v: any()
@spec fetch!(t(k, v), k) :: v when k: any(), v: any()
@spec from_struct(atom() | struct()) :: map()
@spec get(t(k, v), k) :: v | nil when k: any(), v: any()
@spec get(t(k, v), k, default) :: v | default when k: any(), v: any(), default: any()
@spec get_and_update(t(k, v), k, (v -> {get, v} | :pop)) :: {get, t(k, v)}
when k: any(), v: any(), get: any()
@spec get_and_update!(t(k, v), k, (v -> {get, v} | :pop)) :: {get, t(k, v)}
when k: any(), v: any(), get: any()
@spec get_lazy(t(k, v), k, (() -> v)) :: v when k: any(), v: any()
@spec has_key?(t(k, v), k) :: boolean() when k: any(), v: any()
@spec keys(t(k, v)) :: [k] when k: any(), v: any()
@spec merge(t(k, v), t(k, v)) :: t(k, v) when k: any(), v: any()
@spec merge(t(k, v), t(k, v), (k, v, v -> v)) :: t(k, v) when k: any(), v: any()
@spec new() :: %{}
@spec new(T.p(Enumerable, {x, y})) :: t(x, y) when x: any(), y: any()
@spec new(T.p(Enumerable, x), (x -> {y, z})) :: t(y, z) when x: any(), y: any(), z: any()
@spec pop(t(k, v), k) :: {v | nil, t(k, v)} when k: any(), v: any()
@spec pop(t(k, v), k, default) :: {v | default, t(k, v)} when k: any(), v: any(), default: any()
@spec pop_lazy(t(k, v), k, (() -> v)) :: {v, t(k, v)} when k: any(), v: any()
@spec put(t(k, v), k, v) :: t(k, v) when k: any(), v: any()
@spec put_new(t(k, v), k, v) :: t(k, v) when k: any(), v: any()
@spec put_new_lazy(t(k, v), k, (() -> v)) :: t(k, v) when k: any(), v: any()
@spec replace!(t(k, v), k, v) :: t(k, v) when k: any(), v: any()
@spec split(t(k, v), T.p(Enumerable, k)) :: {t(k, v), t(k, v)} when k: any(), v: any()
@spec take(t(k, v), T.p(Enumerable, k)) :: t(k, v) when k: any(), v: any()
@spec to_list(t(k, v)) :: [{k, v}] when k: any(), v: any()
@spec update(t(k, v), k, v, (v -> v)) :: t(k, v) when k: any(), v: any()
@spec update!(t(k, v), k, (v -> v)) :: t(k, v) when k: any(), v: any()
@spec values(t(k, v)) :: [v] when k: any(), v: any()
end
|
lib/ex_type/typespec/elixir/map.ex
| 0.808861
| 0.563858
|
map.ex
|
starcoder
|
defmodule ExchangeApiWeb.OrderBookLive do
@moduledoc false
use ExchangeApiWeb, :live_view
alias ExchangeApiWeb.Ticker
def mount(%{"ticker" => ticker}, _session, socket) do
if connected?(socket), do: :timer.send_interval(1000, self(), :tick)
{:ok, tick} = Ticker.get_ticker(ticker)
{:ok, open_orders} = Exchange.open_orders(tick)
{:ok, last_price_sell} = Exchange.last_price(tick, :sell)
{:ok, last_price_buy} = Exchange.last_price(tick, :buy)
{:ok, last_size_sell} = Exchange.last_size(tick, :sell)
{:ok, last_size_buy} = Exchange.last_size(tick, :buy)
{:ok, spread} = Exchange.spread(tick)
{:ok, highest_ask_volume} = Exchange.highest_ask_volume(tick)
{:ok, lowest_ask_price} = Exchange.lowest_ask_price(tick)
{:ok, highest_bid_volume} = Exchange.highest_bid_volume(tick)
{:ok, highest_bid_price} = Exchange.highest_bid_price(tick)
sell_orders = open_orders |> Enum.filter(fn order -> order.side == :sell end)
buy_orders = open_orders |> Enum.filter(fn order -> order.side == :buy end)
{:ok,
assign(socket,
ticker: ticker,
last_price_sell: last_price_sell,
last_price_buy: last_price_buy,
last_size_sell: last_size_sell,
last_size_buy: last_size_buy,
sell_orders: sell_orders,
buy_orders: buy_orders,
spread: spread,
highest_ask_volume: highest_ask_volume,
lowest_ask_price: lowest_ask_price,
highest_bid_volume: highest_bid_volume,
highest_bid_price: highest_bid_price
)}
end
def handle_info(:tick, socket) do
{:ok, ticker} = Ticker.get_ticker(socket.assigns.ticker)
{:ok, open_orders} = Exchange.open_orders(ticker)
{:ok, last_price_sell} = Exchange.last_price(ticker, :sell)
{:ok, last_price_buy} = Exchange.last_price(ticker, :buy)
{:ok, last_size_sell} = Exchange.last_size(ticker, :sell)
{:ok, last_size_buy} = Exchange.last_size(ticker, :buy)
{:ok, spread} = Exchange.spread(ticker)
{:ok, highest_ask_volume} = Exchange.highest_ask_volume(ticker)
{:ok, lowest_ask_price} = Exchange.lowest_ask_price(ticker)
{:ok, highest_bid_volume} = Exchange.highest_bid_volume(ticker)
{:ok, highest_bid_price} = Exchange.highest_bid_price(ticker)
sell_orders = open_orders |> Enum.filter(fn order -> order.side == :sell end)
buy_orders = open_orders |> Enum.filter(fn order -> order.side == :buy end)
{:noreply,
assign(socket,
last_price_sell: last_price_sell,
last_price_buy: last_price_buy,
last_size_sell: last_size_sell,
last_size_buy: last_size_buy,
sell_orders: sell_orders,
buy_orders: buy_orders,
spread: spread,
highest_ask_volume: highest_ask_volume,
lowest_ask_price: lowest_ask_price,
highest_bid_volume: highest_bid_volume,
highest_bid_price: highest_bid_price
)}
end
end
|
lib/exchange_api_web/live/order_book_live.ex
| 0.621311
| 0.527377
|
order_book_live.ex
|
starcoder
|
defmodule Xandra.Batch do
@moduledoc """
Represents a batch of simple and/or prepared queries.
This module provides a data structure that can be used to group queries and
execute them as a Cassandra `BATCH` query. Batch queries can be executed
through `Xandra.execute/3` and `Xandra.execute!/3`; see their respective
documentation for more information.
Note that the `t/1` type is not documented as it's not meant
for public use. If you want to use batches in your typespecs, use `t:t/0`.
"""
alias Xandra.{Prepared, Simple}
@enforce_keys [:type]
defstruct @enforce_keys ++ [queries: [], default_consistency: nil, protocol_module: nil]
@type type :: :logged | :unlogged | :counter
@typedoc false
@type t(type) :: %__MODULE__{
type: type,
queries: [Simple.t() | Prepared.t()],
default_consistency: atom() | nil,
protocol_module: module() | nil
}
@type t() :: t(type)
@doc """
Creates a new batch query.
`type` represents the type of the batch query (`:logged`, `:unlogged`, or
`:counter`). See the Cassandra documentation for the meaning of these types.
## Examples
batch = Xandra.Batch.new()
"""
@spec new(type) :: t
def new(type \\ :logged) when type in [:logged, :unlogged, :counter] do
%__MODULE__{type: type}
end
@doc """
Adds a query to the given `batch`.
`query` has to be either a simple query (statement) or a prepared query. Note
that parameters have to be added alongside their corresponding query when
adding a query to a batch. In contrast with functions like `Xandra.execute/4`,
simple queries in batch queries only support positional parameters and **do
not** support named parameters; this is a current Cassandra limitation. If a
map of named parameters is passed alongside a simple query, an `ArgumentError`
exception is raised. Named parameters are supported with prepared queries.
## Examples
prepared = Xandra.prepare!(conn, "INSERT INTO users (name, age) VALUES (?, ?)")
batch =
Xandra.Batch.new()
|> Xandra.Batch.add(prepared, ["Rick", 60])
|> Xandra.Batch.add(prepared, ["Morty", 14])
|> Xandra.Batch.add(prepared, ["Jerry", 35])
|> Xandra.Batch.add("DELETE FROM users WHERE name = 'Jerry'")
Xandra.execute!(conn, batch)
"""
@spec add(t, Xandra.statement() | Prepared.t(), Xandra.values()) :: t
def add(batch, query, values \\ [])
def add(%__MODULE__{} = batch, statement, values)
when is_binary(statement) and is_list(values) do
add_query(batch, %Simple{statement: statement}, values)
end
def add(%__MODULE__{} = batch, %Prepared{} = prepared, values) when is_map(values) do
add_query(batch, prepared, Prepared.rewrite_named_params_to_positional(prepared, values))
end
def add(%__MODULE__{} = batch, %Prepared{} = prepared, values) when is_list(values) do
add_query(batch, prepared, values)
end
def add(%__MODULE__{}, _query, values) when is_map(values) do
raise ArgumentError,
"non-prepared statements inside batch queries only support positional " <>
"parameters (this is a current Cassandra limitation), got: #{inspect(values)}"
end
defp add_query(batch, query, values) do
queries = [%{query | values: values} | batch.queries]
%{batch | queries: queries}
end
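  # Queries are prepended here (O(1)) and reversed once at encode time (and
  # when inspected), so execution order matches the order of add/3 calls.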
defimpl DBConnection.Query do
alias Xandra.Frame
def parse(batch, _options) do
batch
end
def encode(batch, nil, options) do
batch = %{batch | queries: Enum.reverse(batch.queries)}
Frame.new(:batch, Keyword.take(options, [:compressor, :tracing]))
|> batch.protocol_module.encode_request(batch, options)
|> Frame.encode(batch.protocol_module)
end
def decode(_batch, response, _options) do
response
end
def describe(batch, _options) do
batch
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(batch, options) do
properties = [
type: batch.type,
queries: format_queries(Enum.reverse(batch.queries))
]
concat(["#Xandra.Batch<", to_doc(properties, options), ">"])
end
defp format_queries(queries) do
Enum.map(queries, fn
%Simple{statement: statement, values: values} ->
{statement, values}
%Prepared{values: values} = prepared ->
{prepared, values}
end)
end
end
end
|
lib/xandra/batch.ex
| 0.907691
| 0.732065
|
batch.ex
|
starcoder
|
defmodule Monad.Maybe do
@moduledoc """
A monad that represents something or nothing.
The concept of having something vs. nothing is similar to having a value vs.
`nil`.
"""
@typedoc """
The possible types of values that can occur (i.e. something and nothing).
"""
@type maybe_type :: :some | :none
use Monad.Behaviour
@opaque t :: %__MODULE__{type: maybe_type, value: term}
@doc false
defstruct type: :none, value: nil
@doc """
Returns a "nothing" state.
"""
defmacro none, do: quote(do: %Monad.Maybe{})
@doc """
Wraps the value into a maybe monad.
iex> some 42
%Monad.Maybe{type: :some, value: 42}
"""
@spec some(term) :: t
def some(value), do: %Monad.Maybe{type: :some, value: value}
@doc """
An alias for `some/1`.
"""
@spec pure(term) :: t
def pure(value), do: some(value)
@doc """
Unwraps the value from a maybe monad.
Does not work with `none` values, since they contain nothing.
iex> m = some 42
iex> unwrap! m
42
"""
@spec unwrap!(t) :: term
def unwrap!(%Monad.Maybe{type: :some, value: value}), do: value
@doc """
Macro that indicates if the maybe monad contains nothing.
This macro may be used in guard clauses.
iex> none? none()
true
iex> none? some 42
false
"""
defmacro none?(maybe), do: quote(do: unquote(maybe) == none())
@doc """
Macro that indicates if the maybe monad contains something.
This macro may be used in guard clauses.
iex> some? none()
false
iex> some? some 42
true
"""
defmacro some?(maybe), do: quote(do: not none?(unquote(maybe)))
@doc """
Callback implementation of `Monad.Behaviour.return/1`.
Wraps the value in a maybe monad.
iex> return 42
%Monad.Maybe{type: :some, value: 42}
"""
@spec return(term) :: t
def return(value), do: pure(value)
@doc """
Callback implementation of `Monad.Behaviour.bind/2`.
If the monad contains a value, then the value is unwrapped and applied to
`fun`.
For `none` monads, `none` is returned without evaluating `fun`.
iex> m = some 42
iex> bind m, (& some &1 * 2)
%Monad.Maybe{type: :some, value: 84}
iex> bind none(), (& some &1 * 2)
%Monad.Maybe{type: :none, value: nil}
"""
@spec bind(t, (term -> t)) :: t
def bind(maybe, fun) when none?(maybe) and is_function(fun, 1), do: maybe
def bind(maybe, fun) when some?(maybe) and is_function(fun, 1) do
maybe |> unwrap! |> fun.()
end
end
|
lib/monad/maybe.ex
| 0.875061
| 0.499878
|
maybe.ex
|
starcoder
|
defmodule HexGrid.Hex do
alias HexGrid.Hex, as: Hex
@moduledoc """
Hex Tile module. See this excellent article for
reference:
http://www.redblobgames.com/grids/hexagons/implementation.html
"""
defstruct q: 0, r: 0, s: 0
@typedoc "Hex Tile"
@opaque t :: %__MODULE__{q: number, r: number, s: number}
@doc ~S"""
Creates a new hex tile
Throws an ArgumentError if q + r + s != 0
## Examples
iex> Hex.new!(0, 1, -1)
%Hex{q: 0, r: 1, s: -1}
iex> Hex.new!(0, 1, 1)
** (ArgumentError) Invalid coordinates in hex given, coordinate scalars q, r and s in %Hex{q:0, r:1, s:1} do not sum to 0
"""
@spec new!(number, number, number) :: t
def new!(q, r, s) do
if Float.round((q + r + s) / 1) == 0 do
%Hex{q: q, r: r, s: s}
else
raise ArgumentError, message: "Invalid coordinates in hex given, coordinate scalars q, r and s in %Hex{q:#{q}, r:#{r}, s:#{s}} do not sum to 0"
end
end
@doc ~S"""
Creates a new hex tile
## Examples
iex> Hex.new(0, 1, -1)
{:ok, %Hex{q: 0, r: 1, s: -1}}
iex> Hex.new(0, 1, 1)
{:error, "Invalid coordinates in hex given, coordinate scalars q, r and s in %Hex{q:0, r:1, s:1} do not sum to 0"}
"""
@spec new(number, number, number) :: {:ok, t} | {:error, String.t()}
def new(q, r, s) do
try do
{:ok, new!(q, r, s)}
rescue
e in ArgumentError -> {:error, e.message}
end
end
@doc ~S"""
Adds two hexes together
## Examples
iex> Hex.add(Hex.new!(0, 1, -1), Hex.new!(0, 1, -1))
%Hex{q: 0, r: 2, s: -2}
"""
@spec add(t, t) :: t
def add(first, second) do
Hex.new!(first.q + second.q, first.r + second.r, first.s + second.s)
end
@doc ~S"""
Subtracts two hexes
## Examples
iex> Hex.sub(Hex.new!(0, 0, 0), Hex.new!(0, 1, -1))
%Hex{q: 0, r: -1, s: 1}
"""
def sub(first, second) do
Hex.new!(first.q - second.q, first.r - second.r, first.s - second.s)
end
@doc ~S"""
Multiples hex by scalar
## Examples
iex> Hex.mul(Hex.new!(0, 1, -1), 2)
%Hex{q: 0, r: 2, s: -2}
"""
@spec mul(t, integer) :: t
def mul(hex, scalar) do
Hex.new!(hex.q * scalar, hex.r * scalar, hex.s * scalar)
end
@doc ~S"""
Gets the length of a hex
## Examples
iex> Hex.length(Hex.new!(0, 0, 0))
0
iex> Hex.length(Hex.new!(0, 1, -1))
1
"""
@spec length(t) :: integer
def length(hex) do
round((abs(hex.q) + abs(hex.r) + abs(hex.s)) / 2)
end
@doc ~S"""
Calculates the distance between two hexes
## Examples
iex> Hex.distance(Hex.new!(0, 0, 0), Hex.new!(0, 1, -1))
1
iex> Hex.distance(Hex.new!(0, 0, 0), Hex.new!(1, 1, -2))
2
iex> Hex.distance(Hex.new!(0, 0, 0), Hex.new!(-1, 5, -4))
5
"""
@spec distance(t, t) :: integer
def distance(first, second) do
Hex.length(sub(first, second))
end
@doc ~S"""
Gets a hex with a given direction.
Allowed values are 0-5, inclusive.
0 is hex immediately to the right. As the value increases
the direction vector rotates counter-clockwise.
## Examples
iex> Hex.cube_direction(0)
%Hex{q: 1, r: -1, s: 0}
iex> Hex.cube_direction(1)
%Hex{q: 1, r: 0, s: -1}
iex> Hex.cube_direction(6)
:error
"""
@spec cube_direction(integer) :: t | :error
def cube_direction(dir) do
direction(dir)
end
@doc ~S"""
Gets the neighbour of the hex
## Examples
iex> Hex.neighbour(Hex.new!(0, 0, 0), 0)
%Hex{q: 1, r: -1, s: 0}
iex> Hex.neighbour(Hex.new!(3, -3, 0), 1)
%Hex{q: 4, r: -3, s: -1}
"""
@spec neighbour(t, integer) :: t
def neighbour(hex, dir) do
add(hex, direction(dir))
end
@doc ~S"""
Gets all hexes neighbours
## Examples
iex> Hex.neighbours(Hex.new!(0, 0, 0))
[
%Hex{q: 1, r: -1, s: 0},
%Hex{q: 1, r: 0, s: -1},
%Hex{q: 0, r: 1, s: -1},
%Hex{q: -1, r: 1, s: 0},
%Hex{q: -1, r: 0, s: 1},
%Hex{q: 0, r: -1, s: 1}
]
"""
@spec neighbours(t) :: [t]
def neighbours(hex) do
Enum.map(0..5, fn (x) -> neighbour(hex, x) end)
end
@doc ~S"""
Gets all hexes within a certain distance of the given hex
## Examples
iex> Hex.neighbourhood(Hex.new!(0, 1, -1), 0)
[
%Hex{q: 0, r: 1, s: -1},
]
iex> Hex.neighbourhood(Hex.new!(0, 1, -1), 2)
[%HexGrid.Hex{q: -2, r: 1, s: 1}, %HexGrid.Hex{q: -2, r: 2, s: 0},
%HexGrid.Hex{q: -2, r: 3, s: -1}, %HexGrid.Hex{q: -1, r: 0, s: 1},
%HexGrid.Hex{q: -1, r: 1, s: 0}, %HexGrid.Hex{q: -1, r: 2, s: -1},
%HexGrid.Hex{q: -1, r: 3, s: -2}, %HexGrid.Hex{q: 0, r: -1, s: 1},
%HexGrid.Hex{q: 0, r: 0, s: 0}, %HexGrid.Hex{q: 0, r: 1, s: -1},
%HexGrid.Hex{q: 0, r: 2, s: -2}, %HexGrid.Hex{q: 0, r: 3, s: -3},
%HexGrid.Hex{q: 1, r: -1, s: 0}, %HexGrid.Hex{q: 1, r: 0, s: -1},
%HexGrid.Hex{q: 1, r: 1, s: -2}, %HexGrid.Hex{q: 1, r: 2, s: -3},
%HexGrid.Hex{q: 2, r: -1, s: -1}, %HexGrid.Hex{q: 2, r: 0, s: -2},
%HexGrid.Hex{q: 2, r: 1, s: -3}]
"""
@spec neighbourhood(t, non_neg_integer) :: [t]
def neighbourhood(hex, distance) do
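# Axial-range iteration: for each dq, dr is clamped so that the implied
# ds = -dq - dr also stays within [-distance, distance], covering exactly
# the offsets whose hex distance from the given tile is <= distance.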
for dq <- -distance..distance,
dr <- Enum.max([-distance, -dq - distance])..Enum.min([distance, -dq + distance]) do
Hex.add(hex, Hex.new!(dq, dr, -dq - dr))
end
end
defp direction(dir) do
case dir do
0 -> Hex.new!(+1, -1, 0)
1 -> Hex.new!(+1, 0, -1)
2 -> Hex.new!(0, +1, -1)
3 -> Hex.new!(-1, +1, 0)
4 -> Hex.new!(-1, 0, +1)
5 -> Hex.new!(0, -1, +1)
_ -> :error
end
end
end
|
lib/hex.ex
| 0.944254
| 0.671772
|
hex.ex
|
starcoder
|
defmodule ExUnit.Callbacks do
@moduledoc %B"""
This module defines four callbacks: `setup_all`, `teardown_all`,
`setup` and `teardown`. Those callbacks are defined via macros
and receive a keyword list of metadata. A callback may
optionally return extra data which will be available in the test
cases.
## Examples
defmodule AssertionTest do
use ExUnit.Case, async: true
setup do
IO.puts "This is a setup callback"
# Returns extra metadata
{ :ok, [hello: "world"] }
end
setup context do
# We can access the test name in the context
IO.puts "Setting up: #{context[:test]}"
# The metadata returned by the previous setup as well
assert context[:hello] == "world"
# No metadata
:ok
end
test "always pass" do
assert true
end
end
"""
@doc false
defmacro __using__(opts) do
parent = opts[:parent]
quote do
@exunit_setup []
@exunit_teardown []
@exunit_setup_all []
@exunit_teardown_all []
@before_compile unquote(__MODULE__)
import unquote(__MODULE__)
def __exunit__(:parent) do
unquote(parent)
end
def __exunit__(:setup, context) do
__exunit_setup__ unquote(parent).__exunit__(:setup, context)
end
def __exunit__(:teardown, context) do
unquote(parent).__exunit__(:teardown, __exunit_teardown__ context)
end
def __exunit__(:setup_all, context) do
__exunit_setup_all__ unquote(parent).__exunit__(:setup_all, context)
end
def __exunit__(:teardown_all, context) do
unquote(parent).__exunit__(:teardown_all, __exunit_teardown_all__ context)
end
end
end
@doc false
defmacro __before_compile__(env) do
[ compile_callbacks(env, :exunit_setup),
compile_callbacks(env, :exunit_teardown),
compile_callbacks(env, :exunit_setup_all),
compile_callbacks(env, :exunit_teardown_all) ]
end
@doc """
Called before the start of each test.
"""
defmacro setup(var // quote(do: _), block) do
quote do
name = :"__exunit_setup_#{length(@exunit_setup)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_setup [name|@exunit_setup]
end
end
@doc """
Called after each test finishes. Note that if the test crashes with an exit
message, `teardown` will not be run.
"""
defmacro teardown(var // quote(do: _), block) do
quote do
name = :"__exunit_teardown_#{length(@exunit_teardown)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_teardown [name|@exunit_teardown]
end
end
@doc """
Called before the start of a case.
"""
defmacro setup_all(var // quote(do: _), block) do
quote do
name = :"__exunit_setup_all_#{length(@exunit_setup_all)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_setup_all [name|@exunit_setup_all]
end
end
@doc """
Called after the finish of each case.
"""
defmacro teardown_all(var // quote(do: _), block) do
quote do
name = :"__exunit_teardown_all_#{length(@exunit_teardown_all)}"
defp name, [unquote(escape var)], [], unquote(escape block)
@exunit_teardown_all [name|@exunit_teardown_all]
end
end
## Helpers
def __merge__(_mod, other, :ok), do: other
def __merge__(_mod, other, { :ok, data }) when is_list(data), do: Keyword.merge(other, data)
def __merge__(mod, _, failure) do
raise "expected ExUnit callback in #{inspect mod} to return :ok " <>
" or { :ok, data }, got #{inspect failure} instead"
end
defp escape(contents) do
Macro.escape_quoted(contents)
end
defp compile_callbacks(env, kind) do
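# Generates a private function (e.g. `__exunit_setup__/1`) that threads the
# context through every registered callback in definition order, merging any
# extra metadata each callback returns via `__merge__/3`.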
callbacks = Module.get_attribute(env.module, kind) |> Enum.reverse
acc =
Enum.reduce callbacks, quote(do: context), fn(callback, acc) ->
quote do
context = unquote(acc)
unquote(__MODULE__).__merge__(__MODULE__, context, unquote(callback)(context))
end
end
quote do
defp unquote(:"__#{kind}__")(context), do: unquote(acc)
end
end
end
|
lib/ex_unit/lib/ex_unit/callbacks.ex
| 0.841972
| 0.482124
|
callbacks.ex
|
starcoder
|
defmodule Ash.Changeset do
@moduledoc """
Changesets are used to create and update data in Ash.
Create a changeset with `new/1` or `new/2`, and alter the attributes
and relationships using the functions provided in this module. Nothing in this module
actually incurs changes in a data layer. To commit a changeset, see `c:Ash.Api.create/2`
and `c:Ash.Api.update/2`.
## Primary Keys
For relationship manipulation using `append_to_relationship/3`, `remove_from_relationship/3`
and `replace_relationship/3` there are three types that can be used for primary keys, as sketched below:
1.) An instance of the resource in question.
2.) If the primary key is just a single field, i.e. `:id`, then a single value, i.e. `1`
3.) A map of keys to values representing the primary key, i.e. `%{id: 1}` or `%{id: 1, org_id: 2}`
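All three forms are interchangeable. A minimal sketch (the relationship names and values are hypothetical):
```elixir
changeset
|> Ash.Changeset.replace_relationship(:author, author_record) # 1.) a resource struct
|> Ash.Changeset.replace_relationship(:tags, [1, 2])          # 2.) bare primary key values
|> Ash.Changeset.replace_relationship(:posts, [%{id: 3}])     # 3.) maps of primary key fields
```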
## Join Attributes
For many to many relationships, the attributes on a join relationship may be set while relating items
by passing a tuple of the primary key and the changes to be applied. This is done via upserts, so
update validations on the join resource are *not* applied, but create validations are.
For example:
```elixir
Ash.Changeset.replace_relationship(changeset, :linked_tickets, [
{1, %{link_type: "blocking"}},
{a_ticket, %{link_type: "caused_by"}},
{%{id: 2}, %{link_type: "related_to"}}
])
```
## Manage relationship vs append/replace/remove
### Manage relationship
`Ash.Changeset.manage_relationship/4` is for creating/updating/destroying related items. A simple example
is for creating a comment and adding it to a post.
```elixir
post
|> Ash.Changeset.manage_relationship(
:comments,
[%{body: "this post is great!"}],
on_destroy: :ignore,
on_update: :error
)
|> MyApp.MyApi.update!(actor: current_user)
```
We configured it to ignore any "destroys", meaning "don't worry about the comments that are related but not in this list."
We also configured it to error on updates, meaning "this shouldn't change any existing comments"
We left `on_create` as the default, which will call the primary create action on the destination.
User input should not be passed directly into this function. See `manage_relationship/4` for more.
*By default, these changes on the destination resources follow the authorization rules, if any, on that resource*
### Append/Replace/Remove
`Ash.Changeset.replace_relationship/3`, `Ash.Changeset.append_to_relationship/3` and `Ash.Changeset.remove_from_relationship/3`
are simply about managing what data is/isn't related. A simple example might be updating the *tags* of a post, where all the tags
already exist, we simply want to edit the information.
These changes on the destination resources *do not* follow authorization rules of the destination resource.
For example, updating a `has_many` relationship could involve changing the destination field to point at a different record.
User input should not be passed directly into this function. See `manage_relationship/4` for more.
Instead add an `append_to_relationship`, `remove_from_relationship` or `replace_relationship` to the action itself.
See the action DSL documentation for more.
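A sketch of the tags case above (the resource, api and id names are hypothetical):
```elixir
post
|> Ash.Changeset.new()
|> Ash.Changeset.replace_relationship(:tags, [tag1_id, tag2_id])
|> MyApp.MyApi.update!()
```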
"""
defstruct [
:data,
:action_type,
:action,
:resource,
:api,
:tenant,
:__validated_for_action__,
params: %{},
action_failed?: false,
arguments: %{},
context: %{},
after_action: [],
before_action: [],
errors: [],
valid?: true,
attributes: %{},
relationships: %{},
change_dependencies: [],
requests: []
]
defimpl Inspect do
import Inspect.Algebra
def inspect(changeset, opts) do
context =
if changeset.context == %{} do
empty()
else
concat("context: ", to_doc(changeset.context, opts))
end
container_doc(
"#Ash.Changeset<",
[
concat("action_type: ", inspect(changeset.action_type)),
concat("action: ", inspect(changeset.action && changeset.action.name)),
concat("attributes: ", to_doc(changeset.attributes, opts)),
concat("relationships: ", to_doc(changeset.relationships, opts)),
arguments(changeset, opts),
concat("errors: ", to_doc(changeset.errors, opts)),
concat("data: ", to_doc(changeset.data, opts)),
context,
concat("valid?: ", to_doc(changeset.valid?, opts))
],
">",
opts,
fn str, _ -> str end
)
end
defp arguments(changeset, opts) do
if changeset.action do
action = Ash.Resource.action(changeset.resource, changeset.action, changeset.action_type)
if is_nil(action) || Enum.empty?(action.arguments) do
empty()
else
arg_string =
action.arguments
|> Map.new(fn argument ->
if argument.sensitive? do
{argument.name, "**redacted**"}
else
{argument.name, Ash.Changeset.get_argument(changeset, argument.name)}
end
end)
|> to_doc(opts)
concat(["arguments: ", arg_string])
end
else
empty()
end
end
end
@type t :: %__MODULE__{}
alias Ash.Error.{
Changes.InvalidArgument,
Changes.InvalidAttribute,
Changes.InvalidChanges,
Changes.InvalidRelationship,
Changes.NoSuchAttribute,
Changes.NoSuchRelationship,
Changes.Required,
Invalid.NoSuchAction,
Invalid.NoSuchResource,
Query.NoReadAction
}
@doc """
Return a changeset over a resource or a record. `params` can be either attributes, relationship values or arguments.
If you are using external input, you almost certainly want to use `Ash.Changeset.for_<action_type>`. However, you can
use `Ash.Changeset.new/2` to start a changeset and make a few changes prior to calling `for_action`. For example:
```elixir
MyResource
|> Ash.Changeset.new()
|> Ash.Changeset.change_attribute(:name, "foobar")
|> Ash.Changeset.for_action(...)
```
Anything that is modified prior to `for_action` is validated against the rules of the action, while *anything after it is not*.
This changeset does not consider an action, and so allows you to change things with minimal validation. Values are
validated when changed, and the existence of attributes and relationships are validated. If you want to essentially
"run an action", and get back a changeset with any errors that would be generated by that action (with the exception
of errors that can only be generated by the data layer), use `for_action/2`.
Additionally, this format only supports supplying attributes in the params. This is because we don't know what the
behavior should be for relationship changes, nor what arguments are available. You can manage them yourself with
the functions that allow managing arguments/relationships that are provided in this module, e.g `set_argument/3` and
`replace_relationship/3`
"""
@spec new(Ash.resource() | Ash.record(), params :: map) :: t
def new(resource, params \\ %{})
def new(%resource{} = record, params) do
tenant =
record
|> Map.get(:__metadata__, %{})
|> Map.get(:tenant, nil)
context = Ash.Resource.default_context(resource) || %{}
if Ash.Resource.resource?(resource) do
%__MODULE__{resource: resource, data: record, action_type: :update}
|> change_attributes(params)
|> set_context(context)
|> set_tenant(tenant)
else
%__MODULE__{
resource: resource,
action_type: :update,
data: struct(resource)
}
|> add_error(NoSuchResource.exception(resource: resource))
|> set_tenant(tenant)
|> set_context(context)
end
end
def new(resource, params) do
if Ash.Resource.resource?(resource) do
%__MODULE__{
resource: resource,
action_type: :create,
data: struct(resource)
}
|> change_attributes(params)
else
%__MODULE__{resource: resource, action_type: :create, data: struct(resource)}
|> add_error(NoSuchResource.exception(resource: resource))
end
end
@doc """
Constructs a changeset for a given create action, and validates it.
Anything that is modified prior to `for_create/4` is validated against the rules of the action, while *anything after it is not*.
### Params
`params` may be attributes, relationships, or arguments. You can safely pass user/form input directly into this function.
Only public attributes and relationships are supported. If you want to change private attributes as well, see the
Customization section below. `params` are stored directly as given in the `params` field of the changeset.
### Opts
* `:relationships` - customize relationship behavior. See the Relationships section below.
* `:actor` - set the actor, which can be used in any `Ash.Resource.Change`s configured on the action. (in the `context` argument)
### Relationships
By default, any relationships are *replaced* via `replace_relationship`. To change this behavior, provide the
`relationships` option.
For example:
Ash.Changeset.for_create(MyResource, :create, params, relationships: [relationship: :append, other_relationship: :remove])
### Customization
A changeset can be provided as the first argument, instead of a resource, to allow
setting specific attributes ahead of time.
For example:
MyResource
|> Changeset.change_attribute(:foo, 1)
|> Changeset.for_create(:create, ...opts)
Once a changeset has been validated by `for_create/4` (or `for_update/4`), it isn't validated again in the action.
New changes added are validated individually, though. This allows you to create a changeset according
to a given action, and then add custom changes if necessary.
"""
def for_create(initial, action, params, opts \\ []) do
changeset =
case initial do
%__MODULE__{action_type: :create} = changeset ->
changeset
%__MODULE__{} = changeset ->
add_error(
changeset,
"Initial changeset provided with invalid action type: #{changeset.action_type}"
)
resource when is_atom(resource) ->
new(resource)
other ->
%__MODULE__{resource: other, action_type: :create}
|> add_error(NoSuchResource.exception(resource: other))
end
for_action(changeset, action, params, opts)
end
@doc """
Constructs a changeset for a given update action, and validates it.
Anything that is modified prior to `for_update/4` is validated against the rules of the action, while *anything after it is not*.
See `for_create/4` for more information
"""
def for_update(initial, action, params, opts \\ []) do
changeset =
case initial do
# We accept :destroy here to support soft deletes
%__MODULE__{action_type: type} = changeset when type in [:update, :destroy] ->
changeset
%__MODULE__{} = changeset ->
add_error(
changeset,
"Initial changeset provided with invalid action type: #{changeset.action_type}"
)
%_{} = struct ->
new(struct)
_other ->
%__MODULE__{resource: nil, action_type: :update}
|> add_error(NoSuchResource.exception(resource: nil))
end
for_action(changeset, action, params, opts)
end
@doc """
Constructs a changeset for a given destroy action, and validates it.
Pass an `actor` option to specify the actor
Anything that is modified prior to `for_destroy/4` is validated against the rules of the action, while *anything after it is not*.
Once a changeset has been validated by `for_destroy/4`, it isn't validated again in the action.
New changes added are validated individually, though. This allows you to create a changeset according
to a given action, and then add custom changes if necessary.
"""
def for_destroy(initial, action_name, params, opts \\ []) do
changeset =
case initial do
%__MODULE__{} = changeset ->
changeset
|> Map.put(:action_type, :destroy)
%_{} = struct ->
struct
|> new()
|> Map.put(:action_type, :destroy)
_other ->
%__MODULE__{resource: nil, action_type: :destroy}
|> add_error(NoSuchResource.exception(resource: nil))
end
if changeset.valid? do
action = Ash.Resource.action(changeset.resource, action_name, changeset.action_type)
if action do
changeset
|> cast_params(action, params, opts)
|> Map.put(:action, action)
|> Map.put(:__validated_for_action__, action.name)
|> cast_arguments(action)
|> add_validations()
|> validate_multitenancy()
|> mark_validated(action.name)
else
add_error(
changeset,
NoSuchAction.exception(
resource: changeset.resource,
action: action_name,
type: :destroy
)
)
end
else
changeset
end
end
defp for_action(changeset, action, params, opts) do
if changeset.valid? do
action = Ash.Resource.action(changeset.resource, action, changeset.action_type)
if action do
changeset
|> cast_params(action, params || %{}, opts)
|> cast_arguments(action)
|> Map.put(:action, action)
|> Map.put(:__validated_for_action__, action.name)
|> validate_attributes_accepted(action)
|> validate_relationships_accepted(action)
|> run_action_changes(action, opts[:actor])
|> set_defaults(changeset.action_type)
|> validate_required_belongs_to()
|> add_validations()
|> require_values(changeset.action_type)
|> validate_multitenancy()
|> mark_validated(action.name)
else
add_error(
changeset,
NoSuchAction.exception(
resource: changeset.resource,
action: action,
type: changeset.action_type
)
)
end
else
changeset
end
end
defp mark_validated(changeset, action_name) do
%{changeset | __validated_for_action__: action_name}
end
defp validate_multitenancy(changeset) do
if Ash.Resource.multitenancy_strategy(changeset.resource) &&
not Ash.Resource.multitenancy_global?(changeset.resource) && is_nil(changeset.tenant) do
add_error(
changeset,
"#{inspect(changeset.resource)} changesets require a tenant to be specified"
)
else
changeset
end
end
defp cast_params(changeset, action, params, opts) do
changeset = %{changeset | params: Enum.into(params, %{})}
Enum.reduce(params, changeset, fn {name, value}, changeset ->
cond do
has_argument?(action, name) ->
set_argument(changeset, name, value)
attr = Ash.Resource.public_attribute(changeset.resource, name) ->
if attr.writable? do
change_attribute(changeset, attr.name, value)
else
changeset
end
rel = Ash.Resource.public_relationship(changeset.resource, name) ->
if rel.writable? do
behaviour = opts[:relationships][rel.name] || :replace
case behaviour do
:replace ->
replace_relationship(changeset, rel.name, value)
:append ->
append_to_relationship(changeset, rel.name, value)
:remove ->
remove_from_relationship(changeset, rel.name, value)
end
else
changeset
end
true ->
changeset
end
end)
end
defp has_argument?(action, name) when is_atom(name) do
Enum.any?(action.arguments, &(&1.private? == false && &1.name == name))
end
defp has_argument?(action, name) when is_binary(name) do
Enum.any?(action.arguments, &(&1.private? == false && to_string(&1.name) == name))
end
defp validate_attributes_accepted(changeset, %{accept: nil}), do: changeset
defp validate_attributes_accepted(changeset, %{accept: accepted_attributes}) do
changeset.attributes
|> Enum.reject(fn {key, _value} ->
key in accepted_attributes
end)
|> Enum.reduce(changeset, fn {key, _}, changeset ->
add_error(
changeset,
InvalidAttribute.exception(field: key, message: "cannot be changed")
)
end)
end
defp validate_relationships_accepted(changeset, %{accept: nil}), do: changeset
defp validate_relationships_accepted(changeset, %{accept: accepted_relationships}) do
changeset.relationships
|> Enum.reject(fn {key, _value} ->
key in accepted_relationships
end)
|> Enum.reduce(changeset, fn {key, _}, changeset ->
add_error(
changeset,
InvalidRelationship.exception(
relationship: key,
message: "Cannot be changed"
)
)
end)
end
defp run_action_changes(changeset, %{changes: changes}, actor) do
Enum.reduce(changes, changeset, fn
%{change: {module, opts}}, changeset ->
module.change(changeset, opts, %{actor: actor})
%{validation: _} = validation, changeset ->
if validation.expensive? and not changeset.valid? do
changeset
else
do_validation(changeset, validation)
end
end)
end
defp set_defaults(changeset, :create) do
changeset.resource
|> Ash.Resource.attributes()
|> Enum.filter(&(not is_nil(&1.default)))
|> Enum.reduce(changeset, fn attribute, changeset ->
force_change_new_attribute_lazy(changeset, attribute.name, fn ->
default(:create, attribute)
end)
end)
end
defp set_defaults(changeset, :update) do
changeset.resource
|> Ash.Resource.attributes()
|> Enum.filter(&(not is_nil(&1.update_default)))
|> Enum.reduce(changeset, fn attribute, changeset ->
force_change_new_attribute_lazy(changeset, attribute.name, fn ->
default(:update, attribute)
end)
end)
end
defp set_defaults(changeset, _) do
changeset
end
defp default(:create, %{default: {mod, func, args}}), do: apply(mod, func, args)
defp default(:create, %{default: function}) when is_function(function, 0), do: function.()
defp default(:create, %{default: value}), do: value
defp default(:update, %{update_default: {mod, func, args}}), do: apply(mod, func, args)
defp default(:update, %{update_default: function}) when is_function(function, 0),
do: function.()
defp default(:update, %{update_default: value}), do: value
defp validate_required_belongs_to(changeset) do
changeset.resource
|> Ash.Resource.relationships()
|> Enum.filter(&(&1.type == :belongs_to))
|> Enum.filter(& &1.required?)
|> Enum.reduce(changeset, fn required_relationship, changeset ->
case Map.fetch(changeset.relationships, required_relationship.name) do
{:ok, %{add: adding}} when adding != nil and adding != [] ->
changeset
{:ok, %{replace: replacing}} when replacing != nil and replacing != [] ->
changeset
_ ->
case Map.fetch(changeset.attributes, required_relationship.source_field) do
{:ok, value} when not is_nil(value) ->
changeset
_ ->
add_error(
changeset,
Required.exception(
field: required_relationship.name,
type: :relationship
)
)
end
end
end)
end
defp add_validations(changeset) do
Ash.Changeset.before_action(changeset, fn changeset ->
changeset.resource
# We use the `changeset.action_type` to support soft deletes
# Because a delete is an `update` with an action type of `update`
|> Ash.Resource.validations(changeset.action_type)
|> Enum.reduce(changeset, fn validation, changeset ->
if validation.expensive? and not changeset.valid? do
changeset
else
do_validation(changeset, validation)
end
end)
end)
end
defp do_validation(changeset, validation) do
case validation.module.validate(changeset, validation.opts) do
:ok ->
changeset
{:error, error} when is_binary(error) ->
Ash.Changeset.add_error(changeset, validation.message || error)
{:error, error} when is_exception(error) ->
if validation.message do
error =
case error do
%{field: field} when not is_nil(field) ->
error
|> Map.take([:field, :vars])
|> Map.to_list()
|> Keyword.put(:message, validation.message)
|> InvalidAttribute.exception()
%{fields: fields} when fields not in [nil, []] ->
error
|> Map.take([:fields, :vars])
|> Map.to_list()
|> Keyword.put(:message, validation.message)
|> InvalidChanges.exception()
_ ->
validation.message
end
Ash.Changeset.add_error(changeset, error)
else
Ash.Changeset.add_error(changeset, error)
end
{:error, error} ->
error =
if Keyword.keyword?(error) do
Keyword.put(error, :message, validation.message || error[:message])
else
validation.message || error
end
Ash.Changeset.add_error(changeset, error)
end
end
defp require_values(changeset, :create) do
changeset.resource
|> Ash.Resource.attributes()
|> Enum.reject(&(&1.allow_nil? || &1.private? || &1.generated?))
|> Enum.reduce(changeset, fn required_attribute, changeset ->
if Ash.Changeset.changing_attribute?(changeset, required_attribute.name) do
changeset
else
Ash.Changeset.add_error(
changeset,
Required.exception(field: required_attribute.name, type: :attribute)
)
end
end)
end
defp require_values(changeset, _), do: changeset
@doc """
Wraps a function in the before/after action hooks of a changeset.
The function takes a changeset and if it returns
`{:ok, result}`, the result will be passed through the after
action hooks.
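A minimal sketch (the data layer call is a hypothetical stand-in):
```elixir
{:ok, record, _changeset, %{notifications: _}} =
  Ash.Changeset.with_hooks(changeset, fn changeset ->
    # the actual work goes here; `{:ok, result}` flows into the after_action hooks
    MyDataLayer.create(changeset) # hypothetical
  end)
```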
"""
@spec with_hooks(
t(),
(t() ->
{:ok, Ash.record(), %{notifications: list(Ash.notification())}} | {:error, term})
) ::
{:ok, term, t(), %{notifications: list(Ash.notification())}} | {:error, term}
def with_hooks(changeset, func) do
{changeset, %{notifications: before_action_notifications}} =
Enum.reduce_while(
changeset.before_action,
{changeset, %{notifications: []}},
fn before_action, {changeset, instructions} ->
case before_action.(changeset) do
{%{valid?: true} = changeset, %{notifications: notifications}} ->
{:cont,
{changeset,
%{
instructions
| notifications: instructions.notifications ++ List.wrap(notifications)
}}}
%{valid?: true} = changeset ->
{:cont, {changeset, instructions}}
changeset ->
{:halt, {changeset, instructions}}
end
end
)
if changeset.valid? do
case func.(changeset) do
{:ok, result} ->
run_after_actions(result, changeset, before_action_notifications)
{:error, error} ->
{:error, error}
end
else
{:error, changeset.errors}
end
end
defp run_after_actions(result, changeset, before_action_notifications) do
Enum.reduce_while(
changeset.after_action,
{:ok, result, changeset, %{notifications: before_action_notifications}},
fn after_action, {:ok, result, changeset, %{notifications: notifications} = acc} ->
case after_action.(changeset, result) do
{:ok, new_result, new_notifications} ->
all_notifications =
Enum.map(notifications ++ new_notifications, fn notification ->
%{
notification
| resource: notification.resource || changeset.resource,
action:
notification.action ||
Ash.Resource.action(
changeset.resource,
changeset.action,
changeset.action_type
),
data: notification.data || new_result,
changeset: notification.changeset || changeset,
actor: notification.actor || Map.get(changeset.context, :actor)
}
end)
{:cont, {:ok, new_result, changeset, %{acc | notifications: all_notifications}}}
{:ok, new_result} ->
{:cont, {:ok, new_result, changeset, acc}}
{:error, error} ->
{:halt, {:error, error}}
end
end
)
end
@doc "Gets the value of an argument provided to the changeset"
@spec get_argument(t, atom) :: term
def get_argument(changeset, argument) when is_atom(argument) do
Map.get(changeset.arguments, argument) || Map.get(changeset.arguments, to_string(argument))
end
@doc "fetches the value of an argument provided to the changeset or `:error`"
@spec fetch_argument(t, atom) :: {:ok, term} | :error
def fetch_argument(changeset, argument) when is_atom(argument) do
case Map.fetch(changeset.arguments, argument) do
{:ok, value} ->
{:ok, value}
:error ->
case Map.fetch(changeset.arguments, to_string(argument)) do
{:ok, value} -> {:ok, value}
:error -> :error
end
end
end
@doc "Gets the changing value or the original value of an attribute"
@spec get_attribute(t, atom) :: term
def get_attribute(changeset, attribute) do
case fetch_change(changeset, attribute) do
{:ok, value} ->
value
:error ->
get_data(changeset, attribute)
end
end
@doc "Gets the new value for an attribute, or `:error` if it is not being changed"
@spec fetch_change(t, atom) :: {:ok, any} | :error
def fetch_change(changeset, attribute) do
Map.fetch(changeset.attributes, attribute)
end
@doc "Gets the original value for an attribute"
@spec get_data(t, atom) :: term
def get_data(changeset, attribute) do
Map.get(changeset.data, attribute)
end
@spec put_context(t(), atom, term) :: t()
def put_context(changeset, key, value) do
%{changeset | context: Map.put(changeset.context, key, value)}
end
@spec set_tenant(t(), String.t()) :: t()
def set_tenant(changeset, tenant) do
%{changeset | tenant: tenant}
end
@spec set_context(t(), map | nil) :: t()
def set_context(changeset, nil), do: changeset
def set_context(changeset, map) do
%{changeset | context: Map.merge(changeset.context, map)}
end
defp cast_arguments(changeset, action) do
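# Rebuilds the arguments map from scratch: each action argument is fetched from
# the changeset (falling back to its default), cast to its declared type and
# checked against its constraints; a missing non-nilable argument adds a Required error.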
Enum.reduce(action.arguments, %{changeset | arguments: %{}}, fn argument, new_changeset ->
value = get_argument(changeset, argument.name) || argument_default(argument.default)
if is_nil(value) && !argument.allow_nil? do
Ash.Changeset.add_error(
changeset,
Required.exception(field: argument.name, type: :argument)
)
else
val =
case fetch_argument(changeset, argument.name) do
:error ->
if argument.default do
{:ok, argument_default(argument.default)}
else
:error
end
{:ok, val} ->
{:ok, val}
end
with {:found, {:ok, value}} <- {:found, val},
{:ok, casted} <- Ash.Type.cast_input(argument.type, value),
{:ok, casted} <-
Ash.Type.apply_constraints(argument.type, casted, argument.constraints) do
%{new_changeset | arguments: Map.put(new_changeset.arguments, argument.name, casted)}
else
{:error, error} ->
add_invalid_errors(:argument, changeset, argument, error)
{:found, :error} ->
changeset
end
end
end)
end
defp argument_default(value) when is_function(value, 0), do: value.()
defp argument_default(value), do: value
@manage_opts [
authorize?: [
type: :boolean,
default: true,
doc:
"Authorize changes to the destination records, if the primary change is being authorized as well."
],
on_create: [
type: :any,
default: :create,
doc: """
instructions for handling records where no matching record existed in the relationship
* `:create`(default) - the records are created using the destination's primary create action
* `{:create, :action_name}` - the records are created using the specified action on the destination resource
* `{:create, :action_name, :join_table_action_name, [:list, :of, :join_table, :params]}` - Same as `{:create, :action_name}` but takes
the list of params specified out and applies them when creating the join table row.
* `:ignore` - those inputs are ignored
* `:error` - an error is returned indicating that a record would have been created
"""
],
on_update: [
type: :any,
default: :update,
doc: """
instructions for handling records where a matching record existed in the relationship already
* `:update`(default) - the record is updated using the destination's primary update action
* `{:update, :action_name}` - the record is updated using the specified action on the destination resource
* `{:update, :action_name, :join_table_action_name, [:list, :of, :params]}` - Same as `{:update, :action_name}` but takes
the list of params specified out and applies them as an update to the join table row (only valid for many to many).
* `:ignore` - those inputs are ignored
* `:error` - an error is returned indicating that a record would have been updated
* `:create` - ignores the primary key match and follows the create instructions with these records instead.
* `:destroy` - follows the destroy instructions for any records with matching primary keys
"""
],
on_destroy: [
type: :any,
default: :destroy,
doc: """
instructions for handling records that existed in the current relationship but not in the input
* `:destroy`(default) - the record is destroyed using the destination's primary destroy action
* `{:destroy, :action_name}` - the record is destroyed using the specified action on the destination resource
* `{:destroy, :action_name, :join_resource_action_name}` - the record is destroyed using the specified action on the destination resource,
but first the join resource is destroyed with its specified action
* `:ignore` - those inputs are ignored
* `:error` - an error is returned indicating that a record would have been destroyed
* `:unrelate` - the related item is not destroyed, but the data is "unrelated", making this behave like `remove_from_relationship/3`
* many_to_many - the join resource row is destroyed
* has_many - the destination_field (on the related record) is set to `nil`
* has_one - the destination_field (on the related record) is set to `nil`
* belongs_to - the source_field (on this record) is set to `nil`
* `{:unrelate, :action_name}` - the record is unrelated using the provided update action.
* many_to_many - a destroy action on the join resource
* has_many - an update action on the destination resource
* has_one - an update action on the destination resource
* belongs_to - an update action on the source resource
"""
]
]
@doc false
def manage_relationship_schema, do: @manage_opts
@doc """
Manages the related records by creating, updating, or destroying them as necessary.
Generally speaking, this function should not be used with user input. If you want to accept user
input to manage a relationship, e.g via a form, api, or controller input, instead add a `managed_relationship`
to your action. See the DSL documentation for more on that
Unlike `append_to_relationship/3`, `replace_relationship/3` and `remove_from_relationship/3`,
this will actually make changes to the non-relationship fields of the destination resource.
For the other functions, the only authorization involved is the authorization on this resource,
whereas `manage_relationship/4` will authorize/validate each individual operation.
If you want the input to update existing entities (when the `type` is `:replace`, the default),
you need to ensure that the primary key is provided as part of the input. See the example below:
Changeset.manage_relationship(
changeset,
:comments,
[%{rating: 10, contents: "foo"}],
on_create: {:create, :create_action},
on_destroy: :ignore
)
Changeset.manage_relationship(
changeset,
:comments,
[%{id: 10, rating: 10, contents: "foo"}],
on_update: {:update, :update_action},
on_create: {:create, :create_action})
## Options
#{Ash.OptionsHelpers.docs(@manage_opts)}
### Mixing with other relationship functions
If mixed with `append_to_relationship/3`, `remove_from_relationship/3` and `replace_relationship/3`, those actions will
happen first. After all of those changes have been made, the relationship will be "managed" according to the options provided
to this.
%Post{}
|> Ash.Changeset.new()
|> Ash.Changeset.manage_relationship(:comments, [%{text: "bar"}])
|> Ash.Changeset.append_to_relationship(:comments, [%{text: "foo"}])
|> Api.update!()
# %Post{comments: [%Comment{text: "bar"}, %Comment{text: "foo"}]}
This is a simple way to manage a relationship. If you need custom behavior, you can customize the action that is
called, which allows you to add arguments/changes. However, at some point you may want to forego this function
and make the changes yourself. For example:
input = [%{id: 10, rating: 10, contents: "foo"}]
changeset
|> Changeset.after_action(fn _changeset, result ->
# An example of updating comments based on a result of other changes
for comment <- input do
comment = MyApi.get(Comment, comment.id)
comment
|> Map.update(:rating, 0, &(&1 * result.rating_weight))
|> MyApi.update!()
end
{:ok, result}
end)
"""
def manage_relationship(changeset, relationship, input, opts \\ []) do
opts = Ash.OptionsHelpers.validate!(opts, @manage_opts)
case Ash.Resource.relationship(changeset.resource, relationship) do
nil ->
error =
NoSuchRelationship.exception(
resource: changeset.resource,
name: relationship
)
add_error(changeset, error)
%{cardinality: :one, type: type} = relationship when length(input) > 1 ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Cannot manage to a #{type} relationship with a list of records"
)
add_error(changeset, error)
%{writable?: false} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Relationship is not editable"
)
add_error(changeset, error)
relationship ->
value =
case {relationship.cardinality, input} do
{:one, []} -> nil
{:one, [val]} -> val
{:one, val} -> val
{:many, val_or_vals} -> List.wrap(val_or_vals)
end
relationships =
changeset.relationships
|> Map.put_new(relationship.name, %{})
|> add_to_relationship_key_and_reconcile(relationship, :manage, {value, opts})
%{changeset | relationships: relationships}
end
end
@doc """
Appends a record or a list of records to a relationship. Stacks with previous removals/additions.
Accepts a primary key or a list of primary keys. See the section on "Primary Keys" in the
module documentation for more.
For many to many relationships, accepts changes for any `join_attributes` configured on
the resource. See the section on "Join Attributes" in the module documentation for more.
Does not authorize changes on the destination resource, nor notify those changes.
Cannot be used with `belongs_to` or `has_one` relationships.
See `replace_relationship/3` for manipulating `belongs_to` and `has_one` relationships.
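A sketch (relationship name and identifiers hypothetical):
```elixir
changeset
|> Ash.Changeset.append_to_relationship(:comments, [1, 2])
|> Ash.Changeset.append_to_relationship(:comments, %{id: 3})
```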
"""
@spec append_to_relationship(t, atom, Ash.primary_key() | [Ash.primary_key()]) :: t()
def append_to_relationship(changeset, relationship, record_or_records) do
case Ash.Resource.relationship(changeset.resource, relationship) do
nil ->
error =
NoSuchRelationship.exception(
resource: changeset.resource,
name: relationship
)
add_error(changeset, error)
%{cardinality: :one, type: type} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Cannot append to a #{type} relationship"
)
add_error(changeset, error)
%{writable?: false} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Relationship is not editable"
)
add_error(changeset, error)
relationship ->
case primary_key(relationship, List.wrap(record_or_records)) do
{:ok, primary_keys} ->
relationships =
changeset.relationships
|> Map.put_new(relationship.name, %{})
|> add_to_relationship_key_and_reconcile(relationship, :add, primary_keys)
%{changeset | relationships: relationships}
{:error, error} ->
add_error(changeset, error)
end
end
end
@doc """
Removes a record or a list of records from a relationship. Stacks with previous removals/additions.
Accepts a primary key or a list of primary keys. See the section on "Primary Keys" in the
module documentation for more.
Does not authorize changes on the destination resource, nor notify those changes.
Cannot be used with `belongs_to` or `has_one` relationships.
See `replace_relationship/3` for manipulating those relationships.
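A sketch (hypothetical names):
```elixir
Ash.Changeset.remove_from_relationship(changeset, :comments, [1, 2])
```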
"""
@spec remove_from_relationship(t, atom, Ash.primary_key() | [Ash.primary_key()]) :: t()
def remove_from_relationship(changeset, relationship, record_or_records) do
case Ash.Resource.relationship(changeset.resource, relationship) do
nil ->
error =
NoSuchRelationship.exception(
resource: changeset.resource,
name: relationship
)
add_error(changeset, error)
%{cardinality: :one, type: type} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Cannot remove from a #{type} relationship"
)
add_error(changeset, error)
%{writable?: false} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Relationship is not editable"
)
add_error(changeset, error)
relationship ->
case primary_key(relationship, List.wrap(record_or_records)) do
{:ok, primary_keys} ->
relationships =
changeset.relationships
|> Map.put_new(relationship.name, %{})
|> add_to_relationship_key_and_reconcile(relationship, :remove, primary_keys)
%{changeset | relationships: relationships}
{:error, error} ->
add_error(changeset, error)
end
end
end
defp add_to_relationship_key_and_reconcile(relationships, relationship, :manage, manage) do
Map.update!(relationships, relationship.name, &Map.put(&1, :manage, manage))
end
defp add_to_relationship_key_and_reconcile(relationships, relationship, key, to_add) do
Map.update!(relationships, relationship.name, fn relationship_changes ->
relationship_changes
|> Map.put_new(key, [])
|> Map.update!(key, &Kernel.++(to_add, &1))
|> reconcile_relationship_changes()
end)
end
@doc """
Replaces the value of a relationship. Any previous additions/removals are cleared.
Accepts a primary key or a list of primary keys. See the section on "Primary Keys" in the
module documentation for more.
For many to many relationships, accepts changes for any `join_attributes` configured on
the resource. See the section on "Join Attributes" in the module documentation for more.
For a `has_many` or `many_to_many` relationship, this means removing any currently related
records that are not present in the replacement list, and creating any that do not exist
in the data layer.
For a `belongs_to` or `has_one`, replace with a `nil` value to unset a relationship.
Does not authorize changes on the destination resource, nor notify those changes.
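A sketch (hypothetical names):
```elixir
Ash.Changeset.replace_relationship(changeset, :tags, [1, 2, 3])
Ash.Changeset.replace_relationship(changeset, :author, nil) # unset a belongs_to/has_one
```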
"""
@spec replace_relationship(
t(),
atom(),
Ash.primary_key() | [Ash.primary_key()] | nil
) :: t()
def replace_relationship(changeset, relationship, record_or_records) do
case Ash.Resource.relationship(changeset.resource, relationship) do
nil ->
error =
NoSuchRelationship.exception(
resource: changeset.resource,
name: relationship
)
add_error(changeset, error)
%{writable?: false} = relationship ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Relationship is not editable"
)
add_error(changeset, error)
%{cardinality: :one, type: type}
when is_list(record_or_records) and length(record_or_records) > 1 ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Cannot replace a #{type} relationship with multiple records"
)
add_error(changeset, error)
%{type: :many_to_many} = relationship ->
do_replace_many_to_many_relationship(changeset, relationship, record_or_records)
relationship ->
if Ash.Resource.primary_action(relationship.destination, :read) do
records =
if relationship.cardinality == :one do
if is_list(record_or_records) do
List.first(record_or_records)
else
record_or_records
end
else
List.wrap(record_or_records)
end
case primary_key(relationship, records) do
{:ok, primary_key} ->
relationships =
Map.put(changeset.relationships, relationship.name, %{replace: primary_key})
changeset
|> check_entities_for_direct_write(relationship.name, List.wrap(records))
|> Map.put(:relationships, relationships)
{:error, error} ->
add_error(changeset, error)
end
else
add_error(
changeset,
NoReadAction.exception(
resource: changeset.resource,
when: "replacing relationship #{relationship.name}"
)
)
end
end
end
defp do_replace_many_to_many_relationship(changeset, relationship, record_or_records) do
cond do
!Ash.Resource.primary_action(relationship.destination, :read) ->
add_error(
changeset,
NoReadAction.exception(
resource: changeset.resource,
when: "replacing relationship #{relationship.name}"
)
)
!Ash.Resource.primary_action(relationship.through, :read) ->
add_error(
changeset,
NoReadAction.exception(
resource: changeset.resource,
when: "replacing relationship #{relationship.name}"
)
)
true ->
case primary_keys_with_changes(relationship, List.wrap(record_or_records)) do
{:ok, primary_key} ->
relationships =
Map.put(changeset.relationships, relationship.name, %{replace: primary_key})
%{changeset | relationships: relationships}
{:error, error} ->
add_error(changeset, error)
end
end
end
defp check_entities_for_direct_write(changeset, relationship_name, records) do
if Enum.all?(records, &is_resource?/1) do
relation_entities =
Map.merge(Map.get(changeset.context, :destination_entities, %{}), %{
relationship_name => Enum.group_by(records, & &1.__struct__)
})
put_context(changeset, :destination_entities, relation_entities)
else
if Ash.Resource.primary_action(
Ash.Resource.related(changeset.resource, relationship_name),
:read
) do
changeset
else
add_error(
changeset,
NoReadAction.exception(
resource: changeset.resource,
when: "editing relationship #{relationship_name} and not supplying full records"
)
)
end
end
end
defp is_resource?(record) do
Ash.Resource.resource?(record.__struct__)
rescue
_error ->
false
end
@doc "Returns true if an attribute exists in the changes"
@spec changing_attribute?(t(), atom) :: boolean
def changing_attribute?(changeset, attribute) do
Map.has_key?(changeset.attributes, attribute)
end
@doc "Returns true if a relationship exists in the changes"
@spec changing_relationship?(t(), atom) :: boolean
def changing_relationship?(changeset, relationship) do
Map.has_key?(changeset.relationships, relationship)
end
@doc "Change an attribute only if is not currently being changed"
@spec change_new_attribute(t(), atom, term) :: t()
def change_new_attribute(changeset, attribute, value) do
if changing_attribute?(changeset, attribute) do
changeset
else
change_attribute(changeset, attribute, value)
end
end
@doc """
Change an attribute if it is not currently being changed, by calling the provided function.
Use this if you want to perform some expensive calculation for an attribute value
only if there isn't already a change for that attribute.
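For example (the slug computation stands in for something expensive and is hypothetical):
```elixir
Ash.Changeset.change_new_attribute_lazy(changeset, :slug, fn ->
  expensive_slug_for(changeset.data) # hypothetical helper
end)
```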
"""
@spec change_new_attribute_lazy(t(), atom, (() -> any)) :: t()
def change_new_attribute_lazy(changeset, attribute, func) do
if changing_attribute?(changeset, attribute) do
changeset
else
change_attribute(changeset, attribute, func.())
end
end
@doc """
Add an argument to the changeset, which will be provided to the action
"""
def set_argument(changeset, argument, value) do
%{changeset | arguments: Map.put(changeset.arguments, argument, value)}
end
@doc """
Remove an argument from the changeset
"""
def delete_argument(changeset, argument_or_arguments) do
argument_or_arguments
|> List.wrap()
|> Enum.reduce(changeset, fn argument, changeset ->
%{changeset | arguments: Map.delete(changeset.arguments, argument)}
end)
end
@doc """
Merge a map of arguments to the arguments list
"""
def set_arguments(changeset, map) do
%{changeset | arguments: Map.merge(changeset.arguments, map)}
end
@doc """
Force change an attribute if it is not currently being changed, by calling the provided function
See `change_new_attribute_lazy/3` for more.
"""
@spec force_change_new_attribute_lazy(t(), atom, (() -> any)) :: t()
def force_change_new_attribute_lazy(changeset, attribute, func) do
if changing_attribute?(changeset, attribute) do
changeset
else
force_change_attribute(changeset, attribute, func.())
end
end
@doc "Calls `change_attribute/3` for each key/value pair provided"
@spec change_attributes(t(), map | Keyword.t()) :: t()
def change_attributes(changeset, changes) do
Enum.reduce(changes, changeset, fn {key, value}, changeset ->
change_attribute(changeset, key, value)
end)
end
@doc "Adds a change to the changeset, unless the value matches the existing value"
def change_attribute(changeset, attribute, value) do
case Ash.Resource.attribute(changeset.resource, attribute) do
nil ->
error =
NoSuchAttribute.exception(
resource: changeset.resource,
name: attribute
)
add_error(changeset, error)
%{writable?: false} = attribute ->
add_invalid_errors(:attribute, changeset, attribute, "Attribute is not writable")
attribute ->
with {:ok, prepared} <- prepare_change(changeset, attribute, value),
{:ok, casted} <- Ash.Type.cast_input(attribute.type, prepared),
{:ok, casted} <- handle_change(changeset, attribute, casted),
:ok <- validate_allow_nil(attribute, casted),
{:ok, casted} <-
Ash.Type.apply_constraints(attribute.type, casted, attribute.constraints) do
data_value = Map.get(changeset.data, attribute.name)
cond do
is_nil(data_value) and is_nil(casted) ->
changeset
Ash.Type.equal?(attribute.type, casted, data_value) ->
changeset
true ->
%{changeset | attributes: Map.put(changeset.attributes, attribute.name, casted)}
end
else
:error ->
add_invalid_errors(:attribute, changeset, attribute)
{:error, error_or_errors} ->
add_invalid_errors(:attribute, changeset, attribute, error_or_errors)
end
end
end
@doc "Calls `force_change_attribute/3` for each key/value pair provided"
@spec force_change_attributes(t(), map) :: t()
def force_change_attributes(changeset, changes) do
Enum.reduce(changes, changeset, fn {key, value}, changeset ->
force_change_attribute(changeset, key, value)
end)
end
@doc "Changes an attribute even if it isn't writable"
@spec force_change_attribute(t(), atom, any) :: t()
def force_change_attribute(changeset, attribute, value) do
case Ash.Resource.attribute(changeset.resource, attribute) do
nil ->
error =
NoSuchAttribute.exception(
resource: changeset.resource,
name: attribute
)
add_error(changeset, error)
attribute when is_nil(value) ->
%{changeset | attributes: Map.put(changeset.attributes, attribute.name, nil)}
attribute ->
with {:ok, prepared} <- prepare_change(changeset, attribute, value),
{:ok, casted} <- Ash.Type.cast_input(attribute.type, prepared),
{:ok, casted} <- handle_change(changeset, attribute, casted),
{:ok, casted} <-
Ash.Type.apply_constraints(attribute.type, casted, attribute.constraints) do
data_value = Map.get(changeset.data, attribute.name)
cond do
is_nil(data_value) and is_nil(casted) ->
changeset
Ash.Type.equal?(attribute.type, casted, data_value) ->
changeset
true ->
%{changeset | attributes: Map.put(changeset.attributes, attribute.name, casted)}
end
else
:error ->
add_invalid_errors(:attribute, changeset, attribute)
{:error, error_or_errors} ->
add_invalid_errors(:attribute, changeset, attribute, error_or_errors)
end
end
end
@doc "Adds a before_action hook to the changeset."
@spec before_action(t(), (t() -> t() | {t(), %{notifications: list(Ash.notification())}})) ::
t()
def before_action(changeset, func) do
%{changeset | before_action: [func | changeset.before_action]}
end
@doc "Adds an after_action hook to the changeset."
@spec after_action(
t(),
(t(), Ash.record() ->
{:ok, Ash.record()} | {:ok, Ash.record(), list(Ash.notification())} | {:error, term})
) :: t()
def after_action(changeset, func) do
%{changeset | after_action: [func | changeset.after_action]}
end
@doc "Returns the original data with attribute changes merged, if the changeset is valid."
@spec apply_attributes(t()) :: {:ok, Ash.record()} | {:error, t()}
def apply_attributes(%{valid?: true} = changeset) do
{:ok,
Enum.reduce(changeset.attributes, changeset.data, fn {attribute, value}, data ->
Map.put(data, attribute, value)
end)}
end
def apply_attributes(changeset), do: {:error, changeset}
@doc "Clears an attribute or relationship change off of the changeset"
def clear_change(changeset, field) do
cond do
attr = Ash.Resource.attribute(changeset.resource, field) ->
%{changeset | attributes: Map.delete(changeset.attributes, attr.name)}
rel = Ash.Resource.relationship(changeset.resource, field) ->
%{changeset | relationships: Map.delete(changeset.relationships, rel.name)}
true ->
changeset
end
end
@doc "Adds an error to the changesets errors list, and marks the change as `valid?: false`"
@spec add_error(t(), Ash.error() | String.t() | list(Ash.error() | String.t())) :: t()
def add_error(changeset, errors) when is_list(errors) do
if Keyword.keyword?(errors) do
%{
changeset
| errors: [to_change_error(errors) | changeset.errors],
valid?: false
}
else
Enum.reduce(errors, changeset, &add_error(&2, &1))
end
end
def add_error(changeset, error) when is_binary(error) do
add_error(
changeset,
InvalidChanges.exception(message: error)
)
end
def add_error(changeset, error) do
%{changeset | errors: [error | changeset.errors], valid?: false}
end
defp to_change_error(keyword) do
if keyword[:field] do
InvalidAttribute.exception(
field: keyword[:field],
message: keyword[:message],
vars: keyword
)
else
InvalidChanges.exception(
fields: keyword[:fields] || [],
message: keyword[:message],
vars: keyword
)
end
end
defp prepare_change(%{action_type: :create}, _attribute, value), do: {:ok, value}
defp prepare_change(changeset, attribute, value) do
old_value = Map.get(changeset.data, attribute.name)
Ash.Type.prepare_change(attribute.type, old_value, value)
end
defp handle_change(%{action_type: :create}, _attribute, value), do: {:ok, value}
defp handle_change(changeset, attribute, value) do
old_value = Map.get(changeset.data, attribute.name)
Ash.Type.handle_change(attribute.type, old_value, value)
end
defp reconcile_relationship_changes(%{replace: _, add: add} = changes) do
changes
|> Map.delete(:add)
|> Map.update!(:replace, fn replace ->
replace ++ add
end)
|> reconcile_relationship_changes()
end
defp reconcile_relationship_changes(%{replace: _, remove: remove} = changes) do
changes
|> Map.delete(:remove)
|> Map.update!(:replace, fn replace ->
Enum.reject(replace, &(&1 in remove))
end)
|> reconcile_relationship_changes()
end
defp reconcile_relationship_changes(changes) do
changes
|> update_if_present(:replace, &uniq_if_list/1)
|> update_if_present(:remove, &uniq_if_list/1)
|> update_if_present(:add, &uniq_if_list/1)
end
defp uniq_if_list(list) when is_list(list), do: Enum.uniq(list)
defp uniq_if_list(other), do: other
defp update_if_present(map, key, func) do
if Map.has_key?(map, key) do
Map.update!(map, key, func)
else
map
end
end
defp through_changeset(relationship, changes) do
new(relationship.through, changes)
end
defp primary_keys_with_changes(_, []), do: {:ok, []}
defp primary_keys_with_changes(relationship, records) do
Enum.reduce_while(records, {:ok, []}, fn
{record, changes}, {:ok, acc} ->
with {:ok, primary_key} <- primary_key(relationship, record),
%{valid?: true} = changeset <- through_changeset(relationship, changes) do
{:cont, {:ok, [{primary_key, changeset} | acc]}}
else
%{valid?: false, errors: errors} -> {:halt, {:error, errors}}
{:error, error} -> {:halt, {:error, error}}
end
record, {:ok, acc} ->
case primary_key(relationship, record) do
{:ok, primary_keys} when is_list(primary_keys) ->
{:cont, {:ok, primary_keys ++ acc}}
{:ok, primary_key} ->
{:cont, {:ok, [primary_key | acc]}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
end
defp primary_key(_, nil), do: {:ok, nil}
defp primary_key(relationship, records) when is_list(records) do
case Ash.Resource.primary_key(relationship.destination) do
[_field] ->
multiple_primary_keys(relationship, records)
_ ->
pluck_pk_fields(relationship, records)
end
end
defp primary_key(relationship, record) do
do_primary_key(relationship, record)
end
defp pluck_pk_fields(relationship, records) do
Enum.reduce_while(
records,
{:ok, []},
fn
record, {:ok, acc} ->
case do_primary_key(relationship, record) do
{:ok, pk} -> {:cont, {:ok, [pk | acc]}}
{:error, error} -> {:halt, {:error, error}}
end
end
)
end
defp do_primary_key(relationship, record) when is_map(record) do
primary_key = Ash.Resource.primary_key(relationship.destination)
is_pkey_map? =
Enum.all?(
primary_key,
fn key ->
Map.has_key?(record, key) || Map.has_key?(record, to_string(key))
end
)
if is_pkey_map? do
pkey =
Enum.reduce(primary_key, %{}, fn key, acc ->
case Map.fetch(record, key) do
{:ok, value} -> Map.put(acc, key, value)
:error -> Map.put(acc, key, Map.get(record, to_string(key)))
end
end)
{:ok, pkey}
else
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Invalid identifier #{inspect(record)}"
)
{:error, error}
end
end
defp do_primary_key(relationship, record) do
single_primary_key(relationship, record)
end
defp multiple_primary_keys(relationship, values) do
Enum.reduce_while(values, {:ok, []}, fn record, {:ok, primary_keys} ->
case do_primary_key(relationship, record) do
{:ok, pkey} -> {:cont, {:ok, [pkey | primary_keys]}}
{:error, error} -> {:halt, {:error, error}}
end
end)
end
defp single_primary_key(relationship, value) do
with [field] <- Ash.Resource.primary_key(relationship.destination),
attribute <- Ash.Resource.attribute(relationship.destination, field),
{:ok, casted} <- Ash.Type.cast_input(attribute.type, value) do
{:ok, %{field => casted}}
else
_ ->
error =
InvalidRelationship.exception(
relationship: relationship.name,
message: "Invalid identifier #{inspect(value)}"
)
{:error, error}
end
end
@doc false
def changes_depend_on(changeset, dependency) do
%{changeset | change_dependencies: [dependency | changeset.change_dependencies]}
end
@doc false
def add_requests(changeset, requests) when is_list(requests) do
Enum.reduce(requests, changeset, &add_requests(&2, &1))
end
def add_requests(changeset, request) do
%{changeset | requests: [request | changeset.requests]}
end
defp validate_allow_nil(%{allow_nil?: false} = attribute, nil) do
{:error,
InvalidAttribute.exception(
field: attribute.name,
message: "must be present"
)}
end
defp validate_allow_nil(_, _), do: :ok
defp add_invalid_errors(type, changeset, attribute, message \\ nil) do
messages =
if Keyword.keyword?(message) do
[message]
else
List.wrap(message)
end
Enum.reduce(messages, changeset, fn message, changeset ->
opts =
case message do
keyword when is_list(keyword) ->
fields =
case List.wrap(keyword[:fields]) do
[] ->
List.wrap(keyword[:field])
fields ->
fields
end
fields
|> case do
[] ->
[
keyword
|> Keyword.put(
:message,
add_index(keyword[:message], keyword)
)
|> Keyword.put(:field, attribute.name)
]
fields ->
Enum.map(
fields,
&Keyword.merge(message,
field: attribute.name,
message: add_index(add_field(keyword[:message], "#{&1}"), keyword)
)
)
end
message when is_binary(message) ->
[[field: attribute.name, message: message]]
_ ->
[[field: attribute.name]]
end
exception =
case type do
:attribute -> InvalidAttribute
:argument -> InvalidArgument
end
Enum.reduce(opts, changeset, fn opts, changeset ->
error =
exception.exception(
field: Keyword.get(opts, :field),
message: Keyword.get(opts, :message),
vars: opts
)
add_error(changeset, error)
end)
end)
end
defp add_field(message, field) do
"at field #{field} " <> message
end
defp add_index(message, opts) do
if opts[:index] do
"at index #{opts[:index]} " <> message
else
message
end
end
end
|
lib/ash/changeset/changeset.ex
| 0.90165
| 0.774605
|
changeset.ex
|
starcoder
|
defmodule Mix.Tasks.Server do
@moduledoc """
This Mix task is used to start an HTTP server from the current working
directory. If the directory contains an `index.html` or `index.htm` file
then that file will be served; otherwise a file explorer will be presented.
The valid CLI arguments include:
```
--port The port that the server should run on (default is 4040)
```
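
For example, to serve the current directory on port 8080:

```
mix server --port 8080
```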
"""
use Mix.Task
@shortdoc "Start an HTTP server from the current working directory"
@version Mix.Project.config()[:version]
@server_deps Mix.Project.config()[:deps]
@switches [
port: :integer
]
@impl true
def run([version]) when version in ~w(-v --version) do
Mix.shell().info("""
HTTP Server v#{@version}
If you use some of my open source libraries and projects
and want to ensure their continued development,
be sure to sponsor my work so that I can allocate
more time to open source development.
https://github.com/sponsors/akoutmos
""")
end
def run(args) do
# Parse the cli args
port = parse_port(args)
# Set the Mix env
Mix.env(:prod)
# Supporting lib and runtime configuration
Application.put_env(:phoenix, :json_library, Jason)
Application.put_env(:logger, :console, level: :info)
# Configure logger
Logger.configure_backend(:console,
format: "$time $metadata[$level] $message\n",
metadata: [:request_id],
level: :info
)
# Configure the endpoint server
Application.put_env(:ex_server, ExServerWeb.Endpoint,
http: [ip: {127, 0, 0, 1}, port: port],
server: true,
secret_key_base: String.duplicate("a", 64),
cache_static_manifest: "priv/static/cache_manifest.json",
live_view: [signing_salt: "FtRh_twgiYg7vABG"]
)
# Install production dependencies
@server_deps
|> Enum.filter(fn
{package, version} when is_atom(package) and is_binary(version) -> true
_ -> false
end)
|> Mix.install()
# Start the Phoenix server
{:ok, _} = ExServer.Application.start([], [])
Process.sleep(:infinity)
end
defp parse_port(args) do
args
|> OptionParser.parse(strict: @switches)
|> case do
{[port: port], [], []} ->
port
{[], [], []} ->
4040
_ ->
raise "Invalid CLI arguments provided. Run `mix help server` for the help menu"
end
end
end
|
lib/mix/tasks/server.ex
| 0.726134
| 0.683301
|
server.ex
|
starcoder
|
defmodule DataBase.Schemas.AccountHistory do
@moduledoc """
The bank account daily logbook.
It is in charge of keeping track of all
`t:DataBase.Schemas.AccountMovement.t/0` `:direction` and `:amount`
of a `t:DataBase.Schemas.Account.t/0` for each day.
It acts as a logbook, expressing information over the
`account_history` database table. Its `:account_id` and `:date`
together act as a composite primary key.
Provides functions to both register and report information.
"""
import Ecto.Query
import DataBase.Queries.AccountHistory
use Ecto.Schema
alias Decimal, as: D
alias Ecto.Adapters.SQL
alias DataBase.Schemas.AccountMovement, as: Movement
alias DataBase.Repos.AmethystRepo, as: Repo
alias DataBase.{Schemas.Account, Helpers}
@typedoc """
A `DataBase.Schemas.AccountHistory` struct.
"""
@type t :: %__MODULE__{}
@typedoc """
A standard Postgrex response to raw SQL queries.
Represents the response to a registering operation.
See `register/1`.
"""
@type query_result_t :: {:ok, Postgrex.Result.t()} | {:error, any()}
@typedoc """
Represents the overall shape for the registering input.
"""
@type registering_data_t :: [pos_integer() | Date.t() | D.t()]
@typedoc """
Represents the disposable report filters.
"""
@type report_range_t :: :total | Date.t() | Date.Range.t()
@typedoc """
A standard Ecto response to `DataBase.Schemas.AccountHistory` data
insertion.
"""
@type report_result_t :: {:ok, t()} | {:error, any()}
@primary_key false
schema "account_history" do
field :date, :date, primary_key: true
field :initial_balance, :decimal
field :final_balance, :decimal
field :inbounds, :decimal
field :outbounds, :decimal
belongs_to(:account, Account, primary_key: true)
end
@doc """
Registers a given `t:DataBase.Schemas.AccountMovement.t/0` to its
respective `t:t/0` logbook properly updating it.
Creates a new one for the first
`t:DataBase.Schemas.AccountMovement.t/0` of each day.
"""
@spec register(Movement.t) :: Movement.t() | :error
def register(%Movement{} = movement) do
Repo
|> SQL.query(update_query(), registering_data(movement))
|> do_register(movement)
end
@doc """
Given a `t:DataBase.Schemas.Account.t/0` and a `t:report_range_t/0`,
proceeds into filling a report over the requested period of time.
Returns a `t:report_result_t/0`. The report data is presented in
the form of a single `t:t/0`.
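
For example, a minimal sketch (assuming `account` holds a loaded
`t:DataBase.Schemas.Account.t/0`):

    {:ok, history} = DataBase.Schemas.AccountHistory.report(account, :total)
    history.final_balance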
"""
@spec report(Account.t, report_range_t) :: report_result_t()
def report(%Account{} = account, :total) do
report(account, Account.activity_range(account))
end
def report(%Account{} = account, %Date{} = date) do
on(account, date)
end
def report(%Account{} = account, %Date.Range{} = r) do
Repo
|> SQL.query(report_query(), [account.id, r.first, r.last])
|> do_report()
end
@spec do_register(query_result_t, Movement.t) :: Movement.t | :error
defp do_register({:ok, _r}, %Movement{} = movement), do: movement
defp do_register(_r, _m), do: :error
@spec registering_data(Movement.t) :: registering_data_t()
defp registering_data(%Movement{} = movement) do
[
movement.account_id,
movement.move_on,
Movement.inbounds_on(movement),
Movement.outbounds_on(movement),
Movement.initial_balance_for(movement),
movement.final_balance
]
end
@spec build(nil | map) :: t()
defp build(nil), do: %__MODULE__{}
defp build(%{} = opts) do
%__MODULE__{
inbounds: opts["inbounds"],
outbounds: opts["outbounds"],
initial_balance: opts["initial_balance"],
final_balance: opts["final_balance"]
}
end
@spec on(Account.t, Date.t) :: {:ok, t()}
defp on(%Account{} = account, %Date{} = date) do
account.id
|> on_query(date)
|> Repo.one()
|> respond()
end
@spec do_report(query_result_t) :: report_result_t()
defp do_report({:ok, result}) do
result
|> Helpers.Postgrex.pack_first()
|> build()
|> respond()
end
defp do_report(_), do: {:error, :database_failure}
@spec respond(t | any) :: {:ok, t()}
defp respond(%__MODULE__{} = history), do: {:ok, history}
defp respond(_), do: {:ok, build(nil)}
@spec on_query(pos_integer, Date.t) :: Ecto.Query.t()
defp on_query(account_id, date) do
from h in __MODULE__,
where: h.account_id == ^account_id,
where: h.date == ^date,
limit: 1
end
end
|
apps/database/lib/database/schemas/account_history.ex
| 0.845736
| 0.681038
|
account_history.ex
|
starcoder
|
defmodule SparkPost.Endpoint do
@moduledoc """
Base client for the SparkPost API, able to make requests and interpret responses.
This module underpins the SparkPost.* modules.
"""
@default_endpoint "https://api.sparkpost.com/api/v1/"
@doc """
Make a request to the SparkPost API.
## Parameters
- `method`: HTTP 1.1 request method as an atom:
- `:delete`
- `:get`
- `:head`
- `:options`
- `:patch`
- `:post`
- `:put`
- `endpoint`: SparkPost API endpoint as string ("transmissions", "templates", ...)
- `body`: A Map that will be encoded to JSON to be sent as the body of the request (defaults to empty)
- `headers`: A Map of headers of the form %{"Header-Name" => "Value"} to be sent with the request
- `options`: A Keyword list of optional elements including:
- `:params`: A Keyword list of query parameters
## Example
List transmissions for the "ElixirRox" campaign:
SparkPost.Endpoint.request(:get, "transmissions", [campaign_id: "ElixirRox"])
#=> %SparkPost.Endpoint.Response{results: [%{"campaign_id" => "",
"content" => %{"template_id" => "inline"}, "description" => "",
"id" => "102258558346809186", "name" => "102258558346809186",
"state" => "Success"}, ...], status_code: 200}
"""
def request(method, endpoint, body \\ %{}, headers \\ %{}, options \\ [], decode_results \\ true) do
url = Application.get_env(:sparkpost, :api_endpoint, @default_endpoint) <> endpoint
{:ok, request_body} = encode_request_body(body)
request_headers =
if method in [:get, :delete] do
headers
else
Map.merge(headers, %{"Content-Type": "application/json"})
end
|> Map.merge(base_request_headers())
request_options =
options
|> Keyword.put(:timeout, Application.get_env(:sparkpost, :http_timeout, 30_000))
|> Keyword.put(:recv_timeout, Application.get_env(:sparkpost, :http_recv_timeout, 8000))
HTTPoison.request(method, url, request_body, request_headers, request_options)
|> handle_response(decode_results)
end
@doc """
Extract a meaningful structure from a generic endpoint response:
`response.results[subkey]` as `struct_type`.
"""
def marshal_response(response, struct_type, subkey \\ nil)
def marshal_response(%SparkPost.Endpoint.Response{} = response, struct_type, nil) do
struct(struct_type, response.results)
end
def marshal_response(%SparkPost.Endpoint.Response{} = response, struct_type, subkey) do
struct(struct_type, response.results[subkey])
end
def marshal_response(%SparkPost.Endpoint.Error{} = response, _struct_type, _subkey) do
response
end
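# A usage sketch (the `SparkPost.Transmission` struct name is assumed here
# purely for illustration):
#
#     SparkPost.Endpoint.request(:get, "transmissions")
#     |> SparkPost.Endpoint.marshal_response(SparkPost.Transmission)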
defp handle_response({:ok, %HTTPoison.Response{status_code: code, body: body}}, decode_results)
when code >= 200 and code < 300 do
decoded_body = decode_response_body(body)
if decode_results && is_map(decoded_body) && Map.has_key?(decoded_body, :results) do
%SparkPost.Endpoint.Response{status_code: code, results: decoded_body.results}
else
%SparkPost.Endpoint.Response{status_code: code, results: decoded_body}
end
end
defp handle_response({:ok, %HTTPoison.Response{status_code: code, body: body}}, _decode_results)
when code >= 400 do
decoded_body = decode_response_body(body)
if is_map(decoded_body) && Map.has_key?(decoded_body, :errors) do
%SparkPost.Endpoint.Error{status_code: code, errors: decoded_body.errors}
else
# Error responses without an :errors key: surface the decoded body as-is.
%SparkPost.Endpoint.Error{status_code: code, errors: [decoded_body]}
end
end
defp handle_response({:error, %HTTPoison.Error{reason: reason}}, _decode_results) do
%SparkPost.Endpoint.Error{status_code: nil, errors: [reason]}
end
defp base_request_headers do
{:ok, version} = :application.get_key(:sparkpost, :vsn)
%{
"User-Agent": "elixir-sparkpost/" <> to_string(version),
Authorization: Application.get_env(:sparkpost, :api_key)
}
end
# Do not try to remove nils from an empty map
defp encode_request_body(body) when is_map(body) and map_size(body) == 0, do: {:ok, ""}
defp encode_request_body(body) do
body |> Washup.filter() |> Poison.encode()
end
defp decode_response_body(body) when byte_size(body) == 0, do: ""
defp decode_response_body(body) do
body |> Poison.decode!(keys: :atoms)
end
end
|
lib/endpoint.ex
| 0.864454
| 0.427875
|
endpoint.ex
|
starcoder
|
defmodule ExRabbitMQ.Consumer do
@moduledoc """
A behaviour module that abstracts away the handling of RabbitMQ connections and channels.
It abstracts the handling of message delivery and acknowledgement.
It also provides hooks to allow the programmer to wrap the consumption of a message without having to directly
access the AMQP interfaces.
For a connection configuration example see `ExRabbitMQ.Config.Connection`.
For a queue configuration example see `ExRabbitMQ.Config.Session`.
#### Example usage for a consumer implementing a `GenServer`
```elixir
defmodule MyExRabbitMQConsumer do
@module __MODULE__
use GenServer
use ExRabbitMQ.Consumer, GenServer
def start_link do
GenServer.start_link(@module, :ok)
end
def init(state) do
new_state =
xrmq_init(:my_connection_config, :my_session_config, state)
|> xrmq_extract_state()
{:ok, new_state}
end
# required override
def xrmq_basic_deliver(payload, meta, state) do
# your message delivery logic goes here...
{:noreply, state}
end
# optional override when there is a need to set up the channel right after the connection has been established.
def xrmq_channel_setup(channel, state) do
# any channel setup goes here...
{:ok, state}
end
# optional override when there is a need to set up the queue and/or exchange just before consuming.
def xrmq_queue_setup(channel, queue, state) do
# The default queue setup uses the exchange, exchange_opts, bind_opts and qos_opts from
# the queue's configuration to set up the QoS, declare the exchange and bind it with the queue.
# You can override this function, but you can also keep the automatic queue setup functionality by
# calling super, e.g.:
{:ok, state} = super(channel, queue, state)
# any other queue setup goes here...
end
end
```
"""
alias ExRabbitMQ.AST.Common, as: CommonAST
alias ExRabbitMQ.Config.Session, as: SessionConfig
alias ExRabbitMQ.State, as: XRMQState
require ExRabbitMQ.AST.Common
require ExRabbitMQ.AST.Consumer.GenServer
@type callback_result ::
{:noreply, term}
| {:noreply, term, timeout | :hibernate}
| {:noreply, [term], term}
| {:noreply, [term], term, :hibernate}
| {:stop, term, term}
@doc """
Setup the process for consuming a RabbitMQ queue.
Initiates a connection or reuses an existing one.
When a connection is established then a new channel is opened.
Next, `c:xrmq_channel_setup/2` is called to do any extra work on the opened channel.
If `start_consuming` is `true` then the process will start consuming messages from RabbitMQ.
The function accepts the following arguments:
* `connection` - The configuration information for the RabbitMQ connection.
It can either be an `ExRabbitMQ.Config.Connection` struct or an atom that will be used as the `key` for reading
the `:exrabbitmq` configuration part from the environment.
For more information on how to configure the connection, check `ExRabbitMQ.Config.Connection`.
* `queue` - The configuration information for the RabbitMQ queue to consume.
It can either be an `ExRabbitMQ.Config.Session` struct or an atom that will be used as the `key` for reading
the `:exrabbitmq` configuration part from the environment.
For more information on how to configure the consuming queue, check `ExRabbitMQ.Config.Session`.
* `start_consuming` - When `true` then `c:xrmq_consume/1` is called automatically after the connection and channel have
been established successfully. *Optional: Defaults to `true`.*
* `state` - The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_init(CommonAST.connection(), CommonAST.queue(), boolean, term) ::
CommonAST.result()
@doc """
This helper function tries to use `c:xrmq_init/4` to set up a connection to RabbitMQ.
In case that fails, it tries again after a configured interval.
The interval can be configured by writing:
```elixir
config :exrabbitmq, :try_init_interval, <THE INTERVAL BETWEEN CONNECTION RETRIES IN MILLISECONDS>
```
The simplest way to use this is to add the following as part of the `GenServer.init/1` callback result:
```elixir
ExRabbitMQ.continue_tuple_try_init(connection_config, session_config, true, nil)
```
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_try_init(CommonAST.connection(), CommonAST.queue(), boolean, term) ::
CommonAST.result()
@doc """
This overridable callback is called by `c:xrmq_try_init/4` just before a new connection attempt is made.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_on_try_init(term) :: term
@doc """
This overridable callback is called by `c:xrmq_try_init/4` when a new connection has been established.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:cont, state}`.
"""
@callback xrmq_on_try_init_success(term) :: {:cont, term} | {:halt, term, term}
@doc """
This overridable callback is called by `c:xrmq_try_init/4` when a new connection could not be established
but a new attempt can be made (ie, waiting for a connection to become available).
The error that occurred as well as the wrapper process's state is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:cont, state}`.
"""
@callback xrmq_on_try_init_error_retry(term, term) :: {:cont, term} | {:halt, term, term}
@doc """
This overridable callback is called by `c:xrmq_try_init/4` when a new connection could not be established
and the error is not normally recoverable (ie, an error not related to a connection being currently unavailable).
The error that occurred as well as the wrapper process's state is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:halt, reason, state}`.
"""
@callback xrmq_on_try_init_error(term, term) :: {:cont, term} | {:halt, term, term}
@doc false
@callback xrmq_open_channel_setup_consume(term) :: {:ok, term} | {:error, term, term}
@doc false
@callback xrmq_session_setup(AMQP.Channel.t(), atom | SessionConfig.t(), term) ::
CommonAST.result()
@doc """
This hook is called when a connection has been established and a new channel has been opened.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_setup(AMQP.Channel.t(), term) :: CommonAST.result()
@doc """
This hook is called when a connection has been established and a new channel has been opened,
right after `c:xrmq_channel_setup/2`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_channel_open(AMQP.Channel.t(), term) :: CommonAST.result()
@doc """
This hook is called automatically if `start_consuming` was `true` when `c:xrmq_init/4` was called.
If not, then the user has to call it to start consuming.
It is invoked when a connection has been established and a new channel has been opened.
Its flow is to:
1. Declare the queue
2. Run `c:xrmq_queue_setup/3`
3. Start consuming from the queue
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_consume(term) :: CommonAST.result()
@doc """
This hook is called automatically as part of the flow in `c:xrmq_consume/1`.
It allows the user to run extra queue setup steps when the queue has been declared.
The default queue setup uses the exchange, exchange_opts, bind_opts and qos_opts from
the queue's configuration to setup the QoS, declare the exchange and bind it with the queue.
The wrapper process's state is passed in to allow the callback to mutate it if overriden.
This callback is the only required callback (i.e., without any default implementation) and
is called as a response to a `:basic_consume` message.
It is passed the `payload` of the request as well as the `meta` object or the message.
The wrapper process's state is passed in to allow the callback to mutate it if overriden.
"""
@callback xrmq_basic_deliver(term, term, term) :: callback_result
@doc """
This overridable hook is called as a response to a `:basic_cancel` message.
It is passed the `cancellation_info` of the request and by default it logs an error and
returns `{:stop, :basic_cancel, state}`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_cancel(term, term) :: callback_result
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user
wants to *ack* a message.
It is passed the `delivery_tag` of the request and by default it simply *acks* the message
as per the RabbitMQ API.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_ack(String.t(), term) :: CommonAST.result()
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user wants
to reject a message.
It is passed the `delivery_tag` of the request and by default it simply rejects the message
as per the RabbitMQ API.
If the `opts` argument is omitted, the default value is `[]`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_reject(String.t(), term, term) :: CommonAST.result()
@doc """
This overridable function can be called whenever `no_ack` is set to `false` and the user wants
to requeue a message.
It is passed the `delivery_tag` of the request and by default it simply requeues the message so that it is
redelivered to the next available consumer, as per the RabbitMQ API.
If the `opts` argument is omitted, the default value is `[]`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_nack(String.t(), term, term) :: CommonAST.result()
@doc """
This overridable function publishes the `payload` to the `exchange` using the provided `routing_key`.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_basic_publish(term, String.t(), String.t(), [term]) ::
CommonAST.basic_publish_result()
@doc """
Helper function that extracts the `state` argument from the passed in tuple.
"""
@callback xrmq_extract_state({:ok, term} | {:error, term, term}) :: term
@doc """
This overridable hook is called when a new connection is established.
It is passed the connection struct and the wrapper process's state is passed in to allow the callback
to mutate it if overridden.
"""
@callback xrmq_on_connection_opened(AMQP.Connection.t(), term) :: term
@doc """
This overridable hook is called when an already established connection has just been re-established.
It is passed the connection struct and the wrapper process's state is passed in to allow the callback
to mutate it if overridden.
"""
@callback xrmq_on_connection_reopened(AMQP.Connection.t(), term) :: term
@doc """
This overridable hook is called when a new connection is established but consuming from the configured queue has
failed.
The error that occurred as well as the wrapper process's state is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:halt, reason, state}`.
"""
@callback xrmq_on_connection_opened_consume_failed(term, term) ::
{:cont, term} | {:halt, term, term}
@doc """
This overridable hook is called when an already established connection has just been re-established but consuming
from the configured queue has failed.
The error that occurred as well as the wrapper process's state is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:halt, reason, state}`.
"""
@callback xrmq_on_connection_reopened_consume_failed(term, term) ::
{:cont, term} | {:halt, term, term}
@doc """
This overridable hook is called when a channel that had died has just been reopened but consuming from the configured
queue has failed.
The error that occurred as well as the wrapper process's state is passed in to allow the callback to mutate
it if overridden.
The return value of this callback tells the caller how to continue.
If `{:cont, state}` is returned, the caller will continue with `{:noreply, state}`.
If `{:halt, reason, state}` is returned, the caller will continue with `{:stop, reason, state}`.
By default, the return value of this callback is `{:halt, reason, state}`.
"""
@callback xrmq_on_channel_reopened_consume_failed(term, term) ::
{:cont, term} | {:halt, term, term}
@doc """
This overridable hook is called when an already established connection is dropped.
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_on_connection_closed(term) :: term
@doc """
This overridable hook is called when a message is received and, with message size accounting enabled,
it is decided that the process should hibernate.
Message size accounting (disabled by default) can be enabled by writing:
```elixir
config :exrabbitmq, :accounting_enabled, true
```
The configuration option to set the threshold for the message bytes seen so far, in KBs, is set by writing:
```elixir
config :exrabbitmq, :kb_of_messages_seen_so_far_threshold, <NUMBER OF KBs TO USE AS THE THRESHOLD>
```
The result of this callback will be returned as the result of the callback where the message has been delivered
and `c:xrmq_basic_deliver/3` has been called.
The result of `c:xrmq_basic_deliver/3` is the one used as the argument to this callback and by default it is left
untouched.
"""
@callback xrmq_on_hibernation_threshold_reached(tuple) :: tuple
@doc """
This overridable hook is called when a message is buffered while waiting for a connection to be (re-)established.
Message buffering (disabled by default) can be enabled by writing:
```elixir
config :exrabbitmq, :message_buffering_enabled, true
```
The arguments passed are the current count of the buffered messages so far as well as the message payload,
exchange, routing key and the options passed to the call to `xrmq_basic_publish/4`.
"""
@callback xrmq_on_message_buffered(non_neg_integer, binary, binary, binary, keyword) :: term
@doc """
This overridable hook is called when a connection is (re-)established and there are buffered messages to send.
Message buffering (disabled by default) can be enabled by writing:
```elixir
config :exrabbitmq, :message_buffering_enabled, true
```
The wrapper process's state is passed in to allow the callback to mutate it if overridden.
"""
@callback xrmq_flush_buffered_messages([XRMQState.buffered_message()], term) :: term
# credo:disable-for-next-line
defmacro __using__(_) do
inner_ast = ExRabbitMQ.AST.Consumer.GenServer.ast()
common_ast = ExRabbitMQ.AST.Common.ast()
# credo:disable-for-next-line
quote location: :keep do
alias ExRabbitMQ.Config.Connection, as: XRMQConnectionConfig
alias ExRabbitMQ.Config.Session, as: XRMQSessionConfig
alias ExRabbitMQ.Constants, as: XRMQConstants
alias ExRabbitMQ.State, as: XRMQState
require ExRabbitMQ.Logger, as: XRMQLogger
unquote(inner_ast)
def xrmq_init(connection_config, session_config, start_consuming \\ true, state) do
connection_config = XRMQConnectionConfig.get(connection_config)
session_config = XRMQSessionConfig.get(session_config)
XRMQState.set_auto_consume_on_connection(start_consuming)
case xrmq_connection_setup(connection_config) do
:ok ->
XRMQState.set_session_config(session_config)
case xrmq_open_channel_setup_consume(state) do
{:ok, state} ->
state = xrmq_flush_buffered_messages(state)
{:ok, state}
{:error, reason, state} ->
case xrmq_on_connection_opened_consume_failed(reason, state) do
{:cont, state} -> {:ok, state}
{:halt, reason, state} -> {:error, reason, state}
end
end
{:error, reason} ->
XRMQState.set_connection_status(:disconnected)
{:error, reason, state}
end
end
def xrmq_try_init(connection_config, session_config, start_consuming \\ true, state) do
xrmq_try_init_consumer({connection_config, session_config, start_consuming}, state)
end
def xrmq_open_channel_setup_consume(state) do
with {:ok, state} <- xrmq_open_channel(state),
{channel, _} <- XRMQState.get_channel_info(),
session_config <- XRMQState.get_session_config(),
{:ok, state} <- xrmq_session_setup(channel, session_config, state),
# get the session_config again because it may have changed (eg, by using an anonymous queue)
session_config <- XRMQState.get_session_config(),
{:ok, state} <- xrmq_qos_setup(channel, session_config.qos_opts, state) do
if XRMQState.get_auto_consume_on_connection(),
do: xrmq_consume(channel, session_config.queue, session_config.consume_opts, state),
else: {:ok, state}
else
{:error, _reason, _state} = error -> error
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
def xrmq_consume(state) do
{channel, _} = XRMQState.get_channel_info()
session_config = XRMQState.get_session_config()
xrmq_consume(channel, session_config.queue, session_config.consume_opts, state)
end
def xrmq_consume(channel, queue, consume_opts, state) do
XRMQState.set_auto_consume_on_connection(true)
case AMQP.Basic.consume(channel, queue, nil, consume_opts) do
{:ok, _} -> {:ok, state}
{:error, reason} -> {:error, reason, state}
end
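# AMQP calls may exit the caller (e.g. when the channel process is down);
# the catch below converts such exits into error tuples.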
catch
:exit, reason ->
case reason do
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
defp xrmq_qos_setup(_channel, [], state), do: {:ok, state}
defp xrmq_qos_setup(channel, opts, state) do
with :ok <- AMQP.Basic.qos(channel, opts) do
{:ok, state}
end
catch
:exit, reason ->
case reason do
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
def xrmq_basic_ack(delivery_tag, state) do
case XRMQState.get_channel_info() do
{nil, _} ->
{:error, XRMQConstants.no_channel_error(), state}
{channel, _} ->
try do
case AMQP.Basic.ack(channel, delivery_tag) do
:ok -> {:ok, state}
error -> {:error, error, state}
end
catch
:exit, reason ->
case reason do
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
end
end
def xrmq_basic_reject(delivery_tag, opts \\ [], state) do
case XRMQState.get_channel_info() do
{nil, _} ->
{:error, XRMQConstants.no_channel_error(), state}
{channel, _} ->
try do
case AMQP.Basic.reject(channel, delivery_tag, opts) do
:ok -> {:ok, state}
error -> {:error, error, state}
end
catch
:exit, reason ->
case reason do
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
end
end
def xrmq_basic_nack(delivery_tag, opts \\ [], state) do
case XRMQState.get_channel_info() do
{nil, _} ->
{:error, XRMQConstants.no_channel_error(), state}
{channel, _} ->
try do
case AMQP.Basic.nack(channel, delivery_tag, opts) do
:ok -> {:ok, state}
error -> {:error, error, state}
end
catch
:exit, reason ->
case reason do
{:error, reason} -> {:error, reason, state}
error -> {:error, error, state}
end
end
end
end
def xrmq_on_hibernation_threshold_reached(callback_result), do: callback_result
def xrmq_on_connection_opened_consume_failed(reason, state), do: {:halt, reason, state}
def xrmq_on_connection_reopened_consume_failed(reason, state), do: {:halt, reason, state}
def xrmq_on_channel_reopened_consume_failed(_reason, state), do: {:cont, state}
unquote(common_ast)
defp xrmq_try_init_consumer(
{connection_config_spec, session_config_spec, auto_consume} = opts,
state
) do
connection_config_spec
|> xrmq_init(session_config_spec, auto_consume, state)
|> xrmq_try_init_inner(opts)
end
defp xrmq_try_init_consumer({connection_config_spec, session_config_spec} = opts, state) do
connection_config_spec
|> xrmq_init(session_config_spec, true, state)
|> xrmq_try_init_inner(opts)
end
defoverridable xrmq_basic_cancel: 2,
xrmq_basic_ack: 2,
xrmq_basic_reject: 2,
xrmq_basic_reject: 3,
xrmq_basic_nack: 2,
xrmq_basic_nack: 3,
xrmq_on_hibernation_threshold_reached: 1,
xrmq_on_connection_opened_consume_failed: 2,
xrmq_on_connection_reopened_consume_failed: 2,
xrmq_on_channel_reopened_consume_failed: 2
end
end
end
|
lib/ex_rabbit_m_q/consumer.ex
| 0.882187
| 0.875095
|
consumer.ex
|
starcoder
|
defmodule PhoenixIntegration.Form.Messages do
@moduledoc false
# The various messages - both warnings and errors - that can be given to the user.
alias PhoenixIntegration.Form.Common
@headlines %{
no_such_name_in_form: "You tried to set the value of a tag that isn't in the form.",
arity_clash: "You are combining list and scalar values.",
tag_has_no_name: "A form tag has no name.",
empty_name: "A tag has an empty name.",
form_conflicting_paths: "The form has two conflicting names."
}
# This is used for testing as well as within this module.
def get(key), do: @headlines[key]
# ----------------------------------------------------------------------------
# Entry point
def emit(message_tuples, form) do
Enum.map(message_tuples, fn {message_atom, data} ->
emit_one(message_atom, form, data)
end)
end
defp emit_one(message_atom, form, context) when is_list(context) do
{severity, iodata} =
apply(__MODULE__, message_atom, [get(message_atom), form] ++ context)
warnings? = Application.get_env(:phoenix_integration, :warnings, true)
case {severity, warnings?} do
{:error, _} ->
put_iodata(:red, "Error", iodata)
{:warning, true} ->
put_iodata(:yellow, "Warning", iodata)
{:warning, false} ->
:ignore
end
end
defp emit_one(message_atom, form, context) do
emit_one(message_atom, form, [context])
end
# ----------------------------------------------------------------------------
# A function for each headline
def no_such_name_in_form(headline, form, context) do
hint =
case context.why do
:path_too_long -> [
"Your path is longer than the names it should match.",
key_values([
"Here is your path", inspect(context.change.path),
"Here is an available name", context.tree[context.last_tried].name])
]
:path_too_short -> [
"You provided only a prefix of all the available names.",
key_values([
"Here is your path", inspect(context.change.path),
"Here is an available name", Common.any_leaf(context.tree).name])
]
:possible_typo ->
key_values([
"Path tried", inspect(context.change.path),
"Is this a typo?", "#{inspect context.last_tried}",
"Your value", inspect(context.change.value)])
end
{:error, [headline, hint, form_description(form)]}
end
def arity_clash(headline, form, %{existing: existing, change: change}) do
hint =
case existing.has_list_value do
true -> [
"Note that the name of the tag you're setting ends in `[]`:",
" #{inspect existing.name}",
"So your value should be a list, rather than this:",
" #{inspect change.value}",
]
false -> [
"The value you want to use is a list:",
" #{inspect change.value}",
"But the name of the tag doesn't end in `[]`:",
" #{inspect existing.name}"
]
end
{:error, [headline, hint, form_description(form)]}
end
def tag_has_no_name(headline, form, floki_tag) do
{:warning, [
headline,
Floki.raw_html(floki_tag),
"It can't be included in the params sent to the controller.",
form_description(form),
]}
end
def empty_name(headline, form, floki_tag) do
{:warning, [
headline,
Floki.raw_html(floki_tag),
form_description(form),
]}
end
def form_conflicting_paths(headline, form, %{old: old, new: new}) do
{:warning, [
headline,
"Phoenix will ignore one of them.",
key_values([
"Earlier name", old.name,
" Later name", new.name,
]),
form_description(form),
]}
end
# ----------------------------------------------------------------------------
# This prints (to stdio) an iodata tree, but unlike IO.puts, it adds
# a newline at the end of each element. It also handles color.
defp put_iodata(color, word, [headline | rest]) do
prefix = apply(IO.ANSI, color, [])
IO.puts "#{prefix}#{word}: #{headline}"
for iodata <- rest, do: put_iodata(iodata)
IO.puts "#{IO.ANSI.reset}"
end
defp put_iodata(iodata) when is_list(iodata) do
for line <- iodata, do: put_iodata(line)
end
defp put_iodata(string) when is_binary(string), do: IO.puts string
# ----------------------------------------------------------------------------
defp form_description(form) do
[action] = Floki.attribute(form, "action")
[ key_value("Form action", inspect action),
case Floki.attribute(form, "id") do
[] -> []
[id] -> key_value("Form id", inspect id)
end
]
end
defp key_values(list) do
list
|> Enum.chunk_every(2)
|> Enum.map(fn [key, value] -> key_value(key, value) end)
end
defp key_value(key, value) do
"#{key}: #{value}"
end
end
|
lib/phoenix_integration/form/messages.ex
| 0.724091
| 0.470068
|
messages.ex
|
starcoder
|
defmodule Membrane.RTP.JitterBuffer.BufferStore do
@moduledoc false
# Store for RTP packets. Packets are stored in `Heap` ordered by packet index. Packet index is
# defined in RFC 3711 (SRTP) as: 2^16 * rollover count + sequence number.
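# For example, with one completed rollover (rollover count = 1), a packet with
# sequence number 5 maps to index 2^16 * 1 + 5 = 65_541.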
use Bunch
use Bunch.Access
alias Membrane.{Buffer, RTP}
alias Membrane.RTP.{JitterBuffer, Utils}
alias Membrane.RTP.JitterBuffer.Record
require Bitwise
@seq_number_limit Bitwise.bsl(1, 16)
defstruct flush_index: nil,
highest_incoming_index: nil,
heap: Heap.new(&Record.rtp_comparator/2),
set: MapSet.new(),
rollover_count: 0
@typedoc """
Type describing BufferStore structure.
Fields:
- `rollover_count` - count of all performed rollovers (cycles of sequence number)
- `heap` - contains records containing buffers
- `set` - helper structure for faster read operations; content is the same as in `heap`
- `flush_index` - index of the last packet that has been emitted (or would have been
emitted, but never arrived) as a result of a call to one of the `flush` functions
- `highest_incoming_index` - the highest index in the buffer so far, mapping to the most recently produced
RTP packet placed in JitterBuffer
"""
@type t :: %__MODULE__{
flush_index: JitterBuffer.packet_index() | nil,
highest_incoming_index: JitterBuffer.packet_index() | nil,
heap: Heap.t(),
set: MapSet.t(),
rollover_count: non_neg_integer()
}
@typedoc """
An atom describing an error that may happen during insertion.
"""
@type insert_error :: :late_packet
@typedoc """
An atom describing an error that may happen when fetching a buffer
from the Store.
"""
@type get_buffer_error :: :not_present
@doc """
Inserts buffer into the Store.
Every subsequent buffer must have a sequence number bigger than the previously returned
one, or be part of a rollover.
"""
@spec insert_buffer(t(), Buffer.t()) :: {:ok, t()} | {:error, insert_error()}
def insert_buffer(store, %Buffer{metadata: %{rtp: %{sequence_number: seq_num}}} = buffer) do
do_insert_buffer(store, buffer, seq_num)
end
@spec do_insert_buffer(t(), Buffer.t(), RTP.Header.sequence_number_t()) ::
{:ok, t()} | {:error, insert_error()}
defp do_insert_buffer(%__MODULE__{flush_index: nil} = store, buffer, 0) do
store = add_record(store, Record.new(buffer, @seq_number_limit), :next)
{:ok, %__MODULE__{store | flush_index: @seq_number_limit - 1}}
end
defp do_insert_buffer(%__MODULE__{flush_index: nil} = store, buffer, seq_num) do
store = add_record(store, Record.new(buffer, seq_num), :current)
{:ok, %__MODULE__{store | flush_index: seq_num - 1}}
end
defp do_insert_buffer(
%__MODULE__{
flush_index: flush_index,
highest_incoming_index: highest_incoming_index,
rollover_count: roc
} = store,
buffer,
seq_num
) do
highest_seq_num = rem(highest_incoming_index, @seq_number_limit)
{rollover, index} =
case Utils.from_which_rollover(highest_seq_num, seq_num, @seq_number_limit) do
:current -> {:current, seq_num + roc * @seq_number_limit}
:previous -> {:previous, seq_num + (roc - 1) * @seq_number_limit}
:next -> {:next, seq_num + (roc + 1) * @seq_number_limit}
end
if is_fresh_packet?(flush_index, index) do
record = Record.new(buffer, index)
{:ok, add_record(store, record, rollover)}
else
{:error, :late_packet}
end
end
@doc """
Flushes the store to the buffer with the next sequence number.
If this buffer is present, it will be returned.
Otherwise it will be treated as late and rejected on attempt to insert into the store.
"""
@spec flush_one(t) :: {Record.t() | nil, t}
def flush_one(store)
def flush_one(%__MODULE__{flush_index: nil} = store) do
{nil, store}
end
def flush_one(%__MODULE__{flush_index: flush_index, heap: heap, set: set} = store) do
record = Heap.root(heap)
expected_next_index = flush_index + 1
{result, store} =
if record != nil and record.index == expected_next_index do
updated_heap = Heap.pop(heap)
updated_set = MapSet.delete(set, record.index)
updated_store = %__MODULE__{store | heap: updated_heap, set: updated_set}
{record, updated_store}
else
# TODO: instead of nil use expected_next_index to put in Discontinuity metadata
# after https://github.com/membraneframework/membrane-core/issues/238 is done.
{nil, store}
end
{result, %__MODULE__{store | flush_index: expected_next_index}}
end
@doc """
Flushes the store until the first gap in sequence numbers of records
"""
@spec flush_ordered(t) :: {[Record.t() | nil], t}
def flush_ordered(store) do
flush_while(store, fn %__MODULE__{flush_index: flush_index}, %Record{index: index} ->
index == flush_index + 1
end)
end
@doc """
Flushes the store as long as it contains a buffer with the timestamp older than provided duration
"""
@spec flush_older_than(t, Membrane.Time.t()) :: {[Record.t() | nil], t}
def flush_older_than(store, max_age) do
max_age_timestamp = Membrane.Time.monotonic_time() - max_age
flush_while(store, fn _store, %Record{timestamp: timestamp} ->
timestamp <= max_age_timestamp
end)
end
@doc """
Returns all buffers that are stored in the `BufferStore`.
"""
@spec dump(t()) :: [Record.t()]
def dump(%__MODULE__{} = store) do
{records, _store} = flush_while(store, fn _store, _record -> true end)
records
end
@doc """
Returns timestamp (time of insertion) of a buffer with lowest index
"""
@spec first_record_timestamp(t()) :: Membrane.Time.t() | nil
def first_record_timestamp(%__MODULE__{heap: heap}) do
case Heap.root(heap) do
%Record{timestamp: time} -> time
nil -> nil
end
end
defp is_fresh_packet?(flush_index, index), do: index > flush_index
@spec flush_while(t, (t, Record.t() -> boolean), [Record.t() | nil]) ::
{[Record.t() | nil], t}
defp flush_while(%__MODULE__{heap: heap} = store, fun, acc \\ []) do
heap
|> Heap.root()
|> case do
nil ->
{Enum.reverse(acc), store}
record ->
if fun.(store, record) do
{record, store} = flush_one(store)
flush_while(store, fun, [record | acc])
else
{Enum.reverse(acc), store}
end
end
end
defp add_record(%__MODULE__{heap: heap, set: set} = store, %Record{} = record, record_rollover) do
if set |> MapSet.member?(record.index) do
store
else
%__MODULE__{store | heap: Heap.push(heap, record), set: MapSet.put(set, record.index)}
|> update_highest_incoming_index(record.index)
|> update_roc(record_rollover)
end
end
defp update_highest_incoming_index(
%__MODULE__{highest_incoming_index: last} = store,
added_index
)
when added_index > last or last == nil,
do: %__MODULE__{store | highest_incoming_index: added_index}
defp update_highest_incoming_index(
%__MODULE__{highest_incoming_index: last} = store,
added_index
)
when last >= added_index,
do: store
defp update_roc(%{rollover_count: roc} = store, :next),
do: %__MODULE__{store | rollover_count: roc + 1}
defp update_roc(store, _record_rollover), do: store
end
|
lib/membrane/rtp/jitter_buffer/buffer_store.ex
| 0.803251
| 0.54698
|
buffer_store.ex
|
starcoder
|
defmodule ESpec.Assertions.Accepted do
@moduledoc """
Defines the `accepted` assertion.

    it do: expect(SomeModule).to accepted(:func)
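
Call counts and argument lists can also be constrained (a sketch; the option
names mirror the `:pid` and `:count` keys handled below):

    it do: expect(SomeModule).to accepted(:func, [:arg], count: 2)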
"""
use ESpec.Assertions.Interface
defp match(subject, [func, args, opts]) do
pid = Keyword.get(opts, :pid) || :any
opts_count = Keyword.get(opts, :count) || :any
count = get_count(subject, func, args, pid)
matched =
if opts_count == :any do
count >= 1
else
count == opts_count
end
{matched, count}
end
defp get_count(subject, func, args, pid) do
Enum.count(:meck.history(subject), fn el ->
cond do
pid == :any && args == :any -> check_any_any(el, subject, func)
pid == :any -> check_any_pid(el, subject, func, args)
args == :any -> check_any_args(el, subject, func, pid)
true -> check_else(el, subject, func, pid, args)
end
end)
end
defp check_any_any(el, subject, func) do
case el do
{_, {^subject, ^func, _}, _return} -> true
_ -> false
end
end
defp check_any_pid(el, subject, func, args) do
case el do
{_, {^subject, ^func, ^args}, _return} -> true
_ -> false
end
end
defp check_any_args(el, subject, func, pid) do
case el do
{^pid, {^subject, ^func, _}, _return} -> true
_ -> false
end
end
defp check_else(el, subject, func, pid, args) do
case el do
{^pid, {^subject, ^func, ^args}, _return} -> true
_ -> false
end
end
defp success_message(subject, [func, args, opts], _result, positive) do
pid = Keyword.get(opts, :pid) || :any
opts_count = Keyword.get(opts, :count) || :any
count = if opts_count == :any, do: "at least once", else: "`#{opts_count}` times"
to = if positive, do: "accepted", else: "didn't accept"
"`#{inspect(subject)}` #{to} `#{inspect(func)}` with `#{inspect(args)}` in process `#{
inspect(pid)
}` #{count}."
end
defp error_message(subject, [func, args, opts], result, positive) do
to = if positive, do: "to", else: "not to"
but = "it accepted the function `#{result}` times"
pid = Keyword.get(opts, :pid) || :any
opts_count = Keyword.get(opts, :count) || :any
count = if opts_count == :any, do: "at least once", else: "`#{opts_count}` times"
"Expected `#{inspect(subject)}` #{to} accept `#{inspect(func)}` with `#{inspect(args)}` in process `#{
inspect(pid)
}` #{count}, but #{but}. The function was called with arguments #{inspect(args)}"
end
end
|
lib/espec/assertions/accepted.ex
| 0.636692
| 0.467149
|
accepted.ex
|
starcoder
|
defmodule EVM.Refunds.Sstore do
alias EVM.{ExecEnv, Gas}
# Refund given (added into refund counter) when the storage value is set to zero from non-zero.
@storage_refund 15_000
@spec refund({integer(), integer()}, ExecEnv.t()) :: {ExecEnv.t(), integer()}
def refund({key, new_value}, exec_env) do
if exec_env.config.eip1283_sstore_gas_cost_changed do
eip1283_sstore_refund({key, new_value}, exec_env)
else
basic_sstore_refund({key, new_value}, exec_env)
end
end
defp basic_sstore_refund({key, new_value}, exec_env) do
{updated_exec_env, current_value} = ExecEnv.storage(exec_env, key)
refund =
case current_value do
{:ok, value} ->
if value != 0 && new_value == 0 do
@storage_refund
else
0
end
_ ->
0
end
{updated_exec_env, refund}
end
defp eip1283_sstore_refund({key, new_value}, exec_env) do
{updated_exec_env, initial_value} = initial_value(exec_env, key)
{updated_exec_env, current_value} = current_value(updated_exec_env, key)
refund = get_refund(initial_value, current_value, new_value)
{updated_exec_env, refund}
end
defp get_refund(_, _current_value = value, _new_value = value), do: 0
defp get_refund(0, 0, _new_value), do: 0
defp get_refund(_initial_value = value, _current_value = value, _new_value = 0),
do: @storage_refund
defp get_refund(initial_value, current_value, new_value) do
first_refund =
cond do
initial_value != 0 && current_value == 0 ->
-@storage_refund
initial_value != 0 && new_value == 0 ->
@storage_refund
true ->
0
end
second_refund =
cond do
initial_value == new_value && initial_value == 0 ->
Gas.g_sset() - Gas.g_sload()
initial_value == new_value ->
Gas.g_sreset() - Gas.g_sload()
true ->
0
end
first_refund + second_refund
end
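# A quick sanity check of the clauses above (a sketch, not from the original
# source): resetting a slot from its unchanged non-zero value to zero matches
# the `(value, value, 0)` clause and yields the full storage refund:
#
#     get_refund(1, 1, 0) #=> 15_000 (@storage_refund)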
defp initial_value(exec_env, key) do
{updated_exec_env, result} = ExecEnv.initial_storage(exec_env, key)
value =
case result do
:account_not_found -> 0
:key_not_found -> 0
{:ok, value} -> value
end
{updated_exec_env, value}
end
defp current_value(exec_env, key) do
{updated_exec_env, result} = ExecEnv.storage(exec_env, key)
value =
case result do
:account_not_found -> 0
:key_not_found -> 0
{:ok, value} -> value
end
{updated_exec_env, value}
end
end
|
apps/evm/lib/evm/refunds/sstore.ex
| 0.551332
| 0.418162
|
sstore.ex
|
starcoder
|
defmodule Coders.Repo do
@moduledoc """
Maintain DB connection and implement functions which handle models.
Most DB functions have two forms: the normal form and the bang form
(with a "!" suffix). Usually, the normal form will return `#{__MODULE__}.rethink_resp`,
while the bang form will return the data directly, or raise an error if one happens.
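
For example (a sketch, assuming a `User` model module):

    Coders.Repo.get(User, some_id)   # => %RethinkDB.Collection{...}
    Coders.Repo.get!(User, some_id)  # => the record map, `nil` if absent, or raises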
TODO
Implementing all these functions is a bit crazy here.
We should define a `run` macro. With which, we can do things like:
Repo.run(with: User), do: get(id)
Repo.run(with: User), do: get_all([id1, id2, ..., idN]) |> delete
Repo.run(with: User), do: map(...) |> group(...)
Without this `run` macro, we lose a lot of the flexibility of ReQL.
"""
use RethinkDB.Connection
alias RethinkDB.Query, as: Q
@typedoc "Response from RethinkDB."
@type rethink_resp :: %RethinkDB.Record{} | %RethinkDB.Collection{} | %RethinkDB.Feed{} | %RethinkDB.Response{}
@db_name Keyword.get(Application.get_env(:coders, Coders.Repo), :database, "coders")
@doc """
Insert a model's data into the model's table.
"""
@spec insert(Ecto.Changeset.t) :: rethink_resp
def insert(%Ecto.Changeset{model: model, changes: changes, valid?: true}) do
changes = Map.merge(%{created_at: Ecto.DateTime.to_string(Ecto.DateTime.utc)}, changes)
table_of(model) |> Q.insert(changes) |> run
end
def insert(%Ecto.Changeset{valid?: false} = cs) do
raise Coders.InvalidData, cs
end
def insert!(changeset) do
case process_rethinkdb_record(insert(changeset), "generated_keys") do
true -> id(changeset)
keys -> keys
end
end
@doc """
Update a record of the model.
"""
def update(model, id, updates) when is_atom(model) and is_map(updates) do
table_of(model) |> Q.get(id) |> Q.update(updates) |> run
end
@doc """
Delete a record of the model by ID (primary key).
"""
def delete(model, id) when is_atom(model) and is_list(id) do
table_of(model) |> Q.get_all(id) |> Q.delete |> run
end
def delete(model, id) when is_atom(model) do
delete(model, [id])
end
def delete!(model, id) when is_atom(model) do
delete(model, id) |> process_rethinkdb_record("deleted")
end
@doc """
Delete all records of a model.
"""
def delete_all(model) when is_atom(model) do
table_of(model) |> Q.delete |> run
end
def delete_all!(model) when is_atom(model) do
delete_all(model) |> process_rethinkdb_record("deleted")
end
@doc """
Get all records of a model from DB.
"""
def get_all(model) do
table_of(model) |> run
end
def get_all!(model) do
case get_all(model) do
%RethinkDB.Collection{data: data} -> data
_ -> raise "Could not read data from DB." # TODO should do better exception
end
end
@doc """
Get a record of a model by the primary key.
"""
def get(model, id) when is_list(id) do
table_of(model) |> Q.get_all(id) |> run
end
def get(model, id), do: get(model, [id])
def get!(model, id) do
case get(model, id) do
%RethinkDB.Collection{data: []} -> nil
%RethinkDB.Collection{data: [data]} -> data
%RethinkDB.Collection{data: data} -> data
error -> raise "Could not read data from DB. #{inspect error}" # TODO should do better exception
end
end
@doc """
Create the database.
"""
def create_db, do: Q.db_create(@db_name) |> run
@doc """
Drop the database.
"""
def drop_db, do: Q.db_drop(@db_name) |> run
@doc """
Create a table defined by the model Struct.
"""
def create_table(model) do
p_k = case pk(model) do
[k] -> k
keys -> keys
end
db
|> Q.table_create(table_name(model), %{primary_key: p_k})
|> run
end
@doc false
# Return the db query.
defp db, do: Q.db(@db_name)
@doc false
# Return the table name of a model or a model's changeset.
defp table_name(model_or_changeset)
defp table_name(%Ecto.Changeset{model: model}), do: table_name(model.__struct__)
defp table_name(%{__struct__: mod}) when is_atom(mod), do: table_name(mod)
defp table_name(model) when is_atom(model), do: model.__schema__(:source)
@doc false
# Return the primary key of a model.
defp pk(%{__struct__: mod}) when is_atom(mod), do: pk(mod)
defp pk(model) when is_atom(model) do
model.__schema__(:primary_key)
end
@doc false
# Return the table query of a model.
defp table_of(model_or_changeset) do
Q.table(db, table_name(model_or_changeset))
end
@doc false
# Retrieve the value of the primary key of a model or changeset.
defp id(%Ecto.Changeset{model: model, changes: changes}) do
Enum.reduce pk(model), [], fn k, acc ->
[changes[k] | acc]
end
end
defp id(model) do
Enum.reduce pk(model), [], fn k, acc ->
[model[k] | acc]
end
end
defp process_rethinkdb_record(resp, concern \\ :empty) do
case resp do
%RethinkDB.Record{data: %{"errors" => 0, ^concern => keys}} -> keys
%RethinkDB.Record{data: %{"errors" => 0}} -> true
%RethinkDB.Record{data: %{"first_error" => error}} -> raise Coders.InvalidData, error
end
end
end
|
lib/coders/repo.ex
| 0.601125
| 0.46794
|
repo.ex
|
starcoder
|
defmodule Cuda.Test.GraphHelpers do
@moduledoc """
Provides helper functions for testing the Cuda.Graph module
"""
# graphics: ┌┐└┘─│▶⎡⎣⎤⎦┴┤├┬
alias Cuda.Graph
alias Cuda.Graph.Node
alias Cuda.Graph.Pin
defmodule Double do
@moduledoc """
Implements a node with two input and two output pins and a specific type.
Type is set by using a key :type in options.
"""
use Node
def __pins__(_) do
[input(:input1, :i8), input(:input2, :i8),
output(:output1, :i8), output(:output2, :i8)]
end
def __type__(assigns) do
Keyword.get(assigns.options, :type, :virtual)
end
end
defmodule Single do
@moduledoc """
Implements a node with one input and one output pin and a specific type.
Type is set by using a key :type in options.
"""
use Node
def __pins__(_) do
[input(:input, :i8), output(:output, :i8)]
end
def __type__(assigns) do
Keyword.get(assigns.options, :type, :virtual)
end
end
defmodule Producer do
@moduledoc """
Implements a node with one producer pin and a specific type.
Type is set by using a key :type in options.
"""
use Node
def __pins__(_) do
[producer(:producer, :i8)]
end
def __type__(assigns) do
Keyword.get(assigns.options, :type, :virtual)
end
end
defmodule Custom do
@moduledoc """
Implements a node with a custom number of input and output pins and a specific type.
Type is set by using a key :type in options.
Number of input and output pins is set by using key :io, wich takes a tuple
{input_pins_number, output_pins_number}
"""
use Node
def __pins__(assigns) do
{i, o} = Keyword.get(assigns.options, :io, {1, 1})
inputs = i > 0 && (for x <- 1..i, do: input(String.to_atom("input#{x}"), :i8)) || []
outputs = o > 0 && (for x <- 1..o, do: output(String.to_atom("output#{x}"), :i8)) || []
inputs ++ outputs
end
def __type__(assigns) do
Keyword.get(assigns.options, :type, :virtual)
end
end
defmodule SimpleGraph do
@moduledoc """
Represents a simple graph
"""
use Graph
def __pins__(_) do
[input(:input, :i8), output(:output, :i8)]
end
def __graph__(graph) do
graph
|> add(:a, Single)
|> link(:input, {:a, :input})
|> link({:a, :output}, :output)
end
end
import Graph, except: [graph: 1, graph: 2]
@doc """
Returns a specified graph for testing
"""
@spec graph(atom | list) :: Graph.t
def graph(opts \\ [])
# [i]──▶[input (a) output]─x─▶[o]
def graph(:unconnected) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o, type: :output, data_type: :i8}])
|> add(:a, Single)
|> link(:i, {:a, :input})
end
# [i]──▶[input (a) output]──▶[o]
def graph(:i1_single1_o1) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o, type: :output, data_type: :i8}])
|> add(:a, Single)
|> link(:i, {:a, :input})
|> link({:a, :output}, :o)
end
# [i]─┬─▶[input (a) output]──▶[o1]
# └─▶[input (b) output]──▶[o2]
def graph(:i1_single2_o2) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> link(:i, {:a, :input})
|> link(:i, {:b, :input})
|> link({:a, :output}, :o1)
|> link({:b, :output}, :o2)
end
# [i1]──▶⎡input1 (a) output1⎤──▶[o1]
# [i2]──▶⎣input2 output2⎦──▶[o2]
def graph(:i2_double1_o2) do
graph(id: :g,
pins: [%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :i2, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Double)
|> link(:i1, {:a, :input1})
|> link(:i2, {:a, :input2})
|> link({:a, :output1}, :o1)
|> link({:a, :output2}, :o2)
end
# [i]──▶⎡input1 (a) output1⎤──▶[o]
# ┌─▶⎣input2 output2⎦─┐
# └───────────────────────┘
def graph(:i1_double1_o1) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o, type: :output, data_type: :i8}])
|> add(:a, Double)
|> link(:i, {:a, :input1})
|> link({:a, :output1}, :o)
|> link({:a, :output2}, {:a, :input2})
end
# [i]──▶[input (a) output]─┬──────────────────────▶[o1]
# └─▶[input (b) output]──▶[o2]
def graph(:i1_single1_single1_o2) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> link(:i, {:a, :input})
|> link({:a, :output}, :o1)
|> link({:a, :output}, {:b, :input})
|> link({:b, :output}, :o2)
end
# [i]──▶[input──▶[x-input (x-a) x-output]──▶output]──▶[o]
def graph(:i1_graph1_o1) do
graph(id: :g,
pins: [%Pin{id: :i, type: :input, data_type: :i8},
%Pin{id: :o, type: :output, data_type: :i8}])
|> add(:x, SimpleGraph)
|> link(:i, {:x, :input})
|> link({:x, :output}, :o)
end
# [i1]──▶[input (a) output]──┬──[input (b) output]──▶[input (d) output]──▶[o1]
# └─▶[input (c) output]───────────────────────▶[o2]
def graph(:i1_single4_o2) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> add(:c, Single)
|> add(:d, Single)
|> link(:i1, {:a, :input})
|> link({:a, :output}, {:b, :input})
|> link({:a, :output}, {:c, :input})
|> link({:b, :output}, {:d, :input})
|> link({:d, :output}, :o1)
|> link({:c, :output}, :o2)
end
# [i1]──▶[input (a) output]──┬──[input (b) output]───────────────────────▶[o1]
# └─▶[input (c) output]──▶[input (d) output]──▶[o2]
def graph(:i1_single4_o2_inverse) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> add(:c, Single)
|> add(:d, Single)
|> link(:i1, {:a, :input})
|> link({:a, :output}, {:b, :input})
|> link({:a, :output}, {:c, :input})
|> link({:c, :output}, {:d, :input})
|> link({:b, :output}, :o1)
|> link({:d, :output}, :o2)
end
# ┌───▶[input (a) output]───▶[input (c) output]───▶[o1]
# [i1]─│
# └───▶[input (b) output]─────────────────────────▶[o2]
def graph(:i1_single3_o2) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> add(:c, Single)
|> link(:i1, {:a, :input})
|> link(:i1, {:b, :input})
|> link({:a, :output}, {:c, :input})
|> link({:c, :output}, :o1)
|> link({:b, :output}, :o2)
end
# ┌───▶[input (a) output]─────────────────────────▶[o1]
# [i1]─│
# └───▶[input (b) output]───▶[input (c) output]───▶[o2]
def graph(:i1_single3_o2_inverse) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> add(:c, Single)
|> link(:i1, {:a, :input})
|> link(:i1, {:b, :input})
|> link({:b, :output}, {:c, :input})
|> link({:a, :output}, :o1)
|> link({:c, :output}, :o2)
end
# [i1]─────▶[input (a) output]─────────────────────────▶[o1]
# [ (b) producer]───▶[input (c) output]───▶[o2]
def graph(:i1_producer1_single2_o2) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Producer)
|> add(:c, Single)
|> link(:i1, {:a, :input})
|> link({:b, :producer}, {:c, :input})
|> link({:a, :output}, :o1)
|> link({:c, :output}, :o2)
end
# [i1]──▶[input (a) output]──▶[input (b) output]──▶[input (c) output]──▶[o1]
def graph(:i1_single3_o1) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8}])
|> add(:a, Single)
|> add(:b, Single)
|> add(:c, Single)
|> link(:i1, {:a, :input})
|> link({:a, :output}, {:b, :input})
|> link({:b, :output}, {:c, :input})
|> link({:c, :output}, :o1)
end
def graph(:longest_chain_test) do
graph(id: :graph,
pins: [
%Pin{id: :i1, type: :input, data_type: :i8},
%Pin{id: :i2, type: :input, data_type: :i8},
%Pin{id: :o1, type: :output, data_type: :i8},
%Pin{id: :o2, type: :output, data_type: :i8}])
|> add(:a, Custom, type: :virtual, io: {1, 2})
|> add(:b, Single, type: :virtual)
|> add(:c, Custom, type: :virtual, io: {1, 3})
|> add(:d, Double, type: :gpu)
|> add(:e, Single, type: :virtual)
|> add(:f, Single, type: :gpu)
|> add(:g, Single, type: :virtual)
|> add(:h, Single, type: :gpu)
|> add(:i, Single, type: :virtual)
|> add(:j, Custom, type: :gpu, io: {2, 1})
|> add(:k, Custom, type: :gpu, io: {2, 1})
|> add(:l, Single, type: :gpu)
|> add(:m, Single, type: :virtual)
|> add(:n, Custom, type: :virtual, io: {2, 1})
|> add(:o, Single, type: :gpu)
|> link(:i1, {:a, :input1})
|> link(:i2, {:b, :input})
|> link({:a, :output1}, {:c, :input1})
|> link({:a, :output2}, {:d, :input1})
|> link({:b, :output}, {:d, :input2})
|> link({:c, :output1}, {:e, :input})
|> link({:c, :output2}, {:f, :input})
|> link({:c, :output3}, {:g, :input})
|> link({:d, :output1}, {:h, :input})
|> link({:d, :output2}, {:i, :input})
|> link({:e, :output}, {:j, :input1})
|> link({:f, :output}, {:j, :input2})
|> link({:g, :output}, {:k, :input1})
|> link({:h, :output}, {:k, :input2})
|> link({:i, :output}, {:l, :input})
|> link({:j, :output1}, {:m, :input})
|> link({:k, :output1}, {:n, :input2})
|> link({:l, :output}, {:o, :input})
|> link({:m, :output}, {:n, :input1})
|> link({:n, :output1}, :o1)
|> link({:o, :output}, :o2)
end
def graph(:network_test) do
graph(id: :network,
pins: [
%Pin{id: :input, type: :input, data_type: :i8},
%Pin{id: :reply, type: :input, data_type: :i8},
%Pin{id: :output, type: :output, data_type: :i8}])
|> add(:conv, Single)
|> add(:fc, Single)
|> add(:error, Custom, io: {2, 1})
|> add(:back_fc, Custom, io: {3, 1})
|> add(:back_conv, Custom, io: {3, 1})
|> link(:input, {:back_conv, :input1})
|> link(:input, {:conv, :input})
|> link(:reply, {:error, :input2})
|> link({:conv, :output}, {:back_conv, :input2})
|> link({:conv, :output}, {:back_fc, :input1})
|> link({:conv, :output}, {:fc, :input})
|> link({:fc, :output}, {:back_fc, :input2})
|> link({:fc, :output}, {:error, :input1})
|> link({:error, :output1}, {:back_fc, :input3})
|> link({:back_fc, :output1}, {:back_conv, :input3})
|> link({:back_conv, :output1}, :output)
end
def graph(opts) do
%Graph{} |> Map.merge(opts |> Enum.into(%{}))
end
@doc """
Adds a nested computation graph to a predefined graph
"""
@spec nested_graph(predefined_graph_name :: atom, nested_graph_name :: atom) :: Cuda.Graph.t
def nested_graph(predefined, nested \\ :nested) do
Code.ensure_loaded(Graph.ComputationGraph)
nested = Graph.Factory.new(%Cuda.Graph{}, nested, Graph.ComputationGraph, [], [])
predefined
|> graph()
|> Graph.GraphProto.add(nested)
end
@doc """
Converts nodes to their ids
"""
@spec nodes2ids([Cuda.Graph.Node.t]) :: [term]
def nodes2ids([]), do: []
def nodes2ids([val | rest]) when is_list(val) do
[Enum.map(val, &(&1.id)) | nodes2ids(rest)]
end
def nodes2ids([val | rest]) do
[val.id | nodes2ids(rest)]
end
def sort_node_ids(nodes) when is_list(nodes) do
nodes
|> Enum.map(&sort_node_ids/1)
|> Enum.sort(fn
a, b when is_list(a) and is_list(b) -> length(a) >= length(b)
a, b -> a <= b
end)
end
def sort_node_ids(x), do: x
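# Sketch of the comparator above: nested lists (sorted recursively) are
# ordered by descending length relative to each other, plain ids sort
# ascending, and in Erlang term order atoms come before lists, e.g.
# sort_node_ids([:b, [:x, :y], :a]) #=> [:a, :b, [:x, :y]]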
@doc """
Checks whether two nodes are connected in the given order; the graph is expanded before its links are inspected
"""
@spec connected?(Cuda.Graph.t, current_node_id :: atom, next_node_id :: atom) :: boolean
def connected?(graph, current_node_id, next_node_id) do
graph = Cuda.Graph.Processing.expand(graph)
graph.links
|> Enum.any?(fn {{cnode_id, _}, {nnode_id, _}} ->
cnid = if is_tuple(cnode_id) do
cnode_id
|> Tuple.to_list()
|> List.last()
else
cnode_id
end
nnid = if is_tuple(nnode_id) do
nnode_id
|> Tuple.to_list()
|> List.last()
else
nnode_id
end
cnid == current_node_id and nnid == next_node_id
end)
end
def callback(action, args, state)
def callback(action, {{node1, _pin1}, {node2, _pin2}}, state) do
IO.puts("#{action}: #{node1.id} - #{node2.id}")
{:ok, state ++ [{action, {node1.id, node2.id}}]}
end
def callback(action, {node, _pin}, state) do
IO.puts("#{action}: #{node.id}")
{:ok, state ++ [{action, node.id}]}
end
def callback(action, args, _state) do
{action, args}
end
def view(%{id: id, nodes: n, links: l}) do
IO.puts("Graph: #{id}")
IO.puts("Nodes:")
Enum.each(n, & IO.puts("#{&1.id}"))
IO.puts("Links:")
Enum.each(l, fn
{{:__self__, gpin}, {nid, npin}} -> IO.puts("#{gpin} -> {#{nid}, #{npin}}")
{{nid, npin}, {:__self__, gpin}} -> IO.puts("{#{nid}, #{npin}} -> #{gpin}")
{{nid, npin}, {nid2, npin2}} -> IO.puts("{#{nid}, #{npin}} -> {#{nid2}, #{npin2}}")
end)
end
def update_node(g, _, []), do: g
def update_node(g, node_id, [opt | rest]) do
g = update_node(g, node_id, opt)
update_node(g, node_id, rest)
end
def update_node(g, node_id, {key, value}) do
with index when not is_nil(index) <- Enum.find_index(g.nodes, & &1.id == node_id) do
nodes = List.update_at(g.nodes, index, fn node ->
case key do
:id -> %{node | id: value}
:type -> %{node | type: value}
:pins -> %{node | pins: value}
_ -> node
end
end)
%{g | nodes: nodes}
else
nil -> g
end
end
end
|
test/support/graph_helpers.ex
| 0.733738
| 0.603581
|
graph_helpers.ex
|
starcoder
|
defmodule Toby.App.Views.Load do
@moduledoc """
Builds a view for displaying information about system load
"""
import Ratatouille.Constants, only: [color: 1]
import Ratatouille.View
alias Toby.Util.Selection
@style_selected [
color: color(:black),
background: color(:white)
]
def render(%{
data: %{
utilization: utilization,
scheduler_count: scheduler_count,
memory: memory,
io: io
},
cursor_y: %{position: position}
})
when position <= 8 do
util_opts = build_utilization_opts(scheduler_count)
visible_util_opts = Selection.slice(util_opts, 6, position)
util_series = selected_utilization_series(utilization, util_opts, position)
row do
column size: 12 do
panel title: "Scheduler Utilization (%)" do
row do
column size: 9 do
chart(type: :line, series: fill_series(util_series), height: 10)
end
column size: 3 do
panel title: "Selection", height: 10 do
table do
for {{label, _key}, idx} <- visible_util_opts do
table_row(if(idx == position, do: @style_selected, else: [])) do
table_cell(content: label)
end
end
end
end
end
end
end
row do
column size: 6 do
panel title: "Memory Usage (MB)", height: 15 do
chart(type: :line, series: fill_series(memory), height: 10)
end
end
column size: 6 do
panel title: "IO Usage (B)", height: 15 do
chart(type: :line, series: fill_series(io), height: 10)
end
end
end
end
end
end
defp selected_utilization_series(utilization, opts, cursor) do
# Find the selected utilization series
{{_label, key}, _idx} = Enum.at(opts, cursor)
for sample <- utilization, do: sample[key]
end
defp build_utilization_opts(scheduler_count) do
scheduler_opts = for i <- 1..scheduler_count, do: {"Scheduler #{i}", i}
Enum.with_index([{"Total", :total} | scheduler_opts])
end
defp fill_series(series) when length(series) >= 60, do: series
defp fill_series(series), do: List.duplicate(0, 60 - length(series)) ++ series
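# Sketch: fill_series/1 left-pads short series with zeros so every chart
# spans exactly 60 samples; a 2-element series gains 58 leading zeros.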
end
|
lib/toby/app/views/load.ex
| 0.804675
| 0.469703
|
load.ex
|
starcoder
|
defmodule GrowthBook.Condition do
@moduledoc """
Functionality for evaluating conditions.
You should not (have to) use any of these functions in your own application. They are documented
for library developers only. Breaking changes in this module will not be considered breaking
changes in the library's public API (or cause a minor/major semver update).
"""
alias GrowthBook.Context
alias GrowthBook.Helpers
@typedoc """
Condition
A condition is evaluated against `t:GrowthBook.Context.attributes/0` and used to target features/experiments
to specific users.
The syntax is inspired by MongoDB queries. Here is an example:
```
%{
"country" => "US",
"browser" => %{
"$in" => ["firefox", "chrome"]
},
"email" => %{
"$not" => %{
"$regex" => "@gmail.com$"
}
}
}
```
"""
@type t() :: map()
@typedoc "A condition value"
@type condition_value() :: term()
@doc """
Evaluates a condition against the given attributes.
Conditions are MongoDB-query-like expressions.
## Available expressions:
### Expression groups
- `$or`: Logical OR
- `$nor`: Logical OR, but inverted
- `$and`: Logical AND
- `$not`: Logical NOT
### Simple expressions
- `$eq`: `left == right`
- `$ne`: `left != right`
- `$lt`: `left < right`
- `$lte`: `left <= right`
- `$gt`: `left > right`
- `$gte`: `left >= right`
- `$exists`: `(left in [nil, :undefined]) != right`
- `$type`: `typeof left == right`
- `$regex`: `right |> Regex.compile!() |> Regex.match?(left)`
### Array expressions
- `$in`: `left in right`
- `$nin`: `left not in right`
- `$elemMatch`: evaluates the given condition (`right`) against each element of `left`, matching if any element passes (with support for expressions)
- `$all`: matches when every condition in `right` is satisfied by at least one element of `left` (without support for expressions)
- `$size`: `eval_condition_value(right, length(left))`
## Examples
iex> GrowthBook.Condition.eval_condition(%{"hello" => "world"}, %{
...> "hello" => "world"
...> })
true
iex> GrowthBook.Condition.eval_condition(%{"hello" => "world"}, %{
...> "hello" => "optimizely"
...> })
false
"""
@spec eval_condition(Context.attributes(), t()) :: boolean()
def eval_condition(attributes, %{"$or" => conditions}),
do: eval_or(attributes, conditions)
def eval_condition(attributes, %{"$nor" => conditions}),
do: not eval_or(attributes, conditions)
def eval_condition(attributes, %{"$and" => conditions}),
do: eval_and(attributes, conditions)
def eval_condition(attributes, %{"$not" => conditions}),
do: not eval_condition(attributes, conditions)
def eval_condition(attributes, conditions) do
Enum.reduce_while(conditions, true, fn {path, condition}, acc ->
if eval_condition_value(condition, get_path(attributes, path)) do
{:cont, acc}
else
{:halt, false}
end
end)
end
@spec eval_condition_value(condition_value(), term()) :: boolean()
defp eval_condition_value(condition, value) when is_binary(condition),
do: to_string(value) == condition
defp eval_condition_value(condition, value) when is_number(condition) and is_number(value),
do: value == condition
defp eval_condition_value(condition, value) when is_float(condition) and is_binary(value),
do: {condition, ""} == Float.parse(value)
defp eval_condition_value(condition, value) when is_integer(condition) and is_binary(value),
do: {condition, ""} == Integer.parse(value)
defp eval_condition_value(condition, value) when is_boolean(condition),
do: Helpers.cast_boolish(value) == condition
defp eval_condition_value(condition, value) do
if is_list(condition) or not operator_object?(condition) do
condition == value
else
Enum.reduce_while(condition, true, fn {operator, expected}, acc ->
if eval_operator_condition(operator, value, expected) do
{:cont, acc}
else
{:halt, false}
end
end)
end
end
@spec eval_operator_condition(String.t(), term(), term()) :: boolean()
defp eval_operator_condition("$eq", left, right), do: left == right
defp eval_operator_condition("$ne", left, right), do: left != right
# Perform JavaScript-like type coercion
# see https://262.ecma-international.org/5.1/#sec-11.8.5
@type_coercion_operators ["$lt", "$lte", "$gt", "$gte"]
defp eval_operator_condition(operator, left, right)
when is_number(left) and is_binary(right) and operator in @type_coercion_operators do
case Float.parse(right) do
{right, _rest} -> eval_operator_condition(operator, left, right)
_unparseable -> false
end
end
defp eval_operator_condition(operator, left, right)
when is_number(right) and is_binary(left) and operator in @type_coercion_operators do
case Float.parse(left) do
{left, _rest} -> eval_operator_condition(operator, left, right)
_unparseable -> false
end
end
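# Sketch of the coercion above: eval_operator_condition("$gt", 10, "9.5")
# parses "9.5" into 9.5 and evaluates 10 > 9.5, returning true; a string
# that Float.parse/1 cannot handle makes the comparison return false.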
defp eval_operator_condition("$lt", left, right), do: left < right
defp eval_operator_condition("$lte", left, right), do: left <= right
defp eval_operator_condition("$gt", left, right), do: left > right
defp eval_operator_condition("$gte", left, right), do: left >= right
defp eval_operator_condition("$exists", left, right),
do: if(right, do: left not in [nil, :undefined], else: left in [nil, :undefined])
defp eval_operator_condition("$in", left, right), do: left in right
defp eval_operator_condition("$nin", left, right), do: left not in right
defp eval_operator_condition("$not", left, right), do: not eval_condition_value(right, left)
defp eval_operator_condition("$size", left, right) when is_list(left),
do: eval_condition_value(right, length(left))
defp eval_operator_condition("$size", _left, _right), do: false
defp eval_operator_condition("$elemMatch", left, right), do: elem_match(left, right)
defp eval_operator_condition("$all", left, right) when is_list(left) and is_list(right) do
Enum.reduce_while(right, true, fn condition, acc ->
if Enum.any?(left, &eval_condition_value(condition, &1)) do
{:cont, acc}
else
{:halt, false}
end
end)
end
defp eval_operator_condition("$all", _left, _right), do: false
defp eval_operator_condition("$regex", left, right) do
case Regex.compile(right) do
{:ok, regex} -> Regex.match?(regex, left)
{:error, _err} -> false
end
end
defp eval_operator_condition("$type", left, right), do: get_type(left) == right
defp eval_operator_condition(operator, _left, _right) do
IO.warn("Unknown operator: #{operator}")
false
end
@spec elem_match(term(), term()) :: boolean()
defp elem_match(left, right) when is_list(left) do
check =
if operator_object?(right),
do: &eval_condition_value(right, &1),
else: &eval_condition(&1, right)
Enum.reduce_while(left, false, fn value, acc ->
if check.(value) do
{:halt, true}
else
{:cont, acc}
end
end)
end
defp elem_match(_left, _right), do: false
@spec eval_or(Context.attributes(), [t()]) :: boolean()
defp eval_or(_attributes, []), do: true
defp eval_or(attributes, [condition]), do: eval_condition(attributes, condition)
defp eval_or(attributes, [condition | conditions]),
do: eval_condition(attributes, condition) or eval_or(attributes, conditions)
@spec eval_and(Context.attributes(), [t()]) :: boolean()
defp eval_and(_attributes, []), do: true
defp eval_and(attributes, [condition | conditions]),
do: eval_condition(attributes, condition) and eval_and(attributes, conditions)
@spec operator_object?(t()) :: boolean()
defp operator_object?(condition) when is_map(condition) do
Enum.all?(condition, fn
{"$" <> _key, _value} -> true
_non_operator -> false
end)
end
defp operator_object?(_condition), do: false
# Given attributes and a dot-separated path string, returns the value of
# the attribute at the path
@doc false
@spec get_path(map(), String.t()) :: term() | :undefined
def get_path(map, path) do
path = String.split(path, ".")
do_get_path(map, path)
end
defp do_get_path(value, []), do: value
defp do_get_path(value, [key | path]) when is_map_key(value, key) do
%{^key => next_value} = value
do_get_path(next_value, path)
end
defp do_get_path(_value, _path), do: :undefined
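# Sketch: get_path(%{"user" => %{"id" => 1}}, "user.id") #=> 1
# get_path(%{"user" => %{}}, "user.id") #=> :undefined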
# Returns the data type of the passed argument
@doc false
@spec get_type(term()) :: String.t()
def get_type(attribute_value) when is_binary(attribute_value), do: "string"
def get_type(attribute_value) when is_number(attribute_value), do: "number"
def get_type(attribute_value) when is_boolean(attribute_value), do: "boolean"
def get_type(attribute_value) when is_list(attribute_value), do: "array"
def get_type(attribute_value) when is_map(attribute_value), do: "object"
def get_type(attribute_value) when is_nil(attribute_value), do: "null"
def get_type(:undefined), do: "undefined"
def get_type(_attribute_value), do: "unknown"
end
|
lib/growth_book/condition.ex
| 0.930514
| 0.789234
|
condition.ex
|
starcoder
|
defmodule Rayray.Renderings.Scene do
alias Rayray.Camera
alias Rayray.Canvas
alias Rayray.Lights
alias Rayray.Material
alias Rayray.Matrix
alias Rayray.Sphere
alias Rayray.Transformations
alias Rayray.Tuple
alias Rayray.World
def do_it() do
# floor
floor = Sphere.new()
material = Material.new()
material = %{material | color: Tuple.color(1, 0.9, 0.9), specular: 0}
floor = %{floor | transform: Matrix.scaling(10, 0.01, 10), material: material}
# left wall
left_wall = Sphere.new()
left_wall_transform =
Matrix.translation(0, 0, 5)
|> Matrix.multiply(Matrix.rotation_y(-1 * :math.pi() / 4))
|> Matrix.multiply(Matrix.rotation_z(:math.pi() / 2))
|> Matrix.multiply(Matrix.scaling(10, 0.01, 10))
left_wall_material = floor.material
left_wall = %{left_wall | transform: left_wall_transform, material: left_wall_material}
# right wall
right_wall = Sphere.new()
right_wall_transform =
Matrix.translation(0, 0, 5)
|> Matrix.multiply(Matrix.rotation_y(:math.pi() / 4))
|> Matrix.multiply(Matrix.rotation_z(:math.pi() / 2))
|> Matrix.multiply(Matrix.scaling(10, 0.01, 10))
right_wall_material = floor.material
right_wall = %{right_wall | transform: right_wall_transform, material: right_wall_material}
# middle
middle = Sphere.new()
middle_material = Material.new()
middle_material = %{
middle_material
| color: Tuple.color(0.1, 1, 0.5),
diffuse: 0.7,
specular: 0.3
}
middle = %{middle | transform: Matrix.translation(-0.5, 1, 0.5), material: middle_material}
# right
right = Sphere.new()
right_material = Material.new()
right_material = %{
right_material
| color: Tuple.color(0.5, 1, 0.1),
diffuse: 0.7,
specular: 0.3
}
right = %{
right
| transform:
Matrix.multiply(Matrix.translation(1.5, 0.5, -0.5), Matrix.scaling(0.5, 0.5, 0.5)),
material: right_material
}
# left
left = Sphere.new()
left_material = Material.new()
left_material = %{
left_material
| color: Tuple.color(1, 0.8, 0.1),
diffuse: 0.7,
specular: 0.3
}
left = %{
left
| transform:
Matrix.multiply(Matrix.translation(-1.5, 0.33, -0.75), Matrix.scaling(0.33, 0.33, 0.33)),
material: left_material
}
world = World.new()
world = %{
world
| light: Lights.point_light(Tuple.point(-10, 10, -10), Tuple.color(1, 1, 1)),
objects: [floor, left_wall, right_wall, middle, right, left]
}
camera = Camera.new(1000, 500, :math.pi() / 3)
camera = %{
camera
| transform:
Transformations.view_transform(
Tuple.point(0, 1.5, -5),
Tuple.point(0, 1, 0),
Tuple.vector(0, 1, 0)
)
}
IO.puts("started rendering")
canvas = Camera.render(camera, world)
IO.puts("done rendering")
ppm = Canvas.canvas_to_ppm(canvas)
IO.puts("Done ppm")
File.write!("scene_shadowed_big.ppm", ppm)
end
end
|
lib/rayray/renderings/scene.ex
| 0.830594
| 0.538134
|
scene.ex
|
starcoder
|
defmodule BitwiseIp.Blocks do
@moduledoc """
Functions for handling lists of bitwise IP blocks.
Because the `BitwiseIp.Block` representation relies on a binary prefix, it's
not possible to express certain ranges with a single block. For instance, the
range of addresses between `192.168.12.0` and `192.168.16.255` might make
intuitive sense, but the binary representation of the third byte presents a
challenge:
* `12` = `0b00001100`
* `13` = `0b00001101`
* `14` = `0b00001110`
* `15` = `0b00001111`
* `16` = `0b00010000`
Notice that `12`-`15` share the prefix `0b000011xx`, so those addresses could
be covered by the CIDR block `192.168.12.0/22`. (The prefix length is 22 for
the 16 bits of `192.168.` plus the 6 most significant bits of the third
byte.) But that would *not* cover the `192.168.16.x` addresses:
```
iex> BitwiseIp.Block.parse!("192.168.12.0/22")
...> |> Enum.take_every(256)
...> |> Enum.map(&to_string/1)
["192.168.12.0", "192.168.13.0", "192.168.14.0", "192.168.15.0"]
```
All this is to say that there are general limitations to the expressiveness
of a single CIDR range, so it's natural that most applications will deal with
a collection of blocks at a time - conceptually, a list of lists of IP
addresses.
Whereas bitwise IP blocks have a straightforward binary representation, a
list of blocks is somewhat more unwieldy. This module provides utility
functions that make handling these lists more ergonomic. In particular, the
`member?/2` function helps you avoid a common performance pitfall.
"""
@typedoc """
A list of bitwise IP blocks.
The `BitwiseIp.Blocks` module operates over lists of `BitwiseIp.Block`
structs. This itself does not warrant a separate struct with any extra
indirection, so we just use lists directly.
"""
@type t() :: [BitwiseIp.Block.t()]
@doc """
Efficiently checks if an IP address is a member of any of the blocks.
Libraries will generally handle IP addresses encoded as `:inet`-style tuples
of integers. Therefore, in order to use `BitwiseIp.Block.member?/2`, you'll
first need to use `BitwiseIp.encode/1` to convert the tuple into an
integer-encoded struct.
A common mistake when handling a list of blocks is to do the bitwise IP
encoding repeatedly within a loop:
```
# This is a mistake!
ip = {127, 0, 0, 1}
Enum.any?(blocks, &BitwiseIp.Block.member?(&1, BitwiseIp.encode(ip)))
```
The problem with the above is that the return value of `BitwiseIp.encode(ip)`
doesn't change as we iterate through the list. The cost of redundantly
encoding the same IP address over & over is often enough to outweigh any
performance gains from using the bitwise membership checks.
This function helps enforce a pattern where the encoding is only done once
(essentially performing [loop-invariant code
motion](https://en.wikipedia.org/wiki/Loop-invariant_code_motion)). That is,
it's akin to saying:
```
ip = {127, 0, 0, 1}
encoded = BitwiseIp.encode(ip) # this is only done once
Enum.any?(blocks, &BitwiseIp.Block.member?(&1, encoded))
```
This function also accepts an already-encoded `BitwiseIp` struct as an
argument, in which case no extra encoding needs to be performed. This is
useful for cases where you need to perform even more loop-invariant code
motion, such as when you're handling two separate lists. In such a case, you
should use a pattern like:
```
# make sure to only encode the IP once
ip = {127, 0, 0, 1}
encoded = BitwiseIp.encode(ip)
BitwiseIp.Blocks.member?(blocks1, encoded) # check the first list
BitwiseIp.Blocks.member?(blocks2, encoded) # check the second list
```
## Examples
```
iex> ["1.2.0.0/16", "3.4.0.0/16", "5.6.0.0/16"]
...> |> Enum.map(&BitwiseIp.Block.parse!/1)
...> |> BitwiseIp.Blocks.member?({1, 2, 3, 4})
true
iex> ["1.2.0.0/16", "3.4.0.0/16", "5.6.0.0/16"]
...> |> Enum.map(&BitwiseIp.Block.parse!/1)
...> |> BitwiseIp.Blocks.member?({7, 8, 9, 10})
false
iex> ["1.2.0.0/16", "3.4.0.0/16", "5.6.0.0/16"]
...> |> Enum.map(&BitwiseIp.Block.parse!/1)
...> |> BitwiseIp.Blocks.member?(BitwiseIp.encode({1, 2, 3, 4}))
true
iex> ["1.2.0.0/16", "3.4.0.0/16", "5.6.0.0/16"]
...> |> Enum.map(&BitwiseIp.Block.parse!/1)
...> |> BitwiseIp.Blocks.member?(BitwiseIp.encode({7, 8, 9, 10}))
false
```
"""
@spec member?(t(), BitwiseIp.t()) :: boolean()
def member?(blocks, %BitwiseIp{} = ip) do
Enum.any?(blocks, &BitwiseIp.Block.member?(&1, ip))
end
@spec member?(t(), :inet.ip_address()) :: boolean()
def member?(blocks, ip) do
member?(blocks, BitwiseIp.encode(ip))
end
@doc """
An error-raising variant of `parse/1`.
This function takes a list of strings in CIDR notation and parses them into
bitwise IP blocks using `BitwiseIp.Block.parse!/1`. If any of the strings are
invalid, the whole list fails to parse and the error is propagated. If you
want to discard invalid elements instead, use `parse/1`.
## Examples
```
iex> BitwiseIp.Blocks.parse!(["172.16.17.32/16", "dead::beef"])
...> |> Enum.map(&to_string/1)
["172.16.17.32/16", "dead::beef/128"]
iex> BitwiseIp.Blocks.parse!(["3.14/16", "invalid", "dead::cow"])
** (ArgumentError) Invalid IP address "3.14" in CIDR "3.14/16"
iex> BitwiseIp.Blocks.parse!(["172.16.17.32/16", "invalid", "dead::beef"])
** (ArgumentError) Invalid IP address "invalid" in CIDR "invalid"
```
"""
@spec parse!([String.t()]) :: t()
def parse!(cidrs) do
Enum.map(cidrs, &BitwiseIp.Block.parse!/1)
end
@doc """
Parses a list of strings into bitwise IP blocks.
This function takes a list of strings in CIDR notation and parses them into
bitwise IP blocks using `BitwiseIp.Block.parse/1`. If a string is invalid,
its value is discarded from the resulting list. If you want to raise an error
instead, use `parse!/1`.
## Examples
```
iex> BitwiseIp.Blocks.parse(["172.16.17.32/16", "dead::beef"])
...> |> Enum.map(&to_string/1)
["172.16.17.32/16", "dead::beef/128"]
iex> BitwiseIp.Blocks.parse(["3.14/16", "invalid", "dead::cow"])
[]
iex> BitwiseIp.Blocks.parse(["3.14.0.0/16", "invalid", "dead::beef"])
...> |> Enum.map(&to_string/1)
["3.14.0.0/16", "dead::beef/128"]
```
"""
@spec parse([String.t()]) :: t()
def parse(cidrs)
def parse([cidr | cidrs]) do
case BitwiseIp.Block.parse(cidr) do
{:ok, block} -> [block | parse(cidrs)]
{:error, _} -> parse(cidrs)
end
end
def parse([]) do
[]
end
@doc """
Computes an equivalent list of blocks optimal for `member?/2`.
While an individual `BitwiseIp.Block.member?/2` call is already efficient,
the performance of `member?/2` is sensitive to a couple of factors:
1. The size of the list matters, since a smaller list requires fewer
individual checks.
2. The order of the elements in the list matters, since `member?/2` will exit
early as soon as any individual check returns true.
To optimize for the size of the list, this function recursively merges any
two blocks where one is a subset of the other. This is tested using
`BitwiseIp.Block.subnet?/2`. For example, `1.2.0.0/16` is a subset of
`1.0.0.0/8`, so instead of calling `BitwiseIp.Block.member?/2` on both of
them, we can simply check the larger range of the two - in this case,
`1.0.0.0/8`.
The order can be optimized by placing larger blocks earlier in the list.
Assuming an even distribution of IP addresses, it's more likely for an
address to fall inside of a block that covers a wider range. Thus, we can
sort by the integer-encoded mask: a smaller mask means a shorter network
prefix, which means there are more addresses possible (see
`BitwiseIp.Block.size/1` for more on computing the size of a block from its
mask).
This optimization is kind of a parlor trick cribbed from the
[cider](https://hex.pm/packages/cider) library. Except in pathological cases,
the run time cost of performing the optimization is likely larger than any
performance gained by using the new list. As such, if you're going to use
this function at all, it's only really appropriate to call at compile time,
which means your original list of blocks has to be available statically.
## Examples
```
iex> ["1.2.3.4", "1.2.3.0/24", "1.2.0.0/16", "1.0.0.0/8"]
...> |> BitwiseIp.Blocks.parse!()
...> |> BitwiseIp.Blocks.optimize()
...> |> Enum.map(&to_string/1)
["1.0.0.0/8"]
iex> ["1.2.0.0/16", "3.0.0.0/8"]
...> |> BitwiseIp.Blocks.parse!()
...> |> BitwiseIp.Blocks.optimize()
...> |> Enum.map(&to_string/1)
["3.0.0.0/8", "1.2.0.0/16"]
iex> ["1.2.0.0/16", "3.4.5.0/24", "1.0.0.0/8", "3.4.0.0/16"]
...> |> BitwiseIp.Blocks.parse!()
...> |> BitwiseIp.Blocks.optimize()
...> |> Enum.map(&to_string/1)
["1.0.0.0/8", "3.4.0.0/16"]
```
"""
@spec optimize(t()) :: t()
def optimize(blocks) do
case try_to_optimize(blocks) do
{:success, blocks} -> optimize(blocks)
:failure -> blocks |> Enum.sort_by(& &1.mask)
end
end
defp try_to_optimize(blocks, unmergeable \\ [])
defp try_to_optimize([block | blocks], unmergeable) do
case try_to_merge(block, blocks) do
{:success, merged} -> {:success, merged ++ unmergeable}
:failure -> try_to_optimize(blocks, [block | unmergeable])
end
end
defp try_to_optimize([], _) do
:failure
end
defp try_to_merge(block, blocks, visited \\ [])
defp try_to_merge(a, [b | unvisited], visited) do
cond do
BitwiseIp.Block.subnet?(a, b) -> {:success, [a | unvisited] ++ visited}
BitwiseIp.Block.subnet?(b, a) -> {:success, [b | unvisited] ++ visited}
true -> try_to_merge(a, unvisited, [b | visited])
end
end
defp try_to_merge(_, [], _) do
:failure
end
end
|
lib/bitwise_ip/blocks.ex
| 0.949576
| 0.919643
|
blocks.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.NodeInfoCachedGet do
@moduledoc """
Get the node information that is cached on another device
This is useful for getting the command classes and device classes
When sending this command the Z-Wave network should send back a
`NodeInfoCachedReport` command.
Params:
`:seq_number` - the sequence number for the networked command (required)
  `:max_age` - the max age of the node info frame, given in 2^n minutes;
  see the section on cached minutes below for more information (optional)
  `:node_id` - the node id for which node information is being requested
  (required)
## Cached Minutes
The Z-Wave network caches node information to preserve bandwidth and to
provide access to node information about sleeping nodes.
When sending the `NodeInfoCachedGet` command we can specify the max age of
the cached data. If the cached data is older than the `:max_age` param, the
Z-Wave network will try to refresh the cache and send back the most
up-to-date information.
The values for the `:max_age` parameter are numbers from 1 to 14. The number
`n` is interpreted as 2 ^ n minutes, so passing `4` tells the receiving
Z-Wave device to consider the cached data fresh for 16 minutes.
Two other options are `:infinite` and `:force_update`: `:infinite` means
that the cache will not be refreshed regardless of how old the data is, and
`:force_update` means that the cache will attempt to be updated no matter
how old the cached node data is.
We default to `10`, which is `1024` minutes, or just a little over 1 day.
This default is chosen to limit bandwidth usage. Also, the data found in the
report is fairly static, so there isn't a pressing need to update the cache
too often.
"""
@behaviour Grizzly.ZWave.Command
@type max_age :: 1..14 | :infinite | :force_update
@type param ::
{:seq_number, Grizzly.seq_number()}
| {:node_id, Grizzly.Node.id()}
| {:max_age, max_age()}
alias Grizzly.ZWave.{Command, DecodeError}
alias Grizzly.ZWave.CommandClasses.NetworkManagementProxy
@impl true
@spec new([param]) :: {:ok, Command.t()}
def new(params) do
params = set_defaults(params)
# TODO: validate params
command = %Command{
name: :node_info_cache_get,
command_byte: 0x03,
command_class: NetworkManagementProxy,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
seq_number = Command.param!(command, :seq_number)
max_age = Command.param!(command, :max_age)
node_id = Command.param!(command, :node_id)
<<seq_number, encode_max_age(max_age), node_id>>
end
@impl true
@spec decode_params(binary()) :: {:ok, [param]} | {:error, DecodeError.t()}
def decode_params(params_binary) do
<<seq_number, max_age_byte, node_id>> = params_binary
case decode_max_age(max_age_byte) do
{:ok, max_age} ->
{:ok, [seq_number: seq_number, max_age: max_age, node_id: node_id]}
{:error, %DecodeError{}} = error ->
error
end
end
@spec encode_max_age(max_age()) :: 0..15
def encode_max_age(n) when n > 0 and n < 15, do: n
def encode_max_age(:infinite), do: 15
def encode_max_age(:force_update), do: 0
@spec decode_max_age(byte()) :: {:ok, max_age()} | {:error, DecodeError.t()}
def decode_max_age(0), do: {:ok, :force_update}
def decode_max_age(15), do: {:ok, :infinite}
  def decode_max_age(n) when n > 0 and n < 15, do: {:ok, n}
def decode_max_age(n),
do: {:error, %DecodeError{value: n, param: :max_age, command: :node_info_cache_get}}
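  # Sketch of the wire encoding above: :force_update <-> 0, :infinite <-> 15,
  # and 1..14 pass through unchanged, e.g. encode_max_age(4) == 4 (2^4 = 16
  # minutes) and decode_max_age(15) == {:ok, :infinite}.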
defp set_defaults(params) do
Keyword.put_new(params, :max_age, 10)
end
end
|
lib/grizzly/zwave/commands/node_info_cached_get.ex
| 0.767211
| 0.676212
|
node_info_cached_get.ex
|
starcoder
|
defmodule Aoc2021.Day13 do
@moduledoc """
See https://adventofcode.com/2021/day/13
"""
@type instruction() :: {:fold_x, non_neg_integer()} | {:fold_y, non_neg_integer()}
defmodule Reader do
@moduledoc false
@spec read_input(Path.t()) :: {MapSet.t(), [Aoc2021.Day13.instruction()]}
def read_input(path) do
{points, instructions} =
path
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Enum.split_while(&non_empty?/1)
# Drop empty line
instructions = tl(instructions)
{parse_points(points), parse_instructions(instructions)}
end
defp non_empty?(""), do: false
defp non_empty?(_), do: true
defp parse_points(lines) do
lines
|> Enum.map(&String.split(&1, ","))
|> Enum.map(fn line -> Enum.map(line, &String.to_integer/1) end)
|> Enum.reduce(MapSet.new(), fn [x, y], acc -> MapSet.put(acc, {x, y}) end)
end
defp parse_instructions(lines) do
lines
|> Enum.map(&String.replace(&1, "fold along ", ""))
|> Enum.map(&String.split(&1, "="))
|> Enum.map(&parse_instruction/1)
end
defp parse_instruction(["x", x]), do: {:fold_x, String.to_integer(x)}
defp parse_instruction(["y", y]), do: {:fold_y, String.to_integer(y)}
end
@spec solve_part1() :: non_neg_integer()
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day13/input.txt") do
{dots, instructions} = Reader.read_input(path)
instructions
|> hd()
|> List.wrap()
|> apply_folds(dots)
|> MapSet.size()
end
@spec solve_part2() :: :ok
@spec solve_part2(Path.t()) :: :ok
def solve_part2(path \\ "priv/day13/input.txt") do
{dots, instructions} = Reader.read_input(path)
dots =
instructions
|> apply_folds(dots)
output = format_dots(dots)
IO.puts(output)
end
defp apply_folds(instructions, dots) do
Enum.reduce(instructions, dots, &apply_fold/2)
end
defp apply_fold({:fold_x, x}, dots) do
reject = fn {xx, _} -> x == xx end
split = fn {xx, _} -> xx < x end
map = fn {xx, y} -> {x - (xx - x), y} end
apply_fold_with(dots, reject, split, map)
end
defp apply_fold({:fold_y, y}, dots) do
reject = fn {_, yy} -> y == yy end
split = fn {_, yy} -> yy < y end
map = fn {x, yy} -> {x, y - (yy - y)} end
apply_fold_with(dots, reject, split, map)
end
defp apply_fold_with(dots, reject, split, map) do
{under, over} =
dots
|> Enum.reject(&reject.(&1))
|> Enum.split_with(&split.(&1))
under = MapSet.new(under)
over = MapSet.new(over, &map.(&1))
MapSet.union(under, over)
end
defp format_dots(dots) do
max_x =
dots
|> Enum.map(fn {x, _} -> x end)
|> Enum.sort(:desc)
|> hd()
max_y =
dots
|> Enum.map(fn {_, y} -> y end)
|> Enum.sort(:desc)
|> hd()
lines =
for y <- 0..max_y do
list =
for x <- 0..max_x do
if MapSet.member?(dots, {x, y}) do
"X"
else
"."
end
end
Enum.join(list)
end
Enum.join(lines, "\n")
end
end
|
lib/aoc2021/day13.ex
| 0.743168
| 0.439988
|
day13.ex
|
starcoder
|
defmodule Niex.Content do
defstruct(content: nil, type: nil)
@moduledoc """
Content that can be rendered within a cell in a Niex notebook when
returned (or rendered via `Niex.render/1`) in a notebook cell.
"""
@doc """
Builds content for a cell containing an image at the provided `url`.
"""
def image(url) do
%Niex.Content{type: "image", content: url}
end
@doc """
Builds content for a cell containing a video at the provided `url`.
"""
def video(url, options \\ []) do
%Niex.Content{
type: "video",
content: %{url: url, options: Enum.into(options, %{width: 480, height: 360})}
}
end
@doc """
Returns content for a cell containing a chart using the Chartkick library.
The `type` of the chart corresponds to the chart type as shown in the
[ChartKick docs](https://github.com/ankane/chartkick.js).
"""
def chart(type, data, options \\ []) do
%Niex.Content{
type: "chart",
content: %{type: type, data: data, options: Enum.into(options, %{width: 480, height: 360})}
}
end
@doc """
Builds content for a cell containing plain HTML
"""
def html(content) do
%Niex.Content{type: "html", content: content}
end
@doc """
Builds content for a cell containing preformatted text
"""
def pre(content) do
%Niex.Content{type: "pre", content: content}
end
@doc """
Renders the provided `Niex.Content` into HTML
"""
def render(%Niex.Content{type: "chart", content: data}) do
"""
<div class="chart" style="width: #{data.options.width}px; height: #{data.options.height}px" phx-hook="NiexChart" data-chart='#{
Poison.encode!(data)
}' id="#{UUID.uuid4()}}" />
"""
end
def render(%Niex.Content{type: "image", content: url}) do
"""
<img src="#{url}" />
"""
end
def render(%Niex.Content{type: "video", content: %{url: url, options: options}}) do
"""
<video controls width="#{options[:width]}" height="#{options[:height]}" src="#{url}" />
"""
end
def render(%Niex.Content{type: "pre", content: content}) do
"""
<pre>#{content}</pre>
"""
end
def render(%Niex.Content{type: "html", content: content}) do
content
end
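  # Illustrative sketch (the URL is hypothetical):
  #
  #     Niex.Content.image("http://example.com/cat.png") |> Niex.Content.render()
  #
  # returns an `<img>` tag pointing at that URL.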
end
|
niex/lib/niex/content.ex
| 0.822759
| 0.560854
|
content.ex
|
starcoder
|
defmodule Underscore do
@moduledoc """
Re-implementing (most of) the pure collection functions from Underscore.js in Elixir (as a code kata exercise). Requirements: neither the Enum module nor native Erlang implementations may be used, and functions must support tail recursion.
"""
@doc """
## Examples
iex> Underscore.reduce([1, 2, 3], fn(acc, x) -> acc + x end, 4)
10
iex> Underscore.reduce([1, 2, 3], fn(acc, x) -> acc + x end)
6
"""
def reduce(list, fun, acc \\ nil) do
do_reduce(list, fun, acc)
end
defp do_reduce([], _fun, acc) do
acc
end
defp do_reduce([head | tail], fun, nil) do
do_reduce(tail, fun, head)
end
defp do_reduce([head | tail], fun, acc) do
do_reduce(tail, fun, fun.(acc, head))
end
@doc """
## Examples
iex> Underscore.reverse([1, 2, 3])
[3, 2, 1]
"""
def reverse(list) do
reduce(list, fn(acc, x) -> [x | acc] end, [])
end
@doc """
## Examples
iex> Underscore.map([1, 2, 3], fn(x) -> x * x end)
[1, 4, 9]
"""
def map(list, fun) do
do_map(list, fun, [])
end
defp do_map(list, fun, acc) do
list
|> reduce(fn(acc, x) -> [fun.(x) | acc] end, acc)
|> reverse
end
@doc """
## Examples
iex> Underscore.find([1, 2, 3, 4, 5, 6], fn(x) -> rem(x, 2) == 0 end)
2
"""
def find(list, predicate) do
do_find(list, predicate)
end
defp do_find([], _predicate) do
:none
end
defp do_find([head | tail], predicate) do
if predicate.(head) do
head
else
do_find(tail, predicate)
end
end
@doc """
## Examples
iex> Underscore.filter([1, 2, 3, 4, 5, 6], fn(x) -> rem(x, 2) == 0 end)
[2, 4, 6]
"""
def filter(list, predicate) do
reduce(list, fn(acc, x) ->
if predicate.(x), do: [x | acc], else: acc
end, []) |> reverse
end
@doc """
## Examples
iex> Underscore.where([%{color: "purple", shape: "circle"}, %{color: "red", shape: "triangle"}, %{color: "blue", shape: "circle"}, %{color: "green", shape: "square"}], %{shape: "circle"})
[%{color: "purple", shape: "circle"}, %{color: "blue", shape: "circle"}]
"""
def where(list, properties) do
with filter_props <- MapSet.new(properties) do
filter(list, fn(x) ->
MapSet.subset?(filter_props, MapSet.new(x))
end)
end
end
@doc """
## Examples
iex> Underscore.find_where([%{color: "purple", shape: "circle"}, %{color: "red", shape: "triangle"}, %{color: "blue", shape: "circle"}, %{color: "green", shape: "square"}], %{shape: "circle"})
%{color: "purple", shape: "circle"}
"""
def find_where(list, properties) do
with filter_props <- MapSet.new(properties) do
find(list, fn(x) ->
MapSet.subset?(filter_props, MapSet.new(x))
end)
end
end
@doc """
## Examples
iex> Underscore.reject([1, 2, 3, 4, 5, 6], fn(x) -> rem(x, 2) == 0 end)
[1, 3, 5]
"""
def reject(list, predicate) do
reduce(list, fn(acc, x) ->
if !predicate.(x), do: [x | acc], else: acc
end, []) |> reverse
end
@doc """
## Examples
iex> Underscore.identity("foo")
"foo"
"""
def identity(x), do: x
@doc """
## Examples
iex> Underscore.every([2, 4, 5], fn(x) -> rem(x, 2) == 0 end)
false
iex> Underscore.every([2, 4, 6], fn(x) -> rem(x, 2) == 0 end)
true
iex> Underscore.every([false, true, false])
false
"""
def every(list, predicate \\ &identity/1) do
reject(list, predicate) == []
end
@doc """
## Examples
iex> Underscore.some([2, 4, 5], fn(x) -> rem(x, 2) == 0 end)
true
iex> Underscore.some([2, 4, 6], fn(x) -> rem(x, 2) == 0 end)
true
iex> Underscore.some([false, true, false])
true
"""
def some(list, predicate \\ &identity/1) do
find(list, predicate) != :none
end
@doc """
## Examples
iex> Underscore.contains([2, 4, 6], 4)
true
iex> Underscore.contains([2, 4, 6], 8)
false
"""
def contains(list, value) do
reduce(list, fn(acc, x) ->
if acc || x == value, do: true, else: false
end, false)
end
@doc """
## Examples
iex> Underscore.pluck([%{color: "purple", shape: "circle"}, %{color: "red", shape: "triangle"}, %{color: "blue", shape: "circle"}, %{color: "green", shape: "square"}], :color)
["purple", "red", "blue", "green"]
"""
def pluck(list, key) do
map(list, &(Map.get(&1, key)))
end
@doc """
## Examples
iex> Underscore.max([1, 100, 10])
100
iex> Underscore.max([%{num: 1}, %{num: 100}, %{num: 10}], fn(x) -> x.num end)
%{num: 100}
"""
def max(list, fun \\ &identity/1) do
reduce(list, fn(acc, x) ->
if fun.(x) > fun.(acc), do: x, else: acc
end)
end
@doc """
## Examples
iex> Underscore.min([100, 1, 10])
1
iex> Underscore.min([%{num: 1}, %{num: 100}, %{num: 10}], fn(x) -> x.num end)
%{num: 1}
"""
def min(list, fun \\ &identity/1) do
reduce(list, fn(acc, x) ->
if fun.(x) < fun.(acc), do: x, else: acc
end)
end
@doc """
## Examples
iex> Underscore.sort([2, 3, 5, 4, 1, 5])
[1, 2, 3, 4, 5, 5]
iex> Underscore.sort([2, 3, 5, 4, 1, 5], fn(x) -> -x end)
[5, 5, 4, 3, 2, 1]
"""
def sort(list, fun \\ &identity/1) do
do_sort([], list, fun)
end
defp do_sort(_sorted = [], _unsorted = [head| tail], fun) do
do_sort([head], tail, fun)
end
defp do_sort(sorted, _unsorted = [], _fun) do
sorted
end
defp do_sort(sorted, _unsorted = [head | tail], fun) do
sorted
|> insert(head, fun)
|> do_sort(tail, fun)
end
defp insert(_sorted = [], node, _fun) do
[node]
end
defp insert(_sorted = [min | rest], node, fun) do
if fun.(min) >= fun.(node) do
[node | [min | rest]]
else
[min | insert(rest, node, fun)]
end
end
@doc """
## Examples
iex> Underscore.group_by([1, 2, 3], fn(x) -> if rem(x, 2) == 0, do: :even, else: :odd end)
%{even: [2], odd: [1, 3]}
"""
def group_by(list, fun) do
reduce(list, fn(acc, x) ->
Map.update(acc, fun.(x), [x], &(&1 ++ [x]))
end, %{})
end
@doc """
## Examples
iex> Underscore.index_by([1, 2, 3], fn(x) -> x * x end)
%{1 => 1, 4 => 2, 9 => 3}
"""
def index_by(list, fun) do
reduce(list, fn(acc, x) ->
Map.put(acc, fun.(x), x)
end, %{})
end
@doc """
## Examples
iex> Underscore.size([1, 1, 1, 1])
4
"""
def size(list) do
reduce(list, fn(acc, _x) ->
acc + 1
end, 0)
end
@doc """
## Examples
iex> Underscore.count_by([1, 2, 3, 4, 5], fn(x) -> if rem(x, 2) == 0, do: :even, else: :odd end)
%{odd: 3, even: 2}
"""
def count_by(list, fun) do
with groups <- group_by(list, fun), keys <- Map.keys(groups) do
reduce(keys, fn(acc, key) ->
Map.put(acc, key, size(Map.get(groups, key)))
end, %{})
end
end
@doc """
## Examples
iex> Underscore.partition([1, 2, 3, 4, 5], fn(x) -> rem(x, 2) != 0 end)
[[1, 3, 5], [2, 4]]
"""
def partition(list, predicate) do
%{true => match, false => rest} = group_by(list, predicate)
[match, rest]
end
end
|
lib/underscore.ex
| 0.827584
| 0.484258
|
underscore.ex
|
starcoder
|
defmodule Ash.Resource.Identity do
@moduledoc """
Represents a unique constraint on a resource
Data layers should (and all built in ones do), discount `nil` or `null` (in the case of postgres) values
when determining if a unique constraint matches. This often means that you should
prefer to use identities with non-nullable columns.
Eventually, features could be added to support including `nil` or `null` values, but they would
need to include a corresponding feature for data layers.
"""
defstruct [:name, :keys, :description, :message, :eager_check_with, :pre_check_with]
@schema [
name: [
type: :atom,
required: true,
doc:
"The name of the identity. Used by extensions to target specific identities for fetching single instances of a resource"
],
keys: [
type: {:custom, __MODULE__, :keys, []},
required: true,
doc:
"The names of attributes, aggregates or calculations that uniquely identify this resource."
],
eager_check_with: [
type: {:behaviour, Ash.Api},
doc: """
Validates that the unique identity provided is unique at validation time, using the api module provided.
The identity is checked on each validation of the changeset. For example, if you are using
`AshPhoenix.Form`, this looks for a conflicting record on each call to `Form.validate/2`.
For updates, it is only checked if one of the involved fields is being changed.
For creates, the identity is checked unless you are performing an `upsert` and the
`upsert_identity` is this identity. Keep in mind that for this to work properly, you will need
to pass the `upsert?: true, upsert_identity: :identity_name` *when creating the changeset* instead of
passing it to the Api when creating.
The `primary?` read action is used to search for a record. This will error if you have not
configured one.
"""
],
pre_check_with: [
type: {:behaviour, Ash.Api},
doc: """
Validates that the unique identity provided is unique *just prior* to enacting the resource action, using the Api provided.
Behaves the same as `eager_check_with`, but it runs just prior to the action being committed. Useful for
data layers that don't support transactions/unique constraints, or manual resources with identities.
"""
],
description: [
type: :string,
doc: "An optional description for the identity"
],
message: [
type: :string,
doc: "An error message to use when the unique identity would be violated"
]
]
def schema, do: @schema
@type t :: %__MODULE__{
name: atom(),
keys: list(atom()),
description: String.t() | nil
}
def keys(keys) do
keys = List.wrap(keys)
if Enum.all?(keys, &is_atom/1) do
{:ok, keys}
else
{:error, "Expected a list of atoms for the identity keys"}
end
end
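  # Sketch of the validator above: keys(:email) #=> {:ok, [:email]},
  # keys([:email, :username]) #=> {:ok, [:email, :username]}, and any
  # non-atom entry returns {:error, "Expected a list of atoms for the identity keys"}.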
end
|
lib/ash/resource/identity.ex
| 0.925735
| 0.617844
|
identity.ex
|
starcoder
|
defmodule Kxir.Logs do
@moduledoc """
Provides convenience operations over Kubernetes logging facilities.
Requires a 'pod_name' and optionally a namespace (defaults to "default").
Example: kx logs some-pod somenamespace
"""
@behaviour Kxir.CLI.Help
alias Kxir.{Pod, Logs.Filter.Jaro}
@doc """
Aggregates the logs from the given pod by considering all init containers.
This should replace commands like:
$ kubectl logs some-pod-23123 -c current-init-container
"""
def aggregate(pod_name), do: aggregate(pod_name, [])
def aggregate(pod_name, []) do
do_aggregate(pod_name, "default")
end
def aggregate(pod_name, namespace: namespace) do
do_aggregate(pod_name, namespace)
end
def aggregate(pod_name, jaro: filtered_name),
do: aggregate(pod_name, namespace: "default", jaro: filtered_name)
def aggregate(pod_name, namespace: namespace, jaro: filtered_name) do
# todo: remove duplicated code and extract filter and namespace to keyword opts
Pod.logs(pod_name, namespace)
|> Stream.filter(fn tuple -> Jaro.filter(tuple, name: filtered_name) end)
|> Stream.map(fn t -> attach_name_to_line(t) end)
end
defp do_aggregate(pod_name, namespace) do
Pod.logs(pod_name, namespace)
|> Stream.map(fn t -> attach_name_to_line(t) end)
#|> Enum.sort_by(fn x -> elem(x, 0) end, :asc)
|> Stream.each(&(IO.puts(elem(&1,1))))
|> Enum.to_list()
end
defp attach_name_to_line({idx, name, lines}) do
logs = String.split(lines, "\n")
|> Stream.filter(&(String.length(&1) > 0))
|> Stream.map(fn line -> color_for(name) <> "[#{name}] " <> IO.ANSI.reset() <> line end)
|> Enum.join("\n")
{idx, logs <> "\n"}
end
defp color_for(name) do
colors = [
IO.ANSI.light_magenta(),
IO.ANSI.light_blue(),
IO.ANSI.light_red(),
IO.ANSI.light_cyan(),
IO.ANSI.light_green()
]
idx = :erlang.phash2(name, length(colors) - 1)
Enum.at(colors, idx)
end
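  # Sketch: :erlang.phash2/2 is deterministic, so the same container name gets
  # the same ANSI color on every call. Note that phash2(name, length(colors) - 1)
  # returns 0..3 here, so the last color in the list is never selected.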
def help_text() do
"#{IO.ANSI.green()}\nUsage: #{IO.ANSI.reset()}logs #{IO.ANSI.red()}[pod_name] #{
IO.ANSI.yellow()
}[namespace]"
end
end
|
lib/kxir/log/log.ex
| 0.550607
| 0.402128
|
log.ex
|
starcoder
|
defmodule Statistics.Math do
@e :math.exp(1)
@pi :math.pi()
@doc """
Get square root
return sqrt from Erlang
## Examples
iex> Statistics.Math.sqrt(9)
3.0
iex> Statistics.Math.sqrt(99)
9.9498743710662
"""
@spec sqrt(number) :: number
defdelegate sqrt(num), to: :math
@doc """
Get power from Erlang
This is needed because Elixir doesn't
currently have the `**` operator
## Examples
iex> Statistics.Math.pow(2,3)
8.0
iex> Statistics.Math.pow(9,9)
387420489.0
iex> Statistics.Math.pow(2,0)
1
iex> Statistics.Math.pow(-2, 1.5)
-2.8284271247461903
iex> Statistics.Math.pow(0, 5)
0
"""
@spec pow(number, number) :: number
def pow(_, 0), do: 1
def pow(0, pow) when pow >= 0, do: 0
# Erlang doesn't like raising negative numbers to non-integer powers
def pow(num, pow) when num < 0 and is_float(pow) do
:math.pow(-num, pow) * -1
end
# otherwise let erlang do it
defdelegate pow(num, pow), to: :math
@doc """
The constant *e*
## Examples
iex> Statistics.Math.e
2.718281828459045
"""
@spec e() :: number
def e do
@e
end
@doc """
The constant *pi*
(returned from Erlang Math module)
## Examples
iex> Statistics.Math.pi
3.141592653589793
"""
@spec pi() :: number
def pi do
@pi
end
@doc """
The natural log
( from Erlang Math module)
## Examples
iex> Statistics.Math.ln(20)
2.995732273553991
iex> Statistics.Math.ln(200)
5.298317366548036
"""
@spec ln(number) :: number
defdelegate ln(i), to: :math, as: :log
@doc """
Exponent function
Raise *e* to given power
## Examples
iex> Statistics.Math.exp(5.6)
270.42640742615254
"""
@spec exp(number) :: number
defdelegate exp(x), to: :math
@doc """
Get a random number from erlang
"""
@spec rand() :: number
defdelegate rand(), to: :rand, as: :uniform
@doc """
Round a decimal to a specific precision
## Examples
iex> Statistics.Math.round(0.123456, 4)
0.1235
"""
@spec round(number, number) :: number
def round(x, precision) do
p = pow(10, precision)
:erlang.round(x * p) / p
end
@doc """
Floor function
## Examples
iex> Statistics.Math.floor(3.999)
3.0
"""
@spec floor(number) :: number
def floor(x) do
f = :erlang.trunc(x) * 1.0
cond do
x - f >= 0 ->
f
x - f < 0 ->
f - 1
end
end
@doc """
Ceiling function
## Examples
iex> Statistics.Math.ceil(3.999)
4.0
"""
@spec ceil(number) :: number
def ceil(x) do
f = :erlang.trunc(x) * 1.0
cond do
x - f > 0 ->
f + 1
x - f <= 0 ->
f
end
end
@doc """
Get the absolute value of a number
## Examples
iex> Statistics.Math.abs(-4)
4
"""
@spec abs(number) :: number
defdelegate abs(x), to: :erlang
@doc """
Factorial!
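## Examples
iex> Statistics.Math.factorial(5)
120
iex> Statistics.Math.factorial(0)
1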
"""
@spec factorial(non_neg_integer) :: non_neg_integer
def factorial(n) when n < 0 do
raise ArithmeticError, message: "Argument n must be a positive number"
end
def factorial(n) when n == 0 or n == 1 do
1
end
def factorial(n) do
(to_int(n) - 1)..1
|> Enum.to_list()
|> List.foldl(n, fn x, acc -> x * acc end)
end
@doc """
Get the base integer from a float
## Examples
iex> Statistics.Math.to_int(66.6666)
66
"""
@spec to_int(number) :: integer
defdelegate to_int(f), to: :erlang, as: :trunc
@doc """
The number of k combinations of n
Both arguments must be integers
## Examples
iex> Statistics.Math.combination(10, 3)
120
"""
@spec combination(non_neg_integer, non_neg_integer) :: non_neg_integer
def combination(n, k) do
:erlang.div(factorial(n), factorial(k) * factorial(n - k))
end
@doc """
The number of k permutations of n
## Examples
iex> Statistics.Math.permutation(10, 3)
720
"""
@spec permutation(non_neg_integer, non_neg_integer) :: non_neg_integer
def permutation(n, k) do
:erlang.div(factorial(n), factorial(n - k))
end
end
|
lib/math/math.ex
| 0.882136
| 0.646056
|
math.ex
|
starcoder
|
defmodule AWS.TimestreamWrite do
@moduledoc """
Amazon Timestream Write
Amazon Timestream is a fast, scalable, fully managed time series database
service that makes it easy to store and analyze trillions of time series data
points per day.
With Timestream, you can easily store and analyze IoT sensor data to derive
insights from your IoT applications. You can analyze industrial telemetry to
streamline equipment management and maintenance. You can also store and analyze
log data and metrics to improve the performance and availability of your
applications. Timestream is built from the ground up to effectively ingest,
process, and store time series data. It organizes data to optimize query
processing. It automatically scales based on the volume of data ingested and on
the query volume to ensure you receive optimal performance while inserting and
querying data. As your data grows over time, Timestream’s adaptive query
processing engine spans across storage tiers to provide fast analysis while
reducing costs.
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "Timestream Write",
api_version: "2018-11-01",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "ingest.timestream",
global?: false,
protocol: "json",
service_id: "Timestream Write",
signature_version: "v4",
signing_name: "timestream",
target_prefix: "Timestream_20181101"
}
end
@doc """
Creates a new Timestream database.
If the KMS key is not specified, the database will be encrypted with a
Timestream managed KMS key located in your account. Refer to [Amazon Web Services managed KMS
keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
for more info. [Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.create-db.html)
for details.
"""
def create_database(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateDatabase", input, options)
end
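  # Illustrative sketch (not part of the upstream module), assuming aws-elixir's
  # `AWS.Client.create/3` constructor and a hypothetical database name:
  #
  #     client = AWS.Client.create("access_key_id", "secret_access_key", "us-east-1")
  #     {:ok, _result, _response} =
  #       AWS.TimestreamWrite.create_database(client, %{"DatabaseName" => "my_db"})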
@doc """
The CreateTable operation adds a new table to an existing database in your
account.
In an Amazon Web Services account, table names must be unique within each
Region if they are in the same database. You may have identical table names
in the same Region if the tables are in separate databases. While creating the
table, you must specify the table name, database name, and the retention
properties. [Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.create-table.html)
for details.
"""
def create_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "CreateTable", input, options)
end
@doc """
Deletes a given Timestream database.
*This is an irreversible operation. After a database is deleted, the time series
data from its tables cannot be recovered.*
All tables in the database must be deleted first, or a ValidationException error
will be thrown.
Due to the nature of distributed retries, the operation can return either
success or a ResourceNotFoundException. Clients should consider them equivalent.
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.delete-db.html)
for details.
"""
def delete_database(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteDatabase", input, options)
end
@doc """
Deletes a given Timestream table.
This is an irreversible operation. After a Timestream database table is deleted,
the time series data stored in the table cannot be recovered.
Due to the nature of distributed retries, the operation can return either
success or a ResourceNotFoundException. Clients should consider them equivalent.
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.delete-table.html)
for details.
"""
def delete_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteTable", input, options)
end
@doc """
Returns information about the database, including the database name, time that
the database was created, and the total number of tables found within the
database.
[Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.describe-db.html)
for details.
"""
def describe_database(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeDatabase", input, options)
end
@doc """
DescribeEndpoints returns a list of available endpoints to make Timestream API
calls against.
This API is available through both Write and Query.
Because the Timestream SDKs are designed to transparently work with the
service’s architecture, including the management and mapping of the service
endpoints, *it is not recommended that you use this API unless*:
* You are using [VPC endpoints (Amazon Web Services PrivateLink) with
Timestream](https://docs.aws.amazon.com/timestream/latest/developerguide/VPCEndpoints)
* Your application uses a programming language that does not yet
have SDK support
* You require better control over the client-side implementation
For detailed information on how and when to use and implement DescribeEndpoints,
see [The Endpoint Discovery Pattern](https://docs.aws.amazon.com/timestream/latest/developerguide/Using.API.html#Using-API.endpoint-discovery).
"""
def describe_endpoints(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeEndpoints", input, options)
end
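# If you do fall into one of the cases above and need manual endpoint
# discovery, the call takes an empty input map (a sketch; the response shape
# is documented in the endpoint discovery guide linked above):
#
#     AWS.TimestreamWrite.describe_endpoints(client, %{})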
@doc """
Returns information about the table, including the table name, database name,
retention duration of the memory store and the magnetic store.
[Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.describe-table.html)
for details.
"""
def describe_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeTable", input, options)
end
@doc """
Returns a list of your Timestream databases.
[Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.list-db.html)
for details.
"""
def list_databases(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListDatabases", input, options)
end
@doc """
Returns a list of tables, along with the name, status, and retention properties
of each table.
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.list-table.html)
for details.
"""
def list_tables(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTables", input, options)
end
@doc """
List all tags on a Timestream resource.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForResource", input, options)
end
@doc """
Associate a set of tags with a Timestream resource.
You can then activate these user-defined tags so that they appear on the Billing
and Cost Management console for cost allocation tracking.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "TagResource", input, options)
end
@doc """
Removes the association of tags from a Timestream resource.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UntagResource", input, options)
end
@doc """
Modifies the KMS key for an existing database.
While updating the database, you must specify the database name and the
identifier of the new KMS key to be used (`KmsKeyId`). If there are any
concurrent `UpdateDatabase` requests, first writer wins.
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.update-db.html)
for details.
"""
def update_database(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateDatabase", input, options)
end
@doc """
Modifies the retention duration of the memory store and magnetic store for your
Timestream table.
Note that the change in retention duration takes effect immediately. For
example, if the retention period of the memory store was initially set to 2
hours and then changed to 24 hours, the memory store will be capable of holding
24 hours of data, but will be populated with 24 hours of data 22 hours after
this change was made. Timestream does not retrieve data from the magnetic store
to populate the memory store.
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.update-table.html)
for details.
"""
def update_table(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateTable", input, options)
end
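# The retention change described above can be sketched with the same input
# shape as CreateTable; for example, extending the memory store from 2 to 24
# hours (field names follow the Timestream API, values are illustrative):
#
#     AWS.TimestreamWrite.update_table(client, %{
#       "DatabaseName" => "my_database",
#       "TableName" => "my_table",
#       "RetentionProperties" => %{
#         "MemoryStoreRetentionPeriodInHours" => 24,
#         "MagneticStoreRetentionPeriodInDays" => 7
#       }
#     })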
@doc """
The WriteRecords operation enables you to write your time series data into
Timestream.
You can specify a single data point or a batch of data points to be inserted
into the system. Timestream offers a flexible schema that automatically detects
the column names and data types for your Timestream tables based on the
dimension names and data types of the data points you specify when invoking
writes into the database. Timestream supports eventually consistent read
semantics. This means that when you query data immediately after writing a batch
of data into Timestream, the query results might not reflect the results of a
recently completed write operation. The results may also include some stale
data. If you repeat the query request after a short time, the results should
return the latest data. [Service quotas apply](https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html).
See [code sample](https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.write.html)
for details.
## Upserts
You can use the `Version` parameter in a `WriteRecords` request to update data
points. Timestream tracks a version number with each record. `Version` defaults
to `1` when not specified for the record in the request. Timestream will update
an existing record’s measure value along with its `Version` upon receiving a
write request with a higher `Version` number for that record. Upon receiving an
update request where the measure value is the same as that of the existing
record, Timestream still updates `Version`, if it is greater than the existing
value of `Version`. You can update a data point as many times as desired, as
long as the value of `Version` continuously increases.
For example, suppose you write a new record without indicating `Version` in the
request. Timestream will store this record, and set `Version` to `1`. Now,
suppose you try to update this record with a `WriteRecords` request of the same
record with a different measure value but, like before, do not provide
`Version`. In this case, Timestream will reject this update with a
`RejectedRecordsException` since the updated record’s version is not greater
than the existing value of Version. However, if you were to resend the update
request with `Version` set to `2`, Timestream would then succeed in updating the
record’s value, and the `Version` would be set to `2`. Next, suppose you sent a
`WriteRecords` request with this same record and an identical measure value, but
with `Version` set to `3`. In this case, Timestream would only update `Version`
to `3`. Any further updates would need to send a version number greater than
`3`, or the update requests would receive a `RejectedRecordsException`.
"""
def write_records(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "WriteRecords", input, options)
end
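# A sketch of the upsert flow described above. The record fields follow the
# WriteRecords API shape; every name and value here is hypothetical. Sending
# this twice with an increasing "Version" updates the stored measure value:
#
#     record = %{
#       "Dimensions" => [%{"Name" => "host", "Value" => "web-01"}],
#       "MeasureName" => "cpu_utilization",
#       "MeasureValue" => "13.5",
#       "MeasureValueType" => "DOUBLE",
#       "Time" => "1636700000000",
#       "Version" => 2
#     }
#
#     AWS.TimestreamWrite.write_records(client, %{
#       "DatabaseName" => "my_database",
#       "TableName" => "my_table",
#       "Records" => [record]
#     })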
end
|
lib/aws/generated/timestream_write.ex
| 0.895191
| 0.603465
|
timestream_write.ex
|
starcoder
|