| code | path | quality_prob | learning_prob | filename | kind |
|---|---|---|---|---|---|
| stringlengths 114 to 1.05M | stringlengths 3 to 312 | float64 0.5 to 0.99 | float64 0.2 to 1 | stringlengths 3 to 168 | stringclasses 1 (value: starcoder) |
defmodule Aoc2021.Day9 do
@moduledoc """
See https://adventofcode.com/2021/day/9
"""
@spec solve_part1() :: non_neg_integer()
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day9/input.txt") do
path
|> read_map()
|> low_points()
|> risk_levels()
|> Enum.sum()
end
@spec solve_part2() :: non_neg_integer()
@spec solve_part2(Path.t()) :: non_neg_integer()
def solve_part2(path \\ "priv/day9/input.txt") do
map = read_map(path)
map
|> low_points()
|> Enum.map(fn lp -> basin(lp, map) end)
|> Enum.map(&length/1)
|> Enum.sort(:desc)
|> Enum.take(3)
|> Enum.product()
end
defp basin(p, map) do
# A basin is all locations that eventually flow downward to a single low
# point. Therefore, every low point has a basin, although some basins are
# very small. Locations of height 9 do not count as being in any basin, and
# all other locations will always be part of exactly one basin.
basin(p, map, MapSet.new())
end
defp basin({_, :border}, _, _), do: []
defp basin({_, 9}, _, _), do: []
defp basin({{r, c}, _} = p, map, seen) do
if MapSet.member?(seen, p) do
[]
else
[p1, p2, p3, p4] =
[{r + 1, c}, {r - 1, c}, {r, c + 1}, {r, c - 1}]
|> Enum.map(fn pos -> {pos, height(map, pos)} end)
seen = MapSet.put(seen, p)
n1 = basin(p1, map, seen)
seen = mark_seen(n1, seen)
n2 = basin(p2, map, seen)
seen = mark_seen(n2, seen)
n3 = basin(p3, map, seen)
seen = mark_seen(n3, seen)
n4 = basin(p4, map, seen)
[p] ++ n1 ++ n2 ++ n3 ++ n4
end
end
defp mark_seen(points, seen) do
Enum.reduce(points, seen, fn x, acc -> MapSet.put(acc, x) end)
end
defp height(map, p), do: Map.get(map, p, :border)
defp low_points(map) do
Enum.filter(map, fn p -> low_point?(p, map) end)
end
defp low_point?({pos, height}, map) do
pos
|> neighbour_heights(map)
|> Enum.all?(fn nh -> nh > height end)
end
defp border?(:border), do: true
defp border?(_), do: false
defp neighbour_heights({row, col}, map) do
[{row - 1, col}, {row + 1, col}, {row, col - 1}, {row, col + 1}]
|> Enum.map(fn pos -> height(map, pos) end)
|> Enum.reject(&border?/1)
end
defp risk_levels(points) do
Enum.map(points, fn {_, height} -> height + 1 end)
end
defp read_map(path) do
{_, map} =
path
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Enum.reduce({0, %{}}, fn line, {row, acc} ->
{
row + 1,
read_map_row(line, row, acc)
}
end)
map
end
defp read_map_row(line, row, acc) do
{_, map} =
line
|> String.graphemes()
|> Enum.reduce({0, acc}, fn char, {col, acc} ->
{
col + 1,
Map.put(acc, {row, col}, String.to_integer(char))
}
end)
map
end
end
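# Usage sketch (hypothetical input paths; both functions default to
# "priv/day9/input.txt"):
#
#     Aoc2021.Day9.solve_part1()
#     #=> sum of (height + 1) over every low point
#
#     Aoc2021.Day9.solve_part2("priv/day9/example.txt")
#     #=> product of the sizes of the three largest basins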
| path: lib/aoc2021/day9.ex | quality_prob: 0.756042 | learning_prob: 0.53443 | filename: day9.ex | kind: starcoder |
defmodule Piton.Pool do
@moduledoc """
`Piton.Pool` is a `GenServer` which will be in charge of a pool of `Piton.Port`s.
`Piton.Pool` will launch as many Python processes as you define in `pool_number` and share them among all the requests (executions)
it receives. It is also protected from Python exceptions: if Python code raises an exception that closes the port, a new one
will be opened and added to the pool.
## Start a Pool
```elixir
{:ok, pool} = Piton.Pool.start_link([module: MyPoolPort, pool_number: pool_number], [])
```
The arguments have to be in a keyword list and must contain:
module: a module which has to `use Piton.Port`
pool_number: the number of available Pythons.
## Run a Python code using the pool
```elixir
Piton.Pool.execute(pid_of_the_pool, elixir_function, list_of_arguments_of_elixir_function)
```
### Timeout
The `timeout` accepted by `Piton.Pool.execute/4` will be passed as the timeout to the `Piton.Port.execution` function.
"""
@timeout 5000
use GenServer
alias Piton.PoolFunctions
require Logger
def start_link(args, opts) do
GenServer.start_link(__MODULE__, args, opts)
end
@doc """
Executes the given function of the pool's module with the given arguments, using one of the available ports of the given pool.
"""
@spec execute(pid, atom, list, timeout) :: {:ok, any} | {:error, any}
def execute(pid, python_function, python_arguments, timeout \\ @timeout) do
GenServer.call(pid, {:execute, python_function, python_arguments, timeout}, timeout)
end
@doc """
It will return the number of available ports.
"""
@spec get_number_of_available_ports(pid) :: integer
def get_number_of_available_ports(pid), do: GenServer.call(pid, :number_of_available_ports)
@doc """
It will return the number of processes that are waiting for an available port.
"""
@spec get_number_of_waiting_processes(pid) :: integer
def get_number_of_waiting_processes(pid), do: GenServer.call(pid, :number_of_waiting_processes)
def init(module: module, pool_number: pool_number) do
py_ports = for _ <- 1..pool_number, do: module.start() |> elem(1)
Enum.each(py_ports, fn py_port -> Process.monitor(py_port) end)
{:ok, %{py_ports: py_ports, waiting_processes: [], module: module}}
end
def handle_call({:execute, python_function, python_arguments, timeout}, from, %{
py_ports: py_ports,
waiting_processes: waiting_processes,
module: module
}) do
pool = self()
case PoolFunctions.get_lifo(py_ports) do
{nil, new_py_ports} ->
new_waiting = {module, python_function, python_arguments, from, pool, timeout}
{:noreply,
%{
py_ports: new_py_ports,
waiting_processes: [new_waiting | waiting_processes],
module: module
}}
{py_port, new_py_ports} ->
Task.start(fn ->
run({py_port, module, python_function, python_arguments, from, pool, timeout})
end)
{:noreply,
%{py_ports: new_py_ports, waiting_processes: waiting_processes, module: module}}
end
end
def handle_call(:number_of_available_ports, _from, state) do
{:reply, length(state[:py_ports]), state}
end
def handle_call(:number_of_waiting_processes, _from, state) do
{:reply, length(state[:waiting_processes]), state}
end
def handle_cast(
{:return_py_port, py_port},
%{py_ports: py_ports, waiting_processes: waiting_processes} = state
) do
{new_py_ports, new_waiting_processes} =
check_and_run_waiting_process(py_port, py_ports, waiting_processes)
{:noreply, %{state | py_ports: new_py_ports, waiting_processes: new_waiting_processes}}
end
def handle_cast(_msg, state) do
{:noreply, state}
end
def handle_info(
{:DOWN, _ref, :process, _pid, _msg},
%{py_ports: py_ports, module: module, waiting_processes: waiting_processes} = state
) do
py_port = module.start() |> elem(1)
Process.monitor(py_port)
{new_py_ports, new_waiting_processes} =
check_and_run_waiting_process(py_port, py_ports, waiting_processes)
{:noreply, %{state | py_ports: new_py_ports, waiting_processes: new_waiting_processes}}
end
def handle_info(_msg, state) do
{:noreply, state}
end
defp run({py_port, python_module, python_function, python_arguments, from, pool, timeout}) do
try do
result = apply(python_module, python_function, [py_port] ++ python_arguments ++ [timeout])
GenServer.reply(from, {:ok, result})
GenServer.cast(pool, {:return_py_port, py_port})
catch
:exit, msg ->
Logger.error("Catched error during running a task in Piton.Pool: Exit !!")
GenServer.reply(from, {:error, msg})
end
end
defp check_and_run_waiting_process(new_py_port, py_ports, []) do
{PoolFunctions.push_lifo(new_py_port, py_ports), []}
end
defp check_and_run_waiting_process(new_py_port, py_ports, waiting_processes) do
[waiting_process | new_waiting_processes] = waiting_processes
Task.start(fn -> run(Tuple.insert_at(waiting_process, 0, new_py_port)) end)
{py_ports, new_waiting_processes}
end
end
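# Hypothetical usage sketch. `MyPoolPort` is an assumed module built on
# `Piton.Port`, exposing a `fun/3` wrapper that matches how `execute/4`
# applies it: (port, arguments, timeout).
#
#     {:ok, pool} = Piton.Pool.start_link([module: MyPoolPort, pool_number: 4], [])
#     {:ok, result} = Piton.Pool.execute(pool, :fun, [[1, 2, 3]])
#     Piton.Pool.get_number_of_available_ports(pool)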
| path: lib/piton/pool.ex | quality_prob: 0.746324 | learning_prob: 0.89616 | filename: pool.ex | kind: starcoder |
defmodule Instream do
@moduledoc """
InfluxDB driver for Elixir
## Connections
To connect to an InfluxDB server you need a connection module:
defmodule MyConnection do
use Instream.Connection, otp_app: :my_app
end
The `:otp_app` name and the name of the module can be freely chosen but have
to be linked to a corresponding configuration entry. This defined connection
module needs to be hooked up into your supervision tree:
children = [
# ...
MyConnection,
# ...
]
Example of the matching configuration entry:
# InfluxDB v2.x
config :my_app, MyConnection,
auth: [method: :token, token: "my_token"],
bucket: "my_default_bucket",
org: "my_default_org",
host: "my.influxdb.host",
version: :v2
# InfluxDB v1.x
config :my_app, MyConnection,
auth: [username: "my_username", password: "<PASSWORD>"],
database: "my_default_database",
host: "my.influxdb.host"
More details on connections and configuration options can be found with the
modules `Instream.Connection` and `Instream.Connection.Config`.
## Queries
To read data from your InfluxDB server you should send a query:
# Flux query
MyConnection.query(~s(
from(bucket: "\#{MyConnection.config(:bucket)}")
|> range(start: -5m)
|> filter(fn: (r) =>
r._measurement == "instream_examples"
)
|> first()
))
# InfluxQL query
MyConnection.query("SELECT * FROM instream_examples")
Most of the queries you send require a `:database` or
`:bucket`/`:organization` to operate on.
These values will be taken from your connection configuration by default.
By using the option argument of `MyConnection.query/2` you can pass different
values to use on a per-query basis:
MyConnection.query("... query ...", database: "my_other_database")
MyConnection.query(
"... query ...",
bucket: "my_other_bucket",
org: "my_other_organization"
)
Responses from a query will be decoded into maps by default.
Depending on your InfluxDB version you can use the `:result_as` option
argument to skip the decoding or request a non-default response type:
- `result_as: :csv`: response as CSV when using InfluxDB v1
- `result_as: :raw`: result as sent from the server without decoding
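For example, to fetch the undecoded server response using the query from
the example above:
MyConnection.query("SELECT * FROM instream_examples", result_as: :raw)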
### Query Language Selection
Depending on your configured InfluxDB version all queries will be treated
as `:flux` (v2) or `:influxql` by default. You can send a query in the
non-default language by passing the `:query_language` option:
MyConnection.query("... query ...", query_language: :flux)
MyConnection.query("... query ...", query_language: :influxql)
### Query Parameter Binding (InfluxDB v1.x)
Queries can be parameterized, for example when you are dealing with
untrusted user input:
MyConnection.query(
"SELECT * FROM some_measurement WHERE field = $field_param",
params: %{field_param: "some_value"}
)
### POST Queries (InfluxDB v1.x)
Some queries require you to switch from the regular `read only context`
(all GET requests) to a `write context` (all POST requests).
When not using the query builder you have to pass that information
manually to `query/2`:
MyConnection.query(
"CREATE DATABASE create_in_write_mode",
method: :post
)
### Query Timeout Configuration
If you find your queries running into timeouts (e.g. `:hackney` not waiting
long enough for a response) you can pass an option to the query call:
MyConnection.query(query, http_opts: [recv_timeout: 250])
This value can also be set as a default using your
[HTTP client configuration](`Instream.Connection.Config`).
A passed configuration will take precedence over the connection configuration.
## Writing Points
Writing data to your InfluxDB server is done using either `Instream.Series`
modules or raw maps.
Depending on your [connection configuration](`Instream.Connection.Config`)
the selected writer module provides additional options.
The write function can be used with a single or multiple data points:
MyConnection.write(point)
MyConnection.write([point_1, point_2])
### Writing Points using Series
Each series in your database can be represented using a definition module:
defmodule MySeries do
use Instream.Series
series do
measurement "my_measurement"
tag :bar
tag :foo
field :value
end
end
This module will provide you with a struct you can use to define points
you want to write to your database:
MyConnection.write(%MySeries{
fields: %MySeries.Fields{value: 17},
tags: %MySeries.Tags{bar: "bar", foo: "foo"}
})
More information about series definitions can be found in the
module documentation of `Instream.Series`.
### Writing Points using Plain Maps
As an alternative you can use a non-struct map to write points to a database:
MyConnection.write(
%{
measurement: "my_measurement",
fields: %{answer: 42, value: 1},
tags: %{foo: "bar"},
timestamp: 1_439_587_926_000_000_000
},
# more points possible ...
)
The field `:timestamp` is optional. InfluxDB will use the receive time of
the write request if it is missing.
"""
end
| path: lib/instream.ex | quality_prob: 0.908674 | learning_prob: 0.418192 | filename: instream.ex | kind: starcoder |
defmodule BSV.Util.VarBin do
@moduledoc """
Module for parsing and serializing variable length binary data as integers,
binaries and structs.
"""
@doc """
Parses the given binary into an integer. Returns a tuple containing the
decoded integer and any remaining binary data.
## Examples
iex> BSV.Util.VarBin.parse_int(<<253, 4, 1>>)
{260, ""}
iex> BSV.Util.VarBin.parse_int(<<254, 0, 225, 245, 5>>)
{100_000_000, ""}
"""
@spec parse_int(binary | IO.device()) :: {integer, binary}
def parse_int(<<253, size::little-16, data::binary>> = binary) when is_binary(binary),
do: {size, data}
def parse_int(<<254, size::little-32, data::binary>> = binary) when is_binary(binary),
do: {size, data}
def parse_int(<<255, size::little-64, data::binary>> = binary) when is_binary(binary),
do: {size, data}
def parse_int(<<size::integer, data::binary>> = binary) when is_binary(binary), do: {size, data}
def parse_int(file) when not is_binary(file) do
size =
case file |> IO.binread(1) do
<<253>> ->
<<size::little-16>> = file |> IO.binread(2)
size
<<254>> ->
<<size::little-32>> = file |> IO.binread(4)
size
<<255>> ->
<<size::little-64>> = file |> IO.binread(8)
size
<<size::integer>> ->
size
end
{size, file}
end
@doc """
Serializes the given integer into a binary.
## Examples
iex> BSV.Util.VarBin.serialize_int(260)
<<253, 4, 1>>
iex> BSV.Util.VarBin.serialize_int(100_000_000)
<<254, 0, 225, 245, 5>>
"""
@spec serialize_int(integer) :: binary
def serialize_int(int) when int < 253, do: <<int::integer>>
def serialize_int(int) when int < 0x10000, do: <<253, int::little-16>>
def serialize_int(int) when int < 0x100000000, do: <<254, int::little-32>>
def serialize_int(int), do: <<255, int::little-64>>
@doc """
Parses the given binary into a chunk of binary data, using the first byte(s)
to determine the size of the chunk. Returns a tuple containing the chunk and
any remaining binary data.
## Examples
iex> BSV.Util.VarBin.parse_bin(<<5, 104, 101, 108, 108, 111>>)
{"hello", ""}
"""
@spec parse_bin(binary) :: {binary, binary}
def parse_bin(data) do
{size, data} = parse_int(data)
data |> read_bytes(size)
end
@doc """
Prefixes the given binary with a variable length integer to indicate the size
of the following binary.
## Examples
iex> BSV.Util.VarBin.serialize_bin("hello")
<<5, 104, 101, 108, 108, 111>>
"""
@spec serialize_bin(binary) :: binary
def serialize_bin(data) do
size =
data
|> byte_size
|> serialize_int
size <> data
end
@doc """
Parses the given binary into a list of parsed structs, using the first byte(s)
to determine the number of items, and calling the given callback to parse each
respective chunk of data.
Returns a tuple containing a list of parsed items and any remaining binary data.
## Examples
BSV.Util.VarBin.parse_items(data, &BSV.Transaction.Input.parse/1)
{[
%BSV.Transaction.Input{},
%BSV.Transaction.Input{}
], ""}
"""
@spec parse_items(binary, function) :: {list, binary}
def parse_items(data, callback) when is_function(callback) do
{size, data} = parse_int(data)
parse_items(data, size, [], callback)
end
defp parse_items(data, 0, items, _cb), do: {Enum.reverse(items), data}
defp parse_items(data, size, items, cb) do
{item, data} = cb.(data)
items =
if item do
[item | items]
else
items
end
parse_items(data, size - 1, items, cb)
end
@doc """
Serializes the given list of items into a binary, first by prefixing the
binary with a variable length integer to indicate the number of items, and
then by calling the given callback to serialize each respective item.
## Examples
[
%BSV.Transaction.Input{},
%BSV.Transaction.Input{}
]
|> BSV.Util.VarBin.serialize_items(&BSV.Transaction.Input.serialize/1)
<< data >>
"""
@spec serialize_items(list, function) :: binary
def serialize_items(items, callback) when is_function(callback) do
size = length(items) |> serialize_int
serialize_items(items, size, callback)
end
defp serialize_items([], data, _cb), do: data
defp serialize_items([item | items], data, callback) do
bin = callback.(item)
serialize_items(items, data <> bin, callback)
end
@spec read_bytes(binary() | IO.device(), non_neg_integer()) ::
{binary(), binary() | IO.device()}
def read_bytes(data, size) when is_binary(data) do
<<block_bytes::binary-size(size), rest::binary>> = data
{block_bytes, rest}
end
def read_bytes(file, size) do
data = IO.binread(file, size)
if is_binary(data) do
{data, file}
else
require Logger
Logger.error("read bytes: #{inspect(data)}")
{"", file}
end
end
end
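# Round-trip sketch, following the doctests above:
#
#     "hello"
#     |> BSV.Util.VarBin.serialize_bin()
#     |> BSV.Util.VarBin.parse_bin()
#     #=> {"hello", ""}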
| path: lib/bsv/util/var_bin.ex | quality_prob: 0.853913 | learning_prob: 0.550064 | filename: var_bin.ex | kind: starcoder |
defmodule Chain.State do
alias Chain.Account
# @enforce_keys [:store]
defstruct accounts: %{}, hash: nil, store: nil
@type t :: %Chain.State{accounts: map(), hash: binary() | nil}
def new() do
%Chain.State{}
end
def compact(%Chain.State{accounts: accounts} = state) do
accounts =
Enum.map(accounts, fn {id, acc} -> {id, Account.compact(acc)} end)
|> Map.new()
%Chain.State{state | accounts: accounts}
|> Map.delete(:store)
end
def normalize(%Chain.State{hash: nil, accounts: accounts} = state) do
accounts =
accounts
|> Enum.map(fn {id, acc} -> {id, Account.normalize(acc)} end)
|> Map.new()
state = %Chain.State{state | accounts: accounts}
# :store may be missing because it was added to the schema later
state = Map.put(state, :store, tree(state))
%Chain.State{state | hash: hash(state)}
end
def normalize(%Chain.State{} = state) do
state
end
# :store may be missing because it was added to the schema later
def tree(%Chain.State{accounts: accounts, store: store}) when is_tuple(store) do
items =
Enum.map(accounts, fn {id, acc} -> {id, Account.hash(acc)} end)
|> Map.new()
store =
Enum.reduce(MerkleTree.to_list(store), store, fn {id, _hash}, store ->
if not Map.has_key?(items, id) do
MerkleTree.delete(store, id)
else
store
end
end)
MerkleTree.insert_items(store, items)
end
def tree(%Chain.State{accounts: accounts}) do
items = Enum.map(accounts, fn {id, acc} -> {id, Account.hash(acc)} end)
MerkleTree.new()
|> MerkleTree.insert_items(items)
end
def hash(%Chain.State{hash: nil} = state) do
MerkleTree.root_hash(tree(state))
end
def hash(%Chain.State{hash: hash}) do
hash
end
def accounts(%Chain.State{accounts: accounts}) do
accounts
end
@spec account(Chain.State.t(), <<_::160>>) :: Chain.Account.t() | nil
def account(%Chain.State{accounts: accounts}, id = <<_::160>>) do
Map.get(accounts, id)
end
@spec ensure_account(Chain.State.t(), <<_::160>> | Wallet.t() | non_neg_integer()) ::
Chain.Account.t()
def ensure_account(state = %Chain.State{}, id = <<_::160>>) do
case account(state, id) do
nil -> Chain.Account.new(nonce: 0)
acc -> acc
end
end
def ensure_account(state = %Chain.State{}, id) when is_integer(id) do
ensure_account(state, <<id::unsigned-size(160)>>)
end
def ensure_account(state = %Chain.State{}, id) do
ensure_account(state, Wallet.address!(id))
end
@spec set_account(Chain.State.t(), binary(), Chain.Account.t()) :: Chain.State.t()
def set_account(state = %Chain.State{accounts: accounts}, id = <<_::160>>, account) do
%{state | accounts: Map.put(accounts, id, account), hash: nil}
end
@spec delete_account(Chain.State.t(), binary()) :: Chain.State.t()
def delete_account(state = %Chain.State{accounts: accounts}, id = <<_::160>>) do
%{state | accounts: Map.delete(accounts, id), hash: nil}
end
def difference(%Chain.State{} = state_a, %Chain.State{} = state_b) do
diff = MerkleTree.difference(tree(state_a), tree(state_b))
Enum.map(diff, fn {id, _} ->
acc_a = ensure_account(state_a, id)
acc_b = ensure_account(state_b, id)
delta = %{
nonce: {Account.nonce(acc_a), Account.nonce(acc_b)},
balance: {Account.balance(acc_a), Account.balance(acc_b)},
code: {Account.code(acc_a), Account.code(acc_b)}
}
report = %{state: MerkleTree.difference(Account.tree(acc_a), Account.tree(acc_b))}
report =
Enum.reduce(delta, report, fn {key, {a, b}}, report ->
if a == b do
report
else
Map.put(report, key, {a, b})
end
end)
{id, report}
end)
end
def apply_difference(%Chain.State{} = state, difference) do
Enum.reduce(difference, state, fn {id, report}, state ->
acc = ensure_account(state, id)
acc =
Enum.reduce(report, acc, fn {key, delta}, acc ->
case key do
:state ->
Enum.reduce(delta, acc, fn {key, {a, b}}, acc ->
tree = Account.tree(acc)
^a = MerkleTree.get(tree, key)
tree = MerkleTree.insert(tree, key, b)
Account.put_tree(acc, tree)
end)
_other ->
{a, b} = delta
^a = apply(Account, key, [acc])
%{acc | key => b}
end
end)
set_account(state, id, acc)
end)
end
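# Sketch: `difference/2` pairs with `apply_difference/2`, so replaying the
# delta onto the first state reproduces the second (up to normalization):
#
#     diff = Chain.State.difference(state_a, state_b)
#     Chain.State.apply_difference(state_a, diff)
#     #=> a state whose accounts match state_b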
# ========================================================
# File Import / Export
# ========================================================
@spec to_binary(Chain.State.t()) :: binary
def to_binary(state) do
Enum.reduce(accounts(state), Map.new(), fn {id, acc}, map ->
Map.put(map, id, %{
nonce: acc.nonce,
balance: acc.balance,
data: Account.tree(acc) |> MerkleTree.to_list(),
code: acc.code
})
end)
|> BertInt.encode!()
end
def from_binary(bin) do
map = BertInt.decode!(bin)
Enum.reduce(map, new(), fn {id, acc}, state ->
set_account(state, id, %Chain.Account{
nonce: acc.nonce,
balance: acc.balance,
storage_root: MapMerkleTree.new() |> MerkleTree.insert_items(acc.data),
code: acc.code
})
end)
end
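# Round-trip sketch (assumes BertInt and MapMerkleTree from this codebase):
#
#     bin = Chain.State.to_binary(state)
#     state2 = Chain.State.from_binary(bin)
#     # state2 holds the same nonces, balances, code and storage items,
#     # rebuilt on fresh MapMerkleTree stores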
end
| path: lib/chain/state.ex | quality_prob: 0.705684 | learning_prob: 0.532668 | filename: state.ex | kind: starcoder |
defmodule Animina.Accounts.User do
use Ecto.Schema
import Ecto.Changeset
@derive {Inspect, except: [:password]}
schema "users" do
field :email, :string
field :password, :string, virtual: true
field :hashed_password, :string
field :confirmed_at, :naive_datetime
field :first_name, :string
field :last_name, :string
field :gender, :string
field :username, :string
field :email_md5sum, :string
field :birthday, :date
field :about, :string
field :homepage, :string
field :lang, :string
field :timezone, :string
field :points, :integer
field :lifetime_points, :integer
field :coupon_code, :string
field :redeemed_coupon_code, :string
has_many :messages, Animina.Chats.Message
has_many :received_transfers, Animina.Points.Transfer, foreign_key: :receiver_id
has_many :teams, Animina.Games.Team, foreign_key: :owner_id, on_delete: :delete_all
timestamps()
end
@doc """
A user changeset for registration.
It is important to validate the length of both email and password.
Otherwise databases may truncate the email without warnings, which
could lead to unpredictable or insecure behaviour. Long passwords may
also be very expensive to hash for certain algorithms.
## Options
* `:hash_password` - Hashes the password so it can be stored securely
in the database and ensures the password field is cleared to prevent
leaks in the logs. If password hashing is not needed and clearing the
password field is not desired (like when using this changeset for
validations on a LiveView form), this option can be set to `false`.
Defaults to `true`.
"""
def registration_changeset(user, attrs, opts \\ []) do
user
|> cast(attrs, [
:email,
:password,
:first_name,
:last_name,
:gender,
:birthday,
:username,
:lang,
:timezone,
:redeemed_coupon_code
])
|> validate_length(:username, min: 2, max: 40)
|> validate_email()
|> validate_required([:email, :username, :lang, :gender, :last_name, :first_name, :timezone])
|> downcase_username
|> create_and_set_coupon_code
|> validate_redeemed_coupon_code
|> validate_inclusion(:gender, ["male", "female", "other", "prefer not to say"])
|> validate_inclusion(:lang, ["de", "en"])
|> validate_inclusion(:timezone, TimeZoneInfo.time_zones())
|> validate_exclusion(:birthday, [~D[1900-01-01]], message: "please enter your real birthday")
|> unique_constraint([:username, :email])
|> unique_constraint([:username, :coupon_code])
|> validate_length(:first_name, max: 255)
|> validate_length(:last_name, max: 255)
|> validate_password(opts)
end
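# Sketch: as the options above describe, LiveView-style validations can skip
# hashing and keep the password field:
#
#     user
#     |> Animina.Accounts.User.registration_changeset(attrs, hash_password: false)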
defp create_and_set_coupon_code(changeset) do
put_change(changeset, :coupon_code, CouponCode.generate())
end
defp validate_redeemed_coupon_code(changeset) do
case get_change(changeset, :redeemed_coupon_code) do
nil ->
changeset
redeemed_coupon_code ->
case CouponCode.validate(redeemed_coupon_code) do
{:ok, validated_coupon_code} ->
put_change(changeset, :redeemed_coupon_code, validated_coupon_code)
_ ->
changeset
end
end
end
defp downcase_email_address(changeset) do
update_change(changeset, :email, &String.downcase/1)
end
defp downcase_username(changeset) do
update_change(changeset, :username, &String.downcase/1)
end
defp fill_email_md5sum(changeset) do
if email = get_change(changeset, :email) do
email_md5sum =
:crypto.hash(:md5, email)
|> Base.encode16()
|> String.downcase()
put_change(changeset, :email_md5sum, email_md5sum)
else
changeset
end
end
defp validate_email(changeset) do
changeset
|> validate_required([:email])
|> downcase_email_address
|> validate_format(:email, ~r/^[^\s]+@[^\s]+$/, message: "must have the @ sign and no spaces")
|> validate_length(:email, min: 5, max: 255)
|> unsafe_validate_unique(:email, Animina.Repo)
|> unique_constraint([:email, :username])
|> fill_email_md5sum
end
defp validate_password(changeset, opts) do
changeset
|> validate_required([:password])
|> validate_length(:password,
min: Application.fetch_env!(:animina, :min_password_length),
max: 80
)
|> maybe_hash_password(opts)
end
defp maybe_hash_password(changeset, opts) do
hash_password? = Keyword.get(opts, :hash_password, true)
password = get_change(changeset, :password)
if hash_password? && password && changeset.valid? do
changeset
|> put_change(:hashed_password, Bcrypt.hash_pwd_salt(password))
|> delete_change(:password)
else
changeset
end
end
@doc """
A user changeset for changing the email.
It requires the email to change otherwise an error is added.
"""
def email_changeset(user, attrs) do
user
|> cast(attrs, [:email])
|> validate_email()
|> case do
%{changes: %{email: _}} = changeset -> changeset
%{} = changeset -> add_error(changeset, :email, "did not change")
end
end
@doc """
A user changeset for changing the password.
## Options
* `:hash_password` - Hashes the password so it can be stored securely
in the database and ensures the password field is cleared to prevent
leaks in the logs. If password hashing is not needed and clearing the
password field is not desired (like when using this changeset for
validations on a LiveView form), this option can be set to `false`.
Defaults to `true`.
"""
def password_changeset(user, attrs, opts \\ []) do
user
|> cast(attrs, [:password])
|> validate_confirmation(:password, message: "does not match password")
|> validate_password(opts)
end
@doc """
Confirms the account by setting `confirmed_at`.
"""
def confirm_changeset(user) do
now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second)
change(user, confirmed_at: now)
end
@doc """
Verifies the password.
If there is no user or the user doesn't have a password, we call
`Bcrypt.no_user_verify/0` to avoid timing attacks.
"""
def valid_password?(%Animina.Accounts.User{hashed_password: hashed_password}, password)
when is_binary(hashed_password) and byte_size(password) > 0 do
Bcrypt.verify_pass(password, hashed_password)
end
def valid_password?(_, _) do
Bcrypt.no_user_verify()
false
end
@doc """
Validates the current password otherwise adds an error to the changeset.
"""
def validate_current_password(changeset, password) do
if valid_password?(changeset.data, password) do
changeset
else
add_error(changeset, :current_password, "is not valid")
end
end
def changeset(user, attrs) do
user
|> cast(attrs, [
:first_name,
:last_name,
:gender,
:birthday,
:username,
:about,
:homepage,
:lang,
:timezone,
:points,
:lifetime_points
])
|> validate_required([
:first_name,
:last_name,
:gender,
:birthday,
:username,
:lang,
:timezone,
:points,
:lifetime_points
])
|> validate_length(:username, min: 2, max: 40)
|> downcase_username
|> validate_inclusion(:gender, ["male", "female", "other", "prefer not to say"])
|> validate_exclusion(:birthday, [~D[1900-01-01]], message: "please enter your real birthday")
|> unique_constraint(:username)
|> validate_length(:first_name, max: 255)
|> validate_length(:last_name, max: 255)
|> validate_length(:about, max: 512)
|> validate_inclusion(:lang, ["de", "en"])
|> validate_inclusion(:timezone, TimeZoneInfo.time_zones())
|> validate_number(:points, greater_than_or_equal_to: 0)
|> validate_number(:lifetime_points, greater_than_or_equal_to: 0)
end
end
| path: lib/animina/accounts/user.ex | quality_prob: 0.593374 | learning_prob: 0.423398 | filename: user.ex | kind: starcoder |
defmodule Guardian.Plug.VerifyHeader do
@moduledoc """
Use this plug to verify a token contained in the header.
You should set the value of the Authorization header to:
Authorization: <jwt>
## Example
plug Guardian.Plug.VerifyHeader
## Example
plug Guardian.Plug.VerifyHeader, key: :secret
Verifying the token will update the claims on the request,
available with Guardian.Plug.claims/1.
In the case of an error, the claims will be set to `{:error, reason}`.
A "realm" can be specified when using the plug.
Realms are like the name of the token and allow many tokens
to be sent with a single request.
plug Guardian.Plug.VerifyHeader, realm: "Bearer"
When a realm is not specified,
the first authorization header found is used, and assumed to be a raw token
#### example
plug Guardian.Plug.VerifyHeader
# will take the first auth header
# Authorization: <jwt>
"""
def init(opts \\ %{}) do
opts_map = Enum.into(opts, %{})
realm = Map.get(opts_map, :realm)
if realm do
{:ok, reg} = Regex.compile("#{realm}:?\\s+(.*)$", "i")
Map.put(opts_map, :realm_reg, reg)
else
opts_map
end
end
def call(conn, opts) do
key = Map.get(opts, :key, :default)
case Guardian.Plug.claims(conn, key) do
{:ok, _} -> conn
{:error, :no_session} ->
verify_token(conn, fetch_token(conn, opts), key)
_ -> conn
end
end
defp verify_token(conn, nil, _), do: conn
defp verify_token(conn, "", _), do: conn
defp verify_token(conn, token, key) do
case Guardian.decode_and_verify(token, %{}) do
{:ok, claims} ->
conn
|> Guardian.Plug.set_claims({:ok, claims}, key)
|> Guardian.Plug.set_current_token(token, key)
{:error, reason} ->
Guardian.Plug.set_claims(conn, {:error, reason}, key)
end
end
defp fetch_token(conn, opts) do
fetch_token(conn, opts, Plug.Conn.get_req_header(conn, "authorization"))
end
defp fetch_token(_, _, []), do: nil
defp fetch_token(conn, opts = %{realm_reg: reg}, [token | tail]) do
trimmed_token = String.trim(token)
case Regex.run(reg, trimmed_token) do
[_, match] -> String.trim(match)
_ -> fetch_token(conn, opts, tail)
end
end
defp fetch_token(_, _, [token | _tail]), do: String.trim(token)
end
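# Hypothetical Phoenix pipeline sketch using the realm option documented above:
#
#     pipeline :api do
#       plug Guardian.Plug.VerifyHeader, realm: "Bearer"
#     end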
| path: lib/guardian/plug/verify_header.ex | quality_prob: 0.674479 | learning_prob: 0.57078 | filename: verify_header.ex | kind: starcoder |
defmodule Tensorflow.OpDef.ArgDef do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
description: String.t(),
type: Tensorflow.DataType.t(),
type_attr: String.t(),
number_attr: String.t(),
type_list_attr: String.t(),
is_ref: boolean
}
defstruct [
:name,
:description,
:type,
:type_attr,
:number_attr,
:type_list_attr,
:is_ref
]
field(:name, 1, type: :string)
field(:description, 2, type: :string)
field(:type, 3, type: Tensorflow.DataType, enum: true)
field(:type_attr, 4, type: :string)
field(:number_attr, 5, type: :string)
field(:type_list_attr, 6, type: :string)
field(:is_ref, 16, type: :bool)
end
defmodule Tensorflow.OpDef.AttrDef do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
type: String.t(),
default_value: Tensorflow.AttrValue.t() | nil,
description: String.t(),
has_minimum: boolean,
minimum: integer,
allowed_values: Tensorflow.AttrValue.t() | nil
}
defstruct [
:name,
:type,
:default_value,
:description,
:has_minimum,
:minimum,
:allowed_values
]
field(:name, 1, type: :string)
field(:type, 2, type: :string)
field(:default_value, 3, type: Tensorflow.AttrValue)
field(:description, 4, type: :string)
field(:has_minimum, 5, type: :bool)
field(:minimum, 6, type: :int64)
field(:allowed_values, 7, type: Tensorflow.AttrValue)
end
defmodule Tensorflow.OpDef do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
input_arg: [Tensorflow.OpDef.ArgDef.t()],
output_arg: [Tensorflow.OpDef.ArgDef.t()],
control_output: [String.t()],
attr: [Tensorflow.OpDef.AttrDef.t()],
deprecation: Tensorflow.OpDeprecation.t() | nil,
summary: String.t(),
description: String.t(),
is_commutative: boolean,
is_aggregate: boolean,
is_stateful: boolean,
allows_uninitialized_input: boolean
}
defstruct [
:name,
:input_arg,
:output_arg,
:control_output,
:attr,
:deprecation,
:summary,
:description,
:is_commutative,
:is_aggregate,
:is_stateful,
:allows_uninitialized_input
]
field(:name, 1, type: :string)
field(:input_arg, 2, repeated: true, type: Tensorflow.OpDef.ArgDef)
field(:output_arg, 3, repeated: true, type: Tensorflow.OpDef.ArgDef)
field(:control_output, 20, repeated: true, type: :string)
field(:attr, 4, repeated: true, type: Tensorflow.OpDef.AttrDef)
field(:deprecation, 8, type: Tensorflow.OpDeprecation)
field(:summary, 5, type: :string)
field(:description, 6, type: :string)
field(:is_commutative, 18, type: :bool)
field(:is_aggregate, 16, type: :bool)
field(:is_stateful, 17, type: :bool)
field(:allows_uninitialized_input, 19, type: :bool)
end
defmodule Tensorflow.OpDeprecation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
version: integer,
explanation: String.t()
}
defstruct [:version, :explanation]
field(:version, 1, type: :int32)
field(:explanation, 2, type: :string)
end
defmodule Tensorflow.OpList do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
op: [Tensorflow.OpDef.t()]
}
defstruct [:op]
field(:op, 1, repeated: true, type: Tensorflow.OpDef)
end
| path: lib/tensorflow/core/framework/op_def.pb.ex | quality_prob: 0.819605 | learning_prob: 0.616287 | filename: op_def.pb.ex | kind: starcoder |
defmodule AWS.GameLift do
@moduledoc """
Amazon GameLift Service
GameLift provides solutions for hosting session-based multiplayer game servers
in the cloud, including tools for deploying, operating, and scaling game
servers.
Built on AWS global computing infrastructure, GameLift helps you deliver
high-performance, high-reliability, low-cost game servers while dynamically
scaling your resource usage to meet player demand.
## About GameLift solutions
Get more information on these GameLift solutions in the [Amazon GameLift Developer Guide](http://docs.aws.amazon.com/gamelift/latest/developerguide/).
* Managed GameLift -- GameLift offers a fully managed service to set
up and maintain computing machines for hosting, manage game session and player
session life cycle, and handle security, storage, and performance tracking. You
can use automatic scaling tools to balance hosting costs against meeting player
demand, configure your game session management to minimize player latency, or
add FlexMatch for matchmaking.
* Managed GameLift with Realtime Servers – With GameLift Realtime
Servers, you can quickly configure and set up game servers for your game.
Realtime Servers provides a game server framework with core Amazon GameLift
infrastructure already built in.
* GameLift FleetIQ – Use GameLift FleetIQ as a standalone feature
while managing your own EC2 instances and Auto Scaling groups for game hosting.
GameLift FleetIQ provides optimizations that make low-cost Spot Instances viable
for game hosting.
## About this API Reference
This reference guide describes the low-level service API for Amazon GameLift.
You can find links to language-specific SDK guides and the AWS CLI reference
with each operation and data type topic. Useful links:
* [GameLift API operations listed by tasks](https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html)
* [ GameLift tools and resources](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html)
"""
@doc """
Registers a player's acceptance or rejection of a proposed FlexMatch match.
A matchmaking configuration may require player acceptance; if so, then matches
built with that configuration cannot be completed unless all players accept the
proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the
proposed match are placed into status `REQUIRES_ACCEPTANCE`. This is a trigger
for your game to get acceptance from all players in the ticket. Acceptances are
only valid for tickets when they are in this status; all other acceptances
result in an error.
To register acceptance, specify the ticket ID, a response, and one or more
players. Once all players have registered acceptance, the matchmaking tickets
advance to status `PLACING`, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a
specified timeout, the proposed match is dropped. The matchmaking tickets are
then handled in one of two ways: For tickets where one or more players rejected
the match, the ticket status is returned to `SEARCHING` to find a new match. For
tickets where one or more players failed to respond, the ticket status is set to
`CANCELLED`, and processing is terminated. A new matchmaking request for these
players can be submitted as needed.
## Learn more
[ Add FlexMatch to a Game Client](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html)
[ FlexMatch Events Reference](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-events.html)
## Related operations
* `StartMatchmaking`
* `DescribeMatchmaking`
* `StopMatchmaking`
* `AcceptMatch`
* `StartMatchBackfill`
"""
def accept_match(client, input, options \\ []) do
request(client, "AcceptMatch", input, options)
end
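# Hypothetical call sketch; the input keys follow the AcceptMatch request
# shape (ticket ID, the accepting players, and an acceptance type):
#
#     AWS.GameLift.accept_match(client, %{
#       "TicketId" => "ticket-1234",
#       "PlayerIds" => ["player-1", "player-2"],
#       "AcceptanceType" => "ACCEPT"
#     })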
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Locates an available game server and temporarily reserves it to host gameplay
and players.
This operation is called from a game client or client service (such as a
matchmaker) to request hosting resources for a new game session. In response,
GameLift FleetIQ locates an available game server, places it in `CLAIMED` status
for 60 seconds, and returns connection information that players can use to
connect to the game server.
To claim a game server, identify a game server group. You can also specify a
game server ID, although this approach bypasses GameLift FleetIQ placement
optimization. Optionally, include game data to pass to the game server at the
start of a game session, such as a game map or player information.
When a game server is successfully claimed, connection information is returned.
A claimed game server's utilization status remains `AVAILABLE` while the claim
status is set to `CLAIMED` for up to 60 seconds. This time period gives the game
server time to update its status to `UTILIZED` (using `UpdateGameServer`) once
players join. If the game server's status is not updated within 60 seconds, the
game server reverts to unclaimed status and is available to be claimed by
another request. The claim time period is a fixed value and is not configurable.
If you try to claim a specific game server, this request will fail in the
following cases:
* If the game server utilization status is `UTILIZED`.
* If the game server claim status is `CLAIMED`.
When claiming a specific game server, this request will succeed even if the game
server is running on an instance in `DRAINING` status. To avoid this, first
check the instance status by calling `DescribeGameServerInstances`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
"""
def claim_game_server(client, input, options \\ []) do
request(client, "ClaimGameServer", input, options)
end
@doc """
Creates an alias for a fleet.
In most situations, you can use an alias ID in place of a fleet ID. An alias
provides a level of abstraction for a fleet that is useful when redirecting
player traffic from one fleet to another, such as when updating your game build.
Amazon GameLift supports two types of routing strategies for aliases: simple and
terminal. A simple alias points to an active fleet. A terminal alias is used to
display messaging or link to a URL instead of routing players to an active
fleet. For example, you might use a terminal alias when a game version is no
longer supported and you want to direct players to an upgrade site.
To create a fleet alias, specify an alias name, routing strategy, and optional
description. Each simple alias can point to only one fleet, but a fleet can have
multiple aliases. If successful, a new alias record is returned, including an
alias ID and an ARN. You can reassign an alias to another fleet by calling
`UpdateAlias`.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def create_alias(client, input, options \\ []) do
request(client, "CreateAlias", input, options)
end
@doc """
Creates a new Amazon GameLift build resource for your game server binary files.
Game server binaries must be combined into a zip file for use with Amazon
GameLift.
When setting up a new game build for GameLift, we recommend using the AWS CLI
command **[upload-build](https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html)**. This helper command combines two tasks: (1) it uploads your build files from
a file directory to a GameLift Amazon S3 location, and (2) it creates a new
build resource.
The `CreateBuild` operation can be used in the following scenarios:
* To create a new game build with build files that are in an S3
location under an AWS account that you control. To use this option, you must
first give Amazon GameLift access to the S3 bucket. With permissions in place,
call `CreateBuild` and specify a build name, operating system, and the S3
storage location of your game build.
* To directly upload your build files to a GameLift S3 location. To
use this option, first call `CreateBuild` and specify a build name and operating
system. This operation creates a new build resource and also returns an S3
location with temporary access credentials. Use the credentials to manually
upload your build files to the specified S3 location. For more information, see
[Uploading
Objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html)
in the *Amazon S3 Developer Guide*. Build files can be uploaded to the GameLift
S3 location only once; they cannot be updated afterward.
If successful, this operation creates a new build resource with a unique build
ID and places it in `INITIALIZED` status. A build must be in `READY` status
before you can create fleets with it.
## Learn more
[Uploading Your Game](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
[ Create a Build with Files in Amazon S3](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def create_build(client, input, options \\ []) do
request(client, "CreateBuild", input, options)
end
@doc """
Creates a new fleet to run your game servers,
whether they are custom game builds or Realtime Servers with a game-specific
script. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances,
each of which can host multiple game sessions. When creating a fleet, you choose
the hardware specifications, set some configuration options, and specify the
game server to deploy on the new fleet.
To create a new fleet, provide the following: (1) a fleet name, (2) an EC2
instance type and fleet type (spot or on-demand), (3) the build ID for your game
build or script ID if using Realtime Servers, and (4) a runtime configuration,
which determines how game servers will run on each instance in the fleet.
If the `CreateFleet` call is successful, Amazon GameLift performs the following
tasks. You can track the process of a fleet by checking the fleet status or by
monitoring fleet creation events:
* Creates a fleet resource. Status: `NEW`.
* Begins writing events to the fleet event log, which can be
accessed in the Amazon GameLift console.
* Sets the fleet's target capacity to 1 (desired instances), which
triggers Amazon GameLift to start one new EC2 instance.
* Downloads the game build or Realtime script to the new instance
and installs it. Statuses: `DOWNLOADING`, `VALIDATING`, `BUILDING`.
* Starts launching server processes on the instance. If the fleet is
configured to run multiple server processes per instance, Amazon GameLift
staggers each process launch by a few seconds. Status: `ACTIVATING`.
* Sets the fleet's status to `ACTIVE` as soon as one server process
is ready to host a game session.
## Learn more
[Setting Up Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[Debug Fleet Creation Issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def create_fleet(client, input, options \\ []) do
request(client, "CreateFleet", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Creates a GameLift FleetIQ game server group for managing game hosting on a
collection of Amazon EC2 instances for game hosting.
This operation creates the game server group, creates an Auto Scaling group in
your AWS account, and establishes a link between the two groups. You can view
the status of your game server groups in the GameLift console. Game server group
metrics and events are emitted to Amazon CloudWatch.
Before creating a new game server group, you must have the following:
* An Amazon EC2 launch template that specifies how to launch Amazon
EC2 instances with your game server build. For more information, see [ Launching an Instance from a Launch
Template](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
in the *Amazon EC2 User Guide*.
* An IAM role that extends limited access to your AWS account to
allow GameLift FleetIQ to create and interact with the Auto Scaling group. For
more information, see [Create IAM roles for cross-service interaction](https://docs.aws.amazon.com/gamelift/latest/developerguide/gsg-iam-permissions-roles.html)
in the *GameLift FleetIQ Developer Guide*.
To create a new game server group, specify a unique group name, IAM role and
Amazon EC2 launch template, and provide a list of instance types that can be
used in the group. You must also set initial maximum and minimum limits on the
group's instance count. You can optionally set an Auto Scaling policy with
target tracking based on a GameLift FleetIQ metric.
Once the game server group and corresponding Auto Scaling group are created, you
have full access to change the Auto Scaling group's configuration as needed.
Several properties that are set when creating a game server group, including
maximum/minimum size and auto-scaling policy settings, must be updated directly
in the Auto Scaling group. Keep in mind that some Auto Scaling group properties
are periodically updated by GameLift FleetIQ as part of its balancing activities
to optimize for availability and cost.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def create_game_server_group(client, input, options \\ []) do
request(client, "CreateGameServerGroup", input, options)
end
@doc """
Creates a multiplayer game session for players.
This operation creates a game session record and assigns an available server
process in the specified fleet to host the game session. A fleet must have an
`ACTIVE` status before a game session can be created in it.
To create a game session, specify either fleet ID or alias ID and indicate a
maximum number of players to allow in the game session. You can also provide a
name and game-specific properties for this game session. If successful, a
`GameSession` object is returned containing the game session properties and
other settings you specified.
**Idempotency tokens.** You can add a token that uniquely identifies game
session requests. This is useful for ensuring that game session requests are
idempotent. Multiple requests with the same idempotency token are processed only
once; subsequent requests return the original result. All response values are
the same with the exception of game session status, which may change.
**Resource creation limits.** If you are creating a game session on a fleet with
a resource creation limit policy in force, then you must specify a creator ID.
Without this ID, Amazon GameLift has no way to evaluate the policy for this new
game session request.
**Player acceptance policy.** By default, newly created game sessions are open
to new players. You can restrict new player access by using `UpdateGameSession`
to change the game session's player session creation policy.
**Game session logs.** Logs are retained for all active game sessions for 14
days. To access the logs, call `GetGameSessionLogUrl` to download the log files.
*Available in Amazon GameLift Local.*
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def create_game_session(client, input, options \\ []) do
request(client, "CreateGameSession", input, options)
end
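# Hypothetical call sketch; per the notes above, the request names either a
# fleet or an alias plus a player cap:
#
#     AWS.GameLift.create_game_session(client, %{
#       "FleetId" => "fleet-1234",
#       "MaximumPlayerSessionCount" => 10,
#       "Name" => "my-session"
#     })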
@doc """
Establishes a new queue for processing requests to place new game sessions.
A queue identifies where new game sessions can be hosted -- by specifying a list
of destinations (fleets or aliases) -- and how long requests can wait in the
queue before timing out. You can set up a queue to try to place game sessions on
fleets in multiple Regions. To add placement requests to a queue, call
`StartGameSessionPlacement` and reference the queue name.
**Destination order.** When processing a request for a game session, Amazon
GameLift tries each destination in order until it finds one with available
resources to host the new game session. A queue's default order is determined by
how destinations are listed. The default order is overridden when a game session
placement request provides player latency information. Player latency
information enables Amazon GameLift to prioritize destinations where players
report the lowest average latency, as a result placing the new game session
where the majority of players will have the best possible gameplay experience.
**Player latency policies.** For placement requests containing player latency
information, use player latency policies to protect individual players from very
high latencies. With a latency cap, even when a destination can deliver a low
latency for most players, the game is not placed where any individual player is
reporting latency higher than a policy's maximum. A queue can have multiple
latency policies, which are enforced consecutively starting with the policy with
the lowest latency cap. Use multiple policies to gradually relax latency
controls; for example, you might set a policy with a low latency cap for the
first 60 seconds, a second policy with a higher cap for the next 60 seconds,
etc.
To create a new queue, provide a name, timeout value, a list of destinations
and, if desired, a set of latency policies. If successful, a new queue object is
returned.
## Learn more
[ Design a Game Session Queue](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-design.html)
[ Create a Game Session Queue](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-creating.html)
## Related operations
* `CreateGameSessionQueue`
* `DescribeGameSessionQueues`
* `UpdateGameSessionQueue`
* `DeleteGameSessionQueue`
"""
def create_game_session_queue(client, input, options \\ []) do
request(client, "CreateGameSessionQueue", input, options)
end
@doc """
Defines a new matchmaking configuration for use with FlexMatch.
A matchmaking configuration sets out guidelines for matching players and getting
the matches into games. You can set up multiple matchmaking configurations to
handle the scenarios needed for your game. Each matchmaking ticket
(`StartMatchmaking` or `StartMatchBackfill`) specifies a configuration for the
match and provides player attributes to support the configuration being used.
To create a matchmaking configuration, at a minimum you must specify the
following: configuration name; a rule set that governs how to evaluate players
and find acceptable matches; a game session queue to use when placing a new game
session for the match; and the maximum time allowed for a matchmaking attempt.
To track the progress of matchmaking tickets, set up an Amazon Simple
Notification Service (SNS) to receive notifications, and provide the topic ARN
in the matchmaking configuration. An alternative method, continuously polling
ticket status with `DescribeMatchmaking`, should only be used for games in
development with low matchmaking usage.
## Learn more
[ Design a FlexMatch Matchmaker](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html)
[ Set Up FlexMatch Event Notification](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def create_matchmaking_configuration(client, input, options \\ []) do
request(client, "CreateMatchmakingConfiguration", input, options)
end
@doc """
Creates a new rule set for FlexMatch matchmaking.
A rule set describes the type of match to create, such as the number and size of
teams. It also sets the parameters for acceptable player matches, such as
minimum skill level or character type. A rule set is used by a
`MatchmakingConfiguration`.
To create a matchmaking rule set, provide unique rule set name and the rule set
body in JSON format. Rule sets must be defined in the same Region as the
matchmaking configuration they are used with.
Since matchmaking rule sets cannot be edited, it is a good idea to check the
rule set syntax using `ValidateMatchmakingRuleSet` before creating a new rule
set.
## Learn more
* [Build a Rule Set](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html)
* [Design a Matchmaker](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html)
* [Matchmaking with FlexMatch](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def create_matchmaking_rule_set(client, input, options \\ []) do
request(client, "CreateMatchmakingRuleSet", input, options)
end
@doc """
Reserves an open player slot in an active game session.
Before a player can be added, a game session must have an `ACTIVE` status, have
a creation policy of `ALLOW_ALL`, and have an open player slot. To add a group
of players to a game session, use `CreatePlayerSessions`. When the player
connects to the game server and references a player session ID, the game server
contacts the Amazon GameLift service to validate the player reservation and
accept the player.
To create a player session, specify a game session ID, player ID, and optionally
a string of player data. If successful, a slot is reserved in the game session
for the player and a new `PlayerSession` object is returned. Player sessions
cannot be updated.
*Available in Amazon GameLift Local.*
* `CreatePlayerSession`
* `CreatePlayerSessions`
* `DescribePlayerSessions`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def create_player_session(client, input, options \\ []) do
request(client, "CreatePlayerSession", input, options)
end
@doc """
Reserves open slots in a game session for a group of players.
Before players can be added, a game session must have an `ACTIVE` status, have a
creation policy of `ALLOW_ALL`, and have an open player slot. To add a single
player to a game session, use `CreatePlayerSession`. When a player connects to
the game server and references a player session ID, the game server contacts the
Amazon GameLift service to validate the player reservation and accept the
player.
To create player sessions, specify a game session ID, a list of player IDs, and
optionally a set of player data strings. If successful, a slot is reserved in
the game session for each player and a set of new `PlayerSession` objects is
returned. Player sessions cannot be updated.
*Available in Amazon GameLift Local.*
* `CreatePlayerSession`
* `CreatePlayerSessions`
* `DescribePlayerSessions`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
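## Example

A minimal sketch reserving slots for a group of players (placeholder values):

```elixir
input = %{
  "GameSessionId" => "my-game-session-id",
  "PlayerIds" => ["player-1", "player-2", "player-3"],
  # Optional map of player ID to developer-defined player data.
  "PlayerDataMap" => %{"player-1" => "team=red", "player-2" => "team=blue"}
}

result = create_player_sessions(client, input)
```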
"""
def create_player_sessions(client, input, options \\ []) do
request(client, "CreatePlayerSessions", input, options)
end
@doc """
Creates a new script record for your Realtime Servers script.
Realtime scripts are JavaScript that provide configuration settings and optional
custom game logic for your game. The script is deployed when you create a
Realtime Servers fleet to host your game sessions. Script logic is executed
during an active game session.
To create a new script record, specify a script name and provide the script
file(s). The script files and all dependencies must be zipped into a single
file. You can pull the zip file from either of these locations:
* A locally available directory. Use the *ZipFile* parameter for
this option.
* An Amazon Simple Storage Service (Amazon S3) bucket under your AWS
account. Use the *StorageLocation* parameter for this option. You'll need to
have an AWS Identity and Access Management (IAM) role that allows the Amazon
GameLift
service to access your S3 bucket.
If the call is successful, a new script record is created with a unique script
ID. If the script file is provided as a local file, the file is uploaded to an
Amazon GameLift-owned S3 bucket and the script record's storage location
reflects this location. If the script file is provided as an S3 bucket, Amazon
GameLift accesses the file at this storage location as needed for deployment.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
[Set Up a Role for Amazon GameLift Access](https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html)
## Related operations
* `CreateScript`
* `ListScripts`
* `DescribeScript`
* `UpdateScript`
* `DeleteScript`
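## Example

A minimal sketch using the *StorageLocation* option; the bucket, key, and role
ARN are placeholders:

```elixir
input = %{
  "Name" => "my-realtime-script",
  "StorageLocation" => %{
    "Bucket" => "my-script-bucket",
    "Key" => "scripts/realtime.zip",
    # IAM role that lets Amazon GameLift read the S3 object.
    "RoleArn" => "arn:aws:iam::111122223333:role/gamelift-s3-access"
  }
}

result = create_script(client, input)
```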
"""
def create_script(client, input, options \\ []) do
request(client, "CreateScript", input, options)
end
@doc """
Requests authorization to create or delete a peer connection between the VPC for
your Amazon GameLift fleet and a virtual private cloud (VPC) in your AWS
account.
VPC peering enables the game servers on your fleet to communicate directly with
other AWS resources. Once you've received authorization, call
`CreateVpcPeeringConnection` to establish the peering connection. For more
information, see [VPC Peering with Amazon GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
You can peer with VPCs that are owned by any AWS account you have access to,
including the account that you use to manage your Amazon GameLift fleets. You
cannot peer with VPCs that are in different Regions.
To request authorization to create a connection, call this operation from the
AWS account with the VPC that you want to peer to your Amazon GameLift fleet.
For example, to enable your game servers to retrieve data from a DynamoDB table,
use the account that manages that DynamoDB resource. Identify the following
values: (1) The ID of the VPC that you want to peer with, and (2) the ID of the
AWS account that you use to manage Amazon GameLift. If successful, VPC peering
is authorized for the specified VPC.
To request authorization to delete a connection, call this operation from the
AWS account with the VPC that is peered with your Amazon GameLift fleet.
Identify the following values: (1) VPC ID that you want to delete the peering
connection for, and (2) ID of the AWS account that you use to manage Amazon
GameLift.
The authorization remains valid for 24 hours unless it is canceled by a call to
`DeleteVpcPeeringAuthorization`. You must create or delete the peering
connection while the authorization is valid.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
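## Example

A minimal sketch, called from the account that owns the VPC you want to peer
with (placeholder IDs):

```elixir
input = %{
  # The AWS account that you use to manage Amazon GameLift.
  "GameLiftAwsAccountId" => "111122223333",
  # The VPC you want to peer with your GameLift fleet.
  "PeerVpcId" => "vpc-0abc123def456"
}

result = create_vpc_peering_authorization(client, input)
```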
"""
def create_vpc_peering_authorization(client, input, options \\ []) do
request(client, "CreateVpcPeeringAuthorization", input, options)
end
@doc """
Establishes a VPC peering connection between a virtual private cloud (VPC) in an
AWS account with the VPC for your Amazon GameLift fleet.
VPC peering enables the game servers on your fleet to communicate directly with
other AWS resources. You can peer with VPCs in any AWS account that you have
access to, including the account that you use to manage your Amazon GameLift
fleets. You cannot peer with VPCs that are in different Regions. For more
information, see [VPC Peering with Amazon GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
Before calling this operation to establish the peering connection, you first
need to call `CreateVpcPeeringAuthorization` and identify the VPC you want to
peer with. Once the authorization for the specified VPC is issued, you have 24
hours to establish the connection. These two operations handle all tasks
necessary to peer the two VPCs, including acceptance, updating routing tables,
etc.
To establish the connection, call this operation from the AWS account that is
used to manage the Amazon GameLift fleets. Identify the following values: (1)
The ID of the fleet you want to enable a VPC peering connection for; (2) The
AWS account with the VPC that you want to peer with; and (3) The ID of the VPC
you want to peer with. This operation is asynchronous. If successful, a
`VpcPeeringConnection` request is created. You can use continuous polling to
track the request's status using `DescribeVpcPeeringConnections`, or by
monitoring fleet events for success or failure using `DescribeFleetEvents`.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
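## Example

A minimal sketch, called from the account that manages the GameLift fleets
(placeholder IDs):

```elixir
input = %{
  "FleetId" => "fleet-1234",
  "PeerVpcAwsAccountId" => "111122223333",
  "PeerVpcId" => "vpc-0abc123def456"
}

result = create_vpc_peering_connection(client, input)
```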
"""
def create_vpc_peering_connection(client, input, options \\ []) do
request(client, "CreateVpcPeeringConnection", input, options)
end
@doc """
Deletes an alias.
This operation removes all record of the alias. Game clients attempting to
access a server process using the deleted alias receive an error. To delete an
alias, specify the alias ID to be deleted.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def delete_alias(client, input, options \\ []) do
request(client, "DeleteAlias", input, options)
end
@doc """
Deletes a build.
This operation permanently deletes the build resource and any uploaded build
files. Deleting a build does not affect the status of any active fleets using
the build, but you can no longer create new fleets with the deleted build.
To delete a build, specify the build ID.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def delete_build(client, input, options \\ []) do
request(client, "DeleteBuild", input, options)
end
@doc """
Deletes everything related to a fleet.
Before deleting a fleet, you must set the fleet's desired capacity to zero. See
`UpdateFleetCapacity`.
If the fleet being deleted has a VPC peering connection, you first need to get a
valid authorization (good for 24 hours) by calling
`CreateVpcPeeringAuthorization`. You do not need to explicitly delete the VPC
peering connection--this is done as part of the delete fleet process.
This operation removes the fleet and its resources. Once a fleet is deleted, you
can no longer use any of the resource in that fleet.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
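## Example

A minimal sketch that first scales the fleet to zero, as required, then
deletes it. The fleet ID is a placeholder, and `update_fleet_capacity/2` is
assumed to be the function this module defines for `UpdateFleetCapacity`:

```elixir
fleet_id = "fleet-1234"

# Desired capacity must be zero before the fleet can be deleted.
_ = update_fleet_capacity(client, %{"FleetId" => fleet_id, "DesiredInstances" => 0})

result = delete_fleet(client, %{"FleetId" => fleet_id})
```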
"""
def delete_fleet(client, input, options \\ []) do
request(client, "DeleteFleet", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Terminates a game server group and permanently deletes the game server group
record.
You have several options for how these resources are impacted when deleting the
game server group. Depending on the type of delete operation selected, this
operation might affect these resources:
* The game server group
* The corresponding Auto Scaling group
* All game servers that are currently running in the group
To delete a game server group, identify the game server group to delete and
specify the type of delete operation to initiate. Game server groups can only be
deleted if they are in `ACTIVE` or `ERROR` status.
If the delete request is successful, a series of operations are kicked off. The
game server group status is changed to `DELETE_SCHEDULED`, which prevents new
game servers from being registered and stops automatic scaling activity. Once
all game servers in the game server group are deregistered, GameLift FleetIQ can
begin deleting resources. If any of the delete operations fail, the game server
group is placed in `ERROR` status.
GameLift FleetIQ emits delete events to Amazon CloudWatch.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def delete_game_server_group(client, input, options \\ []) do
request(client, "DeleteGameServerGroup", input, options)
end
@doc """
Deletes a game session queue.
Once a queue is successfully deleted, unfulfilled `StartGameSessionPlacement`
requests that reference the queue will fail. To delete a queue, specify the
queue name.
## Learn more
[ Using Multi-Region Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html)
## Related operations
* `CreateGameSessionQueue`
* `DescribeGameSessionQueues`
* `UpdateGameSessionQueue`
* `DeleteGameSessionQueue`
"""
def delete_game_session_queue(client, input, options \\ []) do
request(client, "DeleteGameSessionQueue", input, options)
end
@doc """
Permanently removes a FlexMatch matchmaking configuration.
To delete, specify the configuration name. A matchmaking configuration cannot be
deleted if it is being used in any active matchmaking tickets.
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def delete_matchmaking_configuration(client, input, options \\ []) do
request(client, "DeleteMatchmakingConfiguration", input, options)
end
@doc """
Deletes an existing matchmaking rule set.
To delete the rule set, provide the rule set name. Rule sets cannot be deleted
if they are currently being used by a matchmaking configuration.
## Learn more
* [Build a Rule Set](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def delete_matchmaking_rule_set(client, input, options \\ []) do
request(client, "DeleteMatchmakingRuleSet", input, options)
end
@doc """
Deletes a fleet scaling policy.
Once deleted, the policy is no longer in force and GameLift removes all record
of it. To delete a scaling policy, specify both the scaling policy name and the
fleet ID it is associated with.
To temporarily suspend scaling policies, call `StopFleetActions`. This operation
suspends all policies for the fleet.
* `DescribeFleetCapacity`
* `UpdateFleetCapacity`
* `DescribeEC2InstanceLimits`
* Manage scaling policies:
* `PutScalingPolicy` (auto-scaling)
* `DescribeScalingPolicies` (auto-scaling)
* `DeleteScalingPolicy` (auto-scaling)
* Manage fleet actions:
* `StartFleetActions`
* `StopFleetActions`
"""
def delete_scaling_policy(client, input, options \\ []) do
request(client, "DeleteScalingPolicy", input, options)
end
@doc """
Deletes a Realtime script.
This operation permanently deletes the script record. If script files were
uploaded, they are also deleted (files stored in an S3 bucket are not deleted).
To delete a script, specify the script ID. Before deleting a script, be sure to
terminate all fleets that are deployed with the script being deleted. Fleet
instances periodically check for script updates, and if the script record no
longer exists, the instance will go into an error state and be unable to host
game sessions.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related operations
* `CreateScript`
* `ListScripts`
* `DescribeScript`
* `UpdateScript`
* `DeleteScript`
"""
def delete_script(client, input, options \\ []) do
request(client, "DeleteScript", input, options)
end
@doc """
Cancels a pending VPC peering authorization for the specified VPC.
If you need to delete an existing VPC peering connection, call
`DeleteVpcPeeringConnection`.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
"""
def delete_vpc_peering_authorization(client, input, options \\ []) do
request(client, "DeleteVpcPeeringAuthorization", input, options)
end
@doc """
Removes a VPC peering connection.
To delete the connection, you must have a valid authorization for the VPC
peering connection that you want to delete. You can check for an authorization
by calling `DescribeVpcPeeringAuthorizations` or request a new one using
`CreateVpcPeeringAuthorization`.
Once a valid authorization exists, call this operation from the AWS account that
is used to manage the Amazon GameLift fleets. Identify the connection to delete
by the connection ID and fleet ID. If successful, the connection is removed.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
"""
def delete_vpc_peering_connection(client, input, options \\ []) do
request(client, "DeleteVpcPeeringConnection", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Removes the game server from a game server group.
As a result of this operation, the deregistered game server can no longer be
claimed and will not be returned in a list of active game servers.
To deregister a game server, specify the game server group and game server ID.
If successful, this operation emits a CloudWatch event with termination
timestamp and reason.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
"""
def deregister_game_server(client, input, options \\ []) do
request(client, "DeregisterGameServer", input, options)
end
@doc """
Retrieves properties for an alias.
This operation returns all alias metadata and settings. To get an alias's target
fleet ID only, use `ResolveAlias`.
To get alias properties, specify the alias ID. If successful, the requested
alias record is returned.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def describe_alias(client, input, options \\ []) do
request(client, "DescribeAlias", input, options)
end
@doc """
Retrieves properties for a custom game build.
To request a build resource, specify a build ID. If successful, an object
containing the build properties is returned.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def describe_build(client, input, options \\ []) do
request(client, "DescribeBuild", input, options)
end
@doc """
Retrieves the following information for the specified EC2 instance type:
* Maximum number of instances allowed per AWS account (service
limit).
* Current usage for the AWS account.
To learn more about the capabilities of each instance type, see [Amazon EC2 Instance Types](http://aws.amazon.com/ec2/instance-types/). Note that the
instance types offered may vary depending on the Region.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_e_c2_instance_limits(client, input, options \\ []) do
request(client, "DescribeEC2InstanceLimits", input, options)
end
@doc """
Retrieves core properties, including configuration, status, and metadata, for a
fleet.
To get attributes for one or more fleets, provide a list of fleet IDs or fleet
ARNs. To get attributes for all fleets, do not specify a fleet identifier. When
requesting attributes for multiple fleets, use the pagination parameters to
retrieve results as a set of sequential pages. If successful, a
`FleetAttributes` object is returned for each fleet requested, unless the fleet
identifier is not found.
Some API operations may limit the number of fleet IDs allowed in one request. If
a request exceeds this limit, the request fails and the error message includes
the maximum allowed number.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_fleet_attributes(client, input, options \\ []) do
request(client, "DescribeFleetAttributes", input, options)
end
@doc """
Retrieves the current capacity statistics for one or more fleets.
These statistics present a snapshot of the fleet's instances and provide insight
on current or imminent scaling activity. To get statistics on game hosting
activity in the fleet, see `DescribeFleetUtilization`.
You can request capacity for all fleets or specify a list of one or more fleet
identifiers. When requesting multiple fleets, use the pagination parameters to
retrieve results as a set of sequential pages. If successful, a `FleetCapacity`
object is returned for each requested fleet ID. When a list of fleet IDs is
provided, attribute objects are returned only for fleets that currently exist.
Some API operations may limit the number of fleet IDs allowed in one request. If
a request exceeds this limit, the request fails and the error message includes
the maximum allowed.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift Metrics for Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_fleet_capacity(client, input, options \\ []) do
request(client, "DescribeFleetCapacity", input, options)
end
@doc """
Retrieves entries from the specified fleet's event log.
You can specify a time range to limit the result set. Use the pagination
parameters to retrieve results as a set of sequential pages. If successful, a
collection of event log entries matching the request are returned.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_fleet_events(client, input, options \\ []) do
request(client, "DescribeFleetEvents", input, options)
end
@doc """
Retrieves a fleet's inbound connection permissions.
Connection permissions specify the range of IP addresses and port settings that
incoming traffic can use to access server processes in the fleet. Game sessions
that are running on instances in the fleet use connections that fall in this
range.
To get a fleet's inbound connection permissions, specify the fleet's unique
identifier. If successful, a collection of `IpPermission` objects is returned
for the requested fleet ID. If the requested fleet has been deleted, the result
set is empty.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_fleet_port_settings(client, input, options \\ []) do
request(client, "DescribeFleetPortSettings", input, options)
end
@doc """
Retrieves utilization statistics for one or more fleets.
These statistics provide insight into how available hosting resources are
currently being used. To get statistics on available hosting resources, see
`DescribeFleetCapacity`.
You can request utilization data for all fleets, or specify a list of one or
more fleet IDs. When requesting multiple fleets, use the pagination parameters
to retrieve results as a set of sequential pages. If successful, a
`FleetUtilization` object is returned for each requested fleet ID, unless the
fleet identifier is not found.
Some API operations may limit the number of fleet IDs allowed in one request. If
a request exceeds this limit, the request fails and the error message includes
the maximum allowed.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[GameLift Metrics for Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_fleet_utilization(client, input, options \\ []) do
request(client, "DescribeFleetUtilization", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Retrieves information for a registered game server.
Information includes game server status, health check info, and the instance
that the game server is running on.
To retrieve game server information, specify the game server ID. If successful,
the requested game server object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
"""
def describe_game_server(client, input, options \\ []) do
request(client, "DescribeGameServer", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Retrieves information on a game server group.
This operation returns only properties related to GameLift FleetIQ. To view or
update properties for the corresponding Auto Scaling group, such as launch
template, auto scaling policies, and maximum/minimum group size, access the Auto
Scaling group directly.
To get attributes for a game server group, provide a group name or ARN value. If
successful, a `GameServerGroup` object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def describe_game_server_group(client, input, options \\ []) do
request(client, "DescribeGameServerGroup", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Retrieves status information about the Amazon EC2 instances associated with a
GameLift FleetIQ game server group.
Use this operation to detect when instances are active or not available to host
new game servers. If you are looking for instance configuration information,
call `DescribeGameServerGroup` or access the corresponding Auto Scaling group
properties.
To request status for all instances in the game server group, provide a game
server group ID only. To request status for specific instances, provide the game
server group ID and one or more instance IDs. Use the pagination parameters to
retrieve results in sequential segments. If successful, a collection of
`GameServerInstance` objects is returned.
This operation is not designed to be called with every game server claim
request; this practice can cause you to exceed your API limit, which results in
errors. Instead, as a best practice, cache the results and refresh your cache no
more than once every 10 seconds.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def describe_game_server_instances(client, input, options \\ []) do
request(client, "DescribeGameServerInstances", input, options)
end
@doc """
Retrieves properties, including the protection policy in force, for one or more
game sessions.
This operation can be used in several ways: (1) provide a `GameSessionId` or
`GameSessionArn` to request details for a specific game session; (2) provide
either a `FleetId` or an `AliasId` to request properties for all game sessions
running on a fleet.
To get game session record(s), specify just one of the following: game session
ID, fleet ID, or alias ID. You can filter this request by game session status.
Use the pagination parameters to retrieve results as a set of sequential pages.
If successful, a `GameSessionDetail` object is returned for each session
matching the request.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def describe_game_session_details(client, input, options \\ []) do
request(client, "DescribeGameSessionDetails", input, options)
end
@doc """
Retrieves properties and current status of a game session placement request.
To get game session placement details, specify the placement ID. If successful,
a `GameSessionPlacement` object is returned.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def describe_game_session_placement(client, input, options \\ []) do
request(client, "DescribeGameSessionPlacement", input, options)
end
@doc """
Retrieves the properties for one or more game session queues.
When requesting multiple queues, use the pagination parameters to retrieve
results as a set of sequential pages. If successful, a `GameSessionQueue` object
is returned for each requested queue. When specifying a list of queues, objects
are returned only for queues that currently exist in the Region.
## Learn more
[ View Your Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-console.html)
## Related operations
* `CreateGameSessionQueue`
* `DescribeGameSessionQueues`
* `UpdateGameSessionQueue`
* `DeleteGameSessionQueue`
"""
def describe_game_session_queues(client, input, options \\ []) do
request(client, "DescribeGameSessionQueues", input, options)
end
@doc """
Retrieves a set of one or more game sessions.
Request a specific game session or request all game sessions on a fleet.
Alternatively, use `SearchGameSessions` to request a set of active game sessions
that are filtered by certain criteria. To retrieve protection policy settings
for game sessions, use `DescribeGameSessionDetails`.
To get game sessions, specify one of the following: game session ID, fleet ID,
or alias ID. You can filter this request by game session status. Use the
pagination parameters to retrieve results as a set of sequential pages. If
successful, a `GameSession` object is returned for each game session matching
the request.
*Available in Amazon GameLift Local.*
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
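## Example

A minimal sketch retrieving active game sessions on one fleet (placeholder
fleet ID):

```elixir
input = %{
  "FleetId" => "fleet-1234",
  "StatusFilter" => "ACTIVE",
  "Limit" => 20
}

result = describe_game_sessions(client, input)
```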
"""
def describe_game_sessions(client, input, options \\ []) do
request(client, "DescribeGameSessions", input, options)
end
@doc """
Retrieves information about a fleet's instances, including instance IDs.
Use this operation to get details on all instances in the fleet or get details
on one specific instance.
To get a specific instance, specify fleet ID and instance ID. To get all
instances in a fleet, specify a fleet ID only. Use the pagination parameters to
retrieve results as a set of sequential pages. If successful, an `Instance`
object is returned for each result.
## Learn more
[Remotely Access Fleet Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html)
[Debug Fleet Issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html)
## Related operations
* `DescribeInstances`
* `GetInstanceAccess`
"""
def describe_instances(client, input, options \\ []) do
request(client, "DescribeInstances", input, options)
end
@doc """
Retrieves one or more matchmaking tickets.
Use this operation to retrieve ticket information, including--after a successful
match is made--connection information for the resulting new game session.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the
request is successful, a ticket object is returned for each requested ID that
currently exists.
This operation is not designed to be continually called to track matchmaking
ticket status. This practice can cause you to exceed your API limit, which
results in errors. Instead, as a best practice, set up an Amazon Simple
Notification Service (SNS) topic to receive notifications, and provide the
topic ARN in the matchmaking configuration. Continuously polling ticket status
with `DescribeMatchmaking` should only be used for games in development with
low matchmaking usage.
## Learn more
[ Add FlexMatch to a Game Client](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html)
[ Set Up FlexMatch Event Notification](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)
## Related operations
* `StartMatchmaking`
* `DescribeMatchmaking`
* `StopMatchmaking`
* `AcceptMatch`
* `StartMatchBackfill`
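## Example

A minimal sketch requesting up to 10 tickets by ID (placeholder values):

```elixir
input = %{"TicketIds" => ["ticket-1", "ticket-2"]}

result = describe_matchmaking(client, input)
```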
"""
def describe_matchmaking(client, input, options \\ []) do
request(client, "DescribeMatchmaking", input, options)
end
@doc """
Retrieves the details of FlexMatch matchmaking configurations.
This operation offers the following options: (1) retrieve all matchmaking
configurations, (2) retrieve configurations for a specified list, or (3)
retrieve all configurations that use a specified rule set name. When requesting
multiple items, use the pagination parameters to retrieve results as a set of
sequential pages.
If successful, a configuration is returned for each requested name. When
specifying a list of names, only configurations that currently exist are
returned.
## Learn more
[ Setting Up FlexMatch Matchmakers](https://docs.aws.amazon.com/gamelift/latest/developerguide/matchmaker-build.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def describe_matchmaking_configurations(client, input, options \\ []) do
request(client, "DescribeMatchmakingConfigurations", input, options)
end
@doc """
Retrieves the details for FlexMatch matchmaking rule sets.
You can request all existing rule sets for the Region, or provide a list of one
or more rule set names. When requesting multiple items, use the pagination
parameters to retrieve results as a set of sequential pages. If successful, a
rule set is returned for each requested name.
## Learn more
* [Build a Rule Set](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def describe_matchmaking_rule_sets(client, input, options \\ []) do
request(client, "DescribeMatchmakingRuleSets", input, options)
end
@doc """
Retrieves properties for one or more player sessions.
This operation can be used in several ways: (1) provide a `PlayerSessionId` to
request properties for a specific player session; (2) provide a `GameSessionId`
to request properties for all player sessions in the specified game session; (3)
provide a `PlayerId` to request properties for all player sessions of a
specified player.
To get player session record(s), specify only one of the following: a player
session ID, a game session ID, or a player ID. You can filter this request by
player session status. Use the pagination parameters to retrieve results as a
set of sequential pages. If successful, a `PlayerSession` object is returned for
each session matching the request.
*Available in Amazon GameLift Local.*
* `CreatePlayerSession`
* `CreatePlayerSessions`
* `DescribePlayerSessions`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def describe_player_sessions(client, input, options \\ []) do
request(client, "DescribePlayerSessions", input, options)
end
@doc """
Retrieves a fleet's runtime configuration settings.
The runtime configuration tells Amazon GameLift which server processes to run
(and how) on each instance in the fleet.
To get a runtime configuration, specify the fleet's unique identifier. If
successful, a `RuntimeConfiguration` object is returned for the requested fleet.
If the requested fleet has been deleted, the result set is empty.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
[Running Multiple Processes on a Fleet](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* Describe fleets:
* `DescribeFleetAttributes`
* `DescribeFleetCapacity`
* `DescribeFleetPortSettings`
* `DescribeFleetUtilization`
* `DescribeRuntimeConfiguration`
* `DescribeEC2InstanceLimits`
* `DescribeFleetEvents`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def describe_runtime_configuration(client, input, options \\ []) do
request(client, "DescribeRuntimeConfiguration", input, options)
end
@doc """
Retrieves all scaling policies applied to a fleet.
To get a fleet's scaling policies, specify the fleet ID. You can filter this
request by policy status, such as to retrieve only active scaling policies. Use
the pagination parameters to retrieve results as a set of sequential pages. If
successful, a set of `ScalingPolicy` objects is returned for the fleet.
A fleet may have all of its scaling policies suspended (`StopFleetActions`).
This operation does not affect the status of the scaling policies, which remains
`ACTIVE`. To see whether a fleet's scaling policies are in force or suspended,
call `DescribeFleetAttributes` and check the stopped actions.
* `DescribeFleetCapacity`
* `UpdateFleetCapacity`
* `DescribeEC2InstanceLimits`
* Manage scaling policies:
* `PutScalingPolicy` (auto-scaling)
* `DescribeScalingPolicies` (auto-scaling)
* `DeleteScalingPolicy` (auto-scaling)
* Manage fleet actions:
* `StartFleetActions`
* `StopFleetActions`
"""
def describe_scaling_policies(client, input, options \\ []) do
request(client, "DescribeScalingPolicies", input, options)
end
@doc """
Retrieves properties for a Realtime script.
To request a script record, specify the script ID. If successful, an object
containing the script properties is returned.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related operations
* `CreateScript`
* `ListScripts`
* `DescribeScript`
* `UpdateScript`
* `DeleteScript`
"""
def describe_script(client, input, options \\ []) do
request(client, "DescribeScript", input, options)
end
@doc """
Retrieves valid VPC peering authorizations that are pending for the AWS account.
This operation returns all VPC peering authorizations and requests for peering.
This includes those initiated and received by this account.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
"""
def describe_vpc_peering_authorizations(client, input, options \\ []) do
request(client, "DescribeVpcPeeringAuthorizations", input, options)
end
@doc """
Retrieves information on VPC peering connections.
Use this operation to get peering information for all fleets or for one specific
fleet ID.
To retrieve connection information, call this operation from the AWS account
that is used to manage the Amazon GameLift fleets. Specify a fleet ID or leave
the parameter empty to retrieve all connection records. If successful, the
retrieved information includes both active and pending connections. Active
connections identify the IPv4 CIDR block that the VPC uses to connect.
* `CreateVpcPeeringAuthorization`
* `DescribeVpcPeeringAuthorizations`
* `DeleteVpcPeeringAuthorization`
* `CreateVpcPeeringConnection`
* `DescribeVpcPeeringConnections`
* `DeleteVpcPeeringConnection`
"""
def describe_vpc_peering_connections(client, input, options \\ []) do
request(client, "DescribeVpcPeeringConnections", input, options)
end
@doc """
Retrieves the location of stored game session logs for a specified game session.
When a game session is terminated, Amazon GameLift automatically stores the logs
in Amazon S3 and retains them for 14 days. Use this URL to download the logs.
See the [AWS Service Limits](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift)
page for maximum log file sizes. Log files that exceed this limit are not saved.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def get_game_session_log_url(client, input, options \\ []) do
request(client, "GetGameSessionLogUrl", input, options)
end
@doc """
Requests remote access to a fleet instance.
Remote access is useful for debugging, gathering benchmarking data, or observing
activity in real time.
To remotely access an instance, you need credentials that match the operating
system of the instance. For a Windows instance, Amazon GameLift returns a user
name and password as strings for use with a Windows Remote Desktop client. For a
Linux instance, Amazon GameLift returns a user name and RSA private key, also as
strings, for use with an SSH client. The private key must be saved in the proper
format to a `.pem` file before use. If you're making this request using the
AWS CLI, saving the secret can be handled as part of the `GetInstanceAccess`
request, as shown in one of the examples for this operation.
To request access to a specific instance, specify the IDs of both the instance
and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling
`DescribeInstances`. If successful, an `InstanceAccess` object is returned that
contains the instance's IP address and a set of credentials.
## Learn more
[Remotely Access Fleet Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html)
[Debug Fleet Issues](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html)
## Related operations
* `DescribeInstances`
* `GetInstanceAccess`
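## Example

A minimal sketch with placeholder IDs; the instance ID can be retrieved with
`DescribeInstances`:

```elixir
input = %{
  "FleetId" => "fleet-1234",
  "InstanceId" => "i-0abc123def456"
}

# On success the response carries the instance IP address and credentials.
result = get_instance_access(client, input)
```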
"""
def get_instance_access(client, input, options \\ []) do
request(client, "GetInstanceAccess", input, options)
end
@doc """
Retrieves all aliases for this AWS account.
You can filter the result set by alias name and/or routing strategy type. Use
the pagination parameters to retrieve results in sequential pages.
Returned aliases are not listed in any particular order.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def list_aliases(client, input, options \\ []) do
request(client, "ListAliases", input, options)
end
@doc """
Retrieves build resources for all builds associated with the AWS account in use.
You can limit results to builds that are in a specific status by using the
`Status` parameter. Use the pagination parameters to retrieve results in a set
of sequential pages.
Build resources are not listed in any particular order.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def list_builds(client, input, options \\ []) do
request(client, "ListBuilds", input, options)
end
@doc """
Retrieves a collection of fleet resources for this AWS account.
You can filter the result set to find only those fleets that are deployed with a
specific build or script. Use the pagination parameters to retrieve results in
sequential pages.
Fleet resources are not listed in a particular order.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def list_fleets(client, input, options \\ []) do
request(client, "ListFleets", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Retrieves information on all game server groups that exist in the current AWS
account for the selected Region.
Use the pagination parameters to retrieve results in a set of sequential
segments.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def list_game_server_groups(client, input, options \\ []) do
request(client, "ListGameServerGroups", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Retrieves information on all game servers that are currently active in a
specified game server group.
You can opt to sort the list by game server age. Use the pagination parameters
to retrieve results in a set of sequential segments.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
"""
def list_game_servers(client, input, options \\ []) do
request(client, "ListGameServers", input, options)
end
@doc """
Retrieves script records for all Realtime scripts that are associated with the
AWS account in use.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related operations
* `CreateScript`
* `ListScripts`
* `DescribeScript`
* `UpdateScript`
* `DeleteScript`
"""
def list_scripts(client, input, options \\ []) do
request(client, "ListScripts", input, options)
end
@doc """
Retrieves all tags that are assigned to a GameLift resource.
Resource tags are used to organize AWS resources for a range of purposes. This
operation handles the permissions necessary to manage tags for the following
GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To list tags for a resource, specify the unique ARN value for the resource.
## Learn more
[Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *AWS General Reference*
[ AWS Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related operations
* `TagResource`
* `UntagResource`
* `ListTagsForResource`
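## Example

A minimal sketch listing tags for a fleet by its ARN (placeholder ARN):

```elixir
input = %{
  "ResourceARN" => "arn:aws:gamelift:us-west-2:111122223333:fleet/fleet-1234"
}

result = list_tags_for_resource(client, input)
```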
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Creates or updates a scaling policy for a fleet.
Scaling policies are used to automatically scale a fleet's hosting capacity to
meet player demand. An active scaling policy instructs Amazon GameLift to track
a fleet metric and automatically change the fleet's capacity when a certain
threshold is reached. There are two types of scaling policies: target-based and
rule-based. Use a target-based policy to quickly and efficiently manage fleet
scaling; this option is the most commonly used. Use rule-based policies when you
need to exert fine-grained control over auto-scaling.
Fleets can have multiple scaling policies of each type in force at the same
time; you can have one target-based policy, one or multiple rule-based scaling
policies, or both. We recommend caution, however, because multiple auto-scaling
policies can have unintended consequences.
You can temporarily suspend all scaling policies for a fleet by calling
`StopFleetActions` with the fleet action AUTO_SCALING. To resume scaling
policies, call `StartFleetActions` with the same fleet action. To stop just one
scaling policy--or to permanently remove it--you must delete the policy with
`DeleteScalingPolicy`.
Learn more about how to work with auto-scaling in [Set Up Fleet Automatic Scaling](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-autoscaling.html).
## Target-based policy
A target-based policy tracks a single metric: PercentAvailableGameSessions. This
metric tells us how much of a fleet's hosting capacity is ready to host game
sessions but is not currently in use. This is the fleet's buffer; it measures
the additional player demand that the fleet could handle at current capacity.
With a target-based policy, you set your ideal buffer size and leave it to
Amazon GameLift to take whatever action is needed to maintain that target.
For example, you might choose to maintain a 10% buffer for a fleet that has the
capacity to host 100 simultaneous game sessions. This policy tells Amazon
GameLift to take action whenever the fleet's available capacity falls below or
rises above 10 game sessions. Amazon GameLift will start new instances or stop
unused instances in order to return to the 10% buffer.
To create or update a target-based policy, specify a fleet ID and name, and set
the policy type to "TargetBased". Specify the metric to track
(PercentAvailableGameSessions) and reference a `TargetConfiguration` object with
your desired buffer value. Exclude all other parameters. On a successful
request, the policy name is returned. The scaling policy is automatically in
force as soon as it's successfully created. If the fleet's auto-scaling actions
are temporarily suspended, the new policy will be in force once the fleet
actions are restarted.
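As a sketch, and assuming `client` is a configured AWS client and the fleet ID
is a placeholder, the 10% buffer example above might look like this:

```elixir
input = %{
  "FleetId" => "fleet-1234",
  "Name" => "maintain-10-percent-buffer",
  "PolicyType" => "TargetBased",
  "MetricName" => "PercentAvailableGameSessions",
  # Keep 10% of hosting capacity available for new game sessions.
  "TargetConfiguration" => %{"TargetValue" => 10}
}

result = put_scaling_policy(client, input)
```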
## Rule-based policy
A rule-based policy tracks a specified fleet metric, sets a threshold value,
and
specifies the type of action to initiate when triggered. With a rule-based
policy, you can select from several available fleet metrics. Each policy
specifies whether to scale up or scale down (and by how much), so you need one
policy for each type of action.
For example, a policy may make the following statement: "If the percentage of
idle instances is greater than 20% for more than 15 minutes, then reduce the
fleet capacity by 10%."
A policy's rule statement has the following structure:
If `[MetricName]` is `[ComparisonOperator]` `[Threshold]` for `[EvaluationPeriods]` minutes, then `[ScalingAdjustmentType]` to/by `[ScalingAdjustment]`.
To implement the example, the rule statement would look like this:
If `[PercentIdleInstances]` is `[GreaterThanThreshold]` `[20]` for `[15]` minutes, then `[PercentChangeInCapacity]` to/by `[10]`.
To create or update a scaling policy, specify a unique combination of name and
fleet ID, and set the policy type to "RuleBased". Specify the parameter values
for a policy rule statement. On a successful request, the policy name is
returned. Scaling policies are automatically in force as soon as they're
successfully created. If the fleet's auto-scaling actions are temporarily
suspended, the new policy will be in force once the fleet actions are restarted.
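As a sketch, the example rule statement above might be expressed like this
(placeholder fleet ID; a negative adjustment is assumed here to scale down
with `PercentChangeInCapacity`):

```elixir
input = %{
  "FleetId" => "fleet-1234",
  "Name" => "scale-down-when-idle",
  "PolicyType" => "RuleBased",
  "MetricName" => "PercentIdleInstances",
  "ComparisonOperator" => "GreaterThanThreshold",
  "Threshold" => 20,
  "EvaluationPeriods" => 15,
  "ScalingAdjustmentType" => "PercentChangeInCapacity",
  # Negative value assumed to reduce capacity by 10%.
  "ScalingAdjustment" => -10
}

result = put_scaling_policy(client, input)
```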
* `DescribeFleetCapacity`
* `UpdateFleetCapacity`
* `DescribeEC2InstanceLimits`
* Manage scaling policies:
* `PutScalingPolicy` (auto-scaling)
* `DescribeScalingPolicies` (auto-scaling)
* `DeleteScalingPolicy` (auto-scaling)
* Manage fleet actions:
* `StartFleetActions`
* `StopFleetActions`
"""
def put_scaling_policy(client, input, options \\ []) do
request(client, "PutScalingPolicy", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Creates a new game server resource and notifies GameLift FleetIQ that the game
server is ready to host gameplay and players.
This operation is called by a game server process that is running on an instance
in a game server group. Registering game servers enables GameLift FleetIQ to
track available game servers and enables game clients and services to claim a
game server for a new game session.
To register a game server, identify the game server group and instance where the
game server is running, and provide a unique identifier for the game server. You
can also include connection and game server data. When a game client or service
requests a game server by calling `ClaimGameServer`, this information is
returned in the response.
Once a game server is successfully registered, it is put in status `AVAILABLE`.
A request to register a game server may fail if the instance it is running on is
in the process of shutting down as part of instance balancing or scale-down
activity.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
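## Example

A minimal sketch, typically called by the game server process itself at
startup (placeholder values):

```elixir
input = %{
  "GameServerGroupName" => "my-game-server-group",
  "GameServerId" => "gs-1234",
  "InstanceId" => "i-0abc123def456",
  # Optional connection string returned to clients that claim this server.
  "ConnectionInfo" => "10.1.2.3:7777"
}

result = register_game_server(client, input)
```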
"""
def register_game_server(client, input, options \\ []) do
request(client, "RegisterGameServer", input, options)
end
@doc """
Retrieves a fresh set of credentials for use when uploading a new set of game
build files to Amazon GameLift's Amazon S3 storage.
This is done as part of the build creation process; see `CreateBuild`.
To request new credentials, specify the build ID as returned with an initial
`CreateBuild` request. If successful, a new set of credentials are returned,
along with the S3 storage location associated with the build ID.
## Learn more
[ Create a Build with Files in S3](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def request_upload_credentials(client, input, options \\ []) do
request(client, "RequestUploadCredentials", input, options)
end
@doc """
Retrieves the fleet ID that an alias is currently pointing to.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def resolve_alias(client, input, options \\ []) do
request(client, "ResolveAlias", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Reinstates activity on a game server group after it has been suspended.
A game server group might be suspended by the `SuspendGameServerGroup`
operation,
or it might be suspended involuntarily due to a configuration problem. In the
second case, you can manually resume activity on the group once the
configuration problem has been resolved. Refer to the game server group status
and status reason for more information on why group activity is suspended.
To resume activity, specify a game server group ARN and the type of activity to
be resumed. If successful, a `GameServerGroup` object is returned showing that
the resumed activity is no longer listed in `SuspendedActions`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def resume_game_server_group(client, input, options \\ []) do
request(client, "ResumeGameServerGroup", input, options)
end
@doc """
Retrieves all active game sessions that match a set of search criteria and sorts
them in a specified order.
You can search or sort by the following game session attributes:
* **gameSessionId** -- A unique identifier for the game session. You
can use either a `GameSessionId` or `GameSessionArn` value.
* **gameSessionName** -- Name assigned to a game session. This value
is set when requesting a new game session with `CreateGameSession` or updating
with `UpdateGameSession`. Game session names do not need to be unique to a game
session.
* **gameSessionProperties** -- Custom data defined in a game
session's `GameProperty` parameter. `GameProperty` values are stored as
key:value pairs; the filter expression must indicate the key and a string to
search the data values for. For example, to search for game sessions with custom
data containing the key:value pair "gameMode:brawl", specify the following:
`gameSessionProperties.gameMode = "brawl"`. All custom data values are searched
as strings.
* **maximumSessions** -- Maximum number of player sessions allowed
for a game session. This value is set when requesting a new game session with
`CreateGameSession` or updating with `UpdateGameSession`.
* **creationTimeMillis** -- Value indicating when a game session was
created. It is expressed in Unix time as milliseconds.
* **playerSessionCount** -- Number of players currently connected to
a game session. This value changes rapidly as players join the session or drop
out.
* **hasAvailablePlayerSessions** -- Boolean value indicating whether
a game session has reached its maximum number of players. It is highly
recommended that all search requests include this filter attribute to optimize
search performance and return only sessions that players can join.
Returned values for `playerSessionCount` and `hasAvailablePlayerSessions` change
quickly as players join sessions and others drop out. Results should be
considered a snapshot in time. Be sure to refresh search results often, and
handle sessions that fill up before a player can join.
To search or sort, specify either a fleet ID or an alias ID, and provide a
search filter expression, a sort expression, or both. If successful, a
collection of `GameSession` objects matching the request is returned. Use the
pagination parameters to retrieve results as a set of sequential pages.
You can search for game sessions one fleet at a time only. To find game sessions
across multiple fleets, you must search each fleet separately and combine the
results. This search feature finds only game sessions that are in `ACTIVE`
status. To locate games in statuses other than active, use
`DescribeGameSessionDetails`.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
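## Example
An illustrative search for joinable sessions on a single fleet, sorted by current player count (the fleet ID is a placeholder):
    search_game_sessions(client, %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "FilterExpression" => "hasAvailablePlayerSessions=true",
      "SortExpression" => "playerSessionCount ASC",
      "Limit" => 10
    })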
"""
def search_game_sessions(client, input, options \\ []) do
request(client, "SearchGameSessions", input, options)
end
@doc """
Resumes activity on a fleet that was suspended with `StopFleetActions`.
Currently, this operation is used to restart a fleet's auto-scaling activity.
To start fleet actions, specify the fleet ID and the type of actions to restart.
When auto-scaling fleet actions are restarted, Amazon GameLift once again
initiates scaling events as triggered by the fleet's scaling policies. If
actions on the fleet were never stopped, this operation will have no effect. You
can view a fleet's stopped actions using `DescribeFleetAttributes`.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def start_fleet_actions(client, input, options \\ []) do
request(client, "StartFleetActions", input, options)
end
@doc """
Places a request for a new game session in a queue (see
`CreateGameSessionQueue`).
When processing a placement request, Amazon GameLift searches for available
resources on the queue's destinations, scanning each until it finds resources or
the placement request times out.
A game session placement request can also request player sessions. When a new
game session is successfully created, Amazon GameLift creates a player session
for each player included in the request.
When placing a game session, by default Amazon GameLift tries each fleet in the
order they are listed in the queue configuration. Ideally, a queue's
destinations are listed in preference order.
Alternatively, when requesting a game session with players, you can also provide
latency data for each player in relevant Regions. Latency data indicates the
performance lag a player experiences when connected to a fleet in the Region.
Amazon GameLift uses latency data to reorder the list of destinations to place
the game session in a Region with minimal lag. If latency data is provided for
multiple players, Amazon GameLift calculates each Region's average lag for all
players and reorders to get the best game play across all players.
To place a new game session request, specify the following:
* The queue name and a set of game session properties and settings
* A unique ID (such as a UUID) for the placement. You use this ID to
track the status of the placement request
* (Optional) A set of player data and a unique player ID for each
player that you are joining to the new game session (player data is optional,
but if you include it, you must also provide a unique ID for each player)
* Latency data for all players (if you want to optimize game play
for the players)
If successful, a new game session placement is created.
To track the status of a placement request, call `DescribeGameSessionPlacement`
and check the request's status. If the status is `FULFILLED`, a new game session
has been created and a game session ARN and Region are referenced. If the
placement request times out, you can resubmit the request or retry it with a
different queue.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
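## Example
A sketch of a placement request; the queue name and IDs are placeholders, and player latency data is optional:
    start_game_session_placement(client, %{
      "PlacementId" => "placement-0001",
      "GameSessionQueueName" => "my-queue",
      "MaximumPlayerSessionCount" => 10,
      "DesiredPlayerSessions" => [%{"PlayerId" => "player-1"}]
    })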
"""
def start_game_session_placement(client, input, options \\ []) do
request(client, "StartGameSessionPlacement", input, options)
end
@doc """
Finds new players to fill open slots in an existing game session.
This operation can be used to add players to matched games that start with fewer
than the maximum number of players or to replace players when they drop out. By
backfilling with the same matchmaker used to create the original match, you
ensure that new players meet the match criteria and maintain a consistent
experience throughout the game session. You can backfill a match anytime after a
game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game
session's ARN, a matchmaking configuration, and a set of data that describes all
current players in the game session. If successful, a match backfill ticket is
created and returned with status set to `QUEUED`. The ticket is placed in the
matchmaker's ticket pool and processed. Track the status of the ticket to
respond as needed.
The process of finding backfill matches is essentially identical to the initial
matchmaking process. The matchmaker searches the pool and groups tickets
together to form potential matches, allowing only one backfill ticket per
potential match. Once a match is formed, the matchmaker creates player
sessions for the new players. All tickets in the match are updated with the game
session's connection information, and the `GameSession` object is updated to
include matchmaker data on the new players. For more detail on how match
backfill requests are processed, see [ How Amazon GameLift FlexMatch Works](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html).
## Learn more
[ Backfill Existing Games with FlexMatch](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html)
[ How GameLift FlexMatch Works](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html)
## Related operations
* `StartMatchmaking`
* `DescribeMatchmaking`
* `StopMatchmaking`
* `AcceptMatch`
* `StartMatchBackfill`
"""
def start_match_backfill(client, input, options \\ []) do
request(client, "StartMatchBackfill", input, options)
end
@doc """
Uses FlexMatch to create a game match for a group of players based on custom
matchmaking rules, and starts a new game for the matched players.
Each matchmaking request specifies the type of match to build (team
configuration, rules for an acceptable match, etc.). The request also specifies
the players to find a match for and where to host the new game session for
optimal performance. A matchmaking request might start with a single player or a
group of players who want to play together. FlexMatch finds additional players
as needed to fill the match. Match type, rules, and the queue used to place a
new game session are defined in a `MatchmakingConfiguration`.
To start matchmaking, provide a unique ticket ID, specify a matchmaking
configuration, and include the players to be matched. You must also include a
set of player attributes relevant for the matchmaking configuration. If
successful, a matchmaking ticket is returned with status set to `QUEUED`.
Track the status of the ticket to respond as needed and acquire game session
connection information for successfully completed matches. Ticket status updates
are tracked using event notification through Amazon Simple Notification Service
(SNS), which is defined in the matchmaking configuration.
**Processing a matchmaking request** -- FlexMatch handles a matchmaking request
as follows:
1. Your client code submits a `StartMatchmaking` request for one or
more players and tracks the status of the request ticket.
2. FlexMatch uses this ticket and others in process to build an
acceptable match. When a potential match is identified, all tickets in the
proposed match are advanced to the next status.
3. If the match requires player acceptance (set in the matchmaking
configuration), the tickets move into status `REQUIRES_ACCEPTANCE`. This status
triggers your client code to solicit acceptance from all players in every ticket
involved in the match, and then call `AcceptMatch` for each player. If any
player rejects or fails to accept the match before a specified timeout, the
proposed match is dropped (see `AcceptMatch` for more details).
4. Once a match is proposed and accepted, the matchmaking tickets
move into status `PLACING`. FlexMatch locates resources for a new game session
using the game session queue (set in the matchmaking configuration) and creates
the game session based on the match data.
5. When the match is successfully placed, the matchmaking tickets
move into `COMPLETED` status. Connection information (including game session
endpoint and player session) is added to the matchmaking tickets. Matched
players can use the connection information to join the game.
## Learn more
[ Add FlexMatch to a Game Client](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html)
[ Set Up FlexMatch Event Notification](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)
[ FlexMatch Integration Roadmap](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-tasks.html)
[ How GameLift FlexMatch Works](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html)
## Related operations
* `StartMatchmaking`
* `DescribeMatchmaking`
* `StopMatchmaking`
* `AcceptMatch`
* `StartMatchBackfill`
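## Example
An illustrative matchmaking request; the ticket ID, configuration name, and player attributes are placeholders:
    start_matchmaking(client, %{
      "TicketId" => "ticket-0001",
      "ConfigurationName" => "my-matchmaker",
      "Players" => [
        %{"PlayerId" => "player-1", "PlayerAttributes" => %{"skill" => %{"N" => 23}}}
      ]
    })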
"""
def start_matchmaking(client, input, options \\ []) do
request(client, "StartMatchmaking", input, options)
end
@doc """
Suspends activity on a fleet.
Currently, this operation is used to stop a fleet's auto-scaling activity. It is
used to temporarily stop triggering scaling events. The policies can be retained
and auto-scaling activity can be restarted using `StartFleetActions`. You can
view a fleet's stopped actions using `DescribeFleetAttributes`.
To stop fleet actions, specify the fleet ID and the type of actions to suspend.
When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates
scaling events except in response to manual changes using `UpdateFleetCapacity`.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* `UpdateFleetAttributes`
* `StartFleetActions` or `StopFleetActions`
"""
def stop_fleet_actions(client, input, options \\ []) do
request(client, "StopFleetActions", input, options)
end
@doc """
Cancels a game session placement that is in `PENDING` status.
To stop a placement, provide the placement ID values. If successful, the
placement is moved to `CANCELLED` status.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def stop_game_session_placement(client, input, options \\ []) do
request(client, "StopGameSessionPlacement", input, options)
end
@doc """
Cancels a matchmaking ticket or match backfill ticket that is currently being
processed.
To stop the matchmaking operation, specify the ticket ID. If successful, work on
the ticket is stopped, and the ticket status is changed to `CANCELLED`.
This call is also used to turn off automatic backfill for an individual game
session. This is for game sessions that are created with a matchmaking
configuration that has automatic backfill enabled. The ticket ID is included in
the `MatchmakerData` of an updated game session object, which is provided to the
game server.
If the operation is successful, the service sends back an empty JSON struct with
the HTTP 200 response (not an empty HTTP body).
## Learn more
[ Add FlexMatch to a Game Client](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html)
## Related operations
* `StartMatchmaking`
* `DescribeMatchmaking`
* `StopMatchmaking`
* `AcceptMatch`
* `StartMatchBackfill`
"""
def stop_matchmaking(client, input, options \\ []) do
request(client, "StopMatchmaking", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Temporarily stops activity on a game server group without terminating instances
or the game server group.
You can restart activity by calling `ResumeGameServerGroup`. You can suspend the
following activity:
* **Instance type replacement** - This activity evaluates the
current game hosting viability of all Spot instance types that are defined for
the game server group. It updates the Auto Scaling group to remove nonviable
Spot Instance types, which have a higher chance of game server interruptions. It
then balances capacity across the remaining viable Spot Instance types. When
this activity is suspended, the Auto Scaling group continues with its current
balance, regardless of viability. Instance protection, utilization metrics, and
capacity scaling activities continue to be active.
To suspend activity, specify a game server group ARN and the type of activity to
be suspended. If successful, a `GameServerGroup` object is returned showing that
the activity is listed in `SuspendedActions`.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def suspend_game_server_group(client, input, options \\ []) do
request(client, "SuspendGameServerGroup", input, options)
end
@doc """
Assigns a tag to a GameLift resource.
AWS resource tags provide an additional management tool set. You can use tags to
organize resources, create IAM permissions policies to manage access to groups
of resources, customize AWS cost breakdowns, etc. This operation handles the
permissions necessary to manage tags for the following GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To add a tag to a resource, specify the unique ARN value for the resource and
provide a tag list containing one or more tags. The operation succeeds even if
the list includes tags that are already assigned to the specified resource.
## Learn more
[Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *AWS General Reference*
[ AWS Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related operations
* `TagResource`
* `UntagResource`
* `ListTagsForResource`
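## Example
An illustrative tagging call; the ARN is a placeholder:
    tag_resource(client, %{
      "ResourceARN" => "arn:aws:gamelift:us-west-2:123456789012:fleet/fleet-1111",
      "Tags" => [%{"Key" => "team", "Value" => "platform"}]
    })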
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes a tag that is assigned to a GameLift resource.
Resource tags are used to organize AWS resources for a range of purposes. This
operation handles the permissions necessary to manage tags for the following
GameLift resource types:
* Build
* Script
* Fleet
* Alias
* GameSessionQueue
* MatchmakingConfiguration
* MatchmakingRuleSet
To remove a tag from a resource, specify the unique ARN value for the resource
and provide a string list containing one or more tags to be removed. This
operation succeeds even if the list includes tags that are not currently
assigned to the specified resource.
## Learn more
[Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in
the *AWS General Reference*
[ AWS Tagging Strategies](http://aws.amazon.com/answers/account-management/aws-tagging-strategies/)
## Related operations
* `TagResource`
* `UntagResource`
* `ListTagsForResource`
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates properties for an alias.
To update properties, specify the alias ID to be updated and provide the
information to be changed. To reassign an alias to another fleet, provide an
updated routing strategy. If successful, the updated alias record is returned.
* `CreateAlias`
* `ListAliases`
* `DescribeAlias`
* `UpdateAlias`
* `DeleteAlias`
* `ResolveAlias`
"""
def update_alias(client, input, options \\ []) do
request(client, "UpdateAlias", input, options)
end
@doc """
Updates metadata in a build resource, including the build name and version.
To update the metadata, specify the build ID to update and provide the new
values. If successful, a build object containing the updated metadata is
returned.
## Learn more
[ Upload a Custom Server Build](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
## Related operations
* `CreateBuild`
* `ListBuilds`
* `DescribeBuild`
* `UpdateBuild`
* `DeleteBuild`
"""
def update_build(client, input, options \\ []) do
request(client, "UpdateBuild", input, options)
end
@doc """
Updates fleet properties, including name and description, for a fleet.
To update metadata, specify the fleet ID and the property values that you want
to change. If successful, the fleet ID for the updated fleet is returned.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* Update fleets:
* `UpdateFleetAttributes`
* `UpdateFleetCapacity`
* `UpdateFleetPortSettings`
* `UpdateRuntimeConfiguration`
* `StartFleetActions` or `StopFleetActions`
"""
def update_fleet_attributes(client, input, options \\ []) do
request(client, "UpdateFleetAttributes", input, options)
end
@doc """
Updates capacity settings for a fleet.
Use this operation to specify the number of EC2 instances (hosts) that you want
this fleet to contain. Before calling this operation, you may want to call
`DescribeEC2InstanceLimits` to get the maximum capacity based on the fleet's EC2
instance type.
Specify the minimum and maximum number of instances. Amazon GameLift will not change
fleet capacity to values that fall outside of this range. This is particularly
important when using auto-scaling (see `PutScalingPolicy`) to allow capacity to
adjust based on player demand while imposing limits on automatic adjustments.
To update fleet capacity, specify the fleet ID and the number of instances you
want the fleet to host. If successful, Amazon GameLift starts or terminates
instances so that the fleet's active instance count matches the desired instance
count. You can view a fleet's current capacity information by calling
`DescribeFleetCapacity`. If the desired instance count is higher than the
instance type's limit, the "Limit Exceeded" exception occurs.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* Update fleets:
* `UpdateFleetAttributes`
* `UpdateFleetCapacity`
* `UpdateFleetPortSettings`
* `UpdateRuntimeConfiguration`
* `StartFleetActions` or `StopFleetActions`
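## Example
A sketch that sets the desired instance count with scaling bounds (the fleet ID is a placeholder):
    update_fleet_capacity(client, %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "DesiredInstances" => 5,
      "MinSize" => 1,
      "MaxSize" => 10
    })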
"""
def update_fleet_capacity(client, input, options \\ []) do
request(client, "UpdateFleetCapacity", input, options)
end
@doc """
Updates port settings for a fleet.
To update settings, specify the fleet ID to be updated and list the permissions
you want to update. List the permissions you want to add in
`InboundPermissionAuthorizations`, and permissions you want to remove in
`InboundPermissionRevocations`. Permissions to be removed must match existing
fleet permissions. If successful, the fleet ID for the updated fleet is
returned.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* Update fleets:
* `UpdateFleetAttributes`
* `UpdateFleetCapacity`
* `UpdateFleetPortSettings`
* `UpdateRuntimeConfiguration`
* `StartFleetActions` or `StopFleetActions`
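## Example
An illustrative update that authorizes a UDP port range (the fleet ID and IP range are placeholders):
    update_fleet_port_settings(client, %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "InboundPermissionAuthorizations" => [
        %{"FromPort" => 33400, "ToPort" => 33500, "IpRange" => "0.0.0.0/0", "Protocol" => "UDP"}
      ]
    })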
"""
def update_fleet_port_settings(client, input, options \\ []) do
request(client, "UpdateFleetPortSettings", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Updates information about a registered game server to help GameLift FleetIQ to
track game server availability.
This operation is called by a game server process that is running on an instance
in a game server group.
Use this operation to update the following types of game server information. You
can make all three types of updates in the same request:
* To update the game server's utilization status, identify the game
server and game server group and specify the current utilization status. Use
this status to identify when game servers are currently hosting games and when
they are available to be claimed.
* To report health status, identify the game server and game server
group and set health check to `HEALTHY`. If a game server does not report health
status for a certain length of time, the game server is no longer considered
healthy. As a result, it will be eventually deregistered from the game server
group to avoid affecting utilization metrics. The best practice is to report
health every 60 seconds.
* To change game server metadata, provide updated game server data.
Once a game server is successfully updated, the relevant statuses and timestamps
are updated.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `RegisterGameServer`
* `ListGameServers`
* `ClaimGameServer`
* `DescribeGameServer`
* `UpdateGameServer`
* `DeregisterGameServer`
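## Example
A sketch of a combined health and utilization report sent from a game server process (names are placeholders):
    update_game_server(client, %{
      "GameServerGroupName" => "my-gsg",
      "GameServerId" => "gs-1234567890",
      "UtilizationStatus" => "UTILIZED",
      "HealthCheck" => "HEALTHY"
    })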
"""
def update_game_server(client, input, options \\ []) do
request(client, "UpdateGameServer", input, options)
end
@doc """
## This operation is used with the Amazon GameLift FleetIQ solution and game
server groups.
Updates GameLift FleetIQ-specific properties for a game server group.
Many Auto Scaling group properties are updated on the Auto Scaling group
directly, including the launch template, Auto Scaling policies, and
maximum/minimum/desired instance counts.
To update the game server group, specify the game server group ID and provide
the updated values. Before applying the updates, the new values are validated to
ensure that GameLift FleetIQ can continue to perform instance balancing
activity. If successful, a `GameServerGroup` object is returned.
## Learn more
[GameLift FleetIQ Guide](https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html)
## Related operations
* `CreateGameServerGroup`
* `ListGameServerGroups`
* `DescribeGameServerGroup`
* `UpdateGameServerGroup`
* `DeleteGameServerGroup`
* `ResumeGameServerGroup`
* `SuspendGameServerGroup`
* `DescribeGameServerInstances`
"""
def update_game_server_group(client, input, options \\ []) do
request(client, "UpdateGameServerGroup", input, options)
end
@doc """
Updates game session properties.
This includes the session name, maximum player count, protection policy, which
controls whether or not an active game session can be terminated during a
scale-down event, and the player session creation policy, which controls whether
or not new players can join the session. To update a game session, specify the
game session ID and the values you want to change. If successful, an updated
`GameSession` object is returned.
* `CreateGameSession`
* `DescribeGameSessions`
* `DescribeGameSessionDetails`
* `SearchGameSessions`
* `UpdateGameSession`
* `GetGameSessionLogUrl`
* Game session placements
* `StartGameSessionPlacement`
* `DescribeGameSessionPlacement`
* `StopGameSessionPlacement`
"""
def update_game_session(client, input, options \\ []) do
request(client, "UpdateGameSession", input, options)
end
@doc """
Updates settings for a game session queue, which determines how new game session
requests in the queue are processed.
To update settings, specify the queue name to be updated and provide the new
settings. When updating destinations, provide a complete list of destinations.
## Learn more
[ Using Multi-Region Queues](https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html)
## Related operations
* `CreateGameSessionQueue`
* `DescribeGameSessionQueues`
* `UpdateGameSessionQueue`
* `DeleteGameSessionQueue`
"""
def update_game_session_queue(client, input, options \\ []) do
request(client, "UpdateGameSessionQueue", input, options)
end
@doc """
Updates settings for a FlexMatch matchmaking configuration.
These changes affect all matches and game sessions that are created after the
update. To update settings, specify the configuration name to be updated and
provide the new settings.
## Learn more
[ Design a FlexMatch Matchmaker](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
"""
def update_matchmaking_configuration(client, input, options \\ []) do
request(client, "UpdateMatchmakingConfiguration", input, options)
end
@doc """
Updates the current runtime configuration for the specified fleet, which tells
Amazon GameLift how to launch server processes on instances in the fleet.
You can update a fleet's runtime configuration at any time after the fleet is
created; it does not need to be in an `ACTIVE` status.
To update runtime configuration, specify the fleet ID and provide a
`RuntimeConfiguration` object with an updated set of server process
configurations.
Each instance in an Amazon GameLift fleet checks regularly for an updated runtime
configuration and changes how it launches server processes to comply with the
latest version. Existing server processes are not affected by the update;
runtime configuration changes are applied gradually as existing processes shut
down and new processes are launched during Amazon GameLift's normal process
recycling activity.
## Learn more
[Setting up GameLift Fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)
## Related operations
* `CreateFleet`
* `ListFleets`
* `DeleteFleet`
* `DescribeFleetAttributes`
* Update fleets:
* `UpdateFleetAttributes`
* `UpdateFleetCapacity`
* `UpdateFleetPortSettings`
* `UpdateRuntimeConfiguration`
* `StartFleetActions` or `StopFleetActions`
"""
def update_runtime_configuration(client, input, options \\ []) do
request(client, "UpdateRuntimeConfiguration", input, options)
end
@doc """
Updates Realtime script metadata and content.
To update script metadata, specify the script ID and provide updated name and/or
version values.
To update script content, provide an updated zip file by pointing to either a
local file or an Amazon S3 bucket location. You can use either method regardless
of how the original script was uploaded. Use the *Version* parameter to track
updates to the script.
If the call is successful, the updated metadata is stored in the script record
and a revised script is uploaded to the Amazon GameLift service. Once the script
is updated and acquired by a fleet instance, the new version is used for all new
game sessions.
## Learn more
[Amazon GameLift Realtime Servers](https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html)
## Related operations
* `CreateScript`
* `ListScripts`
* `DescribeScript`
* `UpdateScript`
* `DeleteScript`
"""
def update_script(client, input, options \\ []) do
request(client, "UpdateScript", input, options)
end
@doc """
Validates the syntax of a matchmaking rule or rule set.
This operation checks that the rule set is using syntactically correct JSON and
that it conforms to allowed property expressions. To validate syntax, provide a
rule set JSON string.
## Learn more
* [Build a Rule Set](https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html)
## Related operations
* `CreateMatchmakingConfiguration`
* `DescribeMatchmakingConfigurations`
* `UpdateMatchmakingConfiguration`
* `DeleteMatchmakingConfiguration`
* `CreateMatchmakingRuleSet`
* `DescribeMatchmakingRuleSets`
* `ValidateMatchmakingRuleSet`
* `DeleteMatchmakingRuleSet`
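## Example
A minimal sketch with an inline, trivial rule set body (placeholder values):
    rule_set_body = ~s({"name": "simple", "ruleLanguageVersion": "1.0", "teams": [{"name": "all", "minPlayers": 1, "maxPlayers": 8}]})
    validate_matchmaking_rule_set(client, %{"RuleSetBody" => rule_set_body})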
"""
def validate_matchmaking_rule_set(client, input, options \\ []) do
request(client, "ValidateMatchmakingRuleSet", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "gamelift"}
host = build_host("gamelift", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "GameLift.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/game_lift.ex
| 0.902529
| 0.661322
|
game_lift.ex
|
starcoder
|
defmodule Snappy do
@moduledoc """
An Elixir binding for snappy, a fast compressor/decompressor.
"""
@doc """
Compress
## Examples
iex> {:ok, _compressed} = Snappy.compress("aaaaaaaaaaaaaaaaaaaa")
{:ok, <<20, 0, 97, 74, 1, 0>>}
"""
@spec compress(binary()) :: {:ok, binary()} | {:error, String.t()}
def compress(data) when is_binary(data) do
Snappy.Nif.compress(data)
end
@doc """
Uncompress
## Examples
iex> {:ok, compressed} = Snappy.compress("aaaaaaaaaaaaaaaaaaaa")
{:ok, <<20, 0, 97, 74, 1, 0>>}
iex> {:ok, _uncompressed} = Snappy.uncompress(compressed)
{:ok, "aaaaaaaaaaaaaaaaaaaa"}
"""
@spec uncompress(binary()) :: {:ok, binary()} | {:error, String.t()}
def uncompress(data) when is_binary(data) do
Snappy.Nif.uncompress(data)
end
@doc """
Returns the maximal size of the compressed representation of
input data that is `byte_size(data)` bytes in length.
## Examples
iex> {:ok, _max_size} = Snappy.max_compressed_length("aaaaaaaaaaaaaaaaaaaa")
{:ok, 55}
"""
@spec max_compressed_length(binary()) :: {:ok, non_neg_integer()} | {:error, String.t()}
def max_compressed_length(data) when is_binary(data) do
Snappy.Nif.max_compressed_length(data)
end
@doc """
Returns the length of the uncompressed data.
This operation takes O(1) time in the underlying C implementation.
## Examples
iex> {:ok, compressed} = Snappy.compress("aaaaaaaaaaaaaaaaaaaa")
{:ok, <<20, 0, 97, 74, 1, 0>>}
iex> {:ok, _uncompressed_length} = Snappy.uncompressed_length(compressed)
{:ok, 20}
"""
@spec uncompressed_length(binary()) :: {:ok, non_neg_integer()} | {:error, String.t()}
def uncompressed_length(compressed) when is_binary(compressed) do
Snappy.Nif.uncompressed_length(compressed)
end
@doc """
Returns true iff the contents of "compressed" can be uncompressed
successfully. Does not return the uncompressed data. Takes
time proportional to compressed_length, but is usually at least
a factor of four faster than actual decompression.
## Examples
iex> {:ok, compressed} = Snappy.compress("aaaaaaaaaaaaaaaaaaaa")
{:ok, <<20, 0, 97, 74, 1, 0>>}
iex> true = Snappy.valid_compressed_buffer?(compressed)
"""
@spec valid_compressed_buffer?(binary()) :: boolean()
def valid_compressed_buffer?(compressed) when is_binary(compressed) do
Snappy.Nif.is_valid_compressed_buffer(compressed, byte_size(compressed))
end
@spec valid_compressed_buffer?(binary(), pos_integer()) :: boolean()
def valid_compressed_buffer?(compressed, size) when is_binary(compressed) and size > 0 do
Snappy.Nif.is_valid_compressed_buffer(compressed, size)
end
end
|
lib/snappy.ex
| 0.89217
| 0.411939
|
snappy.ex
|
starcoder
|
defmodule Abit do
@moduledoc """
Use `:atomics` as a bit array or as an array of N-bit counters.
[Erlang atomics documentation](http://erlang.org/doc/man/atomics.html)
The `Abit` module (this module) has functions to use `:atomics` as a bit array.
The bit array is zero indexed.
The `Abit.Counter` module has functions to use `:atomics` as an array of N-bit
counters.
The `Abit.Bitmask` functions help working with bitmasks.
## Abit
iex> ref = :atomics.new(100, signed: false)
iex> Abit.bit_count(ref)
6400
iex> Abit.bit_at(ref, 0)
0
iex> Abit.set_bit_at(ref, 0, 1)
:ok
iex> Abit.bit_at(ref, 0)
1
## Abit.Counter
iex> counter = %Abit.Counter{} = Abit.Counter.new(100, 16)
iex> Abit.Counter.get(counter, 0)
0
iex> Abit.Counter.put(counter, 0, 100)
{:ok, {0, 100}}
iex> Abit.Counter.add(counter, 0, 100)
{:ok, {0, 200}}
iex> Abit.Counter.member?(counter, 200)
true
## Abit.Bitmask
iex> Abit.Bitmask.set_bits_count(3)
2
iex> Abit.Bitmask.bit_at(2, 0)
0
iex> Abit.Bitmask.bit_at(2, 1)
1
"""
import Bitwise
@doc """
Returns total count of bits in atomics `ref`.
`:atomics` are 64 bit integers so total
count of bits is size * 64.
## Examples
iex> ref = :atomics.new(1, signed: false)
iex> Abit.bit_count(ref)
64
iex> ref2 = :atomics.new(2, signed: false)
iex> Abit.bit_count(ref2)
128
"""
@spec bit_count(reference) :: pos_integer
def bit_count(ref) when is_reference(ref) do
:atomics.info(ref).size * 64
end
@doc """
Merge bits of atomics `ref_a` & `ref_b` using the
bitwise OR operator.
`ref_b` will be merged into `ref_a`.
Returns `ref_a` mutated.
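## Examples
iex> ref_a = :atomics.new(1, signed: false)
iex> :atomics.put(ref_a, 1, 10)
iex> ref_b = :atomics.new(1, signed: false)
iex> :atomics.put(ref_b, 1, 5)
iex> Abit.merge(ref_a, ref_b) |> :atomics.get(1)
15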
"""
@spec merge(reference, reference) :: reference
def merge(ref_a, ref_b) when is_reference(ref_a) and is_reference(ref_b) do
%{size: size} = ref_a |> :atomics.info()
do_merge(ref_a, ref_b, size)
end
defp do_merge(ref_a, _, 0), do: ref_a
defp do_merge(ref_a, ref_b, index) do
merged_value = :atomics.get(ref_a, index) ||| :atomics.get(ref_b, index)
:atomics.put(ref_a, index, merged_value)
do_merge(ref_a, ref_b, index - 1)
end
@doc """
Bit intersection of atomics using Bitwise AND operator.
Returns `ref_a` mutated.
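## Examples
iex> ref_a = :atomics.new(1, signed: false)
iex> :atomics.put(ref_a, 1, 12)
iex> ref_b = :atomics.new(1, signed: false)
iex> :atomics.put(ref_b, 1, 10)
iex> Abit.intersect(ref_a, ref_b) |> :atomics.get(1)
8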
"""
@spec intersect(reference, reference) :: reference
def intersect(ref_a, ref_b) when is_reference(ref_a) and is_reference(ref_b) do
%{size: size} = ref_a |> :atomics.info()
do_intersect(ref_a, ref_b, size)
end
defp do_intersect(ref_a, _, 0), do: ref_a
defp do_intersect(ref_a, ref_b, index) do
intersected_value = :atomics.get(ref_a, index) &&& :atomics.get(ref_b, index)
:atomics.put(ref_a, index, intersected_value)
do_intersect(ref_a, ref_b, index - 1)
end
@doc """
Sets the bit at `bit_index` to `bit` in the atomics `ref`.
Returns `:ok`.
## Examples
iex> ref = :atomics.new(1, signed: false)
iex> ref |> Abit.set_bit_at(0, 1)
iex> ref |> :atomics.get(1)
1
iex> ref |> Abit.set_bit_at(0, 0)
:ok
iex> ref |> :atomics.get(1)
0
"""
@spec set_bit_at(reference, non_neg_integer, 0 | 1) :: :ok
def set_bit_at(ref, bit_index, bit) when is_reference(ref) and bit in [0, 1] do
{atomics_index, integer_bit_index} = bit_position(bit_index)
current_value = :atomics.get(ref, atomics_index)
do_set_bit_at(ref, atomics_index, integer_bit_index, bit, current_value)
end
defp do_set_bit_at(ref, atomics_index, integer_bit_index, bit, current_value) do
next_value = Abit.Bitmask.set_bit_at(current_value, integer_bit_index, bit)
case :atomics.compare_exchange(ref, atomics_index, current_value, next_value) do
:ok ->
:ok
non_matching_current_value ->
do_set_bit_at(ref, atomics_index, integer_bit_index, bit, non_matching_current_value)
end
end
@doc """
Returns position of bit in `:atomics`.
Returns a 2 tuple containing:
* `atomics_index` - the index of the integer in atomics where the bit is located
* `bit_index` - the index of the bit in the integer
## Examples
iex> Abit.bit_position(0)
{1, 0}
iex> Abit.bit_position(11)
{1, 11}
iex> Abit.bit_position(64)
{2, 0}
"""
@spec bit_position(non_neg_integer) :: {non_neg_integer, non_neg_integer}
def bit_position(bit_index) when is_integer(bit_index) and bit_index >= 0 do
atomics_index = div(bit_index, 64) + 1
bit_index = rem(bit_index, 64)
{atomics_index, bit_index}
end
@doc """
Returns bit at `bit_index` in atomic `ref`.
## Examples
iex> ref = :atomics.new(1, signed: false)
iex> ref |> :atomics.put(1, 3)
iex> Abit.bit_at(ref, 0)
1
iex> Abit.bit_at(ref, 1)
1
iex> Abit.bit_at(ref, 2)
0
"""
@spec bit_at(reference, non_neg_integer) :: 0 | 1
def bit_at(ref, bit_index) when is_reference(ref) and is_integer(bit_index) do
{atomics_index, integer_bit_index} = bit_position(bit_index)
bit_at(ref, atomics_index, integer_bit_index)
end
defp bit_at(ref, atomics_index, integer_bit_index) do
integer = :atomics.get(ref, atomics_index)
Abit.Bitmask.bit_at(integer, integer_bit_index)
end
@doc """
Returns number of bits set to 1 in atomics array `ref`.
## Examples
iex> ref = :atomics.new(1, signed: false)
iex> ref |> :atomics.put(1, 3)
iex> Abit.set_bits_count(ref)
2
iex> ref2 = :atomics.new(1, signed: false)
iex> Abit.set_bits_count(ref2)
0
"""
@spec set_bits_count(reference) :: non_neg_integer
def set_bits_count(ref) when is_reference(ref) do
%{size: size} = ref |> :atomics.info()
do_set_bits_count(ref, size, 0)
end
defp do_set_bits_count(_, 0, acc), do: acc
defp do_set_bits_count(ref, index, acc) do
set_bits_count_at_index = Abit.Bitmask.set_bits_count(:atomics.get(ref, index))
do_set_bits_count(ref, index - 1, acc + set_bits_count_at_index)
end
@doc """
Returns the bitwise hamming distance between the two
given `:atomics` references `ref_l` and `ref_r`.
Raises ArgumentError if the size of `ref_l` and `ref_r` don't equal.
## Examples
iex> ref_l = :atomics.new(10, signed: false)
iex> ref_r = :atomics.new(10, signed: false)
iex> Abit.hamming_distance(ref_l, ref_r)
0
iex> ref_l |> :atomics.put(1, 7)
iex> Abit.hamming_distance(ref_l, ref_r)
3
"""
@spec hamming_distance(reference, reference) :: non_neg_integer
def hamming_distance(ref_l, ref_r) when is_reference(ref_l) and is_reference(ref_r) do
%{size: ref_l_size} = :atomics.info(ref_l)
%{size: ref_r_size} = :atomics.info(ref_r)
if ref_l_size != ref_r_size do
raise ArgumentError,
"The sizes of the provided `:atomics` references don't match" <>
"Size of `ref_l` is #{ref_l_size}. Size of `ref_r` is #{ref_r_size}."
end
do_hamming_distance(ref_l, ref_r, 1, ref_l_size, 0)
end
defp do_hamming_distance(ref_l, ref_r, index, index, acc) do
acc + hamming_distance_at(ref_l, ref_r, index)
end
defp do_hamming_distance(ref_l, ref_r, index, size, acc) do
do_hamming_distance(
ref_l,
ref_r,
index + 1,
size,
acc + hamming_distance_at(ref_l, ref_r, index)
)
end
defp hamming_distance_at(ref_l, ref_r, index) do
ref_l_value = ref_l |> :atomics.get(index)
ref_r_value = ref_r |> :atomics.get(index)
Abit.Bitmask.hamming_distance(ref_l_value, ref_r_value)
end
@doc """
Returns a flat list of every atomic value converted
into a list of bits from `ref` atomics reference.
## Examples
ref = :atomics.new(10, signed: false)
ref |> Abit.to_list
[0, 0, 0, 0, 0, ...]
"""
@doc since: "0.2.3"
@spec to_list(reference) :: list(0 | 1)
def to_list(ref) when is_reference(ref) do
size = :atomics.info(ref).size
(1..size)
|> Enum.flat_map(fn index ->
:atomics.get(ref, index) |> Abit.Bitmask.to_list(64)
end)
end
end
|
lib/abit.ex
| 0.936343
| 0.771026
|
abit.ex
|
starcoder
|
defmodule AFK.Keycode.Layer do
@moduledoc """
Represents a key that can switch other layers on and off in various ways.
Layers can be activated in 3 ways:
* `:hold` - Temporarily activates a layer while being held
* `:toggle` - Toggles a layer on or off when pressed
* `:default` - Sets a layer as the default layer
"""
@enforce_keys [:mode, :layer]
defstruct [:mode, :layer]
@type mode :: :default | :hold | :toggle
@type layer :: non_neg_integer
@type t :: %__MODULE__{
mode: mode,
layer: layer
}
@doc """
Creates a layer activation keycode.
The valid types are:
* `:hold` - Activates a layer while being held
* `:toggle` - Toggles a layer on or off when pressed
* `:default` - Sets a layer as the default layer
## Examples
iex> new(:hold, 1)
%AFK.Keycode.Layer{layer: 1, mode: :hold}
iex> new(:hold, 2)
%AFK.Keycode.Layer{layer: 2, mode: :hold}
iex> new(:toggle, 1)
%AFK.Keycode.Layer{layer: 1, mode: :toggle}
iex> new(:default, 2)
%AFK.Keycode.Layer{layer: 2, mode: :default}
"""
@spec new(mode, layer) :: t
def new(mode, layer) do
struct!(__MODULE__,
mode: mode,
layer: layer
)
end
defimpl AFK.ApplyKeycode do
alias AFK.Keycode.Layer
alias AFK.State.Keymap
@spec apply_keycode(keycode :: AFK.Keycode.Layer.t(), state :: AFK.State.t(), key :: atom) :: AFK.State.t()
def apply_keycode(%Layer{mode: :hold} = keycode, state, key) do
keymap = Keymap.add_activation(state.keymap, keycode, key)
%{state | keymap: keymap}
end
def apply_keycode(%Layer{mode: :toggle} = keycode, state, key) do
keymap = Keymap.toggle_activation(state.keymap, keycode, key)
%{state | keymap: keymap}
end
def apply_keycode(%Layer{mode: :default} = keycode, state, key) do
keymap = Keymap.set_default(state.keymap, keycode, key)
%{state | keymap: keymap}
end
@spec unapply_keycode(keycode :: AFK.Keycode.Layer.t(), state :: AFK.State.t(), key :: atom) :: AFK.State.t()
def unapply_keycode(%Layer{mode: :hold} = keycode, state, key) do
keymap = Keymap.remove_activation(state.keymap, keycode, key)
%{state | keymap: keymap}
end
def unapply_keycode(%Layer{mode: :toggle}, state, _key) do
state
end
def unapply_keycode(%Layer{mode: :default}, state, _key) do
state
end
end
end
|
lib/afk/keycode/layer.ex
| 0.919625
| 0.667349
|
layer.ex
|
starcoder
|
defmodule Trifolium.API do
@moduledoc """
Thin helper functions to enhance requests to Trefle API.
"""
@type response :: {:ok, %{}} | {:error, non_neg_integer(), %{}}
@doc """
Builds a query params map, which contains the token
used to communicate with Trefle API, along with
the keywords which should be passed to Trefle API.
Accepts a nested map, which in this case will build the parameters
according to the nested rules allowed on Trefle API.
Also accepts a list as a possible parameter, stringifying it.
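## Examples
Assuming `Trifolium.Config.token/0` returns `"my-token"` (an illustrative value):
    build_query_params(%{filter: %{common_name: "rose"}, ids: [1, 2, 3]})
    #=> %{:token => "my-token", :ids => "1,2,3", "filter[common_name]" => "rose"}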
"""
@spec build_query_params(any) :: %{:token => binary, optional(any) => any}
def build_query_params(query_opts \\ []) do
parse_query_opts(query_opts)
|> Map.merge(token_query_params())
end
@spec token_query_params() :: %{token: String.t()}
defp token_query_params, do: %{token: Trifolium.Config.token()}
defp parse_query_opts(query_opts) do
Enum.flat_map(query_opts, fn
{key, value} when is_list(value) ->
[{key, Enum.join(value, ",")}]
{key, value} when is_map(value) ->
parse_query_opts(value)
|> Enum.map(fn {inner_key, value} -> {"#{key}[#{inner_key}]", value} end)
{key, value} ->
[{key, value}]
end)
|> Map.new()
end
@doc """
Parse a response returned by Trefle API in a JSON,
in a correct fashion, according to the status_code returned
If the `status_code` field from the request is 200 or 201, returns `{:ok, %{}}`.
If the `status_code` field from the request is 404, returns `{:error, 404, %{message: message}}`
where `message` is the returned value from Trefle.
If any status other than 200, 201, or 404 is returned on `status_code`,
we return `{:error, status, message}` where `status` is the `status_code` returned, and you
should handle the error yourself. The `message` part of the tuple is the `body`
returned by the request.
"""
@spec parse_response({:ok, map()}) :: Trifolium.API.response()
def parse_response({:ok, %{status_code: 200, body: body}}), do: {:ok, Jason.decode!(body)}
def parse_response({:ok, %{status_code: 201, body: body}}), do: {:ok, Jason.decode!(body)}
def parse_response({:ok, %{status_code: 404 = status_code, message: message}}),
do: {:error, status_code, %{message: message}}
def parse_response({:ok, %{status_code: status_code, body: body}}),
do: {:error, status_code, Jason.decode!(body)}
end
|
lib/trifolium/api.ex
| 0.813461
| 0.417687
|
api.ex
|
starcoder
|
defmodule ExUnit.Parameterized.ParamsCallback do
@moduledoc false
@spec test_with_params(bitstring, any, fun, [tuple]) :: any
defmacro test_with_params(desc, context, fun, params_ast) do
ast = Keyword.get(params_ast, :do, nil)
case validate_map?(ast) do
true ->
ast |> do_test_with(desc, context, fun)
false ->
try do
{params, _} = params_ast |> Code.eval_quoted()
params
|> Keyword.get(:do, nil)
|> do_test_with(desc, context, fun)
rescue
_ ->
ast |> do_test_with(desc, context, fun)
end
end
end
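# Heuristic on the quoted params AST: returns true when the `do` block encodes
# the parameters as maps (directly, wrapped in tuples, or built via `Enum.map/2`).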
defp validate_map?([]), do: false
defp validate_map?([{:%{}, _, _}]), do: true
defp validate_map?({{_, _, [{_, _, [:Enum]}, :map]}, _, ast}), do: validate_map?(ast)
defp validate_map?(asts) when is_list(asts) do
[head | _tail] = asts
case head do
{:{}, _, [{:%{}, _, _}]} ->
true
head_list when is_list(head_list) ->
validate_map?(head)
_ ->
false
end
end
defp validate_map?(_asts), do: false
defp do_test_with(ast, desc, context, fun) do
ast
|> param_with_index()
|> Enum.map(fn param ->
test_with(desc, context, fun, param)
end)
end
defp test_with(desc, context, fun, {{param_desc, {_, _, values}}, num})
when is_atom(param_desc) and not is_nil(param_desc) do
run("'#{desc}': '#{param_desc}': number of #{num}", context, fun, values)
end
# Quote literals case : http://elixir-lang.org/docs/master/elixir/Kernel.SpecialForms.html#quote/2
defp test_with(desc, context, fun, {{param_desc, values}, num})
when is_atom(param_desc) and not is_nil(param_desc) do
run("'#{desc}': '#{param_desc}': number of #{num}", context, fun, escape_values(values))
end
defp test_with(desc, context, fun, {{_, _, values}, num}),
do: run("'#{desc}': number of #{num}", context, fun, values)
defp test_with(desc, context, fun, {[{:{}, _, [{:%{}, _, values}]}], num}),
do: run("'#{desc}': number of #{num}", context, fun, values)
# Quote literals case : http://elixir-lang.org/docs/master/elixir/Kernel.SpecialForms.html#quote/2
defp test_with(desc, context, fun, {values, num}),
do: run("'#{desc}': number of #{num}", context, fun, escape_values(values))
defp run(desc, context, fun, params) do
quote do
test(unquote(desc), unquote(context), do: unquote(fun).(unquote_splicing(params)))
end
end
defp param_with_index(list) when is_list(list) do
Enum.zip(list, 0..Enum.count(list))
end
defp param_with_index({_, _, [list, _]}) when is_list(list) do
Enum.zip(list, 0..Enum.count(list))
end
defp param_with_index(_) do
raise(ArgumentError, message: "Unsupported format")
end
defp escape_values(values) do
values
|> Tuple.to_list()
|> Enum.map(fn x ->
case x do
# The value has 'callback' as arguments. Then, it has '.:'
{{:., _, _}, _, _} ->
x
# The tuple might be a function
value when is_tuple(value) and is_atom(elem(value, 0)) ->
x
value when is_map(value) or is_tuple(value) ->
Macro.escape(x)
_ ->
x
end
end)
end
end
|
lib/ex_parameterized/params_callback.ex
| 0.576304
| 0.449332
|
params_callback.ex
|
starcoder
|
defmodule Cuckoo do
@moduledoc """
This module implements a [Cuckoo Filter](https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf).
## Implementation Details
The implementation follows the specification as per the paper above.
For hashing we use the x64_128 variant of Murmur3 and the Erlang phash2.
## Examples
iex> cf = Cuckoo.new(1000, 16, 4)
%Cuckoo{...}
iex> {:ok, cf} = Cuckoo.insert(cf, 5)
{:ok, %Cuckoo{...}}
iex> Cuckoo.contains?(cf, 5)
true
iex> {:ok, cf} = Cuckoo.delete(cf, 5)
{:ok, %Cuckoo{...}}
iex> Cuckoo.contains?(cf, 5)
false
"""
use Bitwise
alias Cuckoo.Bucket, as: Bucket
@max_kicks 500
defstruct [
:buckets,
:fingerprint_size,
:fingerprints_per_bucket,
:max_num_keys
]
@type t :: %Cuckoo{
buckets: :array.array(),
fingerprint_size: pos_integer,
fingerprints_per_bucket: pos_integer,
max_num_keys: pos_integer
}
defmodule Error do
defexception reason: nil, action: "", element: nil
def message(exception) do
"could not #{exception.action} #{exception.element}: #{exception.reason}"
end
end
@doc """
Creates a new Cuckoo Filter using the given `max_num_keys`, `fingerprint_size` and
`fingerprints_per_bucket`.
The suggested values for the last two according to one of the publications should
be `16` and `4` respectively, as it allows the Cuckoo Filter to achieve a sweet spot
in space effiency and table occupancy.
"""
@spec new(pos_integer, pos_integer, pos_integer) :: Cuckoo.t()
def new(max_num_keys, fingerprint_size, fingerprints_per_bucket \\ 4) when max_num_keys > 2 do
num_buckets = upper_power_2(max_num_keys / fingerprints_per_bucket)
frac = max_num_keys / num_buckets / fingerprints_per_bucket
%Cuckoo{
buckets:
:array.new([
if frac > 0.96 do
num_buckets <<< 1
else
num_buckets
end,
:fixed,
{:default, Bucket.new(fingerprints_per_bucket)}
]),
fingerprint_size: fingerprint_size,
fingerprints_per_bucket: fingerprints_per_bucket,
max_num_keys: max_num_keys
}
end
@doc """
Tries to insert `element` into the Cuckoo Filter.
Returns `{:ok, filter}` if successful, otherwise returns `{:error, :full}` from which
you should consider the Filter to be full.
"""
@spec insert(Cuckoo.t(), any) :: {:ok, Cuckoo.t()} | {:error, :full}
def insert(
%Cuckoo{
buckets: buckets,
fingerprint_size: bits_per_item,
fingerprints_per_bucket: fingerprints_per_bucket
} = filter,
element
) do
num_buckets = :array.size(buckets)
{fingerprint, i1} = fingerprint_and_index(element, num_buckets, bits_per_item)
i2 = alt_index(i1, fingerprint, num_buckets)
i1_bucket = :array.get(i1, buckets)
case Bucket.has_room?(i1_bucket) do
{:ok, index} ->
{:ok,
%{filter | buckets: :array.set(i1, Bucket.set(i1_bucket, index, fingerprint), buckets)}}
{:error, :full} ->
i2_bucket = :array.get(i2, buckets)
case Bucket.has_room?(i2_bucket) do
{:ok, index} ->
{:ok,
%{
filter
| buckets: :array.set(i2, Bucket.set(i2_bucket, index, fingerprint), buckets)
}}
{:error, :full} ->
random_i = Enum.random([i1, i2])
kickout(filter, random_i, fingerprint, fingerprints_per_bucket)
end
end
end
@doc """
Checks if the Cuckoo Filter contains `element`.
Returns `true` if does, otherwise returns `false`.
"""
@spec contains?(Cuckoo.t(), any) :: boolean
def contains?(%Cuckoo{buckets: buckets, fingerprint_size: bits_per_item}, element) do
num_buckets = :array.size(buckets)
{fingerprint, i1} = fingerprint_and_index(element, num_buckets, bits_per_item)
if Bucket.contains?(:array.get(i1, buckets), fingerprint) do
true
else
i2 = alt_index(i1, fingerprint, num_buckets)
Bucket.contains?(:array.get(i2, buckets), fingerprint)
end
end
@doc """
Attempts to delete `element` from the Cuckoo Filter if it contains it.
Returns `{:error, :inexistent}` if the element doesn't exist in the filter, otherwise
returns `{:ok, filter}`.
"""
@spec delete(Cuckoo.t(), any) :: {:ok, Cuckoo.t()} | {:error, :inexistent}
def delete(%Cuckoo{buckets: buckets, fingerprint_size: bits_per_item} = filter, element) do
num_buckets = :array.size(buckets)
{fingerprint, i1} = fingerprint_and_index(element, num_buckets, bits_per_item)
b1 = :array.get(i1, buckets)
case Bucket.find(b1, fingerprint) do
{:ok, index} ->
updated_bucket = Bucket.reset(b1, index)
{:ok, %{filter | buckets: :array.set(i1, updated_bucket, buckets)}}
{:error, :inexistent} ->
i2 = alt_index(i1, fingerprint, num_buckets)
b2 = :array.get(i2, buckets)
case Bucket.find(b2, fingerprint) do
{:ok, index} ->
updated_bucket = Bucket.reset(b2, index)
{:ok, %{filter | buckets: :array.set(i2, updated_bucket, buckets)}}
{:error, :inexistent} ->
{:error, :inexistent}
end
end
end
@doc """
Returns a filter with the inserted element or raises `Cuckoo.Error` if an error occurs.
"""
@spec insert!(Cuckoo.t(), any) :: Cuckoo.t() | no_return
def insert!(filter, element) do
case insert(filter, element) do
{:ok, filter} ->
filter
{:error, reason} ->
raise Cuckoo.Error, reason: reason, action: "insert element", element: element
end
end
@doc """
Returns a filter with the removed element or raises `Cuckoo.Error` if an error occurs.
"""
@spec delete!(Cuckoo.t(), any) :: Cuckoo.t() | no_return
def delete!(filter, element) do
case delete(filter, element) do
{:ok, filter} ->
filter
{:error, reason} ->
raise Cuckoo.Error, reason: reason, action: "delete element", element: element
end
end
# private helper functions
@spec kickout(Cuckoo.t(), non_neg_integer, pos_integer, pos_integer, pos_integer) ::
{:ok, Cuckoo.t()} | {:error, :full}
defp kickout(filter, index, fingerprint, fingerprints_per_bucket, current_kick \\ @max_kicks)
defp kickout(_, _, _, _, 0), do: {:error, :full}
defp kickout(
%Cuckoo{buckets: buckets} = filter,
index,
fingerprint,
fingerprints_per_bucket,
current_kick
) do
bucket = :array.get(index, buckets)
# randomly select an entry from the bucket
rand = :rand.uniform(fingerprints_per_bucket) - 1
# withdraw its fingerprint
old_fingerprint = Bucket.get(bucket, rand)
# replace it
bucket = Bucket.set(bucket, rand, fingerprint)
buckets = :array.set(index, bucket, buckets)
# find a place to put the old fingerprint
fingerprint = old_fingerprint
num_buckets = :array.size(buckets)
index = alt_index(index, fingerprint, num_buckets)
bucket = :array.get(index, buckets)
case Bucket.has_room?(bucket) do
{:ok, b_index} ->
bucket = Bucket.set(bucket, b_index, fingerprint)
buckets = :array.set(index, bucket, buckets)
{:ok, %{filter | buckets: buckets}}
{:error, :full} ->
kickout(
%{filter | buckets: buckets},
index,
fingerprint,
fingerprints_per_bucket,
current_kick - 1
)
end
end
@spec gen_index(pos_integer, pos_integer) :: non_neg_integer
defp gen_index(hash, num_buckets) do
rem(hash, num_buckets)
end
@spec fingerprint(pos_integer, pos_integer) :: pos_integer
defp fingerprint(hash, bits_per_item) do
hash &&& (1 <<< bits_per_item) - 1
end
# calculates the smallest power of 2 greater than or equal to n
@spec upper_power_2(float) :: pos_integer
defp upper_power_2(n) do
2
|> :math.pow(Float.ceil(log2(n)))
|> trunc
end
@spec log2(float) :: float
defp log2(n) do
:math.log(n) / :math.log(2)
end
@spec hash1(any) :: pos_integer
defp hash1(element), do: Murmur.hash_x64_128(element)
@spec hash2(pos_integer) :: pos_integer
defp hash2(fingerprint), do: :erlang.phash2(fingerprint)
@spec fingerprint_and_index(any, pos_integer, pos_integer) :: {pos_integer, non_neg_integer}
defp fingerprint_and_index(element, num_buckets, bits_per_item) do
hash = hash1(element)
fingerprint = fingerprint(hash, bits_per_item)
index = gen_index(hash >>> 32, num_buckets)
{fingerprint, index}
end
@spec alt_index(non_neg_integer, pos_integer, pos_integer) :: non_neg_integer
defp alt_index(i1, fingerprint, num_buckets) do
i1 ^^^ gen_index(hash2(fingerprint), num_buckets)
end
end
|
lib/cuckoo.ex
| 0.911431
| 0.593904
|
cuckoo.ex
|
starcoder
|
defmodule Alerts.Priority do
@moduledoc """
Calculate an alert's priority
"""
alias Alerts.Match
@type priority_level :: :high | :low | :system
@ongoing_effects Alerts.Alert.ongoing_effects()
@spec priority(map, DateTime.t()) :: priority_level
def priority(map, now \\ Util.now())
  def priority(%{lifecycle: lifecycle}, _) when lifecycle == :upcoming do
    # Upcoming alerts are low
    :low
  end
def priority(%{effect: :delay}, _) do
# Delays are always high
:high
end
def priority(%{effect: :suspension}, _) do
# Suspensions are high
:high
end
def priority(%{effect: :cancellation, active_period: active_period} = params, time) do
date = Timex.to_date(time)
if Enum.all?(active_period, &outside_date_range?(date, &1)) and
is_urgent_alert?(params, time) == false,
do: :low,
else: :high
end
def priority(%{severity: severity} = params, time) when severity >= 7 do
if is_urgent_alert?(params, time), do: :high, else: :low
end
def priority(%{effect: :access_issue}, _) do
:low
end
def priority(%{effect: :service_change, severity: severity}, _)
when severity <= 3 do
:low
end
def priority(%{lifecycle: lifecycle}, _)
when lifecycle in [:ongoing, :ongoing_upcoming] do
# Ongoing alerts are low
:low
end
def priority(%{effect: effect, active_period: active_period}, time)
when effect in @ongoing_effects do
# non-Ongoing alerts are low if they aren't happening now
if Match.any_period_match?(active_period, time), do: :high, else: :low
end
def priority(_, _) do
# Default to low
:low
end
@doc """
  Determines whether an alert is urgent due to time.
  A high-severity alert is urgent if any of the following are true:
  * it was updated in the last week
  * now is within one week of the start date
  * now is within one week of the end date
"""
@spec is_urgent_alert?(map, DateTime.t()) :: boolean
def is_urgent_alert?(%{severity: severity}, _) when severity < 7 do
false
end
def is_urgent_alert?(%{active_period: []}, _) do
true
end
def is_urgent_alert?(%{updated_at: updated_at, active_period: active_period}, time) do
within_one_week(time, updated_at) || Enum.any?(active_period, &is_urgent_period?(&1, time))
end
def is_urgent_alert?(_, _) do
false
end
@spec is_urgent_period?({DateTime.t() | nil, DateTime.t() | nil}, DateTime.t()) :: boolean
def is_urgent_period?({nil, nil}, %DateTime{}) do
true
end
def is_urgent_period?({nil, %DateTime{} = until}, %DateTime{} = time) do
within_one_week(until, time)
end
def is_urgent_period?({%DateTime{} = from, nil}, %DateTime{} = time) do
within_one_week(time, from)
end
def is_urgent_period?({from, until}, time) do
is_urgent_period?({from, nil}, time) || is_urgent_period?({nil, until}, time)
end
def within_one_week(time_1, time_2) do
diff = Timex.diff(time_1, time_2, :days)
diff <= 6 && diff >= -6
end
  # Date structs must be compared with `Date.compare/2`; the comparison
  # operators would do a structural comparison and give wrong answers.
  @spec outside_date_range?(Date.t(), {Date.t() | nil, Date.t() | nil}) :: boolean
  defp outside_date_range?(date, {nil, until}) do
    until_date = Timex.to_date(until)
    Date.compare(date, until_date) == :gt
  end
  defp outside_date_range?(date, {from, nil}) do
    from_date = Timex.to_date(from)
    Date.compare(date, from_date) == :lt
  end
  defp outside_date_range?(date, {from, until}) do
    from_date = Timex.to_date(from)
    until_date = Timex.to_date(until)
    Date.compare(date, from_date) == :lt || Date.compare(date, until_date) == :gt
  end
end
|
apps/alerts/lib/priority.ex
| 0.80837
| 0.473231
|
priority.ex
|
starcoder
|
defmodule Maverick.Api do
@moduledoc """
Provides the entrypoint for configuring and managing the
implementation of Maverick in an application by a single
`use/2` macro that provides a supervision tree `start_link/1`
and `child_spec/1` for adding Maverick as a child of the
top-level application supervisor.
The Api module implementing `use Maverick.Api`, when started,
will orchestrate the start of the process that does the heavy
lifting of compiling function routes into a callback Handler
module at application boot and then handing off to the Elli
webserver configured to route requests by way of that Handler module.
## `use Maverick.Api` options
* `:otp_app` - The name of the application implementing Maverick
as an atom (required).
## `Maverick.Api` child_spec and start_link options
* `:init_name` - The name the Initializer should register as.
Primarily for logging and debugging, as the process should exit
immediately with a `:normal` status if successful. May be any
valid GenServer name.
* `:supervisor_name` - The name the Maverick supervisor process
should register as. May be any valid GenServer name.
* `:name` - The name the Elli server process should register as.
May be any valid GenServer name.
* `:port` - The port number the webserver will listen on. Defaults
to 4000.
* `:tls_certfile` - The path to the PEM-encoded SSL/TLS certificate
file to encrypt requests and responses.
* `:tls_keyfile` - The path to the PEM-encoded SSL/TLS key file to
encrypt requests and responses.
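  ## Example
  A minimal API module might look like the following (application and module
  names are illustrative):
      defmodule MyApp.Api do
        use Maverick.Api, otp_app: :my_app
      end
  The module can then be added to the application's supervision tree via
  `child_spec/1`, or started directly with `start_link/1`.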
"""
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
use Plug.Builder
require Logger
@otp_app Keyword.fetch!(opts, :otp_app)
@root_scope opts |> Keyword.get(:root_scope, "/")
@router Module.concat(__MODULE__, Router)
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor
}
end
def start_link(opts \\ []) do
Maverick.Api.Supervisor.start_link(__MODULE__, opts)
end
def list_routes(), do: Maverick.Route.list_routes(@otp_app, @root_scope)
def router() do
@router
end
def init(opts) do
Maverick.Api.Generator.generate_router(__MODULE__)
apply(@router, :init, [opts])
end
def call(conn, opts) do
conn = super(conn, opts)
apply(@router, :call, [conn, opts])
rescue
exception ->
handle_exception(conn, exception)
end
defp handle_exception(_conn, %Plug.Conn.WrapperError{conn: conn, reason: exception}) do
handle_exception(conn, exception)
end
defp handle_exception(conn, error) when is_atom(error) do
exception = Exception.normalize(:error, error)
handle_exception(conn, exception)
end
defp handle_exception(conn, exception) do
Maverick.Exception.handle(exception, conn)
end
end
end
end
|
lib/maverick/api.ex
| 0.810479
| 0.416174
|
api.ex
|
starcoder
|
defmodule Bacen.CCS.ACCS002 do
@moduledoc """
The ACCS002 message.
  This message is a response to the ACCS001 message.
It has the following XML example:
```xml
<CCSArqAtlzDiariaRespArq>
<SitArq>R</SitArq>
<ErroCCS>ECCS0023</ErroCCS>
<UltNumRemessaArq>000000000000</UltNumRemessaArq>
<DtHrBC>2004-06-16T05:04:00</DtHrBC>
<DtMovto>2004-06-16</DtMovto>
</CCSArqAtlzDiariaRespArq>
```
"""
use Ecto.Schema
import Ecto.Changeset
@typedoc """
The ACCS002 message type
"""
@type t :: %__MODULE__{}
@response_fields ~w(last_file_id status error reference_date movement_date)a
@response_required_fields ~w(last_file_id status reference_date movement_date)a
@response_fields_source_sequence ~w(SitArq ErroCCS UltNumRemessaArq DtHrBC DtMovto)a
@allowed_status ~w(R A)
@primary_key false
embedded_schema do
embeds_one :response, Response, source: :CCSArqAtlzDiariaRespArq, primary_key: false do
field :last_file_id, :string, source: :UltNumRemessaArq
field :status, :string, source: :SitArq
field :error, :string, source: :ErroCCS
field :reference_date, :utc_datetime, source: :DtHrBC
field :movement_date, :date, source: :DtMovto
end
end
@doc """
Creates a new ACCS002 message from given attributes.
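  ## Example
  A sketch following the XML example above (field values are illustrative):
      {:ok, message} =
        Bacen.CCS.ACCS002.new(%{
          response: %{
            status: "A",
            last_file_id: "000000000000",
            reference_date: ~U[2004-06-16 05:04:00Z],
            movement_date: ~D[2004-06-16]
          }
        })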
"""
@spec new(map()) :: {:ok, t()} | {:error, Ecto.Changeset.t()}
def new(attrs) when is_map(attrs) do
attrs
|> changeset()
|> apply_action(:insert)
end
@doc false
def changeset(accs002 \\ %__MODULE__{}, attrs) when is_map(attrs) do
accs002
|> cast(attrs, [])
|> cast_embed(:response, with: &response_changeset/2, required: true)
end
@doc false
def response_changeset(response, attrs) when is_map(attrs) do
response
|> cast(attrs, @response_fields)
|> validate_required(@response_required_fields)
|> validate_inclusion(:status, @allowed_status)
|> validate_length(:status, is: 1)
|> validate_length(:last_file_id, is: 12)
|> validate_format(:last_file_id, ~r/[0-9]{12}/)
|> validate_by_status()
end
defp validate_by_status(changeset) do
case get_field(changeset, :status) do
"R" ->
changeset
|> validate_required([:error])
|> validate_length(:error, is: 8)
|> validate_format(:error, ~r/E[A-Z]{3}[0-9]{4}/)
_ ->
changeset
end
end
@doc """
Returns the field sequence for given root xml element
## Examples
iex> Bacen.CCS.ACCS002.sequence(:CCSArqAtlzDiariaRespArq)
[:SitArq, :ErroCCS, :UltNumRemessaArq, :DtHrBC, :DtMovto]
"""
@spec sequence(:CCSArqAtlzDiariaRespArq) :: list(atom())
def sequence(:CCSArqAtlzDiariaRespArq), do: @response_fields_source_sequence
end
|
lib/bacen/ccs/accs002.ex
| 0.799873
| 0.664608
|
accs002.ex
|
starcoder
|
defmodule Quandl.V3.Model.DatasetMetadata do
@moduledoc """
Dataset Metadata for a time-series.
## Attributes
  * `id` (*type:* `Integer.t`, *default:* `nil`)
  * `dataset_code` (*type:* `String.t`, *default:* `nil`)
  * `database_id` (*type:* `Integer.t`, *default:* `nil`)
  * `database_code` (*type:* `String.t`, *default:* `nil`)
  * `name` (*type:* `String.t`, *default:* `nil`)
  * `description` (*type:* `String.t`, *default:* `nil`)
  * `refreshed_at` (*type:* `String.t`, *default:* `nil`)
  * `oldest_available_date` (*type:* `String.t`, *default:* `nil`)
  * `newest_available_date` (*type:* `String.t`, *default:* `nil`)
  * `column_names` (*type:* `list(String.t)`, *default:* `nil`)
  * `frequency` (*type:* `String.t`, *default:* `nil`)
  * `type` (*type:* `String.t`, *default:* `nil`)
  * `premium` (*type:* `boolean()`, *default:* `nil`)
"""
use GoogleApi.Gax.ModelBase
@type t :: %__MODULE__{
:id => integer(),
:dataset_code => String.t(),
:database_id => integer(),
:database_code => String.t(),
:name => String.t(),
:description => String.t(),
:refreshed_at => String.t(),
:oldest_available_date => String.t(),
:newest_available_date => String.t(),
:column_names => list(String.t()),
:frequency => String.t(),
:type => String.t(),
:premium => boolean()
}
field(:id)
field(:dataset_code)
field(:database_id)
field(:database_code)
field(:name)
field(:description)
field(:refreshed_at)
field(:oldest_available_date)
field(:newest_available_date)
field(:column_names, type: :list)
field(:frequency)
field(:type)
field(:premium)
end
defimpl Poison.Decoder, for: Quandl.V3.Model.DatasetMetadata do
def decode(value, options) do
Quandl.V3.Model.DatasetMetadata.decode(value, options)
end
end
defimpl Poison.Encoder, for: Quandl.V3.Model.DatasetMetadata do
def encode(value, options) do
GoogleApi.Gax.ModelBase.encode(value, options)
end
end
|
lib/quandl/v3/model/dataset_metadata.ex
| 0.777258
| 0.623921
|
dataset_metadata.ex
|
starcoder
|
defmodule RDF.XSD do
@moduledoc """
An implementation of the XML Schema (XSD) datatype system for use within `RDF.Literal.Datatype` system.
It consists of
- `RDF.XSD.Datatype`: a more specialized `RDF.Literal.Datatype` behaviour for XSD datatypes
- `RDF.XSD.Datatype.Primitive`: macros for the definition of `RDF.Literal.Datatype` and
`RDF.XSD.Datatype` implementations for primitive XSD datatypes
- `RDF.XSD.Datatype.Restriction`: macros for the definition of `RDF.Literal.Datatype` and
`RDF.XSD.Datatype` implementations for derived XSD datatypes
- `RDF.XSD.Facet`: a behaviour for XSD facets which can be used to constrain values on
datatype derivations
  See <https://www.w3.org/TR/xmlschema11-2/>
"""
import RDF.Utils.Guards
alias __MODULE__
@facets [
XSD.Facets.MinInclusive,
XSD.Facets.MaxInclusive
]
@doc """
The list of all XSD facets.
"""
@spec facets() :: Enum.t()
def facets(), do: @facets
@facets_by_name Map.new(@facets, fn facet -> {facet.name(), facet} end)
@doc """
Get a `RDF.XSD.Facet` by its name.
"""
def facet(name)
def facet(name) when is_ordinary_atom(name), do: @facets_by_name[to_string(name)]
def facet(name), do: @facets_by_name[name]
@doc """
Returns if the given value is a `RDF.XSD.Datatype` struct or `RDF.Literal` with a `RDF.XSD.Datatype`.
"""
defdelegate datatype?(value), to: RDF.Literal.Datatype.Registry, as: :xsd_datatype?
for datatype <- RDF.Literal.Datatype.Registry.builtin_xsd_datatypes() do
defdelegate unquote(String.to_atom(datatype.name))(value), to: datatype, as: :new
defdelegate unquote(String.to_atom(datatype.name))(value, opts), to: datatype, as: :new
elixir_name = Macro.underscore(datatype.name)
unless datatype.name == elixir_name do
defdelegate unquote(String.to_atom(elixir_name))(value), to: datatype, as: :new
defdelegate unquote(String.to_atom(elixir_name))(value, opts), to: datatype, as: :new
end
end
defdelegate datetime(value), to: XSD.DateTime, as: :new
defdelegate datetime(value, opts), to: XSD.DateTime, as: :new
defdelegate unquote(true)(), to: XSD.Boolean.Value
defdelegate unquote(false)(), to: XSD.Boolean.Value
end
|
lib/rdf/xsd.ex
| 0.885229
| 0.836154
|
xsd.ex
|
starcoder
|
defmodule HamRadio.Bands do
@moduledoc """
Retrieves amateur radio bands.
Band names and ranges have been defined directly from the
[ADIF 3.1.0 specification](http://adif.org/310/ADIF_310.htm#Band_Enumeration).
"""
alias HamRadio.Band
@bands [
%Band{
name: "2190m",
range: 135_700..137_800
},
%Band{
name: "630m",
range: 472_000..479_000
},
%Band{
name: "560m",
range: 501_000..504_000
},
%Band{
name: "160m",
range: 1_800_000..2_000_000
},
%Band{
name: "80m",
range: 3_500_000..4_000_000
},
%Band{
name: "60m",
range: 5_060_000..5_450_000
},
%Band{
name: "40m",
range: 7_000_000..7_300_000
},
%Band{
name: "30m",
range: 10_000_000..10_150_000
},
%Band{
name: "20m",
range: 14_000_000..14_350_000
},
%Band{
name: "17m",
range: 18_068_000..18_168_000
},
%Band{
name: "15m",
range: 21_000_000..21_450_000
},
%Band{
name: "12m",
range: 24_890_000..24_990_000
},
%Band{
name: "10m",
range: 28_000_000..29_700_000
},
%Band{
name: "6m",
range: 50_000_000..54_000_000
},
%Band{
name: "4m",
range: 70_000_000..71_000_000
},
%Band{
name: "2m",
range: 144_000_000..148_000_000
},
%Band{
name: "1.25m",
range: 222_000_000..225_000_000
},
%Band{
name: "70cm",
range: 420_000_000..450_000_000
},
%Band{
name: "33cm",
range: 902_000_000..928_000_000
},
%Band{
name: "23cm",
range: 1_240_000_000..1_300_000_000
},
%Band{
name: "13cm",
range: 2_300_000_000..2_450_000_000
},
%Band{
name: "9cm",
range: 3_300_000_000..3_500_000_000
},
%Band{
name: "6cm",
range: 5_650_000_000..5_925_000_000
},
%Band{
name: "3cm",
range: 10_000_000_000..10_500_000_000
},
%Band{
name: "1.25cm",
range: 24_000_000_000..24_250_000_000
},
%Band{
name: "6mm",
range: 47_000_000_000..47_200_000_000
},
%Band{
name: "4mm",
range: 75_500_000_000..81_000_000_000
},
%Band{
name: "2.5mm",
range: 119_980_000_000..120_020_000_000
},
%Band{
name: "2mm",
range: 142_000_000_000..149_000_000_000
},
%Band{
name: "1mm",
range: 241_000_000_000..250_000_000_000
}
]
@doc """
Returns a list of all known bands.
Bands are sorted by increasing frequency.
"""
@spec list :: [Band.t()]
def list, do: @bands
@doc """
Returns the band at a particular frequency.
Band edges are inclusive.
Returns `nil` if no band is found.
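  ## Examples
  Frequencies are in hertz, matching the band table above:
      iex> HamRadio.Bands.at(14_200_000).name
      "20m"
      iex> HamRadio.Bands.at(100)
      nil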
"""
@spec at(float) :: Band.t() | nil
def at(hz) when is_float(hz), do: at(Kernel.trunc(hz))
@spec at(integer) :: Band.t() | nil
def at(hz) do
@bands |> Enum.find(fn band -> hz in band.range end)
end
@doc """
Returns a band with a particular ADIF name.
Returns `nil` if no band is found.
"""
@spec find(String.t()) :: Band.t() | nil
def find(name) do
@bands |> Enum.find(fn band -> band.name == name end)
end
end
|
lib/ham_radio/bands.ex
| 0.776029
| 0.46794
|
bands.ex
|
starcoder
|
defmodule MarsRover.Planet do
@moduledoc """
The planet object represented as a square grid with wrapping edges.
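  ## Example
  A short usage sketch (grid size and coordinates are illustrative):
      {:ok, planet} = MarsRover.Planet.start_link(5, 5)
      {:ok, {3, 4}} = MarsRover.Planet.add_obstacle(planet, 3, 4)
      true = MarsRover.Planet.has_obstacle?(planet, 3, 4)
      # Coordinates wrap around the edges: {8, 9} maps onto {3, 4}
      {3, 4} = MarsRover.Planet.normalize_coordinates(planet, 8, 9)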
"""
use GenServer
defstruct [:grid, :obstacles]
# API
  @spec start_link(integer(), integer()) :: :ignore | {:error, any()} | {:ok, pid()}
def start_link(width, height) when width <= 0 or height <= 0, do: {:error, :invalid_grid}
def start_link(width, height) do
state = %__MODULE__{grid: {width, height}, obstacles: []}
GenServer.start_link(__MODULE__, state)
end
  @spec add_obstacle(atom() | pid(), integer(), integer()) :: any()
def add_obstacle(_planet, x, y) when x <= 0 or y <= 0, do: {:error, :invalid_coordinates}
def add_obstacle(planet, x, y) do
GenServer.call(planet, {:add_obstacle, x, y})
end
  @spec has_obstacle?(atom() | pid(), integer(), integer()) :: any()
  def has_obstacle?(planet, x, y), do: GenServer.call(planet, {:has_obstacle, x, y})
  @spec normalize_coordinates(atom() | pid(), integer(), integer()) :: any()
def normalize_coordinates(planet, x, y), do: GenServer.call(planet, {:normalize, x, y})
# Callbacks
@spec init(any()) :: {:ok, any()}
def init(state), do: {:ok, state}
def handle_call({:add_obstacle, x, y}, _from, %{grid: {width, height}} = state) do
x = to_planet_edges(x, width)
y = to_planet_edges(y, height)
{res, state} = handle_add_obstacle(x, y, state)
{:reply, res, state}
end
def handle_call({:has_obstacle, x, y}, _from, %{grid: {width, height}} = state) do
x = to_planet_edges(x, width)
y = to_planet_edges(y, height)
{:reply, {x, y} in state.obstacles, state}
end
def handle_call({:normalize, x, y}, _from, %{grid: {width, height}} = state) do
x = to_planet_edges(x, width)
y = to_planet_edges(y, height)
{:reply, {x, y}, state}
end
# Privates
# Add a new obstacle in grid, considering circular edges
defp handle_add_obstacle(x, y, state) when x <= 0 or y <= 0, do: {{:error, :invalid_coordinates}, state}
defp handle_add_obstacle(x, y, state) do
obstacles =
[{x, y}]
|> Kernel.++(state.obstacles)
|> Enum.uniq()
{{:ok, {x, y}}, %{state | obstacles: obstacles}}
end
# Normalize position considering the circular edge.
defp to_planet_edges(val, edge) do
case rem(val, edge) do
0 -> edge
n -> n
end
end
end
|
lib/planet.ex
| 0.893877
| 0.641071
|
planet.ex
|
starcoder
|
defmodule Shippex.Util do
@moduledoc false
@doc """
Takes a price and multiplies it by 100. Accepts nil, floats, integers, and
strings.
iex> Util.price_to_cents(nil)
0
iex> Util.price_to_cents(0)
0
iex> Util.price_to_cents(28.00)
2800
iex> Util.price_to_cents("28.00")
2800
iex> Util.price_to_cents("28")
2800
iex> Util.price_to_cents(28)
2800
"""
@spec price_to_cents(nil | number() | String.t()) :: integer
def price_to_cents(string) when is_binary(string) do
{float, _} = Float.parse(string)
price_to_cents(float)
end
def price_to_cents(nil), do: 0
def price_to_cents(float) when is_float(float), do: Float.floor(float * 100) |> round
def price_to_cents(integer) when is_integer(integer), do: integer * 100
@doc """
Takes a price and divides it by 100, returning a string representation. This
is used for API calls that require dollars instead of cents. Unlike
`price_to_cents`, this only accepts integers and nil. Otherwise, it will
raise an exception.
iex> Util.price_to_dollars(nil)
"0.00"
iex> Util.price_to_dollars(200_00)
"200"
iex> Util.price_to_dollars("20000")
** (FunctionClauseError) no function clause matching in Shippex.Util.price_to_dollars/1
"""
@spec price_to_dollars(integer) :: String.t() | none()
def price_to_dollars(nil), do: "0.00"
def price_to_dollars(integer) when is_integer(integer) do
dollars = Integer.floor_div(integer, 100)
cents = rem(integer, 100)
s = "#{dollars}"
cond do
cents == 0 ->
s
cents < 10 ->
"#{s}.0#{cents}"
true ->
"#{s}.#{cents}"
end
end
@doc """
Converts pounds to kilograms.
iex> Util.lbs_to_kgs(10)
4.5
iex> Util.lbs_to_kgs(0)
0.0
"""
@spec lbs_to_kgs(number()) :: float()
def lbs_to_kgs(lbs) do
Float.round(lbs * 0.453592, 1)
end
@doc """
Converts kilograms to pounds.
iex> Util.kgs_to_lbs(10)
22.0
iex> Util.kgs_to_lbs(0)
0.0
"""
@spec kgs_to_lbs(number()) :: float()
def kgs_to_lbs(kgs) do
Float.round(kgs * 2.20462, 1)
end
@doc """
Converts inches to centimeters.
iex> Util.inches_to_cm(10)
25.4
iex> Util.inches_to_cm(0)
0.0
"""
@spec inches_to_cm(number()) :: float()
def inches_to_cm(inches) do
Float.round(inches * 2.54, 1)
end
@doc """
Converts centimeters to inches.
iex> Util.cm_to_inches(10)
3.9
iex> Util.cm_to_inches(0)
0.0
"""
@spec cm_to_inches(number()) :: float()
def cm_to_inches(cm) do
Float.round(cm * 0.393701, 1)
end
@doc """
Removes accents/ligatures from letters.
iex> Util.unaccent("Curaçao")
"Curacao"
iex> Util.unaccent("Republic of Foo (the)")
"Republic of Foo (the)"
iex> Util.unaccent("Åland Islands")
"Aland Islands"
"""
@spec unaccent(String.t()) :: String.t()
def unaccent(string) do
diacritics = Regex.compile!("[\u0300-\u036f]")
string
|> String.normalize(:nfd)
|> String.replace(diacritics, "")
end
@doc ~S"""
Returns `true` for `nil`, empty strings, and strings only containing
whitespace. Returns `false` otherwise.
iex> Util.blank?(nil)
true
iex> Util.blank?("")
true
iex> Util.blank?(" ")
true
iex> Util.blank?(" \t\r\n ")
true
iex> Util.blank?("Test")
false
iex> Util.blank?(100)
false
"""
@spec blank?(term()) :: boolean()
def blank?(nil), do: true
def blank?(""), do: true
def blank?(s) when is_binary(s), do: String.trim(s) == ""
def blank?(_), do: false
@doc """
Returns the given map with keys converted to strings, and the values trimmed
(if the values are also strings).
iex> Util.stringify_and_trim(%{foo: " bar "})
%{"foo" => "bar"}
"""
@spec stringify_and_trim(map()) :: map()
def stringify_and_trim(params) do
for {key, val} <- params, into: %{} do
key =
cond do
is_atom(key) -> Atom.to_string(key)
true -> key
end
val =
cond do
is_binary(val) -> String.trim(val)
true -> val
end
{key, val}
end
end
end
|
lib/shippex/util.ex
| 0.811601
| 0.537102
|
util.ex
|
starcoder
|
defmodule Ecto.Adapters.Connection do
@moduledoc """
Behaviour for adapters that rely on connections.
In order to use a connection, adapter developers need to implement
a single callback in a module: `connect/1` defined in this module.
The benefits of implementing this module is that the adapter can
then be used with all the different pools provided by Ecto.
"""
@doc """
Connects to the underlying database.
Should return a process which is linked to
the caller process or an error.
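  For example (`MyDriver` is a hypothetical connection process; a real adapter
  would start its own database driver here):
      defmodule MyAdapter.Connection do
        @behaviour Ecto.Adapters.Connection

        def connect(opts) do
          # Must return {:ok, pid} with the pid linked to the caller
          MyDriver.start_link(opts)
        end
      end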
"""
@callback connect(Keyword.t) :: {:ok, pid} | {:error, term}
@doc """
Executes the connect in the given module, ensuring the repository's
`after_connect/1` is invoked in the process.
"""
def connect(module, opts) do
case module.connect(opts) do
{:ok, conn} ->
after_connect(conn, opts)
{:error, _} = error ->
error
end
end
defp after_connect(conn, opts) do
repo = opts[:repo]
if function_exported?(repo, :after_connect, 1) do
try do
Task.async(fn -> repo.after_connect(conn) end)
|> Task.await(opts[:timeout])
catch
:exit, {:timeout, [Task, :await, [%Task{pid: task_pid}, _]]} ->
shutdown(task_pid, :brutal_kill)
shutdown(conn, :brutal_kill)
{:error, :timeout}
:exit, {reason, {Task, :await, _}} ->
shutdown(conn, :brutal_kill)
{:error, reason}
else
_ -> {:ok, conn}
end
else
{:ok, conn}
end
end
@doc """
Shutdown the given connection `pid`.
If `pid` does not exit within `timeout` it is killed, or it is killed
immediately if `:brutal_kill`.
"""
@spec shutdown(pid, timeout | :brutal_kill) :: :ok
def shutdown(pid, shutdown \\ 5_000)
def shutdown(pid, :brutal_kill) do
ref = Process.monitor(pid)
Process.exit(pid, :kill)
receive do
{:DOWN, ^ref, _, _, _} -> :ok
end
end
def shutdown(pid, timeout) do
ref = Process.monitor(pid)
Process.exit(pid, :shutdown)
receive do
{:DOWN, ^ref, _, _, _} -> :ok
after
timeout ->
Process.exit(pid, :kill)
receive do
{:DOWN, ^ref, _, _, _} -> :ok
end
end
end
end
|
lib/ecto/adapters/connection.ex
| 0.673729
| 0.462594
|
connection.ex
|
starcoder
|
defmodule PTV do
@moduledoc """
API adaptor for the PTV Timetable API
"""
@doc """
Generates signed url for an API call.
  Builds the request URL and calculates an HMAC digest for authentication according to the instructions at https://www.ptv.vic.gov.au/footer/data-and-reporting/datasets/ptv-timetable-api/.
"""
def signed_call_url(
%{
base_url: base_url,
api_version: api_version,
api_name: api_name,
search_string: search_string,
params: params
},
devid,
api_key
) do
query_params =
params
|> Map.put(:devid, devid)
query_string =
query_params
|> Map.keys()
|> Enum.map(fn key -> "#{key}=#{query_params[key]}" end)
|> Enum.join("&")
request_message = "/#{api_version}/#{api_name}/#{search_string}?#{query_string}"
hmac_digest =
:crypto.hmac(:sha, api_key, request_message)
|> Base.encode16()
"#{base_url}#{request_message}&signature=#{hmac_digest}"
end
@doc """
Executes HTTP request to API for an API call.
Returns `{:ok, response_map}` on successful response.
  Returns `{:error, {:reason_atom, message_string}}` on an erroneous response, where :reason_atom is one of the following:
- :invalid_request
- :access_denied
- :connection_failed
- :decode_failed
## Examples
```
iex> PTV.execute_call(%PTV.Call{
...> api_name: "stops",
...> search_string: "10/route_type/3",
...> params: %{stop_amenities: true}
...> },
...> "1234567",
...> "12345678901234567890"
...> )
{:ok, %{"stop" => %{}, "stop" => %{}""status" => %{}}}
```
"""
def execute_call(call, devid, api_key) do
case call |> signed_call_url(devid, api_key) |> HTTPoison.get() do
{:ok, %HTTPoison.Response{status_code: status_code, body: body}} ->
decode_call_response(status_code, body)
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, {:connection_failed, reason}}
end
end
defp decode_call_response(status_code, body) do
case body |> Poison.decode() do
{:ok, decoded_body} ->
case status_code do
200 ->
{:ok, decoded_body}
400 ->
{:error, {:invalid_request, decoded_body["message"]}}
403 ->
{:error, {:access_denied, decoded_body["message"]}}
end
{:error, reason} ->
{:error, {:decode_failed, reason}}
end
end
end
|
lib/ptv.ex
| 0.8288
| 0.42656
|
ptv.ex
|
starcoder
|
defmodule Aoc2020Day14 do
import Enum
def solve1(input) do
input
|> String.trim()
|> String.split("\n", trim: true)
|> map(&parse(&1))
|> reduce({[], %{}}, &reducer/2)
|> elem(1)
|> Map.values()
|> sum
end
def solve2(input) do
input
|> String.trim()
|> String.split("\n", trim: true)
|> map(&parse(&1))
|> reduce({[], %{}}, &reducer2/2)
|> elem(1)
|> Map.values()
|> sum
end
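  # Expands every floating bit ("X", given as {_, index} pairs) into both a 0
  # and a 1, producing all concrete memory addresses for a masked write.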
defp combination(bits, []) do
s = bits |> join("")
{dec, _} = Integer.parse(s, 2)
dec
end
defp combination(bits, [x | xs]) do
{_v, idx} = x
[
combination(List.replace_at(bits, idx, "0"), xs),
combination(List.replace_at(bits, idx, "1"), xs)
]
end
defp reducer2({:mask, mask}, {_omask, omem}) do
{mask, omem}
end
defp reducer2({idx, value}, {omask, omem}) do
vr = binary(idx) |> reverse
omar = omask |> reverse
newv =
zip(omar, vr)
|> map(fn {x, y} ->
cond do
x == "0" -> y
true -> x
end
end)
r = reverse(newv)
# IO.inspect({idx, value, r})
xs =
r
|> with_index
|> filter(fn {v, _idx} ->
v == "X"
end)
# IO.inspect(xs)
v =
combination(r, xs)
|> List.flatten()
r =
v
|> reduce(omem, fn x, acc ->
Map.put(acc, x, value)
end)
{omask, r}
end
defp reducer({:mask, mask}, {_omask, omem}) do
{mask, omem}
end
defp reducer({idx, value}, {omask, omem}) do
vr = binary(value) |> reverse
omar = omask |> reverse
newv =
zip(omar, vr)
|> map(fn {x, y} ->
cond do
x == "X" -> y
true -> x
end
end)
s = reverse(newv) |> join("")
s =
if s == "" do
"0"
else
s
end
{dec, _} = Integer.parse(s, 2)
{omask, Map.put(omem, idx, dec)}
end
defp binary(n) do
binary(n, [])
end
defp pad(xs) when length(xs) < 36 do
pad(["0" | xs])
end
defp pad(xs) do
xs
end
defp binary(0, acc) do
acc
|> map(&Integer.to_string/1)
|> pad
end
defp binary(n, acc) do
binary(div(n, 2), [rem(n, 2) | acc])
end
def parse("mask =" <> mask) do
{:mask, String.split(mask |> String.trim(), "", trim: true)}
end
def parse(line) do
matched = Regex.named_captures(~r/mem\[(?<idx>[0-9]+)\] = (?<value>.*)/, line)
{String.to_integer(matched["idx"]), String.to_integer(matched["value"])}
end
end
|
lib/2020/aoc2020_day14.ex
| 0.61231
| 0.459864
|
aoc2020_day14.ex
|
starcoder
|
defmodule Beamchmark.Suite.CPU.CpuTask do
@moduledoc """
This module contains the CPU benchmarking task.
Measurements are performed using [`:cpu_sup.util/1`](https://www.erlang.org/doc/man/cpu_sup.html)
  Currently (according to the docs), we identify the following as busy processor states:
- user
- nice_user (low priority use mode)
- kernel
  Run example:
  ```
  CpuTask.start_link(500, 5_000)
  ```
"""
use Task
alias Beamchmark.Suite.Measurements.CpuInfo
@interfere_timeout 100
@doc """
"""
@spec start_link(cpu_interval :: pos_integer(), duration :: pos_integer()) :: Task.t()
def start_link(cpu_interval, duration) do
Task.async(fn ->
run_poll(
cpu_interval,
duration
)
end)
end
@spec run_poll(number(), number()) :: {:ok, CpuInfo.t()}
defp run_poll(cpu_interval, duration) do
iterations_number = trunc(duration / cpu_interval)
:cpu_sup.start()
    # The first call returns garbage, according to the docs
    :cpu_sup.util([:per_cpu])
    # The act of measuring itself pollutes the results,
    # so we need to wait for @interfere_timeout
Process.sleep(@interfere_timeout)
if cpu_interval < @interfere_timeout do
raise "cpu_interval (#{cpu_interval}) can't be less than #{@interfere_timeout}"
end
cpu_snapshots =
Enum.reduce(0..(iterations_number - 1), [], fn _x, cpu_snapshots ->
cpu_snapshots = [cpu_snapshot() | cpu_snapshots]
Process.sleep(cpu_interval)
cpu_snapshots
end)
{:ok, CpuInfo.from_cpu_snapshots(cpu_snapshots)}
end
@spec cpu_snapshot() :: CpuInfo.cpu_snapshot_t()
defp cpu_snapshot() do
to_cpu_snapshot(:cpu_sup.util([:per_cpu]))
end
# Converts output of `:cpu_sup.util([:per_cpu])` to `cpu_snapshot_t`
@spec to_cpu_snapshot(any()) :: CpuInfo.cpu_snapshot_t()
defp to_cpu_snapshot(cpu_util_result) do
cpu_core_usage_map =
Enum.reduce(cpu_util_result, %{}, fn {core_id, usage, _idle, _mix}, cpu_core_usage_acc ->
Map.put(cpu_core_usage_acc, core_id, usage)
end)
average_all_cores =
Enum.reduce(cpu_core_usage_map, 0, fn {_core_id, usage}, average_all_cores_acc ->
average_all_cores_acc + usage
end) / map_size(cpu_core_usage_map)
%{
cpu_usage: cpu_core_usage_map,
average_all_cores: average_all_cores
}
end
end
|
lib/beamchmark/suite/cpu/cpu_task.ex
| 0.871105
| 0.80112
|
cpu_task.ex
|
starcoder
|
defmodule ExIsbndb.Search do
@moduledoc """
The `ExIsbndb.Search` module contains an endpoint that is able
to search anything inside the ISBNdb database.
The available function needs to receive a map with params, but only those
needed for the endpoint will be taken.
"""
alias ExIsbndb.Client
@valid_indexes ["subjects", "publishers", "authors", "books"]
@doc """
Returns all the results related to a topic that match the given params.
No empty or nil values are permitted in this query.
Params required:
* index (string) - the topic where the search will be focused on
* valid values - `"subjects"`, `"publishers"`, `"authors"`, `"books"`
Params available:
* page (integer) - page number of the results
* page_size(integer) - number of results per page
* isbn (string) - an ISBN 10
* isbn13 (string) - an ISBN 13
* author (string) - the name of the author
  * text (string) - a string to search for within the given index
* subject (string) - a subject
* publisher (string) - the name of the publisher
Any other parameters will be ignored.
## Examples
iex> ExIsbndb.Search.all(%{index: "authors", page: 1, page_size: 5, author: "<NAME>"})
{:ok, %Finch.Response{body: "...", headers: [...], status: 200}}
"""
@spec all(map()) :: {:ok, Finch.Response.t()} | {:error, Exception.t()}
def all(%{index: index} = params) when index in @valid_indexes do
params =
%{
page: params[:page],
pageSize: params[:page_size],
isbn: params[:isbn],
isbn13: params[:isbn13],
author: params[:author],
text: params[:text],
subject: params[:subject],
publisher: params[:publisher]
}
# |> Map.filter(fn {_key, val} -> not is_nil(val) and val != "" end)
# We are only able to use Map.filter/2 from Elixir 1.13
# so we are using another solution to get more compatibility
|> Map.to_list()
|> Enum.filter(fn {_key, val} -> not is_nil(val) and val != "" end)
|> Map.new()
Client.request(:get, "search/#{index}", params)
end
end
|
lib/search.ex
| 0.894588
| 0.580114
|
search.ex
|
starcoder
|
defmodule Membrane.Dashboard.Charts.Update do
@moduledoc """
Module responsible for preparing data for uPlot charts when they are being updated.
Example (showing last 5 minutes of one chart data)
-305s -300s -5s now
_____________________________________________________
| | |
| Old data | |
|__________________________________________| New data |
| New series data | |
|___________________________________|__________|
|
|
|
V
______________________________________________
| |
| |
| Updated data |
| |
|______________________________________________|
  First, it queries the database to get all the data from the last 5 seconds. Then it applies the following steps to every chart to update the data:
1. Extract new paths (of pipeline elements) from the result of the query (all paths that appeared for the first time in the last 5 seconds).
2. Create a list of uPlot Series objects with the `label` attribute (as maps: `%{label: path_name}`). One map for every new path.
3. Every path needs to have a value for every timestamp thus new data series must be filled with nils until the first measurement timestamp.
4. Extract new data (data for all paths for the last 5 seconds; as a list of lists) from the database query result.
5. Truncate old data - delete its first 5 seconds (to maintain visibility of just last x minutes).
6. Concatenate truncated old data and new series data - it creates full data for the time before update.
7. Append new data to every path data.
8. Create map (of type `update_data_t`) that is serializable to ChartData in ChartsHook.
"""
import Membrane.Dashboard.Charts.Helpers
  alias Membrane.Dashboard.Charts
  alias Membrane.Dashboard.Charts.{ChartDataFrame, Context}
@doc """
Returns:
- update data for uPlot, where new data is from between `time_from` and `time_to`. Consists of new series and full data for charts;
- full data as 3d list;
- list of all paths.
"""
@spec query(Context.t()) :: Charts.chart_query_result_t()
def query(%Context{time_to: time_to, metric: metric, accuracy: accuracy, df: old_df} = context) do
%Context{
paths_mapping: old_paths_mapping,
latest_time: last_time_to
} = context
    # query 2 seconds back to compensate for data that may not have been inserted yet
back_shift = floor(1_000 / accuracy * 2)
update_from = last_time_to - accuracy * back_shift
case query_measurements(update_from, time_to, metric, accuracy) do
{:ok, rows, new_paths_mapping} ->
paths_mapping = Map.merge(old_paths_mapping, new_paths_mapping)
        new_df = ChartDataFrame.from_rows(rows, update_from, time_to, accuracy)
        # back_shift + 1 because we don't want to repeat the last timestamp twice
        df = ChartDataFrame.merge(old_df, new_df, back_shift + 1)

        chart =
          cond do
            metric in ["caps", "event"] ->
              ChartDataFrame.to_cumulative_chart(df, paths_mapping)

            metric in ["buffer", "bitrate"] ->
              ChartDataFrame.to_changes_per_second_chart(df, paths_mapping, accuracy)

            true ->
              ChartDataFrame.to_simple_chart(df, paths_mapping)
          end
{:ok, {chart, paths_mapping, df}}
:error ->
{:error, "Cannot fetch update data for charts"}
end
end
end
|
lib/membrane_dashboard/charts/update.ex
| 0.8709
| 0.689253
|
update.ex
|
starcoder
|
defmodule Day23 do
def part1(input) do
Interpreter.new(input)
|> Map.put(:a, 7)
|> Interpreter.execute
|> Map.get(:a)
end
def part2(input) do
Interpreter.new(input)
|> Map.put(:a, 12)
|> Interpreter.optimize
|> Interpreter.execute
|> Map.get(:a)
end
end
defmodule Interpreter do
def new(program, c \\ 0) do
machine(program)
|> Map.put(:c, c)
end
defp machine(program) do
read_program(program)
|> Map.put(:a, 0)
|> Map.put(:b, 0)
|> Map.put(:c, 0)
|> Map.put(:d, 0)
|> Map.put(:ip, 0)
end
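  # Peephole optimization: the instruction window at addresses 4..9 implements
  # r1 <- r1 + r3 * r4 as two nested inc/dec loops. When the four registers are
  # distinct, address 5 is patched so the whole loop runs as a single
  # multiplication and execution resumes at address 10.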
def optimize(memory) do
case memory do
%{4 => {:cpy, r2, r3},
5 => {:inc, r1},
6 => {:dec, r3},
7 => {:jnz, r3, -2},
8 => {:dec, r4},
9 => {:jnz, r4, -5}} ->
case Enum.uniq([r1, r2, r3, r4]) |> Enum.count do
4 ->
Map.replace!(memory, 5, {:patched, mul_patch_fn(r1, r3, r4)})
_ ->
memory
end
%{} ->
memory
end
end
defp mul_patch_fn(r1, r3, r4) do
fn memory ->
%{^r1 => val1, ^r3 => val3, ^r4 => val4} = memory
val1 = val1 + val3 * val4
memory = %{memory | r1 => val1, r3 => 0, r4 => 0}
execute(memory, 10)
end
end
def execute(memory, ip \\ 0) do
instr = Map.get(memory, ip, :done)
case instr do
{:cpy, src, dst} when is_atom(dst) ->
memory = Map.replace!(memory, dst, get_value(memory, src))
execute(memory, ip + 1)
{:cpy, _src, dst} when is_integer(dst) ->
execute(memory, ip + 1)
{:inc, dst} ->
value = Map.fetch!(memory, dst)
memory = Map.replace!(memory, dst, value + 1)
execute(memory, ip + 1)
{:dec, dst} ->
value = Map.fetch!(memory, dst)
memory = Map.replace!(memory, dst, value - 1)
execute(memory, ip + 1)
{:jnz, src, offset} ->
case get_value(memory, src) do
0 -> execute(memory, ip + 1)
_ -> execute(memory, ip + get_value(memory, offset))
end
{:tgl, src} ->
address = ip + get_value(memory, src)
memory = toggle(memory, address)
execute(memory, ip + 1)
{:patched, f} ->
f.(memory)
:done ->
memory
end
end
defp toggle(memory, address) do
case memory do
%{^address => instr} ->
instr = toggle_instr(instr)
Map.replace!(memory, address, instr)
%{} ->
memory
end
end
defp toggle_instr(instr) do
case instr do
{:inc, dst} ->
{:dec, dst}
{_, arg} ->
{:inc, arg}
{:jnz, arg1, arg2} ->
{:cpy, arg1, arg2}
{_, arg1, arg2} ->
{:jnz, arg1, arg2}
end
end
defp get_value(memory, src) do
if is_atom(src) do
Map.fetch!(memory, src)
else
src
end
end
defp read_program(input) do
input
|> Enum.map(fn instr ->
[name | args] = String.split(instr, " ")
args = Enum.map(args, fn arg ->
case Integer.parse(arg) do
:error -> String.to_atom(arg)
{val, ""} -> val
end
end)
List.to_tuple([String.to_atom(name) | args])
end)
|> Stream.with_index
|> Stream.map(fn {code, index} -> {index, code} end)
|> Map.new
end
end
|
day23/lib/day23.ex
| 0.561575
| 0.499023
|
day23.ex
|
starcoder
|
defmodule ExMatrix do
@moduledoc """
`ExMatrix` is a new Matrix library for Elixir. This library helps you to create a matrix,
`manipulate` it with values and `add/subtract` two matrices.
## What is a Matrix
A matrix is a collection of numbers arranged into a fixed number of rows and columns.
Here is an example of a `3x2` matrix which means that we have `3 rows` and `2 columns`.
```
col 1 col 2
row 1 | 0 1 |
row 2 | 2 7 |
row 3 | 9 0 |
```
This is `row 2`
```
| 2 7 |
```
This is `col 2`
```
col 2
| 1 |
| 7 |
| 0 |
```
To get value from the above matrix we need to specify the row and colum that we need to get
the value from. The known syntax is `(number_of_rows,number_of_columns)`
`(0,1) = 1` and `(2,0) = 9`
## Elixir Matrix
  So to generate an elixir matrix you can use `ExMatrix.create("RowsxColumns")`,
  which will generate a map that can be used as a matrix.
But note that `ExMatrix.create(...)` will generate an empty matrix as you can see in the example
```
iex> matrix = ExMatrix.create("3x2")
%{"0" => %{"0" => "", "1" => ""},
"1" => %{"0" => "", "1" => ""},
"2" => %{"0" => "", "1" => ""}
}
```
So to fill this matrix with values you can use `ExMatrix.set_matrix(matrix,data)`
```
matrix = %{"0" => %{"0" => "0", "1" => "1"},
"1" => %{"0" => "2", "1" => "7"},
"2" => %{"0" => "9", "1" => "0"}
}
```
  Now you can get values: `matrix["0"]["1"] == "1"` and `matrix["2"]["0"] == "9"`.
## Adding or Subtracting two matrices
There are many operation that you can apply on matrices and one of these operations is to add
and subtract two matrices.
Small review on how we deal with addition and subtraction of two matrices:
```
Matrix A + Matrix B = Result
| 0 1 | | 1 -1 | | 1 0 |
| 2 7 | | 7 -2 | | 9 5 |
| 9 0 | | -1 8 | | 8 8 |
```
You can use `ExMatrix.add_matrices(matrix_1,matrix_2)` or `ExMatrix.sub_matrices(matrix_1,matrix_2)`
## Example Function
In case that there is something vague please use a helper function
```
iex> ExMatrix.example("2x2")
%{"0" => %{"0" => "(0,0)", "1" => "(0,1)"},
"1" => %{"0" => "(1,0)", "1" => "(1,1)"}
}
```
## What's Next
  This library will be extended to have the ability to:
1. Add or subtract two or more matrices
1. Multiply and Divide two or more matrices
1. Matrix Transpose
For more information please check the [github](https://github.com/jat10/ex_matrix)
## For contribution on GitHub
Please read the contribution requirements before start working
"""
@doc """
Create a matrix
## Example
iex> ExMatrix.create("2x2")
%{"0" => %{"0" => "", "1" => ""},
"1" => %{"0" => "", "1" => ""}
}
"""
def create(name) do
Helper.create_map_matrix(name)
end
@doc """
Set values to the base matrix
## Example
iex> matrix = ExMatrix.create("2x2")
iex> data = [{"(0,0)","1"},{"(0,1)","2"},{"(1,0)","3"},{"(1,1)","4"}]
iex> ExMatrix.set_matrix(matrix,data)
%{"0" => %{"0" => "1", "1" => "2"},
"1" => %{"0" => "3", "1" => "4"}
}
"""
def set_matrix(matrix,data) do
Helper.set_matrix_data(matrix,data)
end
@doc """
Helper function to undestand how elixir matrix is generated
## Example
iex> ExMatrix.example("2x2")
%{"0" => %{"0" => "(0,0)", "1" => "(0,1)"},
"1" => %{"0" => "(1,0)", "1" => "(1,1)"}
}
"""
def example(matrix) do
Helper.example(matrix)
end
@doc """
  Change the type of values in the matrix
  When you generate an elixir matrix the values will be strings. In case you
  need to change that, this function gives you the ability to convert them,
  though currently only to integers.
"""
def change_type(matrix,type) do
Helper.change_type(matrix,type)
end
@doc """
Add two matrices
You can add two matrices with string values or integers value where the return matrix will be the
same type of the two matrices.
## Example
iex> matrix_1 = %{"0" => %{"0" => 2, "1" => 7},"1" => %{"0" => 4, "1" => 4}}
iex> matrix_2 = %{"0" => %{"0" => 8, "1" => 3},"1" => %{"0" => 6, "1" => 0}}
iex> ExMatrix.add_matrices(matrix_1,matrix_2)
%{"0" => %{"0" => 10, "1" => 10},
"1" => %{"0" => 10, "1" => 4}
}
"""
def add_matrices(matrix_1, matrix_2) do
Helper.add_sub_matrix(matrix_1, matrix_2,"add")
end
@doc """
Subtract two matrices
You can subtract two matrices with string values or integers value where the return matrix will be the
same type of the two matrices.
## Example
iex> matrix_1 = %{"0" => %{"0" => "2", "1" => "7"},"1" => %{"0" => "4", "1" => "4"}}
iex> matrix_2 = %{"0" => %{"0" => "8", "1" => "3"},"1" => %{"0" => "6", "1" => "0"}}
iex> ExMatrix.sub_matrices(matrix_1,matrix_2)
%{"0" => %{"0" => "-6", "1" => "4"},
"1" => %{"0" => "-2", "1" => "4"}
}
"""
def sub_matrices(matrix_1, matrix_2) do
Helper.add_sub_matrix(matrix_1, matrix_2,"sub")
end
end
|
lib/ex_matrix.ex
| 0.918881
| 0.980053
|
ex_matrix.ex
|
starcoder
|
defmodule Day03 do
@moduledoc """
Advent of Code 2018, day 3.
"""
use Private
defmodule Claim do
defstruct id: nil, left: nil, top: nil, width: nil, height: nil
end
@doc """
How many square inches of fabric are within two or more claims?
## Examples
iex> Day03.part1("data/day03.txt")
109716
"""
def part1(file_name) do
file_to_inches_map(file_name)
# Get the values out of the inches map.
# Each value is a list of claim id's.
|> Map.values()
# Filter out the single-id lists.
|> Enum.filter(&(length(&1) > 1))
# The length of the resulting list is the
# number of square inches with multiple claims.
|> length
end
@doc """
What is the ID of the only claim that doesn't overlap?
## Examples
iex> Day03.part2("data/day03.txt")
124
"""
def part2(file_name) do
m =
file_to_inches_map(file_name)
# Get the values out of the inches map.
# Each value is a list of claim id's.
|> Map.values()
# Separate single-id lists from multi-id lists
|> Enum.group_by(&(length(&1) == 1))
single_ids = m.true |> List.flatten() |> MapSet.new()
multi_ids = m.false |> List.flatten() |> MapSet.new()
# ids can be in both lists. The one difference is the answer.
MapSet.difference(single_ids, multi_ids)
|> Enum.at(0)
end
private do
# The inches map has keys that are square inch coordinates and
# values that are lists of claim ids that contain the key:
# %{
# {2,3} => ["902"],
# {2,4} => ["902", "81"]
# }
defp file_to_inches_map(file_name) do
re = ~r/#(\d+) @ (\d+),(\d+): (\d+)x(\d+)/
File.stream!(file_name)
|> Stream.map(&String.trim/1)
# Capture just the matching fields, not the matching part of the string
|> Stream.map(&Regex.run(re, &1, capture: :all_but_first))
|> Stream.map(fn fields -> Enum.map(fields, &String.to_integer/1) end)
|> Stream.map(fn [id, left, top, width, height] ->
%Claim{id: id, left: left, top: top, width: width, height: height}
end)
|> Enum.reduce(%{}, &map_inches/2)
end
defp map_inches(claim, inches) do
elements =
for x <- claim.left..(claim.left + claim.width - 1),
y <- claim.top..(claim.top + claim.height - 1),
# {{2,3}, "902"} - {{square inch coordinates}, "claim id"}
do: {{x, y}, claim.id}
Enum.reduce(elements, inches, fn {key, new_value}, inches ->
case Map.fetch(inches, key) do
{:ok, old_value} -> %{inches | key => [new_value | old_value]}
:error -> Map.put_new(inches, key, [new_value])
end
end)
end
end
end
|
day03/lib/day03.ex
| 0.860149
| 0.583025
|
day03.ex
|
starcoder
|
defmodule UnionFind do
alias __MODULE__
@moduledoc """
  A union-find (disjoint set) structure with union by size and path compression.
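  ## Example
  A short usage sketch over five elements:
      u = UnionFind.new(5)
      u = UnionFind.union(u, 0, 2)
      UnionFind.connected(u, 0, 2) #=> true
      UnionFind.connected(u, 0, 3) #=> false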
"""
@doc """
"""
defstruct [:nodes, :sizes]
def new(n) do
1..n
|> Enum.reduce(
%UnionFind{
nodes: List.duplicate(0, n),
sizes: List.duplicate(1, n)
},
fn i, %UnionFind{nodes: nodes} = u ->
%{
u |
nodes: nodes
|> List.replace_at(i, i)
}
end
)
end
  def root(%UnionFind{nodes: nodes} = u, i) do
    parent = Enum.at(nodes, i)

    if parent == i do
      i
    else
      # Path compression: point i at its grandparent before following the chain
      grandparent = Enum.at(nodes, parent)
      root(%{u | nodes: List.replace_at(nodes, i, grandparent)}, parent)
    end
  end
  def union(%UnionFind{nodes: nodes, sizes: sizes} = u, i, j) do
    rooti = root(u, i)
    rootj = root(u, j)

    if rooti == rootj do
      # already connected
      u
    else
      # Attach the root of the smaller tree under the root of the larger tree.
      # Note: sizes are compared at the roots, not at i and j themselves.
      {new_nodes, new_sizes} =
        if Enum.at(sizes, rooti) < Enum.at(sizes, rootj) do
          {List.replace_at(nodes, rooti, rootj),
           List.replace_at(sizes, rootj, Enum.at(sizes, rootj) + Enum.at(sizes, rooti))}
        else
          {List.replace_at(nodes, rootj, rooti),
           List.replace_at(sizes, rooti, Enum.at(sizes, rooti) + Enum.at(sizes, rootj))}
        end

      %{u | nodes: new_nodes, sizes: new_sizes}
    end
  end
def connected(u, i, j) do
root(u, i) == root(u, j)
end
end
|
lib/union_find.ex
| 0.610686
| 0.44059
|
union_find.ex
|
starcoder
|
defmodule Clover.Conversation do
@moduledoc """
A multi-message conversation
A `Clover.Conversation` happens in a `Clover.Room` between a robot and a `Clover.User`.
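  ## Example
  A sketch of driving a conversation directly (`message` is a `Clover.Message`
  and `MyRobot` an illustrative robot module):
      {:ok, conversation} = Clover.Conversation.start(message, MyRobot)
      response = Clover.Conversation.incoming(conversation, message)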
"""
use GenServer
alias Clover.{
Message,
Robot,
Script
}
alias Clover.Conversation.Supervisor, as: ConversationSupervisor
defstruct assigns: %{},
scripts: [],
transcript: []
@type t :: %__MODULE__{
assigns: map,
scripts: [Script.t()],
transcript: [Message.t()]
}
def new do
%__MODULE__{}
end
@spec start_link({message :: Message.t(), robot :: module}, keyword) :: GenServer.on_start()
def start_link({message, _robot} = arg, opts \\ []) do
name = Keyword.get(opts, :name, via_tuple(message))
GenServer.start_link(__MODULE__, arg, name: name)
end
def init({_message, robot}) do
scripts = Robot.scripts(robot)
state = %__MODULE__{assigns: %{}, scripts: scripts}
{:ok, state}
end
@spec start(message :: Message.t(), robot :: module) :: GenServer.on_start()
def start(message, robot) do
ConversationSupervisor.start_link(message, robot)
end
def incoming(conversation, message) do
GenServer.call(conversation, {:incoming, message})
end
def scripts(conversation) do
GenServer.call(conversation, :scripts)
end
def transcript(conversation) do
GenServer.call(conversation, :transcript)
end
def handle_call({:incoming, message}, _from, state) do
response = Script.handle_message(message, state, state.scripts)
transcript = [response, message | state.transcript]
state = Map.put(state, :transcript, transcript)
{:reply, response, state}
end
def handle_call(:scripts, _from, %{scripts: scripts} = state) do
{:reply, scripts, state}
end
def handle_call(:transcript, _from, %{transcript: transcript} = state) do
{:reply, transcript, state}
end
def via_tuple(message) do
robot = Message.robot(message)
room = Message.room(message)
user = Message.user(message)
{:via, Registry, {Clover.registry(), {robot, :conversation, room, user}}}
end
end
|
lib/conversation/conversation.ex
| 0.855429
| 0.443118
|
conversation.ex
|
starcoder
|
defmodule ElxValidation.DateTime do
@moduledoc """
### date
- The field under validation must be a valid, non-relative date.
- "2020-06-26"
### time
- The field under validation must be a valid, non-relative time.
- am / pm is optional
- "20:13"
- "01:02"
- "02:40am"
- "05:20pm"
### datetime
- The field under validation must be a valid datetime identifier
- "2020-06-26 12:20"
### timezone
- The field under validation must be a valid timezone identifier
- "+04:30"
- "-01:30"
```
data = %{
birthdate: "1990-04-17",
start_time: "13:30",
expired: "2020-06-28 12:20",
my_zone: "+04:30"
}
rules = [
%{
field: "birthdate",
validate: ["date"]
},
%{
field: "start_time",
validate: ["time"]
},
%{
field: "expired",
validate: ["datetime"]
},
%{
field: "my_zone",
validate: ["timezone"]
},
]
```
### date_equals:date
- The field under validation must be equal to the given date.
### after:date
- The field under validation must be a value after a given date.
### after_or_equal:date
- The field under validation must be a value after or equal to the given date. For more information, see the after rule.
### before:date
- The field under validation must be a value preceding the given date.
### before_or_equal:date
- The field under validation must be a value preceding or equal to the given date.
```
data = %{
eq_bd: "1990-04-17", ---> == "1990-04-17"
after_bd: "1990-04-20", ---> > "1990-04-17"
after_equal_bd: "1990-04-18", ---> >= "1990-04-17"
before_bd: "1990-04-16", ---> < "1990-04-17"
before_equal_bd: "1990-04-17", ---> <= "1990-04-17"
}
rules = [
%{
field: "eq_bd",
validate: ["date_equals:1990-04-17"]
},
%{
field: "after_bd",
validate: ["after:1990-04-17"]
},
%{
field: "after_equal_bd",
validate: ["after_or_equal:1990-04-17"]
},
%{
field: "before_bd",
validate: ["before:1990-04-17"]
},
%{
field: "before_equal_bd",
validate: ["before_or_equal:1990-04-17"]
}
]
```
"""
@doc """
check target is Date
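  ## Examples
      iex> ElxValidation.DateTime.is_date("2020-06-26")
      true
      iex> ElxValidation.DateTime.is_date("26/06/2020")
      false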
"""
  def is_date(target) do
    # Anchored so that partial matches inside a longer string are rejected
    Regex.match?(~r/^([12]\d{3})-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])$/, target)
  rescue
    _ ->
      false
  end
@doc """
check target is Time
- 01:12am -> passed
- 04:30pm -> passed
- 13:12 -> passed
- am /pm -> optional
"""
def is_time(target) do
Regex.match?(~r/^([0-1]?[0-9]|2[0-3]):[0-5][0-9]([AaPp][Mm])?$/, target)
rescue
_ ->
false
end
@doc """
check target is DateTime
- YYYY-MM-DD HH:MM:SS -> passed
"""
def is_date_time(target) do
    Regex.match?(
      ~r/^([12]\d{3})-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]) ([0-1]?[0-9]|2[0-3]):[0-5][0-9]$/,
      target
    )
rescue
_ ->
false
end
@doc """
check target is Timezone data
- +04:30 -> passed
- -01:15 -> passed
"""
def is_timezone(target) do
    Regex.match?(~r/^[+-][0-9]{2}:[0-9]{2}$/, target)
rescue
_ ->
false
end
@doc """
check target and value is date and equal
"""
def date_equals(target, value) do
if is_date(target) and is_date(value) do
t = Date.from_iso8601!(target)
v = Date.from_iso8601!(value)
Date.diff(v, t) == 0
else
false
end
rescue
_ ->
false
end
@doc """
check target and value is date and target after value
"""
def is_after(target, value) do
if is_date(target) and is_date(value) do
t = Date.from_iso8601!(target)
v = Date.from_iso8601!(value)
Date.diff(t, v) > 0
else
false
end
rescue
_ ->
false
end
@doc """
check target and value is date and target after or equal value
"""
def is_after_or_equal(target, value) do
if is_date(target) and is_date(value) do
t = Date.from_iso8601!(target)
v = Date.from_iso8601!(value)
Date.diff(t, v) >= 0
else
false
end
rescue
_ ->
false
end
@doc """
check target and value is date and target before value
"""
def is_before(target, value) do
if is_date(target) and is_date(value) do
t = Date.from_iso8601!(target)
v = Date.from_iso8601!(value)
Date.diff(t, v) < 0
else
false
end
rescue
_ ->
false
end
@doc """
check target and value is date and target before or equal value
"""
def is_before_or_equal(target, value) do
if is_date(target) and is_date(value) do
t = Date.from_iso8601!(target)
v = Date.from_iso8601!(value)
Date.diff(t, v) <= 0
else
false
end
rescue
_ ->
false
end
end
|
lib/rules/datetime.ex
| 0.769903
| 0.720786
|
datetime.ex
|
starcoder
|
defmodule BN.BN128Arithmetic do
require Integer
alias BN.{FQ, FQP, FQ2, FQ12}
# y^2 = x^3 + 3
@y_power 2
@x_power 3
@b FQ.new(3)
@b2 FQ2.divide(FQ2.new([3, 0]), FQ2.new([9, 1]))
@b12 FQ12.new([3] ++ List.duplicate(0, 11))
@type point :: {FQP.t(), FQP.t()} | {FQ.t(), FQ.t()}
@spec on_curve?(point()) :: boolean() | no_return
def on_curve?(point = {x, y} = {%FQ{}, %FQ{}}) do
if infinity?(point) do
true
else
minuend = FQ.pow(y, @y_power)
substrahend = FQ.pow(x, @x_power)
remainder = FQ.sub(minuend, substrahend)
remainder == @b
end
end
def on_curve?(point = {x, y} = {%FQP{}, %FQP{}}) do
if infinity?(point) do
true
else
minuend = FQP.pow(y, @y_power)
substrahend = FQP.pow(x, @x_power)
remainder = FQP.sub(minuend, substrahend)
if x.dim == 2 do
remainder == @b2
else
remainder == @b12
end
end
end
@spec add(point(), point()) :: {:ok, point()} | {:error, String.t()}
def add(point1, point2) do
cond do
!on_curve?(point1) ->
{:error, "point1 is not on the curve"}
!on_curve?(point2) ->
{:error, "point2 is not on the curve"}
true ->
{:ok, add_points(point1, point2)}
end
end
@spec mult(point(), integer()) :: {:ok, point()} | {:error, String.t()}
def mult(point, scalar) do
if on_curve?(point) do
{:ok, mult_point(point, scalar)}
else
{:error, "point is not on the curve"}
end
end
@spec mult_point(point(), integer()) :: point()
defp mult_point(point, scalar) do
cond do
scalar == 0 ->
case point do
{%FQ{}, %FQ{}} ->
{FQ.new(0), FQ.new(0)}
_ ->
{FQ12.zero(), FQ12.zero()}
end
scalar == 1 ->
point
Integer.is_even(scalar) ->
point
|> mult_point(div(scalar, 2))
|> double()
true ->
point
|> mult_point(div(scalar, 2))
|> double()
|> calculate_points_addition(point)
end
end
@spec add_points(point(), point()) :: point()
def add_points(point1, point2) do
cond do
point1 == point2 ->
double(point1)
infinity?(point1) ->
point2
infinity?(point2) ->
point1
true ->
calculate_points_addition(point1, point2)
end
end
@spec double(point()) :: point()
def double({x, y} = {%FQ{}, %FQ{}}) do
if y.value == 0 do
{FQ.new(0), FQ.new(0)}
else
double_y = FQ.mult(y, 2)
lambda =
x
|> FQ.pow(2)
|> FQ.mult(3)
|> FQ.divide(double_y)
double_x = FQ.mult(x, 2)
new_x = lambda |> FQ.pow(2) |> FQ.sub(double_x)
new_y =
x
|> FQ.sub(new_x)
|> FQ.mult(lambda)
|> FQ.sub(y)
{new_x, new_y}
end
end
def double({x, y} = {%FQP{}, %FQP{}}) do
if y == FQ12.zero() do
{FQ12.zero(), FQ12.zero()}
else
double_y = FQ12.mult(y, 2)
lambda =
x
|> FQ12.pow(2)
|> FQ12.mult(3)
|> FQ12.divide(double_y)
double_x = FQ12.mult(x, 2)
new_x = lambda |> FQ12.pow(2) |> FQ12.sub(double_x)
new_y =
x
|> FQ12.sub(new_x)
|> FQ12.mult(lambda)
|> FQ12.sub(y)
{new_x, new_y}
end
end
@spec calculate_points_addition(point(), point()) :: point()
defp calculate_points_addition({x1, y1} = {%FQ{}, %FQ{}}, {x2, y2}) do
if x1 == x2 do
{FQ.new(0), FQ.new(0)}
else
y_remainder = FQ.sub(y2, y1)
x_remainder = FQ.sub(x2, x1)
lambda = FQ.divide(y_remainder, x_remainder)
x =
lambda
|> FQ.pow(2)
|> FQ.sub(x1)
|> FQ.sub(x2)
y =
x1
|> FQ.sub(x)
|> FQ.mult(lambda)
|> FQ.sub(y1)
{x, y}
end
end
defp calculate_points_addition({x1, y1} = {%FQP{}, %FQP{}}, {x2, y2}) do
if x1 == x2 do
{FQ12.zero(), FQ12.zero()}
else
y_remainder = FQ12.sub(y2, y1)
x_remainder = FQ12.sub(x2, x1)
lambda = FQ12.divide(y_remainder, x_remainder)
x =
lambda
|> FQ12.pow(2)
|> FQ12.sub(x1)
|> FQ12.sub(x2)
y =
x1
|> FQ12.sub(x)
|> FQ12.mult(lambda)
|> FQ12.sub(y1)
{x, y}
end
end
def infinity?({x, y} = {%FQ{}, %FQ{}}) do
x.value == 0 && y.value == 0
end
def infinity?({x, y} = {%FQP{}, %FQP{}}) do
FQP.zero?(x) && FQP.zero?(y)
end
end
|
lib/bn/bn128_arithmetic.ex
| 0.852368
| 0.426023
|
bn128_arithmetic.ex
|
starcoder
|
defmodule ExAlgo.Set.DisjointSet do
@moduledoc """
  Implementation of a disjoint set data structure.
More on this: https://en.wikipedia.org/wiki/Disjoint_sets
# TODO: Move some doctests to unit tests.
"""
defstruct parents: %{}, ranks: %{}
@type mapped_array() :: %{required(non_neg_integer()) => non_neg_integer()}
@type value() :: non_neg_integer()
@type t() :: %__MODULE__{
parents: mapped_array(),
ranks: mapped_array()
}
@doc """
State of a disjoint set with initialized ranks and parents.
## Example
iex> DisjointSet.new(0)
%DisjointSet{}
iex> DisjointSet.new(3)
%DisjointSet{
parents: %{0 => 0, 1 => 1, 2 => 2},
ranks: %{0 => 1, 1 => 1, 2 => 1}
}
"""
@spec new(non_neg_integer()) :: t()
def new(0), do: %__MODULE__{}
def new(size) do
%__MODULE__{
ranks: 0..(size - 1) |> Enum.map(&{&1, 1}) |> Enum.into(%{}),
parents: 0..(size - 1) |> Enum.map(&{&1, &1}) |> Enum.into(%{})
}
end
@doc """
  Finds the root of a given node, compressing the path along the way.
  Returns `:error` if given a node that does not exist.
## Example
iex> set = DisjointSet.new(4)
iex> Enum.reduce(0..3, true, fn x, acc ->
...> {value, _} = DisjointSet.find(set, x)
...> value == x and acc
...> end)
true
iex> set = %DisjointSet{
...> parents: %{0 => 1, 1 => 2, 2 => 3, 3 => 3, 4 => 1},
...> ranks: %{0 => 1, 1 => 1, 2 => 1, 3 => 1, 4 => 1}
...> }
iex> {3, new_set} = DisjointSet.find(set, 3)
iex> new_set.parents == set.parents
true
iex> new_set.ranks == set.ranks
true
iex> set = %DisjointSet{
...> parents: %{0 => 1, 1 => 2, 2 => 3, 3 => 3, 4 => 1},
...> ranks: %{0 => 1, 1 => 1, 2 => 1, 3 => 1, 4 => 1}
...> }
iex> {3, set} = DisjointSet.find(set, 0)
iex> set.parents
%{0 => 3, 1 => 3, 2 => 3, 3 => 3, 4 => 1}
iex> set.ranks
%{0 => 1, 1 => 1, 2 => 1, 3 => 1, 4 => 1}
iex> DisjointSet.new(4) |> DisjointSet.find(100)
:error
"""
@spec find(t(), value()) :: {value(), t()} | :error
def find(%__MODULE__{parents: parents} = disjoint_set, value) do
case parents[value] do
nil -> :error
parent -> do_find(disjoint_set, [value], parent)
end
end
defp do_find(%__MODULE__{parents: parents} = disjoint_set, path, value) do
case parents[value] do
^value ->
{value,
Enum.reduce(path, disjoint_set, fn x, acc ->
%{acc | parents: %{acc.parents | x => value}}
end)}
parent ->
do_find(disjoint_set, [value | path], parent)
end
end
@doc """
Performs a union between two elements and returns the updated set.
## Example
iex> set = DisjointSet.new(5)
iex> set =
...> set
...> |> DisjointSet.union(0, 2)
...> |> DisjointSet.union(4, 2)
...> |> DisjointSet.union(3, 1)
iex> set
%DisjointSet{
parents: %{0 => 0, 1 => 3, 2 => 0, 3 => 3, 4 => 0},
ranks: %{0 => 2, 1 => 1, 2 => 1, 3 => 2, 4 => 1}
}
iex> DisjointSet.union(set, 3, 1) == set
true
iex> DisjointSet.new(1) |> DisjointSet.union(100, 200)
:error
"""
@spec union(t(), value(), value()) :: t() | :error
def union(disjoint_set, a, b) do
with {root_a, disjoint_set} <- find(disjoint_set, a),
{root_b, disjoint_set} <- find(disjoint_set, b) do
union_by_rank(disjoint_set, root_a, root_b)
else
_ -> :error
end
end
defp union_by_rank(disjoint_set, parent, parent), do: disjoint_set
defp union_by_rank(%__MODULE__{ranks: ranks} = disjoint_set, root_a, root_b) do
case {ranks[root_a], ranks[root_b]} do
{rank, rank} ->
%{
disjoint_set
| parents: %{disjoint_set.parents | root_b => root_a},
ranks: %{disjoint_set.ranks | root_a => rank + 1}
}
{rank_a, rank_b} when rank_a < rank_b ->
%{disjoint_set | parents: %{disjoint_set.parents | root_a => root_b}}
{rank_a, rank_b} when rank_a > rank_b ->
%{disjoint_set | parents: %{disjoint_set.parents | root_b => root_a}}
end
end
end
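# --- Usage sketch (illustrative): cycle detection, a classic disjoint-set
# application. An edge whose endpoints already share a root closes a cycle.
alias ExAlgo.Set.DisjointSet

edges = [{0, 1}, {1, 2}, {2, 0}]

{cycle?, _set} =
  Enum.reduce(edges, {false, DisjointSet.new(3)}, fn {a, b}, {found, set} ->
    {root_a, set} = DisjointSet.find(set, a)
    {root_b, set} = DisjointSet.find(set, b)
    {found or root_a == root_b, DisjointSet.union(set, a, b)}
  end)

# cycle? == true, because {2, 0} joins two nodes that are already connected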
|
lib/ex_algo/set/disjoint_set.ex
| 0.696578
| 0.700524
|
disjoint_set.ex
|
starcoder
|
defmodule InvoiceTracker.Rounding do
@moduledoc """
Perform various calculations on times by rounding to the nearest tenth of an
hour.
Operations are provided to:
- Round a time to the nearest tenth of an hour
- Compute a charge amount given a rate
- Adjust the rounding of a list of time entries with a total time such that a
summary report or invoice will look correct when all time entries are
rounded.
"""
alias InvoiceTracker.TimeEntry
alias Timex.Duration
@doc """
Round a time to the nearest tenth of an hour.
"""
@spec round_time(Duration.t()) :: Duration.t()
def round_time(time), do: time |> to_tenths |> round |> from_tenths
@doc """
Compute the amount to charge for a time given a rate.
First rounds the time to the nearest tenth of an hour, then computes the
charge.
"""
@spec charge(Duration.t(), number) :: number
def charge(time, rate) do
time |> round_time |> Duration.to_hours() |> Kernel.*(rate)
end
@doc """
Reconciles time entries with a total time so that the rounded entries add up
to the total time rounded to the nearest tenth of an hour.
The basic approach is to figure out how many tenths of an hour need to be
accounted for (up or down), then choose that number of entries to adjust.
To find the entries, sort them based on their "rounding weight": how close was
an entry to rounding up? For adjusting up, take the entries with the highest
rounding weight; for adjusting down, take the entries that were furthest from
rounding up.
"""
@spec reconcile([TimeEntry.t()], Duration.t()) :: [TimeEntry.t()]
def reconcile(entries, total) do
entries
|> Enum.map(&rounded/1)
|> Enum.zip(adjustments(entries, total))
|> Enum.map(&apply_adjustment/1)
end
defp rounded(entry) do
Map.update!(entry, :time, &round_time/1)
end
defp adjustments(entries, total) do
Enum.map(
raw_adjustments(Enum.map(entries, &tenths_in/1), to_tenths(total)),
&from_tenths/1
)
end
defp apply_adjustment({entry, adjustment}) do
Map.update!(entry, :time, &Duration.add(&1, adjustment))
end
defp raw_adjustments([], _), do: []
defp raw_adjustments(tenths, total) do
rounded_total =
tenths
|> Enum.map(&Kernel.round/1)
|> Enum.reduce(&Kernel.+/2)
total_adjustment = round(total - rounded_total)
distribute(total_adjustment, tenths)
end
defp distribute(total, tenths) do
Enum.reduce(
distributions(total, tenths),
List.duplicate(0, length(tenths)),
&apply_distribution/2
)
end
defp distributions(0, _), do: []
defp distributions(total, tenths) do
tenths
|> Enum.map(&rounding_weight/1)
|> Enum.with_index()
|> Enum.sort(&(&1 >= &2))
|> Enum.take(total)
|> Enum.map(fn {_, index} -> {index, sign(total)} end)
end
defp apply_distribution({index, increment}, list) do
List.update_at(list, index, &(&1 + increment))
end
defp rounding_weight(tenth) do
tenth |> Kernel.*(1000) |> round |> Kernel.rem(500)
end
defp sign(n) when n < 0, do: -1
defp sign(_), do: 1
defp tenths_in(entry), do: entry |> Map.get(:time) |> to_tenths
defp to_tenths(time) do
time |> Duration.scale(10) |> Duration.to_hours()
end
defp from_tenths(tenths) do
tenths |> Duration.from_hours() |> Duration.scale(0.1)
end
end
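# --- Usage sketch (illustrative): 7 minutes is ~0.117 hours, which rounds
# down to 0.1 h, so at a rate of 120 the charge is 12.0.
alias InvoiceTracker.Rounding
alias Timex.Duration

Duration.from_minutes(7) |> Rounding.round_time() |> Duration.to_minutes()
# => 6.0 (i.e. 0.1 hours)

Rounding.charge(Duration.from_minutes(7), 120)
# => 12.0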
|
lib/invoice_tracker/rounding.ex
| 0.889535
| 0.863737
|
rounding.ex
|
starcoder
|
defmodule Schism do
@moduledoc """
Schism allows you to create network partitions in erlang nodes without
needing to leave elixir.
Let's say that we have 5 nodes and we want to test what happens when they
disconnect from each other. We can use Schism like so:
```elixir
test "netsplits" do
[n1, n2, n3, n4, n5] = nodes
# Partition our nodes
Schism.partition([n1, n3])
Schism.partition([n4])
Schism.partition([n2, n5])
# Test some stuff...
# Heal our partitions
Schism.heal([n1, n3])
Schism.heal([n2, n4, n5])
end
```
This api is useful for testing and development in conjunction with tools like
[local cluster](https://github.com/whitfin/local-cluster) and
[propcheck](https://github.com/alfert/propcheck).
"""
@doc """
Creates a partition amongst a set of nodes. Any nodes in the partition
will be able to see each other but no other nodes in the network. The
partitioned nodes will still be able to see the node that induced the
partition. Otherwise we would not be able to heal the partition.
"""
@spec partition([Node.t], String.t) :: [Node.t] | none()
def partition(nodes, id \\ random_string()) when is_binary(id) do
manager = Node.self()
for node <- nodes do
# Force the node to disconnect from all nodes that aren't us
all_except_us = :rpc.call(node, Node, :list, []) -- [manager]
Enum.each(all_except_us, fn n -> :rpc.call(node, Node, :disconnect, [n]) end)
# Set the remote nodes cookie to a different value
true = :rpc.call(node, :erlang, :set_cookie, [node, String.to_atom(id)])
# Ensure we can still talk to the node
:pong = Node.ping(node)
end
# Reconnect the nodes in partition now that the cookie is the same
connect_nodes(nodes)
nodes
end
@doc """
Re-connects the nodes to the cluster.
"""
@spec heal([Node.t]) :: [Node.t] | none()
def heal(nodes) do
# Restore the cookie
partition(nodes, Atom.to_string(:erlang.get_cookie()))
end
defp connect_nodes([node | other_nodes]) do
Enum.each(other_nodes, fn n -> :rpc.call(node, Node, :connect, [n]) end)
connect_nodes(other_nodes)
end
defp connect_nodes([]), do: :ok
defp random_string do
:crypto.strong_rand_bytes(10)
|> Base.url_encode64
|> binary_part(0, 10)
end
end
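# --- Design note (illustrative): a partition is keyed by the Erlang cookie.
# partition/2 gives the chosen nodes a fresh random cookie (so the rest of
# the cluster can no longer talk to them) and reconnects them to each other,
# which is why heal/1 is simply partition/2 with the original cookie:
#
#     Schism.heal(nodes)
#     # == Schism.partition(nodes, Atom.to_string(:erlang.get_cookie()))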
|
lib/schism.ex
| 0.744935
| 0.932576
|
schism.ex
|
starcoder
|
defmodule Cldr.DateTime.Format.Backend do
@moduledoc false
def define_date_time_format_module(config) do
backend = config.backend
quote location: :keep, bind_quoted: [config: config, backend: backend] do
defmodule DateTime.Format do
@moduledoc """
Manages the Date, Time and DateTime formats
defined by CLDR.
The functions in `Cldr.DateTime.Format` are
primarily concerned with encapsulating the
data from CLDR in functions that are used
during the formatting process.
"""
alias Cldr.Calendar, as: Kalendar
alias Cldr.Locale
alias Cldr.LanguageTag
alias Cldr.Config
@doc """
Returns a list of calendars defined for a given locale.
## Arguments
* `locale` is any valid locale name returned by `Cldr.known_locale_names/0`
or a `Cldr.LanguageTag` struct. The default is `Cldr.get_current_locale/0`
## Example
iex> Cldr.DateTime.Format.calendars_for "en"
{:ok, [:buddhist, :chinese, :coptic, :dangi, :ethiopic, :ethiopic_amete_alem,
:generic, :gregorian, :hebrew, :indian, :islamic, :islamic_civil,
:islamic_rgsa, :islamic_tbla, :islamic_umalqura, :japanese, :persian, :roc]}
"""
@spec calendars_for(Locale.name() | LanguageTag.t()) :: [calendar, ...]
def calendars_for(locale \\ unquote(backend).get_locale())
def calendars_for(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
calendars_for(cldr_locale_name)
end
@doc """
Returns a map of the standard date formats for a given locale and calendar.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
* `calendar` is any calendar returned by `Cldr.DateTime.Format.calendars_for/1`
The default is `:gregorian`
## Examples:
iex> Cldr.DateTime.Format.date_formats "en"
{:ok, %Cldr.Date.Formats{
full: "EEEE, MMMM d, y",
long: "MMMM d, y",
medium: "MMM d, y",
short: "M/d/yy"
}}
iex> Cldr.DateTime.Format.date_formats "en", :buddhist
{:ok, %Cldr.Date.Formats{
full: "EEEE, MMMM d, y G",
long: "MMMM d, y G",
medium: "MMM d, y G",
short: "M/d/y GGGGG"
}}
"""
@spec date_formats(Locale.name() | LanguageTag.t(), calendar) :: standard_formats
def date_formats(locale \\ unquote(backend).get_locale(), calendar \\ Kalendar.default_calendar())
def date_formats(%LanguageTag{cldr_locale_name: cldr_locale_name}, calendar) do
date_formats(cldr_locale_name, calendar)
end
@doc """
Returns a map of the standard time formats for a given locale and calendar.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
* `calendar` is any calendar returned by `Cldr.DateTime.Format.calendars_for/1`
The default is `:gregorian`
## Examples:
iex> Cldr.DateTime.Format.time_formats "en"
{:ok, %Cldr.Time.Formats{
full: "h:mm:ss a zzzz",
long: "h:mm:ss a z",
medium: "h:mm:ss a",
short: "h:mm a"
}}
iex> Cldr.DateTime.Format.time_formats "en", :buddhist
{:ok, %Cldr.Time.Formats{
full: "h:mm:ss a zzzz",
long: "h:mm:ss a z",
medium: "h:mm:ss a",
short: "h:mm a"
}}
"""
@spec time_formats(Locale.name() | LanguageTag, calendar) :: standard_formats
def time_formats(locale \\ unquote(backend).get_locale(), calendar \\ Kalendar.default_calendar())
def time_formats(%LanguageTag{cldr_locale_name: cldr_locale_name}, calendar) do
time_formats(cldr_locale_name, calendar)
end
@doc """
Returns a map of the standard datetime formats for a given locale and calendar.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
* `calendar` is any calendar returned by `Cldr.DateTime.Format.calendars_for/1`
The default is `:gregorian`
## Examples:
iex> Cldr.DateTime.Format.date_time_formats "en"
{:ok, %Cldr.DateTime.Formats{
full: "{1} 'at' {0}",
long: "{1} 'at' {0}",
medium: "{1}, {0}",
short: "{1}, {0}"
}}
iex> Cldr.DateTime.Format.date_time_formats "en", :buddhist
{:ok, %Cldr.DateTime.Formats{
full: "{1} 'at' {0}",
long: "{1} 'at' {0}",
medium: "{1}, {0}",
short: "{1}, {0}"
}}
"""
@spec date_time_formats(Locale.name() | LanguageTag, calendar) :: standard_formats
def date_time_formats(
locale \\ unquote(backend).get_locale(),
calendar \\ Kalendar.default_calendar()
)
def date_time_formats(%LanguageTag{cldr_locale_name: cldr_locale_name}, calendar) do
date_time_formats(cldr_locale_name, calendar)
end
@doc """
Returns a map of the available non-standard datetime formats for a
given locale and calendar.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
* `calendar` is any calendar returned by `Cldr.DateTime.Format.calendars_for/1`
The default is `:gregorian`
## Examples:
iex> Cldr.DateTime.Format.date_time_available_formats "en"
{:ok,
%{
yw_count_other: "'week' w 'of' Y",
mmm: "LLL",
d: "d",
ehm: "E h:mm a",
y_mmm: "MMM y",
mm_md: "MMM d",
gy_mm_md: "MMM d, y G",
e_bhm: "E h:mm B",
ed: "d E",
mmm_md: "MMMM d",
ehms: "E h:mm:ss a",
y_qqq: "QQQ y",
y_qqqq: "QQQQ y",
m_ed: "E, M/d",
md: "M/d",
bhm: "h:mm B",
hmv: "HH:mm v",
y_m: "M/y",
gy_mmm: "MMM y G",
mmm_ed: "E, MMM d",
y_m_ed: "E, M/d/y",
y_mm_md: "MMM d, y",
gy_mmm_ed: "E, MMM d, y G",
e_hms: "E HH:mm:ss",
e: "ccc",
e_hm: "E HH:mm",
yw_count_one: "'week' w 'of' Y",
mmmmw_count_one: "'week' W 'of' MMMM",
e_bhms: "E h:mm:ss B",
hms: "HH:mm:ss",
y_mmm_ed: "E, MMM d, y",
y_md: "M/d/y",
ms: "mm:ss",
hmsv: "HH:mm:ss v",
hm: "HH:mm",
h: "HH",
mmmmw_count_other: "'week' W 'of' MMMM",
bh: "h B",
m: "L",
bhms: "h:mm:ss B",
y_mmmm: "MMMM y",
y: "y",
gy: "y G"
}}
"""
@spec date_time_available_formats(Locale.name() | LanguageTag, calendar) :: formats
def date_time_available_formats(
locale \\ unquote(backend).get_locale(),
calendar \\ Kalendar.default_calendar()
)
def date_time_available_formats(%LanguageTag{cldr_locale_name: cldr_locale_name}, calendar) do
date_time_available_formats(cldr_locale_name, calendar)
end
@doc """
Returns the positive and negative hour format
for a timezone offset for a given locale.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
## Example
iex> Cldr.DateTime.Format.hour_format "en"
{:ok, {"+HH:mm", "-HH:mm"}}
"""
@spec hour_format(Locale.name() | LanguageTag) :: {String.t(), String.t()}
def hour_format(locale \\ unquote(backend).get_locale())
def hour_format(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
hour_format(cldr_locale_name)
end
@doc """
Returns the GMT offset format list for a
timezone offset for a given locale.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
## Example
iex> Cldr.DateTime.Format.gmt_format "en"
{:ok, ["GMT", 0]}
"""
@spec gmt_format(Locale.name() | LanguageTag) :: [non_neg_integer | String.t(), ...]
def gmt_format(locale \\ unquote(backend).get_locale())
def gmt_format(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
gmt_format(cldr_locale_name)
end
@doc """
Returns the GMT format string for a
timezone with an offset of zero for
a given locale.
## Arguments
* `locale` is any locale returned by `Cldr.known_locale_names/0`
## Example
iex> Cldr.DateTime.Format.gmt_zero_format "en"
{:ok, "GMT"}
"""
@spec gmt_zero_format(Locale.name() | LanguageTag) :: String.t()
def gmt_zero_format(locale \\ unquote(backend).get_locale())
def gmt_zero_format(%LanguageTag{cldr_locale_name: cldr_locale_name}) do
gmt_zero_format(cldr_locale_name)
end
for locale <- Cldr.Config.known_locale_names(config) do
locale_data = Cldr.Config.get_locale(locale, config)
calendars = Cldr.Config.calendars_for_locale(locale_data)
def calendars_for(unquote(locale)), do: {:ok, unquote(calendars)}
def gmt_format(unquote(locale)),
do: {:ok, unquote(get_in(locale_data, [:dates, :time_zone_names, :gmt_format]))}
def gmt_zero_format(unquote(locale)),
do: {:ok, unquote(get_in(locale_data, [:dates, :time_zone_names, :gmt_zero_format]))}
hour_formats =
List.to_tuple(
String.split(get_in(locale_data, [:dates, :time_zone_names, :hour_format]), ";")
)
def hour_format(unquote(locale)), do: {:ok, unquote(hour_formats)}
for calendar <- calendars do
calendar_data =
locale_data
|> Map.get(:dates)
|> get_in([:calendars, calendar])
formats = struct(Cldr.Date.Formats, Map.get(calendar_data, :date_formats))
def date_formats(unquote(locale), unquote(calendar)) do
{:ok, unquote(Macro.escape(formats))}
end
formats = struct(Cldr.Time.Formats, Map.get(calendar_data, :time_formats))
def time_formats(unquote(locale), unquote(calendar)) do
{:ok, unquote(Macro.escape(formats))}
end
formats =
struct(
Cldr.DateTime.Formats,
Map.get(calendar_data, :date_time_formats) |> Map.take(@standard_formats)
)
def date_time_formats(unquote(locale), unquote(calendar)) do
{:ok, unquote(Macro.escape(formats))}
end
formats = get_in(calendar_data, [:date_time_formats, :available_formats])
def date_time_available_formats(unquote(locale), unquote(calendar)) do
{:ok, unquote(Macro.escape(formats))}
end
end
def date_formats(unquote(locale), calendar), do: {:error, Kalendar.calendar_error(calendar)}
def time_formats(unquote(locale), calendar), do: {:error, Kalendar.calendar_error(calendar)}
def date_time_formats(unquote(locale), calendar),
do: {:error, Kalendar.calendar_error(calendar)}
def date_time_available_formats(unquote(locale), calendar),
do: {:error, Kalendar.calendar_error(calendar)}
end
def calendars_for(locale), do: {:error, Locale.locale_error(locale)}
def gmt_format(locale), do: {:error, Locale.locale_error(locale)}
def gmt_zero_format(locale), do: {:error, Locale.locale_error(locale)}
def hour_format(locale), do: {:error, Locale.locale_error(locale)}
def date_formats(locale, _calendar), do: {:error, Locale.locale_error(locale)}
def time_formats(locale, _calendar), do: {:error, Locale.locale_error(locale)}
def date_time_formats(locale, _calendar), do: {:error, Locale.locale_error(locale)}
def date_time_available_formats(locale, _calendar), do: {:error, Locale.locale_error(locale)}
end
end
end
end
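# --- Usage sketch (illustrative): this module is invoked at compile time by
# an ex_cldr backend, so the generated functions are reached through the
# backend's namespace (the `MyApp.Cldr` name below is an assumption):
#
#     MyApp.Cldr.DateTime.Format.date_formats("en", :gregorian)
#     # => {:ok, %Cldr.Date.Formats{full: "EEEE, MMMM d, y", ...}}
#     MyApp.Cldr.DateTime.Format.hour_format("en")
#     # => {:ok, {"+HH:mm", "-HH:mm"}}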
|
lib/cldr/backend/date_time_format.ex
| 0.926748
| 0.531635
|
date_time_format.ex
|
starcoder
|
defmodule ExVault.KV1 do
@moduledoc """
A very thin wrapper over the basic operations for working with KV v1 data.
Construct a *backend*--a client paired with the mount path for the `kv`
version 1 secrets engine it interacts with--using the `ExVault.KV1.new/2`
function.
Each of the operations in this module have a variant that operates on a client
and mount path, and another that operates on a backend.
See the [Vault documentation](https://www.vaultproject.io/docs/secrets/kv/kv-v1.html)
for the secrets engine.
"""
defstruct [:client, :mount]
@type t :: %__MODULE__{
client: ExVault.client(),
mount: String.t()
}
@doc """
Create a new backend for the `kv` version 1 secrets engine.
Params:
* `client` the `ExVault` client.
* `mount` the mount path for the `kv` secrets engine.
"""
@spec new(ExVault.client(), String.t()) :: t()
def new(client, mount), do: %__MODULE__{client: client, mount: mount}
@doc """
Read the value of a key.
Params:
* `client` the `ExVault` client.
* `mount` the mount path for the `kv` secrets engine.
* `path` the path to the key in the secrets engine.
"""
@spec read(ExVault.client(), String.t(), String.t()) :: ExVault.response()
def read(client, mount, path), do: ExVault.read(client, "#{mount}/#{path}")
@doc """
Read the value of a key.
Params:
* `backend` the `ExVault.KV1` backend.
* `path` the path to the key in the secrets engine.
"""
@spec read(t(), String.t()) :: ExVault.response()
def read(backend, path), do: read(backend.client, backend.mount, path)
@doc """
Write the value of a key.
Params:
* `client` the `ExVault` client.
* `mount` the mount path for the `kv` secrets engine.
* `path` the path to the key in the secrets engine.
* `data` the data to write as a map of string keys to string values.
"""
@spec write(ExVault.client(), String.t(), String.t(), %{String.t() => String.t()}) ::
ExVault.response()
def write(client, mount, path, data), do: ExVault.write(client, "#{mount}/#{path}", data)
@doc """
Write the value of a key.
Params:
* `backend` the `ExVault.KV1` backend.
* `path` the path to the key in the secrets engine.
* `data` the data to write as a map of string keys to string values.
"""
@spec write(t(), String.t(), any()) :: ExVault.response()
def write(backend, path, data), do: write(backend.client, backend.mount, path, data)
@doc """
Delete a key.
Params:
* `client` the `ExVault` client.
* `mount` the mount path for the `kv` secrets engine.
* `path` the path to the key in the secrets engine.
"""
@spec delete(ExVault.client(), String.t(), String.t()) :: ExVault.response()
def delete(client, mount, path), do: ExVault.delete(client, "#{mount}/#{path}")
@doc """
Delete a key.
Params:
* `backend` the `ExVault.KV1` backend.
* `path` the path to the key in the secrets engine.
"""
@spec delete(t(), String.t()) :: ExVault.response()
def delete(backend, path), do: delete(backend.client, backend.mount, path)
@doc """
List the keys.
Params:
* `client` the ExVault client.
* `mount` the mount path for the `kv` secrets engine.
* `path` the path to the key or key prefix in the secrets engine.
"""
@spec list(ExVault.client(), String.t(), String.t()) :: ExVault.response()
def list(client, mount, path), do: ExVault.list(client, "#{mount}/#{path}")
@doc """
List the keys.
Params:
* `backend` the `ExVault.KV1` backend.
* `path` the path to the key or key prefix in the secrets engine.
"""
@spec list(t(), String.t()) :: ExVault.response()
def list(backend, path), do: list(backend.client, backend.mount, path)
end
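# --- Usage sketch (illustrative; constructing the `client` happens elsewhere
# in ExVault and is elided here) ---
#
#     backend = ExVault.KV1.new(client, "secret")
#     {:ok, _} = ExVault.KV1.write(backend, "myapp/db", %{"password" => "hunter2"})
#     {:ok, resp} = ExVault.KV1.read(backend, "myapp/db")
#     {:ok, _} = ExVault.KV1.delete(backend, "myapp/db")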
|
lib/exvault/kv1.ex
| 0.866203
| 0.804444
|
kv1.ex
|
starcoder
|
defmodule Crawlie.Page do
@moduledoc """
Defines the struct representing a url's state in the system.
"""
alias __MODULE__, as: This
@typedoc """
The `Crawlie.Page` struct type.
Fields' meaning:
- `:uri` - page `URI`
- `:depth` - the "depth" at which the url was found while recursively crawling the pages.
For example `depth=0` means it was passed directly from the caller, `depth=2` means
the crawler followed 2 links from one of the starting urls to get to the url.
- `:retries` - url fetch retry count. If the fetching of the url never failed before, `0`.
"""
@type t :: %This{
depth: integer,
uri: URI.t,
retries: integer
}
defstruct [
:uri,
depth: 0,
retries: 0,
]
#===========================================================================
# API Functions
#===========================================================================
@spec new(URI.t | String.t, integer) :: This.t
@doc """
Creates a new `Crawlie.Page` struct from the url
"""
def new(uri, depth \\ 0) when is_integer(depth) do
uri =
uri
|> URI.parse() # works with both binaries and %URI{}
|> strip_fragment()
%This{uri: uri, depth: depth}
end
@spec child(This.t, URI.t | String.t) :: This.t
@doc """
Creates a "child page" - a new `Crawlie.Page` struct with depth one greate than
the one of the parent and no retries.
"""
def child(%This{depth: depth}, uri) do
This.new(uri, depth + 1)
end
@spec retry(This.t) :: This.t
@doc """
Returns the `Crawlie.Page` object with the retry count increased
"""
def retry(%This{retries: r} = this), do: %This{this | retries: r + 1}
@spec url(This.t) :: String.t
@doc """
Returns the string url of the page
"""
def url(this), do: URI.to_string(this.uri)
#===========================================================================
# Internal Functions
#===========================================================================
defp strip_fragment(%URI{fragment: nil} = uri), do: uri
defp strip_fragment(%URI{fragment: _} = uri), do: %URI{uri | fragment: nil}
end
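# --- Usage sketch (illustrative): fragments are stripped on creation, and
# child pages track how deep the crawler has followed links.
page = Crawlie.Page.new("http://example.com/docs#intro")
Crawlie.Page.url(page)
# => "http://example.com/docs"

child = Crawlie.Page.child(page, "http://example.com/docs/part2")
child.depth
# => 1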
|
lib/crawlie/page.ex
| 0.814274
| 0.620923
|
page.ex
|
starcoder
|
defmodule DarknetToOnnx.WeightLoader do
@moduledoc """
Helper class used for loading the serialized weights of a binary file stream
and returning the initializers and the input tensors required for populating
the ONNX graph with weights.
"""
use Agent, restart: :transient
alias DarknetToOnnx.Learning, as: Utils
alias DarknetToOnnx.Helper, as: Helper
@doc """
Initialized with a path to the YOLO .weights file.
Keyword argument:
weights_file_path -- path to the weights file.
"""
def start_link(opts) do
initial_state = %{
weights_file: open_weights_file(Keyword.fetch!(opts, :weights_file))
}
{:ok, _pid} = Agent.start_link(fn -> initial_state end, name: __MODULE__)
initial_state
end
def get_state() do
Agent.get(__MODULE__, fn state -> state end)
end
def update_state(key, value) do
Agent.update(__MODULE__, fn state -> %{state | key => value} end)
end
def stop_link() do
Agent.stop(__MODULE__)
end
@doc """
Opens a YOLO DarkNet file stream and skips the header.
Keyword argument:
weights_file_path -- path to the weights file.
"""
def open_weights_file(weights_file_path) do
# Here we skip the first 20 bytes of this file. I don't know why they are assigning this header to an
# np.array since they are not using it anymore...
{:ok, weights_file} = File.open(weights_file_path, [:read, :binary])
# Move the file pointer after the header
IO.binread(weights_file, 5 * 4)
# param_data = [Nx.from_binary(header, {:s, 32})
weights_file
end
@doc """
Deserializes the weights from a file stream in the DarkNet order.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
def load_one_param_type(conv_params, param_category, suffix, force_raw \\ false) do
param_name = DarknetToOnnx.ConvParams.generate_param_name(conv_params.node_name, param_category, suffix)
[channels_out, channels_in, filter_h, filter_w] = conv_params.conv_weight_dims
param_shape =
case param_category do
"bn" ->
{channels_out}
"conv" ->
case suffix do
"weights" -> {channels_out, channels_in, filter_h, filter_w}
"bias" -> {channels_out}
end
end
param_size = Enum.reduce(Tuple.to_list(param_shape), 1, fn val, acc -> acc * val end)
%{weights_file: weights_file} = get_state()
bin_data = IO.binread(weights_file, param_size * 4)
# Check for :eof before decoding: the binary comprehension below would
# crash on a non-binary value, so a post-hoc check could never be reached.
if bin_data == :eof do
raise "Reached end of file during weights loading. Is the file corrupt?"
end
param_data =
(force_raw == false && for <<data::float-32-native <- bin_data>>, do: data) || bin_data
update_state(:weights_file, weights_file)
[param_name, param_data, param_shape]
end
@doc """
Creates the initializers with weights from the weights file together with
the input tensors.
Keyword arguments:
conv_params -- a ConvParams object
param_category -- the category of parameters to be created ('bn' or 'conv')
suffix -- a string determining the sub-type of above param_category (e.g.,
'weights' or 'bias')
"""
def create_param_tensors(conv_params, param_category, suffix, force_raw \\ false) do
[param_name, param_data, param_data_shape] = load_one_param_type(conv_params, param_category, suffix, force_raw)
initializer_tensor =
Helper.make_tensor(
param_name,
:FLOAT,
param_data_shape,
param_data,
force_raw
)
input_tensor =
Helper.make_tensor_value_info(
param_name,
:FLOAT,
param_data_shape
)
[initializer_tensor, input_tensor]
end
@doc """
Returns the initializers with weights from the weights file and
the input tensors of a convolutional layer for all corresponding ONNX nodes.
Keyword argument:
conv_params -- a ConvParams object
"""
def load_conv_weights(conv_params, force_raw \\ false) do
[init, input] =
if conv_params.batch_normalize == true do
[bias_init, bias_input] = create_param_tensors(conv_params, "bn", "bias", force_raw)
[bn_scale_init, bn_scale_input] = create_param_tensors(conv_params, "bn", "scale", force_raw)
[bn_mean_init, bn_mean_input] = create_param_tensors(conv_params, "bn", "mean", force_raw)
[bn_var_init, bn_var_input] = create_param_tensors(conv_params, "bn", "var", force_raw)
[
[bn_scale_init, bias_init, bn_mean_init, bn_var_init],
[bn_scale_input, bias_input, bn_mean_input, bn_var_input]
]
else
[bias_init, bias_input] = create_param_tensors(conv_params, "conv", "bias", force_raw)
[
[bias_init],
[bias_input]
]
end
[conv_init, conv_input] = create_param_tensors(conv_params, "conv", "weights", force_raw)
[Utils.cfl(init, conv_init), Utils.cfl(input, conv_input)]
end
@doc """
Returns the initializers with the value of the scale input
tensor given by upsample_params.
Keyword argument:
upsample_params -- a UpsampleParams object
"""
def load_upsample_scales(upsample_params) do
upsample_state = DarknetToOnnx.UpsampleParams.get_state(upsample_params.node_name)
name = DarknetToOnnx.UpsampleParams.generate_param_name(upsample_state)
scale_init =
Helper.make_tensor(
name,
1,
upsample_state.value.shape,
Nx.to_flat_list(upsample_state.value)
)
scale_input =
Helper.make_tensor_value_info(
name,
1,
upsample_state.value.shape
)
[[scale_init], [scale_input]]
end
end
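# --- Usage sketch (illustrative): load_one_param_type/4 decodes the packed
# weights with a binary comprehension over 32-bit native floats, e.g.:
bin = <<1.0::float-32-native, 2.5::float-32-native, -3.0::float-32-native>>
for <<f::float-32-native <- bin>>, do: f
# => [1.0, 2.5, -3.0]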
|
lib/darknet_to_onnx/weightloader.ex
| 0.894663
| 0.578002
|
weightloader.ex
|
starcoder
|
defmodule ExOsrsApi.PlayerHighscores do
@moduledoc """
### PlayerHighscores
Holds PlayerHighscores `skills` and `activities` data
"""
alias ExOsrsApi.Models.Skills
alias ExOsrsApi.Models.Activities
alias ExOsrsApi.Errors.Error
@enforce_keys [:username, :type, :skills, :activities, :empty]
defstruct [:username, :type, :skills, :activities, :empty]
@type t() :: %__MODULE__{
username: String.t(),
type: atom(),
skills: Skills.t() | nil,
activities: Activities.t() | nil,
empty: boolean()
}
@spec new_empty(String.t(), atom) :: {:ok, t()}
def new_empty(username, type) when is_atom(type) do
{:ok,
%__MODULE__{
username: username,
type: type,
skills: nil,
activities: nil,
empty: true
}}
end
@doc """
Creates a new `%ExOsrsApi.PlayerHighscores{}` from a CSV-like string.
You can supply your own activity list by passing a list of activity names (strings) as the last argument.
"""
@spec new_from_bitstring(String.t(), atom(), String.t(), list(String.t())) ::
{:error, Error.t()} | {:ok, t()}
def new_from_bitstring(
username,
type,
data,
supported_activities \\ Activities.get_all_default_activities()
)
when is_bitstring(username) and is_bitstring(data) do
{skills, activities} =
data
|> String.split("\n", trim: true)
|> Enum.split(Skills.skill_length())
with {:ok, skills} <- skills |> Skills.new_from_bitstring(),
{:ok, activities} <- activities |> Activities.new_from_bitstring(supported_activities) do
{:ok,
%__MODULE__{
username: username,
type: type,
skills: skills,
activities: activities,
empty: false
}}
else
{:error, error} -> {:error, error}
end
end
@doc """
Check if `PlayerHighscores` data is empty
"""
@spec is_empty?(t()) :: boolean()
def is_empty?(%__MODULE__{empty: empty}) do
empty
end
@doc """
Get `PlayerHighscores` skills data
"""
@spec get_skills(t()) :: Skills.t() | nil
def get_skills(%__MODULE__{skills: skills}) do
skills
end
@doc """
Get `PlayerHighscores` activities data
"""
@spec get_activities(t()) :: Activities.t() | nil
def get_activities(%__MODULE__{activities: activities}) do
activities
end
@doc """
Get `PlayerHighscores` specific skill data by skill name (atom)
"""
@spec get_skill_data(t(), atom) ::
{:error, Error.t()} | {:ok, ExOsrsApi.Models.SkillEntry.t()}
def get_skill_data(%__MODULE__{skills: %Skills{} = skills}, skill) when is_atom(skill) do
Skills.get_skill_data(skills, skill)
end
@doc """
Get `PlayerHighscores` specific activity data by activity name (string)
"""
@spec get_activity_data(t(), binary) ::
{:error, Error.t()} | {:ok, ExOsrsApi.Models.ActivityEntry.t()}
def get_activity_data(%__MODULE__{activities: activities}, activity)
when is_bitstring(activity) do
Activities.get_activity_data(activities, activity)
end
@doc """
Get `PlayerHighscores` non nil skills data
"""
@spec get_non_nil_skills(t()) :: list(ExOsrsApi.Models.SkillEntry.t())
def get_non_nil_skills(%__MODULE__{skills: skills}) do
skills.data
|> Enum.filter(fn x -> x.rank != nil end)
end
@doc """
Get `PlayerHighscores` non nil activities data
"""
@spec get_non_nil_activities(t()) :: list(ExOsrsApi.Models.ActivityEntry.t())
def get_non_nil_activities(%__MODULE__{activities: activities}) do
activities.data
|> Enum.filter(fn x -> x.rank != nil end)
end
end
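# --- Usage sketch (illustrative; `csv_body` stands for the raw highscores
# response, whose exact layout is defined by Skills and Activities) ---
#
#     {:ok, hs} = ExOsrsApi.PlayerHighscores.new_from_bitstring("Zezima", :regular, csv_body)
#     ExOsrsApi.PlayerHighscores.is_empty?(hs)
#     # => false
#     {:ok, attack} = ExOsrsApi.PlayerHighscores.get_skill_data(hs, :attack)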
|
lib/player_highscores.ex
| 0.865309
| 0.429848
|
player_highscores.ex
|
starcoder
|
defmodule Draconic.Flag do
alias __MODULE__
defstruct name: nil, alias: nil, type: nil, description: "", default: nil
@typedoc "The name of the flag, `--verbose` would have the name `:verbose`"
@type name() :: atom()
@typedoc "A flags alias, if `-v` maps to `--verbose` then it's alias is `:v`."
@type alias() :: atom()
@typedoc """
Poorly named wrapper for the potential names of a flag (used outside this module).
A flag name can either be a name (atom) or a tuple of a name and alias (both atoms).
So `:verbose` is valid and `{:verbose, :v}` as well.
"""
@type flag_name() :: name() | {name(), alias()}
@typedoc """
A flag kind that is supported by `OptionParser`.
"""
@type flag_kind() :: :boolean | :string | :integer | :float | :count
@typedoc """
Represents the type of the _data_ associated with a flag. For example, a flag
like `:num` may have a `flag_kind()` of `:integer`, but its actual value
(given by the user) may be `10` (in the case of `--num 10`).
"""
@type flag_value_kind() :: boolean() | String.t() | integer() | float() | nil
@typedoc """
A simple type used in the spec of `t()` to define that a type can be a kind
or a list containing a kind and the atom `:keep`.
"""
@type flag_type() :: flag_kind() | [flag_kind() | :keep]
@typedoc """
Mirrors the relationship between `flag_kind()` and `flag_value_kind()`: this
type refers to the value provided by the user (or the default value of the flag).
"""
@type flag_value_type() :: flag_value_kind() | [flag_value_kind()]
@typedoc """
A structure to represent an application flag, which has a name, an optional
alias (shorthand), a description, a type and an optional default.
"""
@type t() :: %Flag{
name: name(),
alias: alias(),
description: String.t(),
type: flag_type(),
default: term()
}
@typedoc """
A 2-tuple of string values where the first value is the flag representation
as you would expect users to pass them on the command line
application and the second value is simply the description.
"""
@type string_parts() :: {String.t(), String.t()}
@typedoc """
A map that maps a name (should be an atom) to a `%Draconic.Flag{}` struct defining
the flag. This flag definition is used when building a map from a set of flags
provided by the user when invoking the application.
"""
@type flag_definition() :: %{required(name()) => t()}
@typedoc """
A map mapping a flag name (should be an atom) to a value that represents the
information the user provided for that flag (or its default value). All
defined flags should appear in a flag map, regardless of whether they were
explicitly passed.
"""
@type flag_map() :: %{name() => flag_value_type()}
@doc """
Generates a tuple pair where the first value is the string flag names stylized as
they will be expected by the CLI (--flag or -f for long and alias respectively) and
the second item in the tuple is the description. This structure is chosen to ease
formatting so you can determine how to join the two parts before rendering it, for
example in a help renderer.
## Parameters
- flag: The `%Draconic.Flag{}` struct to provide string parts for.
## Returns
Returns a tuple; for a flag named 'name' with an alias 'n' and a description
"This is a name.", you would expect to see {"--name, -n", "This is a name."}.
## Examples
iex> Draconic.Flag.string_parts(%Draconic.Flag{name: "name", alias: "n", description: "This is the name."})
{"--name, -n", "This is the name."}
iex> Draconic.Flag.string_parts(%Draconic.Flag{name: "verbose", description: "Display lots of information"})
{"--verbose", "Display lots of information"}
"""
@spec string_parts(t()) :: string_parts()
def string_parts(%Flag{name: name, type: type, alias: flag_alias, description: desc}) do
string_parts(name, flag_alias, desc, type)
end
@spec string_parts(name(), nil, String.t(), :boolean) :: string_parts()
defp string_parts(name, nil, desc, :boolean) do
{"--[no-]" <> to_string(name), desc}
end
@spec string_parts(name(), alias(), String.t(), flag_type()) :: string_parts()
defp string_parts(name, nil, desc, _type) do
{"--" <> to_string(name), desc}
end
@spec string_parts(name(), alias(), String.t(), :boolean) :: string_parts()
defp string_parts(name, flag_alias, desc, :boolean) do
long = "--[no-]" <> to_string(name)
short = "-" <> to_string(flag_alias)
{long <> ", " <> short, desc}
end
@spec string_parts(name(), alias(), String.t(), flag_type()) :: string_parts()
defp string_parts(name, flag_alias, desc, _type) do
long = "--" <> to_string(name)
short = "-" <> to_string(flag_alias)
{long <> ", " <> short, desc}
end
@doc """
Take a list of flags and produce a keyword list containing :strict and :aliases keys
based on the flag data provided, this will be fed to `OptionParser.parse/2` to parse
the provided input from the user.
## Parameters
- flags: A list of `%Draconic.Flag{}` structs that will be used to generate the
keyword list.
## Returns
Returns a keyword list, containing :strict and :aliases.
## Examples
iex> Draconic.Flag.to_options([%Draconic.Flag{name: :verbose, type: :boolean}, %Draconic.Flag{name: :input, alias: :i, type: :string}])
[strict: [verbose: :boolean, input: :string], aliases: [i: :input]]
"""
@spec to_options([t()]) :: keyword()
def to_options(flags) do
{switches, aliases} =
flags
|> Enum.map(&option_parser_parts/1)
|> Enum.reduce({[], []}, &reduce_option_parser_data/2)
[strict: Enum.reverse(switches), aliases: Enum.reverse(aliases)]
end
@spec reduce_option_parser_data({name(), nil}, {keyword(), keyword()}) :: {keyword(), keyword()}
defp reduce_option_parser_data({switch, nil}, {switches, aliases}) do
{[switch | switches], aliases}
end
@spec reduce_option_parser_data(
{{name(), flag_type()}, {name(), alias()}},
{keyword(), keyword()}
) :: {keyword(), keyword()}
defp reduce_option_parser_data({switch, alias_data}, {switches, aliases}) do
{[switch | switches], [alias_data | aliases]}
end
@spec option_parser_parts(t()) :: {{name(), flag_type()}, nil}
defp option_parser_parts(%Flag{name: name, alias: nil, type: type}) do
{{name, type}, nil}
end
@spec option_parser_parts(t()) :: {{name(), flag_type()}, {name(), alias()}}
defp option_parser_parts(%Flag{name: name, alias: flag_alias, type: type}) do
{{name, type}, {flag_alias, name}}
end
@doc """
"""
@spec to_map(flag_definition(), keyword()) :: flag_map()
def to_map(flag_definitions, passed_flags) do
built_map =
passed_flags
|> Enum.reduce(%{}, &insert_into_flag_map/2)
|> Enum.map(&reverse_flag_value_lists/1)
|> Enum.into(%{})
flag_definitions
|> Enum.map(find_missing_flags_from(built_map))
|> Enum.filter(fn x -> x != nil end)
|> Enum.into(built_map)
end
@spec insert_into_flag_map({name(), flag_value_type()}, flag_map()) :: flag_map()
defp insert_into_flag_map({flag_key, flag_val}, flag_map) do
case Map.fetch(flag_map, flag_key) do
{:ok, value_list} when is_list(value_list) ->
Map.put(flag_map, flag_key, [flag_val | value_list])
{:ok, value} ->
Map.put(flag_map, flag_key, [flag_val, value])
:error ->
Map.put(flag_map, flag_key, flag_val)
end
end
@spec find_missing_flags_from(flag_map()) ::
({name(), t()} -> nil | {name(), flag_value_type()})
defp find_missing_flags_from(map) do
fn {key, flag_def} ->
case Map.fetch(map, key) do
{:ok, _} -> nil
:error -> {key, flag_def.default}
end
end
end
@spec reverse_flag_value_lists({name(), flag_value_type()}) :: {name(), flag_value_type()}
defp reverse_flag_value_lists({key, value}) when is_list(value), do: {key, Enum.reverse(value)}
@spec reverse_flag_value_lists({name(), flag_value_type()}) :: {name(), flag_value_type()}
defp reverse_flag_value_lists(entry), do: entry
end
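# --- Usage sketch (illustrative): round trip from flag definitions through
# OptionParser and back into a flag map with defaults filled in.
flags = [
  %Draconic.Flag{name: :verbose, type: :boolean, default: false},
  %Draconic.Flag{name: :input, alias: :i, type: :string, default: "stdin"}
]

opts = Draconic.Flag.to_options(flags)
# => [strict: [verbose: :boolean, input: :string], aliases: [i: :input]]

{parsed, _argv, _invalid} = OptionParser.parse(["--verbose"], opts)
defs = Map.new(flags, &{&1.name, &1})
Draconic.Flag.to_map(defs, parsed)
# => %{verbose: true, input: "stdin"}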
|
lib/draconic/flag.ex
| 0.928376
| 0.479808
|
flag.ex
|
starcoder
|
defmodule Porcelain.Process do
@moduledoc """
Module for working with external processes launched with `Porcelain.spawn/3`
or `Porcelain.spawn_shell/2`.
"""
alias __MODULE__, as: P
@doc """
A struct representing a wrapped OS processes which provides the ability to
exchange data with it.
"""
defstruct [:pid, :out, :err]
@type t :: %__MODULE__{}
@type signal :: :int | :kill | non_neg_integer
@doc """
Send iodata to the process's stdin.
End of input is indicated by sending an empty message.
**Caveat**: when using `Porcelain.Driver.Basic`, it is not possible to
indicate the end of input. You should stop the process explicitly using
`stop/1`.
"""
@spec send_input(t, iodata) :: iodata
def send_input(%P{pid: pid}, data) do
send(pid, {:input, data})
end
@doc """
Wait for the external process to terminate.
Returns `Porcelain.Result` struct with the process's exit status and output.
Automatically closes the underlying port in this case.
If timeout value is specified and the external process fails to terminate
before it runs out, atom `:timeout` is returned.
"""
@spec await(t, non_neg_integer | :infinity) :: {:ok, Porcelain.Result.t} | {:error, :noproc | :timeout}
def await(%P{pid: pid}, timeout \\ :infinity) do
mon = Process.monitor(pid)
ref = make_ref()
send(pid, {:get_result, self(), ref})
receive do
{^ref, result} ->
Process.demonitor(mon, [:flush])
{:ok, result}
{:DOWN, ^mon, _, _, _info} -> {:error, :noproc}
after timeout ->
Process.demonitor(mon, [:flush])
{:error, :timeout}
end
end
@doc """
Check if the process is still running.
"""
@spec alive?(t) :: true | false
def alive?(%P{pid: pid}) do
#FIXME: does not work with pids from another node
Process.alive?(pid)
end
@doc """
Stops the process created with `Porcelain.spawn/3` or
`Porcelain.spawn_shell/2`. Also closes the underlying Erlang port.
May cause "broken pipe" message to be written to stderr.
## Caveats
When using `Porcelain.Driver.Basic`, Porcelain will merely close the Erlang port
connected to that process. This normally causes an external process to terminate provided that it
is listening on its `stdin`. If not, the external process will continue running.
See http://erlang.org/pipermail/erlang-questions/2010-March/050227.html for some background info.
When using `Porcelain.Driver.Goon`, a `SIGTERM` signal will be sent to the external process. If it
doesn't terminate after `:goon_stop_timeout` seconds, a `SIGKILL` will be sent to the process.
"""
@spec stop(t) :: true
def stop(%P{pid: pid}) do
mon = Process.monitor(pid)
ref = make_ref()
send(pid, {:stop, self(), ref})
receive do
{^ref, :stopped} -> Process.demonitor(mon, [:flush])
{:DOWN, ^mon, _, _, _info} -> true
end
end
@doc """
Send an OS signal to the process.
No further communication with the process is possible after sending it a
signal.
"""
@spec signal(t, signal) :: signal
def signal(%P{pid: pid}, sig) do
send(pid, {:signal, sig})
end
end
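# --- Usage sketch (illustrative; ending input requires the Goon driver, see
# the caveats above) ---
#
#     proc = Porcelain.spawn("grep", ["foo"], in: :receive, out: :string)
#     Porcelain.Process.send_input(proc, "foo bar\nbaz\n")
#     Porcelain.Process.send_input(proc, "")   # signal end of input
#     {:ok, result} = Porcelain.Process.await(proc)
#     result.out
#     # => "foo bar\n"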
|
lib/porcelain/process.ex
| 0.719088
| 0.532425
|
process.ex
|
starcoder
|
defmodule Logger.Translator do
@moduledoc """
Default translation for Erlang log messages.
Logger allows developers to rewrite log messages provided by
OTP applications into a format more compatible with Elixir
log messages by providing a translator.
A translator is simply a tuple containing a module and a function
that can be added and removed via the `Logger.add_translator/1` and
`Logger.remove_translator/1` functions and is invoked for every Erlang
message above the minimum log level with four arguments:
* `min_level` - the current Logger level
* `level` - the level of the message being translated
* `kind` - if the message is a `:report` or `:format`
* `message` - the message to format. If it is `:report`, it is a tuple
with `{report_type, report_data}`, if it is `:format`, it is a
tuple with `{format_message, format_args}`.
The function must return:
* `{:ok, chardata, metadata}` - the translated message with its metadata
* `{:ok, chardata}` - the translated message
* `:skip` - if the message is not meant to be translated nor logged
* `:none` - if there is no translation, which triggers the next translator
See the function `translate/4` in this module for an example implementation
and the default messages translated by Logger.
"""
@doc """
Built-in translation function.
"""
def translate(min_level, level, kind, message)
## Erlang/OTP 21 and after
def translate(min_level, _level, :report, {:logger, %{label: label} = report}) do
case label do
{:gen_server, :terminate} ->
report_gen_server_terminate(min_level, report)
{:gen_event, :terminate} ->
report_gen_event_terminate(min_level, report)
_ ->
:skip
end
end
def translate(min_level, _level, :report, {{:proc_lib, :crash}, data}) do
report_crash(min_level, data)
end
def translate(min_level, _level, :report, {{:supervisor, :progress}, data}) do
report_supervisor_progress(min_level, data)
end
def translate(min_level, _level, :report, {{:supervisor, _}, data}) do
report_supervisor(min_level, data)
end
def translate(
_min_level,
_level,
:report,
{{:application_controller, :progress}, [application: app, started_at: node]}
) do
{:ok, ["Application ", Atom.to_string(app), " started at " | inspect(node)]}
end
def translate(
_min_level,
_level,
:report,
{{:application_controller, :exit}, [application: app, exited: reason, type: _type]}
) do
{:ok, ["Application ", Atom.to_string(app), " exited: " | Application.format_error(reason)]}
end
def translate(
_min_level,
:error,
:report,
{{Task.Supervisor, :terminating},
%{
name: name,
starter: starter,
function: function,
args: args,
reason: reason
}}
) do
opts = Application.get_env(:logger, :translator_inspect_opts)
{formatted, reason} = format_reason(reason)
metadata = [crash_reason: reason] ++ registered_name(name)
msg =
["Task #{inspect(name)} started from #{inspect(starter)} terminating"] ++
[formatted, "\nFunction: #{inspect(function, opts)}"] ++
["\n Args: #{inspect(args, opts)}"]
{:ok, msg, metadata}
end
def translate(min_level, :error, :format, message) do
case message do
# This is no longer emitted by Erlang/OTP but it may be
# manually emitted by libraries like connection.
# TODO: Remove this translation on Elixir v1.14
{'** Generic server ' ++ _, [name, last, state, reason | client]} ->
opts = Application.get_env(:logger, :translator_inspect_opts)
{formatted, reason} = format_reason(reason)
metadata = [crash_reason: reason] ++ registered_name(name)
msg =
["GenServer #{inspect(name)} terminating", formatted] ++
["\nLast message#{format_from(client)}: #{inspect(last, opts)}"]
if min_level == :debug do
msg = [msg, "\nState: #{inspect(state, opts)}" | format_client(client)]
{:ok, msg, metadata}
else
{:ok, msg, metadata}
end
{'Error in process ' ++ _, [pid, node, {reason, stack}]} ->
reason = Exception.normalize(:error, reason, stack)
msg = [
"Process ",
inspect(pid),
" on node ",
inspect(node),
" raised an exception" | format(:error, reason, stack)
]
{:ok, msg, [crash_reason: exit_reason(:error, reason, stack)]}
{'Error in process ' ++ _, [pid, {reason, stack}]} ->
reason = Exception.normalize(:error, reason, stack)
msg = ["Process ", inspect(pid), " raised an exception" | format(:error, reason, stack)]
{:ok, msg, [crash_reason: exit_reason(:error, reason, stack)]}
_ ->
:none
end
end
def translate(_min_level, :info, :report, {
:std_info,
[application: app, exited: reason, type: _type]
}) do
{:ok, ["Application ", Atom.to_string(app), " exited: " | Application.format_error(reason)]}
end
def translate(min_level, :error, :report, {{:error_logger, :error_report}, data}) do
report_supervisor(min_level, data)
end
def translate(min_level, :error, :report, {:supervisor_report, data}) do
report_supervisor(min_level, data)
end
def translate(min_level, :error, :report, {:crash_report, data}) do
report_crash(min_level, data)
end
def translate(min_level, :info, :report, {:progress, [{:supervisor, _} | _] = data}) do
report_supervisor_progress(min_level, data)
end
def translate(_min_level, :info, :report, {:progress, [application: app, started_at: node]}) do
{:ok, ["Application ", Atom.to_string(app), " started at " | inspect(node)]}
end
## Helpers
def translate(_min_level, _level, _kind, _message) do
:none
end
defp report_gen_server_terminate(min_level, report) do
inspect_opts = Application.get_env(:logger, :translator_inspect_opts)
%{
client_info: client,
last_message: last,
name: name,
reason: reason,
state: state
} = report
{formatted, reason} = format_reason(reason)
metadata = [crash_reason: reason] ++ registered_name(name)
msg =
["GenServer ", inspect(name), " terminating", formatted] ++
["\nLast message", format_last_message_from(client), ": ", inspect(last, inspect_opts)]
if min_level == :debug do
msg = [msg, "\nState: ", inspect(state, inspect_opts) | format_client_info(client)]
{:ok, msg, metadata}
else
{:ok, msg, metadata}
end
end
defp report_gen_event_terminate(min_level, report) do
inspect_opts = Application.get_env(:logger, :translator_inspect_opts)
%{
handler: handler,
last_message: last,
name: name,
reason: reason,
state: state
} = report
reason =
case reason do
{:EXIT, why} -> why
_ -> reason
end
{formatted, reason} = format_reason(reason)
metadata = [crash_reason: reason] ++ registered_name(name)
msg =
[":gen_event handler ", inspect(handler), " installed in ", inspect(name), " terminating"] ++
[formatted, "\nLast message: ", inspect(last, inspect_opts)]
if min_level == :debug do
{:ok, [msg, "\nState: ", inspect(state, inspect_opts)], metadata}
else
{:ok, msg, metadata}
end
end
defp report_supervisor_progress(
min_level,
supervisor: sup,
started: [{:pid, pid}, {:id, id} | started]
) do
msg =
["Child ", inspect(id), " of Supervisor ", sup_name(sup), " started"] ++
["\nPid: ", inspect(pid)] ++ child_info(min_level, started)
{:ok, msg}
end
defp report_supervisor_progress(
min_level,
supervisor: sup,
started: [{:pid, pid} | started]
) do
msg =
["Child of Supervisor ", sup_name(sup), " started", "\nPid: ", inspect(pid)] ++
child_info(min_level, started)
{:ok, msg}
end
defp report_supervisor_progress(_min_level, _other), do: :none
defp report_supervisor(
min_level,
supervisor: sup,
errorContext: context,
reason: reason,
offender: [{:pid, pid}, {:id, id} | offender]
) do
pid_info =
if is_pid(pid) and context != :shutdown do
["\nPid: ", inspect(pid)]
else
[]
end
msg =
["Child ", inspect(id), " of Supervisor ", sup_name(sup)] ++
[?\s, sup_context(context), "\n** (exit) ", offender_reason(reason, context)] ++
pid_info ++ child_info(min_level, offender)
{:ok, msg}
end
defp report_supervisor(
min_level,
supervisor: sup,
errorContext: context,
reason: reason,
offender: [{:nb_children, n}, {:id, id} | offender]
) do
msg =
["Children ", inspect(id), " of Supervisor ", sup_name(sup), ?\s, sup_context(context)] ++
["\n** (exit) ", offender_reason(reason, context), "\nNumber: ", Integer.to_string(n)] ++
child_info(min_level, offender)
{:ok, msg}
end
defp report_supervisor(
min_level,
supervisor: sup,
errorContext: context,
reason: reason,
offender: [{:pid, pid} | offender]
) do
msg =
["Child of Supervisor ", sup_name(sup), ?\s, sup_context(context)] ++
["\n** (exit) ", offender_reason(reason, context), "\nPid: ", inspect(pid)] ++
child_info(min_level, offender)
{:ok, msg}
end
defp report_supervisor(_min_level, _other), do: :none
# If start call raises reason will be of form {:EXIT, reason}
defp offender_reason({:EXIT, reason}, :start_error) do
Exception.format_exit(reason)
end
defp offender_reason(reason, _context) do
Exception.format_exit(reason)
end
defp sup_name({:local, name}), do: inspect(name)
defp sup_name({:global, name}), do: inspect(name)
defp sup_name({:via, _mod, name}), do: inspect(name)
defp sup_name({pid, mod}), do: [inspect(pid), " (", inspect(mod), ?)]
defp sup_name(unknown_name), do: inspect(unknown_name)
defp sup_context(:start_error), do: "failed to start"
defp sup_context(:child_terminated), do: "terminated"
defp sup_context(:shutdown), do: "caused shutdown"
defp sup_context(:shutdown_error), do: "shut down abnormally"
defp child_info(min_level, [{:mfargs, {mod, fun, args}} | debug]) do
["\nStart Call: ", format_mfa(mod, fun, args) | child_debug(min_level, debug)]
end
# Comes from bridge with MFA
defp child_info(min_level, [{:mfa, {mod, fun, args}} | debug]) do
["\nStart Call: ", format_mfa(mod, fun, args) | child_debug(min_level, debug)]
end
# Comes from bridge with Mod
defp child_info(min_level, [{:mod, mod} | debug]) do
["\nStart Module: ", inspect(mod) | child_debug(min_level, debug)]
end
defp child_info(_min_level, _child) do
[]
end
defp child_debug(:debug, restart_type: restart, shutdown: shutdown, child_type: type) do
["\nRestart: ", inspect(restart), "\nShutdown: ", inspect(shutdown)] ++
["\nType: ", inspect(type)]
end
defp child_debug(_min_level, _child) do
[]
end
defp report_crash(min_level, [[{:initial_call, initial_call} | crashed], linked]) do
mfa = initial_call_to_mfa(initial_call)
report_crash(min_level, crashed, [{:initial_call, mfa}], linked)
end
defp report_crash(min_level, [crashed, linked]) do
report_crash(min_level, crashed, [], linked)
end
defp report_crash(min_level, crashed, extra, linked) do
[
{:pid, pid},
{:registered_name, name},
{:error_info, {kind, reason, stack}} | crashed
] = crashed
dictionary = crashed[:dictionary]
reason = Exception.normalize(kind, reason, stack)
case Keyword.get(dictionary, :logger_enabled, true) do
false ->
:skip
true ->
user_metadata = Keyword.get(dictionary, :"$logger_metadata$", %{}) |> Map.to_list()
msg =
["Process ", crash_name(pid, name), " terminating", format(kind, reason, stack)] ++
[crash_info(min_level, extra ++ crashed, [?\n]), crash_linked(min_level, linked)]
extra =
if ancestors = crashed[:ancestors], do: [{:ancestors, ancestors} | extra], else: extra
extra =
if callers = dictionary[:"$callers"], do: [{:callers, callers} | extra], else: extra
extra = [{:crash_reason, exit_reason(kind, reason, stack)} | extra]
{:ok, msg, registered_name(name) ++ extra ++ user_metadata}
end
end
defp initial_call_to_mfa({:supervisor, module, _}), do: {module, :init, 1}
defp initial_call_to_mfa({:supervisor_bridge, module, _}), do: {module, :init, 1}
defp initial_call_to_mfa({mod, fun, args}) when is_list(args), do: {mod, fun, length(args)}
defp initial_call_to_mfa(mfa), do: mfa
defp crash_name(pid, []), do: inspect(pid)
defp crash_name(pid, name), do: [inspect(name), " (", inspect(pid), ?)]
defp crash_info(min_level, [{:initial_call, {mod, fun, args}} | info], prefix) do
[prefix, "Initial Call: ", crash_call(mod, fun, args) | crash_info(min_level, info, prefix)]
end
defp crash_info(min_level, [{:current_function, {mod, fun, args}} | info], prefix) do
[prefix, "Current Call: ", crash_call(mod, fun, args) | crash_info(min_level, info, prefix)]
end
defp crash_info(min_level, [{:current_function, []} | info], prefix) do
crash_info(min_level, info, prefix)
end
defp crash_info(min_level, [{:ancestors, ancestors} | debug], prefix) do
[prefix, "Ancestors: ", inspect(ancestors) | crash_info(min_level, debug, prefix)]
end
defp crash_info(:debug, debug, prefix) do
for {key, value} <- debug do
crash_debug(key, value, prefix)
end
end
defp crash_info(_, _, _) do
[]
end
defp crash_call(mod, fun, arity) when is_integer(arity) do
format_mfa(mod, fun, arity)
end
defp crash_call(mod, fun, args) do
format_mfa(mod, fun, length(args))
end
defp crash_debug(:current_stacktrace, stack, prefix) do
stack_prefix = [prefix | " "]
stacktrace = Enum.map(stack, &[stack_prefix | Exception.format_stacktrace_entry(&1)])
[prefix, "Current Stacktrace:" | stacktrace]
end
defp crash_debug(key, value, prefix) do
[prefix, crash_debug_key(key), ?:, ?\s, inspect(value)]
end
defp crash_debug_key(key) do
case key do
:message_queue_len -> "Message Queue Length"
:messages -> "Messages"
:links -> "Links"
:dictionary -> "Dictionary"
:trap_exit -> "Trapping Exits"
:status -> "Status"
:heap_size -> "Heap Size"
:stack_size -> "Stack Size"
:reductions -> "Reductions"
end
end
defp crash_linked(_min_level, []), do: []
defp crash_linked(min_level, neighbours) do
Enum.reduce(neighbours, "\nNeighbours:", fn {:neighbour, info}, acc ->
[acc | crash_neighbour(min_level, info)]
end)
end
@indent " "
defp crash_neighbour(min_level, [{:pid, pid}, {:registered_name, []} | info]) do
[?\n, @indent, inspect(pid) | crash_info(min_level, info, [?\n, @indent | @indent])]
end
defp crash_neighbour(min_level, [{:pid, pid}, {:registered_name, name} | info]) do
[?\n, @indent, inspect(name), " (", inspect(pid), ")"] ++
crash_info(min_level, info, [?\n, @indent | @indent])
end
defp format_last_message_from({_, {name, _}}), do: [" (from ", inspect(name), ")"]
defp format_last_message_from({from, _}), do: [" (from ", inspect(from), ")"]
defp format_last_message_from(_), do: []
defp format_client_info({from, :dead}),
do: ["\nClient ", inspect(from), " is dead"]
defp format_client_info({from, :remote}),
do: ["\nClient ", inspect(from), " is remote on node ", inspect(node(from))]
defp format_client_info({_, {name, stacktrace}}),
do: ["\nClient ", inspect(name), " is alive\n" | format_stacktrace(stacktrace)]
defp format_client_info(_),
do: []
defp format_reason({maybe_exception, [_ | _] = maybe_stacktrace} = reason) do
try do
format_stacktrace(maybe_stacktrace)
catch
:error, _ ->
{format_stop(reason), {reason, []}}
else
formatted_stacktrace ->
{formatted, reason} = maybe_normalize(maybe_exception, maybe_stacktrace)
{[formatted | formatted_stacktrace], {reason, maybe_stacktrace}}
end
end
defp format_reason(reason) do
{format_stop(reason), {reason, []}}
end
defp format_stop(reason) do
["\n** (stop) " | Exception.format_exit(reason)]
end
# Erlang processes rewrite the :undef error to these reasons when logging
@gen_undef [:"module could not be loaded", :"function not exported"]
defp maybe_normalize(undef, [{mod, fun, args, _info} | _] = stacktrace)
when undef in @gen_undef and is_atom(mod) and is_atom(fun) do
cond do
is_list(args) ->
format_undef(mod, fun, length(args), undef, stacktrace)
is_integer(args) ->
format_undef(mod, fun, args, undef, stacktrace)
true ->
{format_stop(undef), undef}
end
end
defp maybe_normalize(reason, stacktrace) do
# If this is already an exception (even an ErlangError), we format it as an
# exception. Otherwise, we try to normalize it, and if it's normalized as an
# ErlangError we instead format it as an exit.
if Exception.exception?(reason) do
{[?\n | Exception.format_banner(:error, reason, stacktrace)], reason}
else
case Exception.normalize(:error, reason, stacktrace) do
%ErlangError{} ->
{format_stop(reason), reason}
exception ->
{[?\n | Exception.format_banner(:error, exception, stacktrace)], exception}
end
end
end
defp format(kind, payload, stacktrace) do
[?\n, Exception.format_banner(kind, payload, stacktrace) | format_stacktrace(stacktrace)]
end
defp format_stacktrace(stacktrace) do
for entry <- stacktrace do
["\n " | Exception.format_stacktrace_entry(entry)]
end
end
defp registered_name(name) when is_atom(name), do: [registered_name: name]
defp registered_name(_name), do: []
defp format_mfa(mod, fun, :undefined),
do: [inspect(mod), ?., Code.Identifier.inspect_as_function(fun) | "/?"]
defp format_mfa(mod, fun, args),
do: Exception.format_mfa(mod, fun, args)
defp exit_reason(:exit, reason, stack), do: {reason, stack}
defp exit_reason(:error, reason, stack), do: {reason, stack}
defp exit_reason(:throw, value, stack), do: {{:nocatch, value}, stack}
## Deprecated helpers
defp format_from([]), do: ""
defp format_from([from]), do: " (from #{inspect(from)})"
defp format_from([from, stacktrace]) when is_list(stacktrace), do: " (from #{inspect(from)})"
defp format_from([from, node_name]) when is_atom(node_name),
do: " (from #{inspect(from)} on #{inspect(node_name)})"
defp format_client([from]) do
"\nClient #{inspect(from)} is dead"
end
defp format_client([from, stacktrace]) when is_list(stacktrace) do
["\nClient #{inspect(from)} is alive\n" | format_stacktrace(stacktrace)]
end
defp format_client(_) do
[]
end
defp format_undef(mod, fun, arity, undef, stacktrace) do
opts = [module: mod, function: fun, arity: arity, reason: undef]
exception = UndefinedFunctionError.exception(opts)
{[?\n | Exception.format_banner(:error, exception, stacktrace)], exception}
end
end
|
lib/logger/lib/logger/translator.ex
| 0.753557
| 0.454835
|
translator.ex
|
starcoder
|
defmodule Phoenix.Endpoint do
@moduledoc """
Defines a Phoenix endpoint.
The endpoint is the boundary where all requests to your
web application start. It is also the interface your
application provides to the underlying web servers.
Overall, an endpoint has three responsibilities:
* to provide a wrapper for starting and stopping the
endpoint as part of a supervision tree;
* to define an initial plug pipeline where requests
are sent through;
* to host web specific configuration for your
application.
## Endpoints
An endpoint is simply a module defined with the help
of `Phoenix.Endpoint`. If you have used the `mix phoenix.new`
generator, an endpoint was automatically generated as
part of your application:
defmodule YourApp.Endpoint do
use Phoenix.Endpoint, otp_app: :your_app
# plug ...
# plug ...
plug YourApp.Router
end
Before being used, an endpoint must be explicitly started as part
of your application supervision tree too (which is again done by
default in generated applications):
supervisor(YourApp.Endpoint, [])
### Endpoint configuration
All endpoints are configured in your application environment.
For example:
config :your_app, YourApp.Endpoint,
secret_key_base: "<KEY>"
Endpoint configuration is split into two categories. Compile-time
configuration means the configuration is read during compilation
and changing it at runtime has no effect. The compile-time
configuration is mostly related to error handling and instrumentation.
Runtime configuration, instead, is accessed during or
after your application is started and can be read and written through the
`config/2` function:
YourApp.Endpoint.config(:port)
YourApp.Endpoint.config(:some_config, :default_value)
### Compile-time configuration
* `:code_reloader` - when `true`, enables code reloading functionality
* `:debug_errors` - when `true`, uses `Plug.Debugger` functionality for
debugging failures in the application. Recommended to be set to `true`
only in development as it allows listing of the application source
code during debugging. Defaults to `false`.
* `:render_errors` - responsible for rendering templates whenever there
is a failure in the application. For example, if the application crashes
with a 500 error during an HTML request, `render("500.html", assigns)`
will be called in the view given to `:render_errors`. Defaults to:
[view: MyApp.ErrorView, accepts: ~w(html), layout: false]
The default format is used when none is set in the connection.
* `:instrumenters` - a list of instrumenters modules whose callbacks will
be fired on instrumentation events. Read more on instrumentation in the
"Instrumentation" section below.
### Runtime configuration
* `:root` - the root of your application for running external commands.
This is only required if the watchers or the code reloading functionality
is enabled.
* `:cache_static_manifest` - a path to a json manifest file that contains
static files and their digested version. This is typically set to
"priv/static/manifest.json" which is the file automatically generated
by `mix phoenix.digest`.
* `:check_origin` - configure transports to check origins or not. May
be false, true or a list of hosts that are allowed.
* `:http` - the configuration for the HTTP server. Currently uses
cowboy and accepts all options as defined by
[`Plug.Adapters.Cowboy`](https://hexdocs.pm/plug/Plug.Adapters.Cowboy.html).
Defaults to `false`.
* `:https` - the configuration for the HTTPS server. Currently uses
cowboy and accepts all options as defined by
[`Plug.Adapters.Cowboy`](https://hexdocs.pm/plug/Plug.Adapters.Cowboy.html).
Defaults to `false`.
* `:force_ssl` - ensures no data is ever sent via http, always redirecting
to https. It expects a list of options which are forwarded to `Plug.SSL`.
By default, it redirects http requests and sets the
"strict-transport-security" header for https ones.
* `:secret_key_base` - a secret key used as a base to generate secrets
to encode cookies, session and friends. Defaults to `nil` as it must
be set per application.
* `:server` - when `true`, starts the web server when the endpoint
supervision tree starts. Defaults to `false`. The `mix phoenix.server`
task automatically sets this to `true`.
* `:url` - configuration for generating URLs throughout the app.
Accepts the `:host`, `:scheme`, `:path` and `:port` options. All
keys except `:path` can be changed at runtime. Defaults to:
[host: "localhost", path: "/"]
The `:port` option requires either an integer, string, or
`{:system, "ENV_VAR"}`. When given a tuple like `{:system, "PORT"}`,
the port will be referenced from `System.get_env("PORT")` at runtime
as a workaround for releases where environment specific information
is loaded only at compile-time.
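For example, to read the port from the `PORT` environment variable at
runtime (the host shown is illustrative):
    config :your_app, YourApp.Endpoint,
      url: [host: "example.com", port: {:system, "PORT"}]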
* `:static_url` - configuration for generating URLs for static files.
It will fallback to `url` if no option is provided. Accepts the same
options as `url`.
* `:watchers` - a set of watchers to run alongside your server. It
expects a list of tuples containing the executable and its arguments.
Watchers are guaranteed to run in the application directory but only
when the server is enabled. For example, the watcher below will run
the "watch" mode of the brunch build tool when the server starts.
You can configure it to whatever build tool or command you want:
[node: ["node_modules/brunch/bin/brunch", "watch"]]
* `:live_reload` - configuration for the live reload option.
Configuration requires a `:paths` option which should be a list of
files to watch. When these files change, it will trigger a reload.
If you are using a tool like [pow](http://pow.cx) in development,
you may need to set the `:url` option appropriately.
[url: "ws://localhost:4000",
paths: [Path.expand("priv/static/js/phoenix.js")]]
* `:pubsub` - configuration for this endpoint's pubsub adapter.
Configuration either requires a `:name` of the registered pubsub
server or a `:name` and `:adapter` pair. The given adapter and
name pair will be started as part of the supervision tree. If
no adapter is specified, the pubsub system will work by sending
events and subscribing to the given name. Defaults to:
[adapter: Phoenix.PubSub.PG2, name: MyApp.PubSub]
It also supports custom adapter configuration:
[name: :my_pubsub, adapter: Phoenix.PubSub.Redis,
host: "192.168.100.1"]
## Endpoint API
In the previous section, we have used the `config/2` function which is
automatically generated in your endpoint. Here is a summary of all the
functions that are automatically defined in your endpoint.
#### Paths and URLs
* `struct_url()` - generates the endpoint base URL but as a `URI` struct
* `url()` - generates the endpoint base URL without any path information
* `path(path)` - generates the path information when routing to this endpoint
* `static_url()` - generates the static URL without any path information
* `static_path(path)` - generates a route to a static file in `priv/static`
#### Channels
* `subscribe(pid, topic, opts)` - subscribes the pid to the given topic.
See `Phoenix.PubSub.subscribe/4` for options.
* `unsubscribe(pid, topic)` - unsubscribes the pid from the given topic.
* `broadcast(topic, event, msg)` - broadcasts a `msg` with as `event`
in the given `topic`.
* `broadcast!(topic, event, msg)` - broadcasts a `msg` with as `event`
in the given `topic`. Raises in case of failures.
* `broadcast_from(from, topic, event, msg)` - broadcasts a `msg` from
the given `from` as `event` in the given `topic`.
* `broadcast_from!(from, topic, event, msg)` - broadcasts a `msg` from
the given `from` as `event` in the given `topic`. Raises in case of failures.
#### Endpoint configuration
* `start_link()` - starts the Endpoint supervision tree, including its
configuration cache and possibly the servers for handling requests
* `config(key, default)` - access the endpoint configuration given by key
* `config_change(changed, removed)` - reload the endpoint configuration
on application upgrades
#### Plug API
* `init(opts)` - invoked when starting the endpoint server
* `call(conn, opts)` - invoked on every request (simply dispatches to
the defined plug pipeline)
#### Instrumentation API
* `instrument(event, runtime_metadata \\ nil, function)` - read more about
instrumentation in the "Instrumentation" section
## Instrumentation
Phoenix supports instrumentation through an extensible API. Each endpoint
defines an `instrument/3` macro that both users and Phoenix internals can call
to instrument generic events. This macro is responsible for measuring the time
it takes for the event to happen and for notifying a list of interested
instrumenter modules of this measurement.
You can configure this list of instrumenter modules in the compile-time
configuration of your endpoint. (see the `:instrumenters` option above). The
way these modules express their interest in events is by exporting public
functions where the name of each function is the name of an event. For
example, if someone instruments the `:render_view` event, then each
instrumenter module interested in that event will have to export
`render_view/3`.
**Note**: since the configuration for the list of instrumenters is specified
at compile time but it's used inside Phoenix itself, if you change this
configuration you'll have to recompile Phoenix manually:
$ mix deps.compile phoenix
$ mix compile
### Callbacks cycle
The way event callbacks are called is the following.
1. The event callback is called *before* the event happens (in this case,
before the view is rendered) with the atom `:start` as the first
argument; see the "Before clause" section below.
2. The event happens (in this case, the view is rendered).
3. The same event callback is called again, this time with the atom `:stop`
as the first argument; see the "After clause" section below.
The second and third argument that each event callback takes depend on the
callback being an "after" or a "before" callback (i.e., they depend on the
value of the first argument, `:start` or `:stop`). For this reason, most of
the time you will want to define (at least) two separate clauses for each
event callback, one for the "before" and one for the "after" callbacks.
All event callbacks are run in the same process that calls the `instrument/3`
macro; hence, instrumenters should be careful in performing blocking actions.
If an event callback fails in any way (exits, throws, or raises), it won't
affect anything (the error is caught) but the failure will be logged. Note
that "after" callbacks are not guaranteed to be called as, for example, a link
may break before they've been called.
#### "Before" clause
When the first argument to an event callback is `:start`, the signature of
that callback is:
event_callback(:start, compile_metadata, runtime_metadata)
where:
* `compile_metadata` is a map of compile-time metadata about the environment
where `instrument/3` has been called. It contains the module where the
instrumentation is happening (under the `:module` key), the file and line
(`:file` and `:line`), and the function inside which the instrumentation
is happening (under `:function`). This information can be used arbitrarily
by the callback.
* `runtime_metadata` is a map of runtime data that the instrumentation
passes to the callbacks. This can be used for any purposes: for example,
when instrumenting the rendering of a view, the name of the view could be
passed in these runtime data so that instrumenters know which view is
being rendered (`instrument(:view_render, %{view: "index.html"}, fn
...)`).
#### "After" clause
When the first argument to an event callback is `:stop`, the signature of that
callback is:
event_callback(:stop, time_diff, result_of_before_callback)
where:
* `time_diff` is an integer representing the time it took to execute the
instrumented function **in microseconds**.
* `result_of_before_callback` is the return value of the "before" clause of
the same `event_callback`. This is a means of passing data from the
"before" clause to the "after" clause when instrumenting. For example, an
instrumenter can implement custom time measuring with this:
defmodule MyInstrumenter do
def event_callback(:start, _compile, _runtime) do
:erlang.monotonic_time(:micro_seconds)
end
def event_callback(:stop, _time_diff, start_time) do
stop_time = :erlang.monotonic_time(:micro_seconds)
do_something_with_diff(stop_time - start_time)
end
end
The return value of each "before" event callback will be stored and passed to
the corresponding "after" callback.
### Using instrumentation
Each Phoenix endpoint defines its own `instrument/3` macro. This macro is
called like this:
require MyApp.Endpoint
MyApp.Endpoint.instrument :render_view, %{view: "index.html"}, fn ->
# actual view rendering
end
All the instrumenter modules that export a `render_view/3` function will be
notified of the event so that they can perform their respective actions.
### Phoenix default events
By default, Phoenix instruments the following events:
* `:phoenix_controller_call` - it's the whole controller pipeline. No
runtime metadata is passed to the instrumentation here.
* `:phoenix_controller_render` - the rendering of a view from a
controller. The map of runtime metadata passed to instrumentation
callbacks has the `:template` key - for the name of the template, e.g.,
`"index.html"` - and the `:format` key - for the format of the template.
### Dynamic instrumentation
If you want to instrument a piece of code but the endpoint that should
instrument it (the one that contains the `instrument/3` macro you want to use)
is not known at compile time, but only at runtime, then you can use the
`Phoenix.Endpoint.instrument/4` macro. Refer to its documentation for more
information.
"""
alias Phoenix.Endpoint.Adapter
@doc false
defmacro __using__(opts) do
quote do
unquote(config(opts))
unquote(pubsub())
unquote(plug())
unquote(server())
end
end
defp config(opts) do
quote do
@otp_app unquote(opts)[:otp_app] || raise "endpoint expects :otp_app to be given"
var!(config) = Adapter.config(@otp_app, __MODULE__)
var!(code_reloading?) = var!(config)[:code_reloader]
# Avoid unused variable warnings
_ = var!(code_reloading?)
end
end
defp pubsub() do
quote do
@pubsub_server var!(config)[:pubsub][:name] ||
(if var!(config)[:pubsub][:adapter] do
raise ArgumentError, "an adapter was given to :pubsub but no :name was defined, " <>
"please pass the :name option accordingly"
end)
def __pubsub_server__, do: @pubsub_server
def subscribe(pid, topic, opts \\ []) do
Phoenix.PubSub.subscribe(@pubsub_server, pid, topic, opts)
end
def unsubscribe(pid, topic) do
Phoenix.PubSub.unsubscribe(@pubsub_server, pid, topic)
end
def broadcast_from(from, topic, event, msg) do
Phoenix.Channel.Server.broadcast_from(@pubsub_server, from, topic, event, msg)
end
def broadcast_from!(from, topic, event, msg) do
Phoenix.Channel.Server.broadcast_from!(@pubsub_server, from, topic, event, msg)
end
def broadcast(topic, event, msg) do
Phoenix.Channel.Server.broadcast(@pubsub_server, topic, event, msg)
end
def broadcast!(topic, event, msg) do
Phoenix.Channel.Server.broadcast!(@pubsub_server, topic, event, msg)
end
end
end
defp plug() do
quote location: :keep do
@behaviour Plug
import Phoenix.Endpoint
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
Module.register_attribute(__MODULE__, :phoenix_sockets, accumulate: true)
if force_ssl = Phoenix.Endpoint.__force_ssl__(__MODULE__, var!(config)) do
plug Plug.SSL, force_ssl
end
def init(opts) do
opts
end
def call(conn, _opts) do
phoenix_pipeline(conn)
end
if var!(config)[:debug_errors] do
use Plug.Debugger, otp_app: @otp_app
end
# Compile after the debugger so we properly wrap it.
@before_compile Phoenix.Endpoint
@phoenix_render_errors var!(config)[:render_errors]
defoverridable [init: 1, call: 2]
end
end
defp server() do
quote location: :keep, unquote: false do
@doc """
Starts the endpoint supervision tree.
"""
def start_link do
Adapter.start_link(@otp_app, __MODULE__)
end
@doc """
Returns the endpoint configuration for `key`
Returns `default` if the key does not exist.
"""
def config(key, default \\ nil) do
case :ets.lookup(__MODULE__, key) do
[{^key, val}] -> val
[] -> default
end
end
@doc """
Reloads the configuration given the application environment changes.
"""
def config_change(changed, removed) do
Phoenix.Endpoint.Adapter.config_change(__MODULE__, changed, removed)
end
@doc """
Generates the endpoint base URL without any path information.
It uses the configuration under `:url` to generate such.
"""
def url do
Phoenix.Config.cache(__MODULE__,
:__phoenix_url__,
&Phoenix.Endpoint.Adapter.url/1)
end
@doc """
Generates the static URL without any path information.
It uses the configuration under `:static_url` to generate
such. It falls back to `:url` if `:static_url` is not set.
"""
def static_url do
Phoenix.Config.cache(__MODULE__,
:__phoenix_static_url__,
&Phoenix.Endpoint.Adapter.static_url/1)
end
@doc """
Generates the endpoint base URL but as a `URI` struct.
It uses the configuration under `:url` to generate such.
Useful for manipulating the url data and passing to
URL helpers.
"""
def struct_url do
Phoenix.Config.cache(__MODULE__,
:__phoenix_struct_url__,
&Phoenix.Endpoint.Adapter.struct_url/1)
end
@doc """
Generates the path information when routing to this endpoint.
"""
script_name = var!(config)[:url][:path]
if script_name == "/" do
def path(path), do: path
defp put_script_name(conn) do
conn
end
else
def path(path), do: unquote(script_name) <> path
defp put_script_name(conn) do
put_in conn.script_name, unquote(Plug.Router.Utils.split(script_name))
end
end
# The static path should be properly scoped according to
# the static_url configuration. If one is not available,
we fall back to the url configuration as in the adapter.
static_script_name = (var!(config)[:static_url] || var!(config)[:url])[:path] || "/"
static_script_name = if static_script_name == "/", do: "", else: static_script_name
@doc """
Generates a route to a static file in `priv/static`.
"""
def static_path(path) do
# This should be in sync with the endpoint warmup.
unquote(static_script_name) <>
Phoenix.Config.cache(__MODULE__, {:__phoenix_static__, path},
&Phoenix.Endpoint.Adapter.static_path(&1, path))
end
end
end
@doc false
def __force_ssl__(module, config) do
if force_ssl = config[:force_ssl] do
force_ssl = Keyword.put_new(force_ssl, :host, config[:url][:host] || "localhost")
if force_ssl[:host] == "localhost" do
IO.puts :stderr, """
warning: you have enabled :force_ssl but your host is currently set to localhost.
Please configure your endpoint url host properly:
config #{inspect module}, url: [host: "YOURHOST.com"]
"""
end
force_ssl
end
end
@doc false
defmacro __before_compile__(env) do
sockets = Module.get_attribute(env.module, :phoenix_sockets)
plugs = Module.get_attribute(env.module, :plugs)
{conn, body} = Plug.Builder.compile(env, plugs, [])
otp_app = Module.get_attribute(env.module, :otp_app)
instrumentation = Phoenix.Endpoint.Instrument.definstrument(otp_app, env.module)
quote do
defoverridable [call: 2]
# Inline render errors so we set the endpoint before calling it.
def call(conn, opts) do
conn = put_in conn.secret_key_base, config(:secret_key_base)
conn =
conn
|> Plug.Conn.put_private(:phoenix_endpoint, __MODULE__)
|> put_script_name()
try do
super(conn, opts)
catch
kind, reason ->
Phoenix.Endpoint.RenderErrors.__catch__(conn, kind, reason, @phoenix_render_errors)
end
end
defp phoenix_pipeline(unquote(conn)), do: unquote(body)
@doc """
Returns all sockets configured in this endpoint.
"""
def __sockets__, do: unquote(sockets)
unquote(instrumentation)
end
end
## API
@doc """
Stores a plug to be executed as part of the pipeline.
"""
defmacro plug(plug, opts \\ []) do
quote do
@plugs {unquote(plug), unquote(opts), true}
end
end
@doc """
Defines a mount-point for a Socket module to handle channel definitions.
## Examples
socket "/ws", MyApp.UserSocket
socket "/ws/admin", MyApp.AdminUserSocket
By default, the given path is a websocket upgrade endpoint,
with long-polling fallback. The transports can be configured
within the Socket handler. See `Phoenix.Socket` for more information
on defining socket handlers.
"""
defmacro socket(path, module) do
# Tear the alias to simply store the root in the AST.
# This will make Elixir unable to track the dependency
# between endpoint <-> socket and avoid recompiling the
# endpoint (alongside the whole project) whenever the
# socket changes.
module = tear_alias(module)
quote do
@phoenix_sockets {unquote(path), unquote(module)}
end
end
@doc """
Instruments the given function using the instrumentation provided by
the given endpoint.
To specify the endpoint that will provide instrumentation, the first argument
can be:
* a module name - the endpoint itself
* a `Plug.Conn` struct - this macro will look for the endpoint module in the
`:private` field of the connection; if it's not there, `fun` will be
executed with no instrumentation
* a `Phoenix.Socket` struct - this macro will look for the endpoint module in the
`:endpoint` field of the socket; if it's not there, `fun` will be
executed with no instrumentation
Usually, users should prefer to instrument events using the `instrument/3`
macro defined in every Phoenix endpoint. This macro should only be used for
cases when the endpoint is dynamic and not known at compile time.
## Examples
endpoint = MyApp.Endpoint
Phoenix.Endpoint.instrument endpoint, :render_view, fn -> ... end
"""
defmacro instrument(endpoint_or_conn_or_socket, event, runtime \\ Macro.escape(%{}), fun) do
compile = Phoenix.Endpoint.Instrument.strip_caller(__CALLER__) |> Macro.escape()
quote do
case Phoenix.Endpoint.Instrument.extract_endpoint(unquote(endpoint_or_conn_or_socket)) do
nil -> unquote(fun).()
endpoint -> endpoint.instrument(unquote(event), unquote(compile), unquote(runtime), unquote(fun))
end
end
end
@doc """
Checks if Endpoint's web server has been configured to start.
* `otp_app` - The otp app running the endpoint, for example `:my_app`
* `endpoint` - The endpoint module, for example `MyApp.Endpoint`
## Examples
iex> Phoenix.Endpoint.server?(:my_app, MyApp.Endpoint)
true
"""
def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do
Adapter.server?(otp_app, endpoint)
end
defp tear_alias({:__aliases__, meta, [h|t]}) do
alias = {:__aliases__, meta, [h]}
quote do
Module.concat([unquote(alias)|unquote(t)])
end
end
defp tear_alias(other), do: other
end
|
lib/phoenix/endpoint.ex
| 0.925006
| 0.524577
|
endpoint.ex
|
starcoder
|
defmodule Config.Provider do
@moduledoc """
Specifies a provider API that loads configuration during boot.
Config providers are typically used during releases to load
external configuration while the system boots. This is done
by starting the VM with the minimum amount of applications
running, then invoking all of the providers, and then
restarting the system. This requires a mutable configuration
file on disk, as the results of the providers are written to
the file system. For more information on runtime configuration,
see `mix release`.
## Multiple config files
One common use of config providers is to specify multiple
configuration files in a release. Elixir ships with one provider,
called `Config.Reader`, which is capable of handling Elixir's
built-in config files.
For example, imagine you want to list some basic configuration
on Mix's built-in `config/runtime.exs` file, but you also want
to support additional configuration files. To do so, you can add
this inside the `def project` portion of your `mix.exs`:
releases: [
demo: [
config_providers: [
{Config.Reader, {:system, "RELEASE_ROOT", "/extra_config.exs"}}
]
]
]
You can place this `extra_config.exs` file in your release in
multiple ways:
1. If it is available on the host when assembling the release,
you can place it on "rel/overlays/extra_config.exs" and it
will be automatically copied to the release root
2. If it is available on the target during deployment, you can
simply copy it to the release root as a step in your deployment
Now once the system boots, it will load both `config/runtime.exs`
and `extra_config.exs` early in the boot process. You can learn
more options on `Config.Reader`.
## Custom config provider
You can also implement custom config providers, similar to how
`Config.Reader` works. For example, imagine you need to load
some configuration from a JSON file and load that into the system.
Said configuration provider would look like:
defmodule JSONConfigProvider do
@behaviour Config.Provider
# Let's pass the path to the JSON file as config
@impl true
def init(path) when is_binary(path), do: path
@impl true
def load(config, path) do
# We need to start any app we may depend on.
{:ok, _} = Application.ensure_all_started(:jason)
json = path |> File.read!() |> Jason.decode!()
Config.Reader.merge(
config,
my_app: [
some_value: json["my_app_some_value"],
another_value: json["my_app_another_value"],
]
)
end
end
Then, when specifying your release, you can specify the provider in
the release configuration:
releases: [
demo: [
config_providers: [
{JSONConfigProvider, "/etc/config.json"}
]
]
]
"""
@type config :: keyword
@type state :: term
@typedoc """
A path pointing to a configuration file.
Since configuration files are often accessed on target machines,
it can be expressed either as:
* a binary representing an absolute path
* a `{:system, system_var, path}` tuple where the config is the
concatenation of the environment variable `system_var` with
the given `path`
"""
@type config_path :: {:system, binary(), binary()} | binary()
@doc """
Invoked when initializing a config provider.
A config provider is typically initialized on the machine
where the system is assembled and not on the target machine.
The `c:init/1` callback is useful to verify the arguments
given to the provider and prepare the state that will be
given to `c:load/2`.
Furthermore, because the state returned by `c:init/1` can
be written to text-based config files, it should be
restricted only to simple data types, such as integers,
strings, atoms, tuples, maps, and lists. Entries such as
PIDs, references, and functions cannot be serialized.
"""
@callback init(term) :: state
@doc """
Loads configuration (typically during system boot).
It receives the current `config` and the `state` returned by
`c:init/1`. Then, you typically read the extra configuration
from an external source and merge it into the received `config`.
Merging should be done with `Config.Reader.merge/2`, as it
performs deep merge. It should return the updated config.
Note that `c:load/2` is typically invoked very early in the
boot process, therefore if you need to use an application
in the provider, it is your responsibility to start it.
"""
@callback load(config, state) :: config
@doc false
defstruct [
:providers,
:config_path,
extra_config: [],
prune_runtime_sys_config_after_boot: false,
reboot_system_after_config: false,
validate_compile_env: false
]
@reserved_apps [:kernel, :stdlib]
@doc """
Validates a `t:config_path/0`.
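Both accepted shapes validate cleanly, for instance:
    iex> Config.Provider.validate_config_path!({:system, "RELEASE_ROOT", "/runtime.exs"})
    :ok
    iex> Config.Provider.validate_config_path!("/etc/app/runtime.exs")
    :ok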
"""
@doc since: "1.9.0"
@spec validate_config_path!(config_path) :: :ok
def validate_config_path!({:system, name, path})
when is_binary(name) and is_binary(path),
do: :ok
def validate_config_path!(path) do
if is_binary(path) and Path.type(path) != :relative do
:ok
else
raise ArgumentError, """
expected configuration path to be:
* a binary representing an absolute path
* a tuple {:system, system_var, path} where the config is the \
concatenation of the `system_var` with the given `path`
Got: #{inspect(path)}
"""
end
end
@doc """
Resolves a `t:config_path/0` to an actual path.
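A binary resolves to itself, while the `{:system, var, path}` form prepends
the value of the environment variable `var`:
    iex> Config.Provider.resolve_config_path!("/opt/app/runtime.exs")
    "/opt/app/runtime.exs"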
"""
@doc since: "1.9.0"
@spec resolve_config_path!(config_path) :: binary
def resolve_config_path!(path) when is_binary(path), do: path
def resolve_config_path!({:system, name, path}), do: System.fetch_env!(name) <> path
# Private keys
@init_key :config_provider_init
@booted_key :config_provider_booted
# Public keys
@reboot_mode_key :config_provider_reboot_mode
@doc false
def init(providers, config_path, opts \\ []) when is_list(providers) and is_list(opts) do
validate_config_path!(config_path)
providers = for {provider, init} <- providers, do: {provider, provider.init(init)}
init = struct!(%Config.Provider{config_path: config_path, providers: providers}, opts)
[elixir: [{@init_key, init}]]
end
@doc false
def boot(reboot_fun \\ &restart_and_sleep/0) do
# The config provider typically runs very early in the
# release process, so we need to make sure Elixir is started
# before we go around running Elixir code.
{:ok, _} = :application.ensure_all_started(:elixir)
case Application.fetch_env(:elixir, @booted_key) do
{:ok, {:booted, path}} ->
path && File.rm(path)
with {:ok, %Config.Provider{} = provider} <- Application.fetch_env(:elixir, @init_key) do
maybe_validate_compile_env(provider)
end
:booted
_ ->
case Application.fetch_env(:elixir, @init_key) do
{:ok, %Config.Provider{} = provider} ->
path = resolve_config_path!(provider.config_path)
reboot_config = [elixir: [{@booted_key, booted_value(provider, path)}]]
boot_providers(path, provider, reboot_config, reboot_fun)
_ ->
:skip
end
end
end
defp boot_providers(path, provider, reboot_config, reboot_fun) do
original_config = read_config!(path)
config =
original_config
|> Config.__merge__(provider.extra_config)
|> run_providers(provider)
if provider.reboot_system_after_config do
config
|> Config.__merge__(reboot_config)
|> write_config!(path)
reboot_fun.()
else
for app <- @reserved_apps, config[app] != original_config[app] do
abort("""
Cannot configure #{inspect(app)} because :reboot_system_after_config has been set \
to false and #{inspect(app)} has already been loaded, meaning any further \
configuration won't have an effect.
The configuration for #{inspect(app)} before config providers was:
#{inspect(original_config[app])}
The configuration for #{inspect(app)} after config providers was:
#{inspect(config[app])}
""")
end
_ = Application.put_all_env(config, persistent: true)
maybe_validate_compile_env(provider)
:ok
end
end
defp maybe_validate_compile_env(provider) do
with [_ | _] = compile_env <- provider.validate_compile_env do
validate_compile_env(compile_env)
end
end
@doc false
def validate_compile_env(compile_env, ensure_loaded? \\ true) do
for {app, [key | path], compile_return} <- compile_env,
ensure_app_loaded?(app, ensure_loaded?) do
try do
traverse_env(Application.fetch_env(app, key), path)
rescue
e ->
abort("""
application #{inspect(app)} failed reading its compile environment #{path(key, path)}:
#{Exception.format(:error, e, __STACKTRACE__)}
Expected it to match the compile time value of #{return_to_text(compile_return)}.
#{compile_env_tips(app)}
""")
else
^compile_return ->
:ok
runtime_return ->
abort("""
the application #{inspect(app)} has a different value set #{path(key, path)} \
during runtime compared to compile time. Since this application environment entry was \
marked as compile time, this difference can lead to different behaviour than expected:
* Compile time value #{return_to_text(compile_return)}
* Runtime value #{return_to_text(runtime_return)}
#{compile_env_tips(app)}
""")
end
end
:ok
end
defp ensure_app_loaded?(app, true), do: Application.ensure_loaded(app) == :ok
defp ensure_app_loaded?(app, false), do: Application.spec(app, :vsn) != nil
defp path(key, []), do: "for key #{inspect(key)}"
defp path(key, path), do: "for path #{inspect(path)} inside key #{inspect(key)}"
defp compile_env_tips(app),
do: """
To fix this error, you might:
* Make the runtime value match the compile time one
* Recompile your project. If the misconfigured application is a dependency, \
you may need to run "mix deps.compile #{app} --force"
* Alternatively, you can disable this check. If you are using releases, you can \
set :validate_compile_env to false in your release configuration. If you are \
using Mix to start your system, you can pass the --no-validate-compile-env flag
"""
defp return_to_text({:ok, value}), do: "was set to: #{inspect(value)}"
defp return_to_text(:error), do: "was not set"
defp traverse_env(return, []), do: return
defp traverse_env(:error, _paths), do: :error
defp traverse_env({:ok, value}, [key | keys]), do: traverse_env(Access.fetch(value, key), keys)
@compile {:no_warn_undefined, {:init, :restart, 1}}
defp restart_and_sleep() do
mode = Application.get_env(:elixir, @reboot_mode_key)
if mode in [:embedded, :interactive] do
:init.restart(mode: mode)
else
:init.restart()
end
Process.sleep(:infinity)
end
defp booted_value(%{prune_runtime_sys_config_after_boot: true}, path), do: {:booted, path}
defp booted_value(%{prune_runtime_sys_config_after_boot: false}, _path), do: {:booted, nil}
defp read_config!(path) do
case :file.consult(path) do
{:ok, [inner]} ->
inner
{:error, reason} ->
bad_path_abort(
"Could not read runtime configuration due to reason: #{inspect(reason)}",
path
)
end
end
defp run_providers(config, %{providers: providers}) do
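# Providers run in order; each one receives the config accumulated so far
# and must return an updated keyword list.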
Enum.reduce(providers, config, fn {provider, state}, acc ->
try do
provider.load(acc, state)
catch
kind, error ->
IO.puts(:stderr, "ERROR! Config provider #{inspect(provider)} failed with:")
IO.puts(:stderr, Exception.format(kind, error, __STACKTRACE__))
:erlang.raise(kind, error, __STACKTRACE__)
else
term when is_list(term) ->
term
term ->
abort("Expected provider #{inspect(provider)} to return a list, got: #{inspect(term)}")
end
end)
end
defp write_config!(config, path) do
contents = :io_lib.format("%% coding: utf-8~n~tw.~n", [config])
case File.write(path, IO.chardata_to_string(contents)) do
:ok ->
:ok
{:error, reason} ->
bad_path_abort(
"Could not write runtime configuration due to reason: #{inspect(reason)}",
path
)
end
end
defp bad_path_abort(msg, path) do
abort(
msg <>
". Please make sure #{inspect(path)} is writable and accessible " <>
"or choose a different path"
)
end
defp abort(msg) do
IO.puts("ERROR! " <> msg)
:erlang.raise(:error, "aborting boot", [{Config.Provider, :boot, 2, []}])
end
end
|
lib/elixir/lib/config/provider.ex
| 0.889123
| 0.558989
|
provider.ex
|
starcoder
|
defmodule Blogit.RepositoryProvider do
@moduledoc """
A behaviour module for implementing access to remote or local repository
containing files which can be used as a source for a blog and its posts.
A provider to a repository should be able to check if files exist in it,
if files were updated or deleted, to check the author of a file and its
dates of creation and last update. Also it should provide a way to read
a file and its meta data.
A repository provider can be set for the Blogit OTP application using the
configuration key `:repository_provider`. By default it is
`Blogit.RepositoryProviders.Git`.
An example of implementing this behaviour could be a local folder.
When files are added, modified or removed,
`Blogit.RepositoryProvider.fetch/1` should include the paths of these files
in its result. A file's filesystem metadata can provide its creation and
last update dates, and the author of the file could be its owner.
The `Blogit.RepositoryProvider` struct could contain absolute path
to the parent folder of the folder representing the repository and the
`Blogit.RepositoryProvider.local_path/0` could return its name.
For now `Blogit` comes with two implementations.
`Blogit.RepositoryProviders.Git` provides access to a Git repository and is the
default provider if none is specified in the configuration.
`Blogit.RepositoryProviders.Memory` provides access to an in-memory repository,
which can be used (and is used) mainly for testing purposes.
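A rough sketch of such a folder-backed provider (the module name, root folder
and author value are illustrative, not part of Blogit):
```elixir
defmodule MyBlog.FolderProvider do
  @behaviour Blogit.RepositoryProvider

  @root "/var/my_blog_data"

  def repository, do: @root
  def local_path, do: @root

  # A plain folder has no notion of remote updates.
  def fetch(_repo), do: {:no_updates}

  def list_files(folder \\ ""), do: Path.wildcard(Path.join([@root, folder, "**"]))

  def file_in?(file_path), do: File.exists?(Path.join(@root, file_path))

  def file_info(_repo, file_path) do
    stat = File.stat!(Path.join(@root, file_path))

    %{
      "author" => "owner",
      "created_at" => stat.ctime |> NaiveDateTime.from_erl!() |> NaiveDateTime.to_iso8601(),
      "updated_at" => stat.mtime |> NaiveDateTime.from_erl!() |> NaiveDateTime.to_iso8601()
    }
  end

  def read_file(file_path, folder \\ ""), do: File.read(Path.join([@root, folder, file_path]))
end
```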
"""
@type repository :: term
@type provider :: module
@type fetch_result :: {:no_updates} | {:updates, [String.t()]}
@type timestamp :: String.t()
@type file_path :: String.t()
@type folder :: String.t()
@type file_read_result :: {:ok, binary} | {:error, File.posix()}
@type t :: %__MODULE__{repo: repository, provider: provider}
@enforce_keys :provider
defstruct [:repo, :provider]
@doc """
Invoked to get a representation value of the repository the provider manages.
The actual data represented by this struct should be updated to its
newest version first.
If, for example, the repository is remote, all the files in it should be
downloaded so their most recent versions are accessible.
This structure can be passed to other callbacks in order to manage files
in the repository.
"""
@callback repository() :: repository
@doc """
Invoked to update the data represented by the given `repository` to its most
recent version.
If, for example, the repository is remote, all the files in it should be
downloaded so their most recent versions are accessible.
Returns `{:no_updates}` when nothing has changed, or the changed file paths
in the form of the tuple `{:updates, list_of_paths}`. These should be the
paths of deleted, updated or newly created files.
"""
@callback fetch(repository) :: fetch_result
@doc """
Invoked to get the path to the locally downloaded data. If the repository
is remote, it should have a local copy available at this path.
"""
@callback local_path() :: String.t()
@doc """
Invoked to get a list of file paths for the set of files contained in the locally
downloaded repository.
"""
@callback list_files(folder) :: [file_path]
@doc """
Checks if a file path is contained in the local version of the repository.
"""
@callback file_in?(file_path) :: boolean
@doc """
Returns file information for the file located at the given `file_path` in
the given `repository`. The result should be in the form of a map and should
be structured like this:
```
%{
"author" => the-file-author,
"created_at" => the-date-the-file-was-created-in-iso-8601-format,
"updated_at" => the-date-of-the-last-update-of-the-file-in-iso-8601-format
}
```
"""
@callback file_info(repository, file_path) :: %{atom => String.t() | timestamp}
@doc """
Invoked in order to read the contents of the file located at the given
`file_path`.
The second parameter can be a path to a folder relative to
`Blogit.RepositoryProvider.local_path/0` in which the given `file_path` should
exist.
"""
@callback read_file(file_path, folder) :: file_read_result
end
|
lib/blogit/repository_provider.ex
| 0.813868
| 0.775137
|
repository_provider.ex
|
starcoder
|
defmodule Cassette.User do
@moduledoc """
This is the struct that represents the user returned by a Validation request
"""
alias Cassette.Config
alias Cassette.User
defstruct login: "", type: "", attributes: %{}, authorities: MapSet.new([])
@type t :: %__MODULE__{login: String.t(), attributes: map()}
@doc """
Initializes a `Cassette.User` struct, mapping the list of authorities to its
internal representation
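For example (the login and authority values are illustrative):
```elixir
iex> user = Cassette.User.new("john.doe", ["ACME_ADMIN"])
iex> Cassette.User.raw_role?(user, "ACME_ADMIN")
true
```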
"""
@spec new(String.t(), [String.t()]) :: User.t()
def new(login, authorities) do
new(login, "", authorities)
end
@doc """
Initializes a `Cassette.User` struct, with a `type` attribute and mapping the
list of authorities to its internal representation
"""
@spec new(String.t(), String.t(), [String.t()]) :: User.t()
def new(login, type, authorities) do
%User{login: login, type: String.downcase(type), authorities: MapSet.new(authorities)}
end
@doc """
Initializes a `Cassette.User` struct, with a `type` attribute, mapping the
list of authorities, and any extra attribute returned by the server
"""
@spec new(String.t(), String.t(), [String.t()], map()) :: User.t()
def new(login, type, authorities, attributes) do
%User{
login: login,
type: String.downcase(type),
attributes: attributes,
authorities: MapSet.new(authorities)
}
end
@doc """
Tests if the user has the given `role` respecting the `base_authority` set in
the default configuration
If your `base_authority` is `ACME` and the user has the `ACME_ADMIN`
authority, then the following is true:
```elixir
iex> Cassette.User.role?(some_user, "ADMIN")
true
iex> Cassette.User.has_role?(some_user, "ADMIN")
true
```
This function returns `false` when the given user is not a `Cassette.User.t`
"""
@spec role?(User.t() | any, String.t() | any) :: boolean
def role?(user = %User{}, role) do
User.role?(user, Config.default(), role)
end
def role?(_, _), do: false
defdelegate has_role?(user, role), to: __MODULE__, as: :role?
@doc """
Tests if the user has the given `role` using the `base_authority` set in the
given configuration
If you are using a custom `Cassette.Support` server you can use this function
to respect its `base_authority`
This function returns `false` when the given user is not a `Cassette.User.t`
"""
@spec role?(User.t() | any, Config.t() | any, String.t() | any) :: boolean
def role?(user = %User{}, %Config{base_authority: base}, role) do
User.raw_role?(user, to_raw_role(base, role))
end
def role?(_, _, _), do: false
defdelegate has_role?(user, config, role), to: __MODULE__, as: :role?
@doc """
Tests if the user has the given `role`.
This function does not alter the role when checking against the list of
authorities.
If your user has the `ACME_ADMIN` authority the following is true:
```elixir
iex> Cassette.User.raw_role?(some_user, "ACME_ADMIN")
true
iex> Cassette.User.has_raw_role?(some_user, "ACME_ADMIN")
true
```
This function returns `false` when the given user is not a `Cassette.User.t`
"""
@spec raw_role?(User.t() | any, String.t() | any) :: boolean
def raw_role?(%User{authorities: authorities}, raw_role) do
MapSet.member?(authorities, String.upcase(to_string(raw_role)))
end
def raw_role?(_, _), do: false
defdelegate has_raw_role?(user, role), to: __MODULE__, as: :raw_role?
@spec to_raw_role(String.t() | nil, String.t()) :: String.t()
defp to_raw_role(base, role) do
[base, role]
|> Enum.reject(&is_nil/1)
|> Enum.join("_")
end
end
|
lib/cassette/user.ex
| 0.851876
| 0.655357
|
user.ex
|
starcoder
|
defmodule Exshape do
@moduledoc """
This module just contains helper functions for working with zip
archives. If you have a stream of bytes that you want to parse
directly, use the Shp or Dbf modules to parse.
"""
alias Exshape.{Dbf, Shp}
defp open_file(c, size), do: File.stream!(c, [], size)
defp zip(nil, nil), do: []
defp zip(nil, d), do: Dbf.read(d)
defp zip(s, nil), do: Shp.read(s)
defp zip(s, d), do: Stream.zip(Shp.read(s), Dbf.read(d))
defp unzip!(path, cwd, false), do: :zip.extract(to_charlist(path), cwd: cwd)
defp unzip!(path, cwd, true) do
{_, 0} = System.cmd("unzip", [path, "-d", to_string(cwd)])
end
def keep_file?({:zip_file, charlist, _, _, _, _}) do
filename = :binary.list_to_bin(charlist)
not String.starts_with?(filename, "__MACOSX") and not String.starts_with?(filename, ".")
end
def keep_file?(_), do: false
defmodule Filesystem do
@moduledoc """
An abstraction over a filesystem. The `list` field contains
a function that returns a list of filenames, and the `stream`
function takes one of those filenames and returns a stream of
binaries.
"""
@enforce_keys [:list, :stream]
defstruct @enforce_keys
end
@doc """
Given a zip file path, unzip it and open streams for the underlying
shape data.
Returns a list of all the layers, where each layer is a tuple of layer name,
projection, and the stream of features
By default this unzips to `/tmp/exshape_some_uuid`. Make sure
to clean up when you're done consuming the stream. Pass the `:working_dir`
option to change this destination.
By default this reads in 1024 * 1024 byte chunks. Pass the `:read_size`
option to change this.
By default this shells out to the `unzip` system command; to use the built-in Erlang
one, pass `unzip_shell: false`. The default behavior is to use the system one because
the Erlang one tends to not support as many formats.
```
[{layer_name, projection, feature_stream}] = Exshape.from_zip("single_layer.zip")
```
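The options can be combined; a sketch with illustrative paths:
```
Exshape.from_zip("layers.zip",
  working_dir: "/tmp/my_scratch",
  read_size: 1024 * 64,
  unzip_shell: false)
```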
"""
@type projection :: String.t
@type layer_name :: String.t
@type layer :: {layer_name, projection, Stream.t}
@spec from_zip(String.t) :: [layer]
def from_zip(path, opts \\ []) do
cwd = Keyword.get(opts, :working_dir, '/tmp/exshape_#{UUID.uuid4}')
size = Keyword.get(opts, :read_size, 1024 * 1024)
with {:ok, files} <- :zip.table(String.to_charlist(path)) do
from_filesystem(
%Filesystem{
list: fn -> files end,
stream: fn file ->
if !File.exists?(Path.join(cwd, file)) do
File.mkdir_p!(cwd)
unzip!(path, cwd, Keyword.get(opts, :unzip_shell, true))
end
open_file(Path.join(cwd, file), size)
end
})
end
end
@spec from_filesystem(Filesystem.t) :: [layer]
def from_filesystem(fs) do
fs.list.()
|> Enum.filter(&keep_file?/1)
|> Enum.map(fn {:zip_file, filename, _, _, _, _} -> filename end)
|> Enum.group_by(&Path.rootname/1)
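# Files that share a base name (.shp, .dbf, .prj) form one layer; layers
# missing either the .shp or .dbf component are dropped below.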
|> Enum.flat_map(fn {root, components} ->
prj = Enum.find(components, fn c -> extension_equals(c, ".prj") end)
shp = Enum.find(components, fn c -> extension_equals(c, ".shp") end)
dbf = Enum.find(components, fn c -> extension_equals(c, ".dbf") end)
if !is_nil(shp) && !is_nil(dbf) do
[{
root,
List.to_string(shp),
List.to_string(dbf),
prj && List.to_string(prj)
}]
else
[]
end
end)
|> Enum.map(fn {root, shp, dbf, prj} ->
prj_contents = prj && (fs.stream.(prj) |> Enum.join)
# zip up the unzipped shp and dbf components
stream = zip(
shp && fs.stream.(shp),
dbf && fs.stream.(dbf)
)
{Path.basename(root), prj_contents, stream}
end)
end
defp extension_equals(path, wanted_ext) do
case Path.extname(path) do
nil -> false
ext -> String.downcase(ext) == wanted_ext
end
end
end
|
lib/exshape.ex
| 0.754915
| 0.839537
|
exshape.ex
|
starcoder
|
defmodule Paranoid.Ecto do
import Ecto.Query
@moduledoc """
Module for interacting with an Ecto Repo that leverages soft delete functionality.
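A minimal sketch of wiring it up (`MyApp.Repo` and the `User` schema are
illustrative):
    defmodule MyApp.Paranoid do
      use Paranoid.Ecto, repo: MyApp.Repo
    end

    # delete/1 soft-deletes rows whose schema defines a :deleted_at column.
    MyApp.Paranoid.delete(user)

    # Reads exclude soft-deleted rows unless :include_deleted is passed.
    MyApp.Paranoid.all(User, include_deleted: true)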
"""
defmacro __using__(opts) do
verify_ecto_dep()
if repo = Keyword.get(opts, :repo) do
quote do
def all(queryable, opts \\ []) do
unquote(repo)
|> apply(:all, [update_queryable(queryable, opts), opts])
end
def stream(queryable, opts \\ []) do
unquote(repo)
|> apply(:stream, [update_queryable(queryable, opts), opts])
end
def get(queryable, id, opts \\ []) do
unquote(repo)
|> apply(:get, [update_queryable(queryable, opts), id, opts])
end
def get!(queryable, id, opts \\ []) do
unquote(repo)
|> apply(:get!, [update_queryable(queryable, opts), id, opts])
end
def get_by(queryable, clauses, opts \\ []) do
unquote(repo)
|> apply(:get_by, [update_queryable(queryable, opts), clauses, opts])
end
def get_by!(queryable, clauses, opts \\ []) do
unquote(repo)
|> apply(:get_by!, [update_queryable(queryable, opts), clauses, opts])
end
def one(queryable, opts \\ []) do
unquote(repo)
|> apply(:one, [update_queryable(queryable, opts), opts])
end
def one!(queryable, opts \\ []) do
unquote(repo)
|> apply(:one!, [update_queryable(queryable, opts), opts])
end
def aggregate(queryable, aggregate, field, opts \\ []) do
unquote(repo)
|> apply(:aggregate, [update_queryable(queryable, opts), aggregate, field, opts])
end
def insert_all(schema_or_source, entries, opts \\ []) do
unquote(repo)
|> apply(:insert_all, [schema_or_source, entries, opts])
end
def update_all(queryable, updates, opts \\ []) do
unquote(repo)
|> apply(:update_all, [update_queryable(queryable, opts), updates, opts])
end
def delete_all(queryable, opts \\ []) do
delete_all(queryable, opts, has_deleted_column?(queryable))
end
def delete_all(queryable, opts, _has_deleted_column = true) do
unquote(repo)
|> apply(:update_all, [update_queryable(queryable, opts), [set: [deleted_at: DateTime.utc_now()]]])
end
def delete_all(queryable, opts, _no_deleted_column) do
unquote(repo)
|> apply(:delete_all, [queryable, opts])
end
def insert(struct, opts \\ []) do
unquote(repo)
|> apply(:insert, [struct, opts])
end
def insert!(struct, opts \\ []) do
unquote(repo)
|> apply(:insert!, [struct, opts])
end
def update(struct, opts \\ []) do
unquote(repo)
|> apply(:update, [struct, opts])
end
def update!(struct, opts \\ []) do
unquote(repo)
|> apply(:update!, [struct, opts])
end
def insert_or_update(changeset, opts \\ []) do
unquote(repo)
|> apply(:insert_or_update, [changeset, opts])
end
def insert_or_update!(changeset, opts \\ []) do
unquote(repo)
|> apply(:insert_or_update!, [changeset, opts])
end
def delete(struct, opts \\ []) do
delete(struct, opts, has_deleted_column?(struct))
end
def delete(struct, opts, _has_deleted_column = true) do
unquote(repo)
|> apply(:update, [delete_changeset(struct), opts])
end
def delete(struct, opts, _no_deleted_column = false) do
unquote(repo)
|> apply(:delete, [struct, opts])
end
def delete!(struct, opts \\ []) do
delete!(struct, opts, has_deleted_column?(struct))
end
def delete!(struct, opts, _has_deleted_column = true) do
unquote(repo)
|> apply(:update!, [delete_changeset(struct), opts])
end
def delete!(struct, opts, _no_deleted_column = false) do
unquote(repo)
|> apply(:delete!, [struct, opts])
end
def undelete!(struct, opts \\ []) do
unquote(repo)
|> apply(:update!, [undelete_changeset(struct), opts])
end
defp delete_changeset(struct) do
struct.__struct__.paranoid_delete_changeset(struct, %{})
end
defp undelete_changeset(struct) do
struct.__struct__.paranoid_undelete_changeset(struct, %{})
end
def preload(struct_or_structs_or_nil, preloads, opts \\ []) do
unquote(repo)
|> apply(:preload, [struct_or_structs_or_nil, preloads, opts])
end
def load(schema_or_types, data) do
unquote(repo)
|> apply(:load, [schema_or_types, data])
end
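# Rewrites queries to exclude soft-deleted rows whenever the schema defines
# a :deleted_at column, unless the :include_deleted option is passed.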
defp update_queryable(queryable, opts) when is_list(opts) do
queryable
|> update_queryable(Enum.into(opts, %{}))
end
defp update_queryable(queryable, %{include_deleted: true}) do
queryable
end
defp update_queryable(queryable, %{has_deleted_column: false}), do: queryable
defp update_queryable(queryable, %{has_deleted_column: true}) do
queryable
|> where([t], is_nil(t.deleted_at))
end
defp update_queryable(queryable, opts) do
opts = opts |> Map.put(:has_deleted_column, has_deleted_column?(queryable))
queryable
|> update_queryable(opts)
end
defp has_deleted_column?(%{from: %{source: {_, struct}}}) do
struct.__schema__(:fields)
|> Enum.member?(:deleted_at)
end
defp has_deleted_column?(%{} = struct) do
struct.__struct__.__schema__(:fields)
|> Enum.member?(:deleted_at)
end
defp has_deleted_column?(queryable) do
queryable |> Ecto.Queryable.to_query() |> has_deleted_column?()
end
end
else
raise ArgumentError, """
expected :repo to be provided as an option.
Example:
use Paranoid.Ecto, repo: MyApp.Repo
"""
end
end
defp verify_ecto_dep do
unless Code.ensure_loaded?(Ecto) do
raise "Paranoid requires Ecto to be added as a dependency."
end
end
end
|
lib/paranoid/ecto.ex
| 0.637708
| 0.516778
|
ecto.ex
|
starcoder
|
defmodule Slack.Group do
@moduledoc """
Functions for working with private channels (groups)
"""
@base "groups"
use Slack.Request
@doc """
Archive a private channel.
https://api.slack.com/methods/groups.archive
## Examples
Slack.Group.archive(client, channel: "G1234567890")
"""
@spec archive(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :archive
@doc """
Close a private channel.
https://api.slack.com/methods/groups.close
## Examples
Slack.Group.close(client, channel: "G1234567890")
"""
@spec close(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :close
@doc """
Create a private channel.
https://api.slack.com/methods/groups.create
## Examples
Slack.Group.create(client, name: "newchannel")
"""
@spec create(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :create
@doc """
Clone and archive a private channel, creating a new one in its place.
https://api.slack.com/methods/groups.createChild
## Examples
Slack.Group.createChild(client, channel: "G1234567890")
"""
@spec createChild(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :createChild
@doc """
Get the history of a private channel.
https://api.slack.com/methods/groups.history
## Examples
Slack.Group.history(client, channel: "G1234567890")
"""
@spec history(Slack.Client.t, Keyword.t) :: Slack.slack_response
defget :history
@doc """
Get the info of a private channel.
https://api.slack.com/methods/groups.info
## Examples
Slack.Group.info(client, channel: "G1234567890")
"""
@spec info(Slack.Client.t, Keyword.t) :: Slack.slack_response
defget :info
@doc """
Invite a user to a private channel.
https://api.slack.com/methods/groups.invite
## Examples
Slack.Group.invite(client, channel: "G1234567890", user: "U1234567890")
"""
@spec invite(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :invite
@doc """
Kick a user from a private channel.
https://api.slack.com/methods/groups.kick
## Examples
Slack.Group.kick(client, channel: "G1234567890", user: "U1234567890")
"""
@spec kick(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :kick
@doc """
Leave a private channel.
https://api.slack.com/methods/groups.leave
## Examples
Slack.Group.leave(client, channel: "G1234567890")
"""
@spec leave(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :leave
@doc """
List private channels.
https://api.slack.com/methods/groups.list
## Examples
Slack.Group.list(client)
"""
@spec list(Slack.Client.t, Keyword.t) :: Slack.slack_response
defget :list
@doc """
Move the read cursor in a private channel.
https://api.slack.com/methods/groups.mark
## Examples
Slack.Group.mark(client, channel: "G1234567890", ts: 1234567890.123456)
"""
@spec mark(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :mark
@doc """
Open a private channel.
https://api.slack.com/methods/groups.open
## Examples
Slack.Group.open(client, channel: "G1234567890")
"""
@spec open(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :open
@doc """
Rename a private channel.
https://api.slack.com/methods/groups.rename
## Examples
Slack.Group.rename(client, channel: "G1234567890", name: "newname")
"""
@spec rename(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :rename
@doc """
Set the purpose of a private channel.
https://api.slack.com/methods/groups.setPurpose
## Examples
Slack.Group.setPurpose(client, channel: "G1234567890", purpose: "purpose")
"""
@spec setPurpose(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :setPurpose
@doc """
Set the topic of a private channel.
https://api.slack.com/methods/groups.setTopic
## Examples
Slack.Group.setTopic(client, channel: "G1234567890", topic: "topic")
"""
@spec setTopic(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :setTopic
@doc """
Unarchive a private channel.
https://api.slack.com/methods/groups.unarchive
## Examples
Slack.Group.unarchive(client, channel: "G1234567890")
"""
@spec unarchive(Slack.Client.t, Keyword.t) :: Slack.slack_response
defpost :unarchive
end
|
lib/slack/group.ex
| 0.779909
| 0.543166
|
group.ex
|
starcoder
|
defmodule Vivid.Line do
alias Vivid.{Line, Point}
defstruct ~w(origin termination)a
import Vivid.Math
@moduledoc ~S"""
Represents a line segment between two Points in 2D space.
## Example
iex> use Vivid
...> Line.init(Point.init(0,0), Point.init(5,5))
...> |> to_string()
"@@@@@@@@\n" <>
"@@@@@@ @\n" <>
"@@@@@ @@\n" <>
"@@@@ @@@\n" <>
"@@@ @@@@\n" <>
"@@ @@@@@\n" <>
"@ @@@@@@\n" <>
"@@@@@@@@\n"
"""
@opaque t :: %Line{origin: Point.t(), termination: Point.t()}
@doc ~S"""
Create a Line given an `origin` and `termination` point.
## Examples
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(4,4))
%Vivid.Line{origin: %Vivid.Point{x: 1, y: 1}, termination: %Vivid.Point{x: 4, y: 4}}
"""
@spec init(Point.t(), Point.t()) :: Line.t()
def init(%Point{} = origin, %Point{} = termination) do
%Line{origin: origin, termination: termination}
end
@doc """
Create a `Line` from a two-element list of points.
"""
@spec init([Point.t()]) :: Line.t()
def init([o, t]) do
init(o, t)
end
@doc ~S"""
Returns the origin (starting) point of the line segment.
## Example
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(4,4)) |> Vivid.Line.origin
%Vivid.Point{x: 1, y: 1}
"""
@spec origin(Line.t()) :: Point.t()
def origin(%Line{origin: o}), do: o
@doc ~S"""
Returns the termination (ending) point of the line segment.
## Example
iex> use Vivid
...> Line.init(Point.init(1,1), Point.init(4,4))
...> |> Line.termination
#Vivid.Point<{4, 4}>
"""
@spec termination(Line.t()) :: Point.t()
def termination(%Line{termination: t}), do: t
@doc ~S"""
Calculates the absolute X (horizontal) distance between the origin and termination points.
## Example
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(14,4)) |> Vivid.Line.width
13
"""
@spec width(Line.t()) :: number
def width(%Line{} = line), do: abs(x_distance(line))
@doc ~S"""
Calculates the X (horizontal) distance between the origin and termination points.
## Example
iex> Vivid.Line.init(Vivid.Point.init(14,1), Vivid.Point.init(1,4)) |> Vivid.Line.x_distance
-13
"""
@spec x_distance(Line.t()) :: number
def x_distance(%Line{origin: %Point{x: x0}, termination: %Point{x: x1}}), do: x1 - x0
@doc ~S"""
Calculates the absolute Y (vertical) distance between the origin and termination points.
## Example
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(4,14)) |> Vivid.Line.height
13
"""
@spec height(Line.t()) :: number
def height(%Line{} = line), do: abs(y_distance(line))
@doc ~S"""
Calculates the Y (vertical) distance between the origin and termination points.
## Example
iex> Vivid.Line.init(Vivid.Point.init(1,14), Vivid.Point.init(4,1)) |> Vivid.Line.y_distance
-13
"""
@spec y_distance(Line.t()) :: number
def y_distance(%Line{origin: %Point{y: y0}, termination: %Point{y: y1}}), do: y1 - y0
@doc ~S"""
Calculates the straight-line distance between the two ends of the line segment
using Pythagoras' theorem.
## Example
iex> Vivid.Line.init(Vivid.Point.init(1,1), Vivid.Point.init(4,5)) |> Vivid.Line.length
5.0
"""
@spec length(Line.t()) :: number
def length(%Line{} = line) do
dx2 = line |> width |> pow(2)
dy2 = line |> height |> pow(2)
sqrt(dx2 + dy2)
end
@doc """
Returns whether a point is on the line.
## Example
iex> use Vivid
...> Line.init(Point.init(1,1), Point.init(3,1))
...> |> Line.on?(Point.init(2,1))
true
iex> use Vivid
...> Line.init(Point.init(1,1), Point.init(3,1))
...> |> Line.on?(Point.init(2,2))
false
"""
@spec on?(Line.t(), Point.t()) :: boolean
def on?(%Line{origin: origin, termination: termination}, %Point{} = point) do
x_distance_point = point.x - termination.x
y_distance_point = point.y - termination.y
x_distance_origin = origin.x - termination.x
y_distance_origin = origin.y - termination.y
cross_product = x_distance_point * y_distance_origin - x_distance_origin * y_distance_point
cross_product == 0.0
end
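# Worked example of the cross-product test above: for the line (1,1)-(3,1)
# and the point (2,1),
#   cross_product = (2 - 3) * (1 - 1) - (1 - 3) * (1 - 1) = 0
# so on?/2 returns true.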
@doc """
Find the point on the line where it intersects with the specified `x` axis.
## Example
iex> use Vivid
...> Line.init(Point.init(25, 15), Point.init(5, 2))
...> |> Line.x_intersect(10)
#Vivid.Point<{10, 5.25}>
"""
@spec x_intersect(Line.t(), integer) :: Point.t() | nil
def x_intersect(%Line{origin: %Point{x: x0} = p, termination: %Point{x: x1}}, x)
when x == x0 and x == x1,
do: p
def x_intersect(%Line{origin: %Point{x: x0} = p0, termination: %Point{x: x1} = p1}, x)
when x0 > x1 do
x_intersect(%Line{origin: p1, termination: p0}, x)
end
def x_intersect(%Line{origin: %Point{x: x0} = p}, x) when x0 == x, do: p
def x_intersect(%Line{termination: %Point{x: x0} = p}, x) when x0 == x, do: p
def x_intersect(%Line{origin: %Point{x: x0, y: y0}, termination: %Point{x: x1, y: y1}}, x)
when x0 < x and x < x1 do
rx = (x - x0) / (x1 - x0)
y = rx * (y1 - y0) + y0
Point.init(x, y)
end
def x_intersect(_line, _x), do: nil
@doc """
Find the point on the line where it intersects with the specified `y` axis.
## Example
iex> use Vivid
...> Line.init(Point.init(25, 15), Point.init(5, 2))
...> |> Line.y_intersect(10)
#Vivid.Point<{17.307692307692307, 10}>
"""
@spec y_intersect(Line.t(), integer) :: Point.t() | nil
def y_intersect(%Line{origin: %Point{y: y0} = p, termination: %Point{y: y1}}, y)
when y == y0 and y == y1,
do: p
def y_intersect(%Line{origin: %Point{y: y0} = p0, termination: %Point{y: y1} = p1}, y)
when y0 > y1 do
y_intersect(%Line{origin: p1, termination: p0}, y)
end
def y_intersect(%Line{origin: %Point{y: y0} = p}, y) when y0 == y, do: p
def y_intersect(%Line{termination: %Point{y: y0} = p}, y) when y0 == y, do: p
def y_intersect(%Line{origin: %Point{x: x0, y: y0}, termination: %Point{x: x1, y: y1}}, y)
when y0 < y and y < y1 do
ry = (y - y0) / (y1 - y0)
x = ry * (x1 - x0) + x0
Point.init(x, y)
end
def y_intersect(_line, _y), do: nil
@doc """
Returns true if a line is horizontal.
## Example
iex> use Vivid
...> Line.init(Point.init(10,10), Point.init(20,10))
...> |> Line.horizontal?
true
iex> use Vivid
...> Line.init(Point.init(10,10), Point.init(20,11))
...> |> Line.horizontal?
false
"""
@spec horizontal?(Line.t()) :: boolean
def horizontal?(%Line{origin: %Point{y: y0}, termination: %Point{y: y1}}) when y0 == y1,
do: true
def horizontal?(_line), do: false
@doc """
Returns true if a line is vertical.
## Example
iex> use Vivid
...> Line.init(Point.init(10,10), Point.init(10,20))
...> |> Line.vertical?
true
iex> use Vivid
...> Line.init(Point.init(10,10), Point.init(11,20))
...> |> Line.vertical?
false
"""
@spec vertical?(Line.t()) :: boolean
def vertical?(%Line{origin: %Point{x: x0}, termination: %Point{x: x1}}) when x0 == x1, do: true
def vertical?(_line), do: false
end
|
lib/vivid/line.ex
| 0.927577
| 0.514461
|
line.ex
|
starcoder
|
defmodule Moonsugar.Validation do
@moduledoc """
The Validation module contains functions that help create and interact with the validation type.
The Validation type is represented as either `{:success, value}` or `{:failure, reasons}`
"""
@doc """
Helper function to create a success tuple.
## Examples
iex> Validation.success(3)
{:success, 3}
"""
def success(val) do
{:success, val}
end
@doc """
Helper function to create a failure tuple.
## Examples
iex> Validation.failure(["Goat is floating"])
{:failure, ["Goat is floating"]}
"""
def failure(reasons) do
{:failure, reasons}
end
@doc """
Combines validation types. Failures are concatenated. Concatenating two success types returns the last one.
## Examples
iex> Validation.concat({:failure, ["not enough chars"]}, {:failure, ["not long enough"]})
{:failure, ["not enough chars", "not long enough"]}
iex> Validation.concat({:failure, ["Game Crashed"]}, {:success, 3})
{:failure, ["Game Crashed"]}
iex> Validation.concat({:success, 2}, {:success, 3})
{:success, 3}
"""
def concat({:success, _}, {:failure, reasonsB}), do: {:failure, reasonsB}
def concat({:failure, reasonsA}, {:success, _}), do: {:failure, reasonsA}
def concat({:success, _}, {:success, valB}), do: {:success, valB}
def concat({:failure, reasonsA}, {:failure, reasonsB}) do
{:failure, Enum.concat(reasonsA, reasonsB)}
end
@doc """
Combines an array of validation types.
## Examples
iex> Validation.collect([{:failure, ["not long enough"]}, {:failure, ["not enough special chars"]}, {:failure, ["not enough capital letters"]}])
{:failure, ["not long enough", "not enough special chars", "not enough capital letters"]}
"""
def collect(validators) do
Enum.reduce(Enum.reverse(validators), &concat/2)
end
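# End-to-end sketch (the validator functions below are hypothetical; each is
# assumed to return {:success, input} or {:failure, [reason]}):
#
#   [validate_length(input), validate_digits(input), validate_case(input)]
#   |> Validation.collect()
#   #=> {:success, input} when all pass, or {:failure, [reasons, ...]}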
@doc """
Maps over a validation type, only applies the function to success tuples.
## Examples
iex> Validation.map({:success, 3}, fn(x) -> x * 2 end)
{:success, 6}
iex> Validation.map({:failure, ["Dwarves"]}, fn(x) -> x * 2 end)
{:failure, ["Dwarves"]}
"""
def map(result, fun) do
case result do
{:success, val} -> success(fun.(val))
error -> error
end
end
@doc """
Maps over a validation type, only applies the function to failure tuples.
## Examples
iex> Validation.mapFailure({:success, 3}, fn(x) -> x * 2 end)
{:success, 3}
iex> Validation.mapFailure({:failure, ["Dwarves"]}, &String.upcase/1)
{:failure, ["DWARVES"]}
"""
def mapFailure(validation, fun) do
case validation do
{:failure, reasons} -> failure(Enum.map(reasons, fun))
success -> success
end
end
@doc """
Converts a variable that might be nil to a validation type.
## Examples
iex> Validation.from_nilable("khajiit has wares", ["khajiit does not have wares"])
{:success, "khajiit has wares"}
iex> Validation.from_nilable(nil, ["khajiit does not have wares"])
{:failure, ["khajiit does not have wares"]}
"""
def from_nilable(val, failure) do
cond do
is_nil(val) -> {:failure, failure}
true -> success(val)
end
end
@doc """
Converts a variable from a maybe type to a validation type.
## Examples
iex> Validation.from_maybe({:just, 3}, ["Not a number"])
{:success, 3}
iex> Validation.from_maybe(:nothing, ["Not a number"])
{:failure, ["Not a number"]}
"""
def from_maybe(result, failure) do
case result do
{:just, val} -> {:success, val}
_ -> {:failure, failure}
end
end
@doc """
Converts a variable from a result type to a validation type.
## Examples
iex> Validation.from_result({:ok, "Dragon Slayed"})
{:success, "Dragon Slayed"}
iex> Validation.from_result({:error, "You Died"})
{:failure, ["You Died"]}
"""
def from_result(result) do
case result do
{:ok, val} -> {:success, val}
{:error, error} -> {:failure, [error]}
end
end
end
|
lib/validation.ex
| 0.891375
| 0.718644
|
validation.ex
|
starcoder
|
defmodule Dict do
@moduledoc %B"""
This module specifies the Dict API expected to be
implemented by different dictionaries. It also provides
functions that redirect to the underlying Dict, allowing
a developer to work with different Dict implementations
using one API.
To create a new dict, use the `new` functions defined
by each dict type:
HashDict.new #=> creates an empty HashDict
For simplicity's sake, in the examples below every time
`new` is used, it implies one of the module-specific
calls like above. Likewise, when the result of a function
invocation is shown in the form `[a: 1, b: 2]`, it implies
that the returned value is actually of the same dict type
as the input one.
## Protocols
Besides implementing the functions in this module, all
dictionaries are also required to implement the `Access`
protocol:
iex> dict = HashDict.new
...> dict = Dict.put(dict, :hello, :world)
...> dict[:hello]
:world
And also the `Enumerable` protocol, allowing one to write:
Enum.each(dict, fn ({ k, v }) ->
IO.puts "#{k}: #{v}"
end)
"""
use Behaviour
@type key :: any
@type value :: any
@type keys :: [ key ]
@type t :: tuple | list
defcallback delete(t, key) :: t
defcallback drop(t, keys) :: t
defcallback empty(t) :: t
defcallback equal?(t, t) :: boolean
defcallback get(t, key) :: value
defcallback get(t, key, value) :: value
defcallback get!(t, key) :: value | no_return
defcallback has_key?(t, key) :: boolean
defcallback keys(t) :: list(key)
defcallback merge(t, t) :: t
defcallback merge(t, t, (key, value, value -> value)) :: t
defcallback pop(t, key) :: {value, t}
defcallback pop(t, key, value) :: {value, t}
defcallback put(t, key, value) :: t
defcallback put_new(t, key, value) :: t
defcallback size(t) :: non_neg_integer()
defcallback split(t, keys) :: {t, t}
defcallback take(t, keys) :: t
defcallback to_list(t) :: list()
defcallback update(t, key, (value -> value)) :: t | no_return
defcallback update(t, key, value, (value -> value)) :: t
defcallback values(t) :: list(value)
defmacrop target(dict) do
quote do
cond do
is_tuple(unquote(dict)) ->
elem(unquote(dict), 0)
is_list(unquote(dict)) ->
ListDict
end
end
end
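# Dispatch sketch: in this era a HashDict is a record, i.e. a tuple whose
# first element is the module name, so target/1 resolves the implementation
# at runtime (this illustration assumes that record layout):
#
#   d = HashDict.new(a: 1)  # {HashDict, ...}
#   target(d)               #=> HashDict
#   target([a: 1])          #=> ListDict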
@doc """
Returns a list containing all dict's keys.
The keys are not guaranteed to be sorted, unless
the underlying dict implementation defines so.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> Enum.sort(Dict.keys(d))
[:a,:b]
"""
@spec keys(t) :: [key]
def keys(dict) do
target(dict).keys(dict)
end
@doc """
Returns a list containing all dict's values.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> Enum.sort(Dict.values(d))
[1,2]
"""
@spec values(t) :: [value]
def values(dict) do
target(dict).values(dict)
end
@doc """
Returns the number of elements in `dict`.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> Dict.size(d)
2
"""
@spec size(t) :: non_neg_integer
def size(dict) do
target(dict).size(dict)
end
@doc """
Returns whether the given key exists in the given dict.
## Examples
iex> d = HashDict.new([a: 1])
...> Dict.has_key?(d, :a)
true
iex> d = HashDict.new([a: 1])
...> Dict.has_key?(d, :b)
false
"""
@spec has_key?(t, key) :: boolean
def has_key?(dict, key) do
target(dict).has_key?(dict, key)
end
@doc """
Returns the value associated with `key` in `dict`. If `dict` does not
contain `key`, returns `default` (or nil if not provided).
## Examples
iex> d = HashDict.new([a: 1])
...> Dict.get(d, :a)
1
iex> d = HashDict.new([a: 1])
...> Dict.get(d, :b)
nil
iex> d = HashDict.new([a: 1])
...> Dict.get(d, :b, 3)
3
"""
@spec get(t, key, value) :: value
def get(dict, key, default // nil) do
target(dict).get(dict, key, default)
end
@doc false
def get!(dict, key) do
target(dict).get!(dict, key)
end
@doc """
Returns the `{ :ok, value }` associated with `key` in `dict`.
If `dict` does not contain `key`, returns `:error`.
## Examples
iex> d = HashDict.new([a: 1])
...> Dict.fetch(d, :a)
{ :ok, 1 }
iex> d = HashDict.new([a: 1])
...> Dict.fetch(d, :b)
:error
"""
@spec fetch(t, key) :: value
def fetch(dict, key) do
target(dict).fetch(dict, key)
end
@doc """
Returns the value associated with `key` in `dict`. If `dict` does not
contain `key`, it raises `KeyError`.
## Examples
iex> d = HashDict.new([a: 1])
...> Dict.fetch!(d, :a)
1
iex> d = HashDict.new([a: 1])
...> Dict.fetch!(d, :b)
** (KeyError) key not found: :b
"""
@spec fetch!(t, key) :: value | no_return
def fetch!(dict, key) do
target(dict).fetch!(dict, key)
end
@doc """
Stores the given `value` under `key` in `dict`.
If `dict` already has `key`, the stored value is replaced by the new one.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.put(d, :a, 3)
...> Dict.get(d, :a)
3
"""
@spec put(t, key, value) :: t
def put(dict, key, val) do
target(dict).put(dict, key, val)
end
@doc """
Puts the given `value` under `key` in `dict` unless `key` already exists.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.put_new(d, :a, 3)
...> Dict.get(d, :a)
1
"""
@spec put_new(t, key, value) :: t
def put_new(dict, key, val) do
target(dict).put_new(dict, key, val)
end
@doc """
Removes the entry stored under the given key from `dict`.
If `dict` does not contain `key`, returns the dictionary unchanged.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.delete(d, :a)
...> Dict.get(d, :a)
nil
iex> d = HashDict.new([b: 2])
...> Dict.delete(d, :a) == d
true
"""
@spec delete(t, key) :: t
def delete(dict, key) do
target(dict).delete(dict, key)
end
@doc """
Merges the given enum into the dict. In case one of the enum entries
already exists in the dict, it is given higher preference.
## Examples
iex> d1 = HashDict.new([a: 1, b: 2])
...> d2 = HashDict.new([a: 3, d: 4])
...> d = Dict.merge(d1, d2)
...> [a: Dict.get(d, :a), b: Dict.get(d, :b), d: Dict.get(d, :d)]
[a: 3, b: 2, d: 4]
"""
@spec merge(t, t) :: t
def merge(dict, enum) do
merge(dict, enum, fn(_k, _v1, v2) -> v2 end)
end
@doc """
Merges the given enum into the dict. In case one of the enum entries
already exists in the dict, the given function is invoked to resolve
conflicts.
## Examples
iex> d1 = HashDict.new([a: 1, b: 2])
...> d2 = HashDict.new([a: 3, d: 4])
...> d = Dict.merge(d1, d2, fn(_k, v1, v2) ->
...> v1 + v2
...> end)
...> [a: Dict.get(d, :a), b: Dict.get(d, :b), d: Dict.get(d, :d)]
[a: 4, b: 2, d: 4]
"""
@spec merge(t, t, (key, value, value -> value)) :: t
def merge(dict, enum, fun) do
target(dict).merge(dict, enum, fun)
end
@doc """
Returns the value associated with `key` in `dict` as
well as the `dict` without `key`.
## Examples
iex> dict = HashDict.new [a: 1]
...> {v, d} = Dict.pop dict, :a
...> {v, Enum.sort(d)}
{1,[]}
iex> dict = HashDict.new [a: 1]
...> {v, d} = Dict.pop dict, :b
...> {v, Enum.sort(d)}
{nil,[a: 1]}
iex> dict = HashDict.new [a: 1]
...> {v, d} = Dict.pop dict, :b, 3
...> {v, Enum.sort(d)}
{3,[a: 1]}
"""
@spec pop(t, key, value) :: {value, t}
def pop(dict, key, default // nil) do
target(dict).pop(dict, key, default)
end
@doc """
Update a value in `dict` by calling `fun` on the value to get a new
value. An exception is generated if `key` is not present in the dict.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.update(d, :a, fn(val) -> -val end)
...> Dict.get(d, :a)
-1
"""
@spec update(t, key, (value -> value)) :: t
def update(dict, key, fun) do
target(dict).update(dict, key, fun)
end
@doc """
Update a value in `dict` by calling `fun` on the value to get a new value. If
`key` is not present in `dict` then `initial` will be stored as the first
value.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.update(d, :c, 3, fn(val) -> -val end)
...> Dict.get(d, :c)
3
"""
@spec update(t, key, value, (value -> value)) :: t
def update(dict, key, initial, fun) do
target(dict).update(dict, key, initial, fun)
end
@doc """
Returns a tuple of two dicts, where the first dict contains only
entries from `dict` with keys in `keys`, and the second dict
contains only entries from `dict` with keys not in `keys`.
Any non-member keys are ignored.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> { d1, d2 } = Dict.split(d, [:a, :c])
...> { Dict.to_list(d1), Dict.to_list(d2) }
{ [a: 1], [b: 2] }
iex> d = HashDict.new([])
...> { d1, d2 } = Dict.split(d, [:a, :c])
...> { Dict.to_list(d1), Dict.to_list(d2) }
{ [], [] }
iex> d = HashDict.new([a: 1, b: 2])
...> { d1, d2 } = Dict.split(d, [:a, :b, :c])
...> { Dict.to_list(d1), Dict.to_list(d2) }
{ [a: 1, b: 2], [] }
"""
@spec split(t, keys) :: {t, t}
def split(dict, keys) do
target(dict).split(dict, keys)
end
@doc """
Returns a new dict where the given `keys` are removed from `dict`.
Any non-member keys are ignored.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.drop(d, [:a, :c, :d])
...> Dict.to_list(d)
[b: 2]
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.drop(d, [:c, :d])
...> Dict.to_list(d)
[a: 1, b: 2]
"""
@spec drop(t, keys) :: t
def drop(dict, keys) do
target(dict).drop(dict, keys)
end
@doc """
Returns a new dict where only the keys in `keys` from `dict` are
included. Any non-member keys are ignored.
## Examples
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.take(d, [:a, :c, :d])
...> Dict.to_list(d)
[a: 1]
iex> d = HashDict.new([a: 1, b: 2])
...> d = Dict.take(d, [:c, :d])
...> Dict.to_list(d)
[]
"""
@spec take(t, keys) :: t
def take(dict, keys) do
target(dict).take(dict, keys)
end
@doc """
Returns an empty dict of the same type as `dict`.
"""
@spec empty(t) :: t
def empty(dict) do
target(dict).empty(dict)
end
@doc """
Check if two dicts are equal. If the dicts are of different types, they're
first converted to lists.
## Examples
iex> a = HashDict.new(a: 2, b: 3, f: 5, c: 123)
...> b = ListDict.new(a: 2, b: 3, f: 5, c: 123)
...> Dict.equal?(a, b)
true
iex> a = HashDict.new(a: 2, b: 3, f: 5, c: 123)
...> b = []
...> Dict.equal?(a, b)
false
"""
@spec equal?(t, t) :: boolean
def equal?(a, b) do
a_target = target(a)
b_target = target(b)
cond do
a_target == b_target ->
a_target.equal?(a, b)
a_target.size(a) == b_target.size(b) ->
ListDict.equal?(a_target.to_list(a), b_target.to_list(b))
true ->
false
end
end
@doc """
Returns a list of key-value pairs stored in `dict`.
No particular order is enforced.
"""
@spec to_list(t) :: list
def to_list(dict) do
target(dict).to_list(dict)
end
end
|
lib/elixir/lib/dict.ex
| 0.917474
| 0.693038
|
dict.ex
|
starcoder
|
defmodule Crux.Structs.Permissions do
@moduledoc """
Custom non discord api struct to help with working with permissions.
For more informations see [Discord Docs](https://discordapp.com/developers/docs/topics/permissions).
"""
use Bitwise
alias Crux.Structs
alias Crux.Structs.Util
require Util
Util.modulesince("0.1.3")
@permissions %{
create_instant_invite: 1 <<< 0,
kick_members: 1 <<< 1,
ban_members: 1 <<< 2,
administrator: 1 <<< 3,
manage_channels: 1 <<< 4,
manage_guild: 1 <<< 5,
add_reactions: 1 <<< 6,
view_audit_log: 1 <<< 7,
priority_speaker: 1 <<< 8,
# 9
view_channel: 1 <<< 10,
send_messages: 1 <<< 11,
send_tts_message: 1 <<< 12,
manage_messages: 1 <<< 13,
embed_links: 1 <<< 14,
attach_files: 1 <<< 15,
read_message_history: 1 <<< 16,
mention_everyone: 1 <<< 17,
use_external_emojis: 1 <<< 18,
# 19
connect: 1 <<< 20,
speak: 1 <<< 21,
mute_members: 1 <<< 22,
deafen_members: 1 <<< 23,
move_members: 1 <<< 24,
use_vad: 1 <<< 25,
change_nickname: 1 <<< 26,
manage_nicknames: 1 <<< 27,
manage_roles: 1 <<< 28,
manage_webhooks: 1 <<< 29,
manage_emojis: 1 <<< 30
}
@doc """
Returns a map of all permissions.
"""
@spec flags() :: %{name() => non_neg_integer()}
Util.since("0.2.0")
def flags, do: @permissions
@names Map.keys(@permissions)
@doc """
Returns a list of all permission keys.
"""
@spec names() :: [name()]
Util.since("0.2.0")
def names, do: @names
@all @permissions |> Map.values() |> Enum.reduce(&|||/2)
@doc """
Returns the integer value of all permissions summed up.
"""
@spec all :: pos_integer()
Util.since("0.2.0")
def all, do: @all
@typedoc """
Union type of all valid permission name atoms.
"""
Util.typesince("0.2.0")
@type name ::
:create_instant_invite
| :kick_members
| :ban_members
| :administrator
| :manage_channels
| :manage_guild
| :add_reactions
| :view_audit_log
| :priority_speaker
| :view_channel
| :send_messages
| :send_tts_message
| :manage_messages
| :embed_links
| :attach_files
| :read_message_history
| :mention_everyone
| :use_external_emojis
| :connect
| :speak
| :mute_members
| :deafen_members
| :move_members
| :use_vad
| :change_nickname
| :manage_nicknames
| :manage_roles
| :manage_webhooks
| :manage_emojis
defstruct(bitfield: 0)
@typedoc """
All valid types which can be directly resolved into a permissions bitfield.
"""
Util.typesince("0.2.0")
@type resolvable :: t() | non_neg_integer() | name() | [resolvable()]
@typedoc """
Represents a `Crux.Structs.Permissions`.
* `:bitfield`: The raw bitfield of permission flags.
"""
Util.typesince("0.1.3")
@type t :: %__MODULE__{
bitfield: non_neg_integer()
}
@doc """
Creates a new `Crux.Structs.Permissions` struct from a valid `t:resolvable/0`.
"""
@spec new(permissions :: resolvable()) :: t()
Util.since("0.1.3")
def new(permissions \\ 0), do: %__MODULE__{bitfield: resolve(permissions)}
@doc ~S"""
Resolves a `t:resolvable/0` into a bitfield representing the set permissions.
## Examples
```elixir
# A single bitflag
iex> 0x8
...> |> Crux.Structs.Permissions.resolve()
0x8
# A single name
iex> :administrator
...> |> Crux.Structs.Permissions.resolve()
0x8
# A list of bitflags
iex> [0x8, 0x4]
...> |> Crux.Structs.Permissions.resolve()
0xC
# A list of names
iex> [:administrator, :ban_members]
...> |> Crux.Structs.Permissions.resolve()
0xC
# A mixture of both
iex> [:manage_roles, 0x400, 0x800, :add_reactions]
...> |> Crux.Structs.Permissions.resolve()
0x10000C40
# An empty list
iex> []
...> |> Crux.Structs.Permissions.resolve()
0x0
```
"""
@spec resolve(permissions :: resolvable()) :: non_neg_integer()
Util.since("0.1.3")
def resolve(permissions)
def resolve(%__MODULE__{bitfield: bitfield}), do: bitfield
def resolve(permissions) when is_integer(permissions) and permissions >= 0 do
Enum.reduce(@permissions, 0, fn {_name, value}, acc ->
if (permissions &&& value) == value, do: acc ||| value, else: acc
end)
end
def resolve(permissions) when permissions in @names do
Map.get(@permissions, permissions)
end
def resolve(permissions) when is_list(permissions) do
permissions
|> Enum.map(&resolve/1)
|> Enum.reduce(0, &|||/2)
end
def resolve(permissions) do
raise """
Expected a name atom, a non negative integer, or a list of them.
Received:
#{inspect(permissions)}
"""
end
@doc ~S"""
Serializes permissions into a map keyed by `t:name/0` with a boolean indicating whether the permission is set.
"""
@spec to_map(permissions :: resolvable()) :: %{name() => boolean()}
Util.since("0.1.3")
def to_map(permissions) do
permissions = resolve(permissions)
Map.new(@names, &{&1, has(permissions, &1)})
end
@doc ~S"""
Serializes permissions into a list of set `t:name/0`s.
## Examples
```elixir
iex> 0x30
...> |> Crux.Structs.Permissions.to_list()
[:manage_guild, :manage_channels]
```
"""
@spec to_list(permissions :: resolvable()) :: [name()]
Util.since("0.1.3")
def to_list(permissions) do
permissions = resolve(permissions)
Enum.reduce(@permissions, [], fn {name, val}, acc ->
if has(permissions, val), do: [name | acc], else: acc
end)
end
@doc ~S"""
Adds permissions to the base permissions.
## Examples
```elixir
iex> :administrator
...> |> Crux.Structs.Permissions.add(:manage_guild)
%Crux.Structs.Permissions{bitfield: 0x28}
```
"""
@spec add(base :: resolvable(), to_add :: resolvable()) :: t()
Util.since("0.1.3")
def add(base, to_add) do
to_add = resolve(to_add)
base
|> resolve()
|> bor(to_add)
|> new()
end
@doc ~S"""
Removes permissions from the base permissions
## Examples
```elixir
iex> [0x8, 0x10, 0x20]
...> |> Crux.Structs.Permissions.remove([0x10, 0x20])
%Crux.Structs.Permissions{bitfield: 0x8}
```
"""
@spec remove(base :: resolvable(), to_remove :: resolvable()) :: t()
Util.since("0.1.3")
def remove(base, to_remove) do
to_remove = to_remove |> resolve() |> bnot()
base
|> resolve()
|> band(to_remove)
|> new()
end
@doc ~S"""
Check whether the second permissions are all present in the first.
## Examples
```elixir
# Administrator won't grant any other permissions
iex> Crux.Structs.Permissions.has(0x8, Crux.Structs.Permissions.all())
false
# Resolving a list of `permissions_name`s
iex> Crux.Structs.Permissions.has([:send_messages, :view_channel, :read_message_history], [:send_messages, :view_channel])
true
# Resolving different types of `permissions`s
iex> Crux.Structs.Permissions.has(:administrator, 0x8)
true
# In different order
iex> Crux.Structs.Permissions.has(0x8, :administrator)
true
```
"""
@spec has(
have :: resolvable(),
want :: resolvable()
) :: boolean()
Util.since("0.1.3")
def has(have, want) do
have = resolve(have)
want = resolve(want)
(have &&& want) == want
end
@doc ~S"""
Similar to `has/2` but returns a `Crux.Structs.Permissions` of the missing permissions.
## Examples
```
iex> Crux.Structs.Permissions.missing([:send_messages, :view_channel], [:send_messages, :view_channel, :embed_links])
%Crux.Structs.Permissions{bitfield: 0x4000}
# Administrator won't implicitly grant other permissions
iex> Crux.Structs.Permissions.missing([:administrator], [:send_messages])
%Crux.Structs.Permissions{bitfield: 0x800}
# Everything set
iex> Crux.Structs.Permissions.missing([:kick_members, :ban_members, :view_audit_log], [:kick_members, :ban_members])
%Crux.Structs.Permissions{bitfield: 0}
# No permissions
iex> Crux.Structs.Permissions.missing([:send_messages, :view_channel], [])
%Crux.Structs.Permissions{bitfield: 0}
"""
@spec missing(resolvable(), resolvable()) :: t()
Util.since("0.2.0")
def missing(have, want) do
have = resolve(have)
want = resolve(want)
want
|> band(~~~have)
|> new()
end
@doc """
Resolves permissions for a user in a guild, optionally including channel permission overwrites.
> Raises when the member is not cached.
> The guild-wide administrator flag or being owner implicitly grants all permissions, see `explicit/3`.
"""
@spec implicit(
member :: Structs.Member.t() | Structs.User.t() | Crux.Rest.snowflake(),
guild :: Structs.Guild.t(),
channel :: Structs.Channel.t() | nil
) :: t()
Util.since("0.2.0")
def implicit(member, guild, channel \\ nil)
def implicit(%Structs.User{id: user_id}, guild, channel), do: implicit(user_id, guild, channel)
def implicit(%Structs.Member{user: user_id}, guild, channel),
do: implicit(user_id, guild, channel)
def implicit(user_id, %Structs.Guild{owner_id: user_id}, _), do: new(@all)
def implicit(user_id, guild, channel) do
permissions = explicit(user_id, guild)
cond do
has(permissions, :administrator) ->
new(@all)
channel ->
explicit(user_id, guild, channel)
true ->
permissions
end
end
@doc """
Resolves permissions for a user in a guild, optionally including channel permission overwrites.
> Raises when the member is not cached.
> The administrator flag or being owner does not implicitly grant permissions; see `implicit/3`.
"""
@spec explicit(
member :: Structs.Member.t() | Structs.User.t() | Crux.Rest.snowflake(),
guild :: Structs.Guild.t(),
channel :: Structs.Channel.t() | nil
) :: t()
Util.since("0.2.0")
def explicit(member, guild, channel \\ nil)
def explicit(%Structs.Member{user: user_id}, guild, channel),
do: explicit(user_id, guild, channel)
def explicit(%Structs.User{id: user_id}, guild, channel), do: explicit(user_id, guild, channel)
# -> compute_base_permissions from
# https://discordapp.com/developers/docs/topics/permissions#permission-overwrites
def explicit(user_id, %Structs.Guild{id: guild_id, members: members, roles: roles}, nil) do
member =
Map.get(members, user_id) ||
raise """
There is no member with the ID "#{inspect(user_id)}" in the cache of the guild.
The member is uncached or not in the guild.
"""
permissions =
roles
|> Map.get(guild_id)
|> Map.get(:permissions)
member_roles = MapSet.put(member.roles, guild_id)
roles
|> Map.take(member_roles)
|> Enum.map(fn {_id, %{permissions: permissions}} -> permissions end)
|> List.insert_at(0, permissions)
|> new()
end
# -> compute_permissions and compute_overwrites from
# https://discordapp.com/developers/docs/topics/permissions#permission-overwrites
def explicit(
user_id,
%Structs.Guild{id: guild_id, members: members} = guild,
%Structs.Channel{permission_overwrites: overwrites}
) do
%{bitfield: permissions} = explicit(user_id, guild)
# apply @everyone overwrite
base_permissions =
overwrites
|> Map.get(guild_id)
|> apply_overwrite(permissions)
role_ids = members |> Map.get(user_id) |> Map.get(:roles)
# apply all other overwrites
role_permissions =
overwrites
|> Map.take(role_ids)
|> Map.values()
# reduce all relevant overwrites into a single dummy one
|> Enum.reduce(%{allow: 0, deny: 0}, &acc_overwrite/2)
# apply it to the base permissions
|> apply_overwrite(base_permissions)
# apply user overwrite
overwrites
|> Map.get(user_id)
|> apply_overwrite(role_permissions)
|> new()
end
defp acc_overwrite(nil, acc), do: acc
defp acc_overwrite(%{allow: cur_allow, deny: cur_deny}, %{allow: allow, deny: deny}) do
%{allow: cur_allow ||| allow, deny: cur_deny ||| deny}
end
defp apply_overwrite(nil, permissions), do: permissions
defp apply_overwrite(%{allow: allow, deny: deny}, permissions) do
permissions
|> band(~~~deny)
|> bor(allow)
end
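# Worked example of the overwrite math above (illustrative bit values):
# with permissions = 0b0110, deny = 0b0010 and allow = 0b1000,
#   band(0b0110, ~~~0b0010)  #=> 0b0100  (denied bit cleared)
#   bor(0b0100, 0b1000)      #=> 0b1100  (allowed bit set)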
end
|
lib/structs/permissions.ex
| 0.866796
| 0.807081
|
permissions.ex
|
starcoder
|
defmodule AdventOfCode.Y2021.Day3 do
@moduledoc """
--- Day 3: Binary Diagnostic ---
The submarine has been making some odd creaking noises, so you ask it to produce a diagnostic report just in case.
The diagnostic report (your puzzle input) consists of a list of binary numbers which, when decoded properly, can tell you many useful things about the conditions of the submarine. The first parameter to check is the power consumption.
You need to use the binary numbers in the diagnostic report to generate two new binary numbers (called the gamma rate and the epsilon rate). The power consumption can then be found by multiplying the gamma rate by the epsilon rate.
Each bit in the gamma rate can be determined by finding the most common bit in the corresponding position of all numbers in the diagnostic report. For example, given the following diagnostic report:
00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010
Considering only the first bit of each number, there are five 0 bits and seven 1 bits. Since the most common bit is 1, the first bit of the gamma rate is 1.
The most common second bit of the numbers in the diagnostic report is 0, so the second bit of the gamma rate is 0.
The most common value of the third, fourth, and fifth bits are 1, 1, and 0, respectively, and so the final three bits of the gamma rate are 110.
So, the gamma rate is the binary number 10110, or 22 in decimal.
The epsilon rate is calculated in a similar way; rather than use the most common bit, the least common bit from each position is used. So, the epsilon rate is 01001, or 9 in decimal. Multiplying the gamma rate (22) by the epsilon rate (9) produces the power consumption, 198.
Use the binary numbers in your diagnostic report to calculate the gamma rate and epsilon rate, then multiply them together. What is the power consumption of the submarine? (Be sure to represent your answer in decimal, not binary.)
## Examples
iex> AdventOfCode.Y2021.Day3.part1()
1092896
"""
def part1() do
parse_file()
|> transpose_rows()
|> compute_gamma_eps()
end
def compute_gamma_eps(columns) do
columns
|> Enum.map(fn col ->
Enum.frequencies(col)
|> get_max_val()
end)
|> build_gamma_eps()
|> solve()
end
def build_gamma_eps(gamma) do
eps =
gamma
|> Enum.map(fn dig ->
case dig do
0 -> 1
1 -> 0
end
end)
[gamma, eps]
end
def solve([gamma, eps]) do
[gamma, eps]
|> Enum.reduce(1, fn arr, acc ->
bin_int = bin_arr_to_int(arr)
acc * bin_int
end)
end
def bin_arr_to_int(arr) do
{bin_int, ""} = Enum.join(arr) |> Integer.parse(2)
bin_int
end
def get_max_val(%{0 => zeros, 1 => ones}) when zeros > ones, do: 0
def get_max_val(%{0 => zeros, 1 => ones}) when zeros <= ones, do: 1
def solve_gamma(weights, rows) do
rows
|> Enum.map(fn row -> row ++ weights end)
end
def transpose_rows(rows) do
rows
|> List.zip()
|> Enum.map(&Tuple.to_list/1)
end
def parse_file() do
AdventOfCode.etl_file("lib/y_2021/d3/input.txt", &parse_row/1)
end
def parse_row(s) do
s
|> String.split("")
|> Enum.reduce([], fn ss, acc ->
if ss != "" do
acc ++ [get_int(Integer.parse(ss), s)]
else
acc
end
end)
end
defp get_int({n, ""}, _), do: n
@doc """
--- Part Two ---
Next, you should verify the life support rating, which can be determined by multiplying the oxygen generator rating by the CO2 scrubber rating.
Both the oxygen generator rating and the CO2 scrubber rating are values that can be found in your diagnostic report - finding them is the tricky part. Both values are located using a similar process that involves filtering out values until only one remains. Before searching for either rating value, start with the full list of binary numbers from your diagnostic report and consider just the first bit of those numbers. Then:
Keep only numbers selected by the bit criteria for the type of rating value for which you are searching. Discard numbers which do not match the bit criteria.
If you only have one number left, stop; this is the rating value for which you are searching.
Otherwise, repeat the process, considering the next bit to the right.
The bit criteria depends on which type of rating value you want to find:
To find oxygen generator rating, determine the most common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 1 in the position being considered.
To find CO2 scrubber rating, determine the least common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 0 in the position being considered.
For example, to determine the oxygen generator rating value using the same example diagnostic report from above:
Start with all 12 numbers and consider only the first bit of each number. There are more 1 bits (7) than 0 bits (5), so keep only the 7 numbers with a 1 in the first position: 11110, 10110, 10111, 10101, 11100, 10000, and 11001.
Then, consider the second bit of the 7 remaining numbers: there are more 0 bits (4) than 1 bits (3), so keep only the 4 numbers with a 0 in the second position: 10110, 10111, 10101, and 10000.
In the third position, three of the four numbers have a 1, so keep those three: 10110, 10111, and 10101.
In the fourth position, two of the three numbers have a 1, so keep those two: 10110 and 10111.
In the fifth position, there are an equal number of 0 bits and 1 bits (one each). So, to find the oxygen generator rating, keep the number with a 1 in that position: 10111.
As there is only one number left, stop; the oxygen generator rating is 10111, or 23 in decimal.
Then, to determine the CO2 scrubber rating value from the same example above:
Start again with all 12 numbers and consider only the first bit of each number. There are fewer 0 bits (5) than 1 bits (7), so keep only the 5 numbers with a 0 in the first position: 00100, 01111, 00111, 00010, and 01010.
Then, consider the second bit of the 5 remaining numbers: there are fewer 1 bits (2) than 0 bits (3), so keep only the 2 numbers with a 1 in the second position: 01111 and 01010.
In the third position, there are an equal number of 0 bits and 1 bits (one each). So, to find the CO2 scrubber rating, keep the number with a 0 in that position: 01010.
As there is only one number left, stop; the CO2 scrubber rating is 01010, or 10 in decimal.
Finally, to find the life support rating, multiply the oxygen generator rating (23) by the CO2 scrubber rating (10) to get 230.
Use the binary numbers in your diagnostic report to calculate the oxygen generator rating and CO2 scrubber rating, then multiply them together. What is the life support rating of the submarine? (Be sure to represent your answer in decimal, not binary.)
## Examples
iex> AdventOfCode.Y2021.Day3.part2()
%{ co2_val: 3443, life_support_rating: 4672151, o2_val: 1357 }
"""
def part2() do
rows = parse_file()
cols_with_index = transpose_rows(rows) |> Enum.with_index()
o2_val = iterate_and_reduce(rows, cols_with_index, &get_max_val/1)
co2_val = iterate_and_reduce(rows, cols_with_index, &get_min_val/1)
%{
o2_val: o2_val,
co2_val: co2_val,
life_support_rating: o2_val * co2_val
}
end
def get_min_val(%{0 => zeros, 1 => ones}) when ones < zeros, do: 1
def get_min_val(%{0 => zeros, 1 => ones}) when zeros <= ones, do: 0
def get_min_val(%{0 => _zeros}), do: 0
def get_min_val(%{1 => _ones}), do: 1
def iterate_and_reduce([elem], _cols, _func) do
bin_arr_to_int(elem)
end
def iterate_and_reduce(rows, [{head_cols, idx} | rest_cols], func)
when is_list(rest_cols) and length(rows) > 1 do
most_freq =
head_cols
|> Enum.frequencies()
|> func.()
sub_rows =
rows
|> Enum.reject(fn row -> most_freq == Enum.at(row, idx) end)
sub_rows = pick_row_set(sub_rows, rows)
list =
sub_rows
|> transpose_rows()
|> Enum.with_index()
|> Enum.reject(fn {_col, col_idx} ->
col_idx <= idx
end)
iterate_and_reduce(sub_rows, list, func)
end
def pick_row_set([], rows), do: rows
def pick_row_set(sub_rows, _rows), do: sub_rows
end
|
lib/y_2021/d3/day3.ex
| 0.877857
| 0.877791
|
day3.ex
|
starcoder
|
defmodule Adventofcode.Day13TransparentOrigami do
use Adventofcode
alias __MODULE__.{Parser, Part1, Part2, Printer, State}
def part_1(input) do
input
|> Parser.parse()
|> Part1.solve()
end
def part_2(input) do
input
|> Parser.parse()
|> Part2.solve()
|> Printer.to_s()
end
defmodule State do
@enforce_keys []
defstruct grid: [], folds: []
def new({grid, folds}), do: struct(__MODULE__, grid: grid, folds: folds)
end
defmodule Part1 do
def solve(%{folds: [fold | _rest]} = state) do
state.grid
|> Enum.flat_map(fn {x, y} -> fold(fold, {x, y}) end)
|> Enum.uniq()
|> length
end
def fold({:y, y_fold}, {x, y}) when y <= y_fold, do: [{x, y}]
def fold({:x, x_fold}, {x, y}) when x <= x_fold, do: [{x, y}]
def fold({:y, y_fold}, {x, y}), do: [{x, (y - y_fold) * -1 + y_fold}]
def fold({:x, x_fold}, {x, y}), do: [{(x - x_fold) * -1 + x_fold, y}]
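# e.g. a fold along y=7 reflects {3, 10} to {3, (10 - 7) * -1 + 7} = {3, 4},
# while {3, 4} (above the fold line) is kept as-is.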
end
defmodule Part2 do
def solve(%{folds: []} = state), do: state
def solve(%{folds: [fold | rest]} = state) do
state.grid
|> Enum.flat_map(fn {x, y} -> fold(fold, {x, y}) end)
|> Enum.uniq()
|> (fn grid -> solve(%{state | folds: rest, grid: grid}) end).()
end
def fold({:y, y_fold}, {x, y}) when y <= y_fold, do: [{x, y}]
def fold({:x, x_fold}, {x, y}) when x <= x_fold, do: [{x, y}]
def fold({:y, y_fold}, {x, y}), do: [{x, (y - y_fold) * -1 + y_fold}]
def fold({:x, x_fold}, {x, y}), do: [{(x - x_fold) * -1 + x_fold, y}]
end
defmodule Parser do
def parse(input) do
input
|> String.trim()
|> String.split("\n\n")
|> (fn [grid, folds] -> {parse_grid(grid), parse_folds(folds)} end).()
|> State.new()
end
defp parse_grid(input) do
input
|> String.split("\n")
|> Enum.map(&parse_grid_line/1)
end
defp parse_grid_line(line) do
line
|> String.split(",")
|> Enum.map(&String.to_integer/1)
|> List.to_tuple()
end
defp parse_folds(input) do
input
|> String.split("\n")
|> Enum.map(&parse_fold_line/1)
end
defp parse_fold_line("fold along " <> fold) do
fold
|> String.split("=")
|> (fn [dir, pos] -> {String.to_atom(dir), String.to_integer(pos)} end).()
end
end
defmodule Printer do
def print(%State{} = state) do
state
|> to_s
|> IO.puts()
end
def to_s(%State{} = state) do
x_min = state.grid |> Enum.map(&elem(&1, 0)) |> Enum.min()
x_max = state.grid |> Enum.map(&elem(&1, 0)) |> Enum.max()
y_min = state.grid |> Enum.map(&elem(&1, 1)) |> Enum.min()
y_max = state.grid |> Enum.map(&elem(&1, 1)) |> Enum.max()
grid = MapSet.new(state.grid)
Enum.map_join(y_min..y_max, "\n", fn y ->
Enum.map_join(x_min..x_max, "", fn x ->
if {x, y} in grid, do: "#", else: " "
end)
end)
end
end
end
|
lib/day_13_transparent_origami.ex
| 0.635562
| 0.62701
|
day_13_transparent_origami.ex
|
starcoder
|
defmodule Grizzly.ZWave.SmartStart.MetaExtension.MaxInclusionRequestInterval do
@moduledoc """
This is used to advertise if a power constrained Smart Start node will issue
inclusion request at a higher interval value than the default 512 seconds.
"""
@typedoc """
The interval (in seconds) must be in the range of 640..12672 inclusive, and
has to be in steps of 128 seconds.
So after 640 the next valid interval is `640 + 128` which is `768` seconds.
See `SDS13944 Node Provisioning Information Type Registry.pdf` section
`3.1.2.3` for more information.
"""
@behaviour Grizzly.ZWave.SmartStart.MetaExtension
@type interval :: 640..12672
@type t :: %__MODULE__{
interval: interval()
}
defstruct interval: nil
@spec new(interval()) ::
{:ok, t()}
| {:error,
:interval_too_small | :interval_too_big | :interval_step_invalid | :interval_required}
def new(interval) do
case validate_interval(interval) do
:ok ->
{:ok, %__MODULE__{interval: interval}}
error ->
error
end
end
@doc """
Make a `MaxInclusionRequestInterval.t()` from a binary string
If the interval provided in the binary is invalid this function will return
`{:error, :interval_too_big | :interval_too_small}`. See the typedoc for
more information regarding the interval specification.
If the critical bit is set this is considered invalid to the specification and
the function will return `{:error, :critical_bit_set}`.
"""
@impl true
@spec from_binary(binary()) ::
{:ok, t()}
| {:error,
:interval_too_big | :interval_too_small | :critical_bit_set | :invalid_binary}
def from_binary(<<0x02::size(7), 0::size(1), 0x01, interval>>) do
case interval_from_byte(interval) do
{:ok, interval_seconds} ->
new(interval_seconds)
error ->
error
end
end
def from_binary(<<0x02::size(7), 1::size(1), _rest::binary>>) do
{:error, :critical_bit_set}
end
def from_binary(_), do: {:error, :invalid_binary}
@doc """
Make a binary string from a `MaxInclusionRequestInterval.t()`
If the interval provided in the binary is invalid this function will return
`{:error, :interval_too_big | :interval_too_small}`. See the typedoc for
more information regarding the interval specification.
"""
@impl true
@spec to_binary(t()) :: {:ok, binary()}
def to_binary(%__MODULE__{interval: interval}) do
interval_byte = interval_to_byte(interval)
{:ok, <<0x04, 0x01, interval_byte>>}
end
defp interval_from_byte(interval) when interval < 5, do: {:error, :interval_too_small}
defp interval_from_byte(interval) when interval > 99, do: {:error, :interval_too_big}
defp interval_from_byte(byte) do
steps = byte - 5
{:ok, 640 + steps * 128}
end
defp validate_interval(interval) when interval < 640, do: {:error, :interval_too_small}
defp validate_interval(interval) when interval > 12672, do: {:error, :interval_too_big}
defp validate_interval(nil), do: {:error, :interval_required}
defp validate_interval(interval) do
if Integer.mod(interval, 128) == 0 do
:ok
else
{:error, :interval_step_invalid}
end
end
# Inverse of interval_from_byte/1 above: byte 5 corresponds to 640 seconds.
defp interval_to_byte(interval) do
Integer.floor_div(interval - 640, 128) + 5
end
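# Encoding sketch for the byte <-> seconds mapping used above:
#
#   byte 5 -> 640 s, byte 6 -> 768 s, ..., byte 99 -> 12672 s
#
#   {:ok, ext} = MaxInclusionRequestInterval.new(768)
#   MaxInclusionRequestInterval.to_binary(ext) #=> {:ok, <<0x04, 0x01, 6>>}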
end
|
lib/grizzly/zwave/smart_start/meta_extension/max_inclusion_request_interval.ex
| 0.942228
| 0.604078
|
max_inclusion_request_interval.ex
|
starcoder
|
defmodule Conduit.Plug.Retry do
use Conduit.Plug.Builder
require Logger
@moduledoc """
Retries messages that were nacked or raised an exception.
## Options
* `attempts` - Number of times to process the message before giving up. (defaults to 3)
* `backoff_factor` - What multiple of the delay should be backoff on each attempt. For
a backoff of 2, on each retry we double the amount of time of the last delay. Set to
1 to use the same delay each retry.
(defaults to 2)
* `jitter` - Size of randomness applied to delay. This is useful to prevent multiple
processes from retrying at the same time. (defaults to 0)
* `delay` - How long to wait between attempts. (defaults to 1000ms)
## Examples
plug Retry
plug Retry, attempts: 10, delay: 10_000
"""
@defaults %{
attempts: 3,
backoff_factor: 2,
jitter: 0,
delay: 1000
}
def call(message, next, opts) do
opts = Map.merge(@defaults, Map.new(opts))
attempt(message, next, 0, opts)
end
defp attempt(message, next, retries, opts) do
message = next.(message)
case message.status do
:nack -> retry(message, next, retries, :nack, nil, opts)
:ack -> message
end
rescue
error ->
retry(message, next, retries, error, System.stacktrace(), opts)
end
defp retry(message, _, retries, :nack, _stacktrace, %{attempts: attempts})
when retries >= attempts - 1 do
nack(message)
end
defp retry(_, _, retries, error, stacktrace, %{attempts: attempts})
when retries >= attempts - 1 do
reraise error, stacktrace
end
defp retry(message, next, retries, error, stacktrace, opts) do
delay = opts.delay * :math.pow(opts.backoff_factor, retries)
jitter = :rand.uniform() * delay * opts.jitter
wait_time = round(delay + jitter)
log_error(error, stacktrace, wait_time)
Process.sleep(wait_time)
message
|> put_header("retries", retries + 1)
|> ack
|> attempt(next, retries + 1, opts)
end
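# Backoff sketch with the defaults above (delay: 1000, backoff_factor: 2,
# jitter: 0): the first retry waits 1000 * 2^0 = 1000 ms, the second waits
# 1000 * 2^1 = 2000 ms, and with attempts: 3 the message is then nacked or
# the exception is reraised.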
defp log_error(:nack, _, wait_time) do
Logger.warn("Message will be retried in #{wait_time}ms because it was nacked")
end
defp log_error(error, stacktrace, wait_time) do
formatted_error = Exception.format(:error, error, stacktrace)
Logger.warn([
"Message will be retried in #{wait_time}ms because an exception was raised\n",
formatted_error
])
end
end
|
lib/conduit/plug/retry.ex
| 0.829906
| 0.569972
|
retry.ex
|
starcoder
|
defmodule Akd.Hook do
@moduledoc """
This module represents an `Akd.Hook` struct which contains metadata about
a hook.
Please refer to `Nomenclature` for more information about the terms used.
The meta data involves:
* `ensure` - A list of `Akd.Operation.t` structs that run after a deployment,
if the hook was successfully executed (independent of whether the
deployment itself was successful or not), and `run_ensure` is `true`.
* `ignore_failure` - If `true`, the deployment continues to happen even if this
hook fails. Defaults to `false`.
* `main` - A list of `Akd.Operation.t` that run when the hook is executed.
* `rollback` - A list of `Akd.Operation.t` that run when a deployment is a
failure, but the hook was called.
* `run_ensure` - If `true`, `ensure` commands run independent of whether
deployment was successful or not. Defaults to `true`.
This struct is mainly used by native hooks in `Akd`, but it can be leveraged
to write custom hooks.
"""
alias Akd.{Deployment, Operation}
defstruct [ensure: [], ignore_failure: false,
main: [], rollback: [], run_ensure: true]
@typedoc ~s(Generic type for a Hook struct)
@type t :: %__MODULE__{
ensure: [Operation.t],
ignore_failure: boolean(),
main: [Operation.t],
rollback: [Operation.t],
run_ensure: boolean()
}
@callback get_hooks(Deployment.t, list) :: [__MODULE__.t]
@doc """
This macro allows another module to behave like `Akd.Hook`.
This also allows a module to use `Akd.Dsl.FormHook` to write
readable hooks.
## Examples:
iex> defmodule CustomHook do
...> use Akd.Hook
...> def get_hooks(deployment, opts) do
...> [form_hook do
...> main "some command", Akd.Destination.local()
...> end]
...> end
...> end
iex> CustomHook.get_hooks(nil, nil)
[%Akd.Hook{ensure: [], ignore_failure: false,
main: [%Akd.Operation{cmd: "some command", cmd_envs: [],
destination: %Akd.Destination{host: :local, path: ".",
user: :current}}], rollback: [], run_ensure: true}]
"""
defmacro __using__(_) do
quote do
import Akd.Dsl.FormHook
@behaviour unquote(__MODULE__)
@spec get_hooks(Akd.Deployment.t, list) :: [unquote(__MODULE__).t]
def get_hooks(_, _), do: raise "`get_hooks/2` not defined for #{__MODULE__}"
defoverridable [get_hooks: 2]
end
end
@doc """
Takes a `Akd.Hook.t` struct and calls the list of `Akd.Operation.t`
corresponding to `rollback` type.
## Examples:
iex> hook = %Akd.Hook{}
iex> Akd.Hook.rollback(hook)
{:ok, []}
"""
@spec rollback(__MODULE__.t) :: {:ok | :error, list()}
def rollback(%__MODULE__{} = hook) do
hook
|> Map.get(:rollback)
|> Enum.reduce_while({:ok, []}, &runop/2)
|> (& {elem(&1, 0), &1 |> elem(1) |> Enum.reverse()}).()
end
@doc """
Takes a `Akd.Hook.t` struct and calls the list of `Akd.Operation.t`
corresponding to `main` type.
## Examples:
iex> hook = %Akd.Hook{}
iex> Akd.Hook.main(hook)
{:ok, []}
"""
@spec main(__MODULE__.t) :: {:ok | :error, list()}
def main(%__MODULE__{} = hook) do
hook
|> Map.get(:main)
|> Enum.reduce_while({:ok, []}, &runop/2)
|> (& {elem(&1, 0), &1 |> elem(1) |> Enum.reverse()}).()
end
@doc """
Takes a `Akd.Hook.t` struct and calls the list of `Akd.Operation.t`
corresponding to `ensure` type.
If `run_ensure` is `false`, it doesn't run any operations.
## Examples:
iex> hook = %Akd.Hook{}
iex> Akd.Hook.ensure(hook)
{:ok, []}
iex> ensure = [%Akd.Operation{destination: %Akd.Destination{}, cmd: "echo 1"}]
iex> hook = %Akd.Hook{run_ensure: false, ensure: ensure}
iex> Akd.Hook.ensure(hook)
{:ok, []}
"""
@spec ensure(__MODULE__.t) :: {:ok | :error, list()}
def ensure(%__MODULE__{run_ensure: false}), do: {:ok, []}
def ensure(%__MODULE__{} = hook) do
hook
|> Map.get(:ensure)
|> Enum.reduce_while({:ok, []}, &runop/2)
|> (& {elem(&1, 0), &1 |> elem(1) |> Enum.reverse()}).()
end
# Delegates to running the operation and translates the return tuple to
# :halt vs :cont form usable by `reduce_while`
defp runop(%Operation{} = op, {_, io}) do
case Operation.run(op) do
{:error, error} -> {:halt, {:error, [error | io]}}
{:ok, output} -> {:cont, {:ok, [output | io]}}
end
end
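# Sketch of the halt semantics above: given main ops [op1, op2, op3] where
# op2 fails, the reduce halts at op2 and main/1 returns
# {:error, [op1_output, op2_error]} (after the reverse); op3 never runs.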
end
|
lib/akd/hook.ex
| 0.851135
| 0.565569
|
hook.ex
|
starcoder
|
defmodule DiscordBot.Util do
@moduledoc """
Various utility methods and helpers.
"""
@doc """
Gets the PID of a child process from a supervisor given an ID.
`supervisor` is the supervisor to query, and `id` is the ID
to lookup. Do not call this from the `start_link/1` or
the `init/1` function of any child process of `supervisor`,
or deadlock will occur.
"""
@spec child_by_id(pid, any) :: {:ok, pid} | :error
def child_by_id(supervisor, id) do
children = Supervisor.which_children(supervisor)
case Enum.filter(children, fn child -> matches_id?(child, id) end) do
[] -> :error
[{_, pid, _, _}] -> {:ok, pid}
_ -> :error
end
end
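# Usage sketch (the supervisor and child id below are illustrative):
#
#   case DiscordBot.Util.child_by_id(MyApp.Supervisor, MyApp.Worker) do
#     {:ok, pid} -> pid
#     :error -> raise "worker not found"
#   end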
defp matches_id?({_, :undefined, _, _}, _), do: false
defp matches_id?({_, :restarting, _, _}, _), do: false
defp matches_id?({id, _, _, _}, id), do: true
defp matches_id?(_, _), do: false
@doc ~S"""
Gets a required option from a keyword list.
Raises an `ArgumentError` if the option is not present.
## Examples
iex> opts = [option: :value]
...> DiscordBot.Util.require_opt!(opts, :option)
:value
iex> opts = [option: :value]
...> DiscordBot.Util.require_opt!(opts, :not_present)
** (ArgumentError) Required option :not_present is missing.
"""
@spec require_opt!(list, atom) :: any()
def require_opt!(opts, key) do
require_opt!(opts, key, "Required option #{Kernel.inspect(key)} is missing.")
end
@doc ~S"""
Gets a required option from a keyword list with a custom message.
Raises an `ArgumentError` with the provided `msg` if the option
is not present.
## Examples
iex> opts = [option: :value]
...> DiscordBot.Util.require_opt!(opts, :option, "Error!")
:value
iex> opts = [option: :value]
...> DiscordBot.Util.require_opt!(opts, :not_present, "Error!")
** (ArgumentError) Error!
"""
@spec require_opt!(list, atom, String.t()) :: any()
def require_opt!(opts, key, message) do
case Keyword.fetch(opts, key) do
{:ok, value} -> value
:error -> raise ArgumentError, message: message
end
end
end
|
apps/discordbot/lib/discordbot/util.ex
| 0.795142
| 0.457137
|
util.ex
|
starcoder
|
defmodule Poxa.WebHook.EventTable do
@moduledoc """
This module provides functions to initialize and manipulate a table of events
to be sent to the web hook.
"""
@table_name :web_hook_events
import :ets, only: [new: 2, select: 2, select_delete: 2]
@doc """
This function initializes the ETS table where the events will be stored.
"""
def init do
new(@table_name, [:bag, :public, :named_table])
end
@doc """
This function inserts events in the ETS table. It receives a `delay` in
milliseconds corresponding to how much time each event must be delayed before
being sent. This `delay` is summed up with the current timestamp and used
as key of the ETS table.
It doesn't always insert a new value, though. When a corresponding event is
already in the table (e.g. `member_added` is being inserted while a
`member_removed` for the same user and channel is still queued), it does not
insert the new event but removes the old one instead. This is done to delay
events as described in [Pusher's documentation](https://pusher.com/docs/webhooks#delay).
"""
def insert(_, delay \\ 0)
def insert(event, delay) when not is_list(event), do: insert([event], delay)
def insert(events, delay) do
Enum.filter events, fn(event) ->
if delete_corresponding(event) == 0 do
:ets.insert(@table_name, {time_ms() + delay, event})
end
end
end
@doc """
This function returns all events present in the ETS table.
"""
def all, do: filter_events []
@doc """
This function returns all events in the ETS table that are ready to be sent
for a given `timestamp`.
"""
def ready(timestamp \\ time_ms() + 1), do: {timestamp, filter_events [{:<, :"$1", timestamp}]}
defp filter_events(filter) do
select(@table_name, [{{:"$1", :"$2"}, filter, [:"$2"]}])
end
@doc """
This function removes all events in the ETS table that are ready to be sent
before a certain timestamp.
"""
def clear_older(timestamp), do: select_delete(@table_name, [{{:"$1", :"$2"}, [], [{:<, :"$1", timestamp}]}])
defp delete_corresponding(%{name: "channel_occupied", channel: channel}) do
match = [
{{:_, %{channel: :"$1", name: "channel_vacated"}},
[],
[{:==, {:const, channel}, :"$1"}]}
]
select_delete(@table_name, match)
end
defp delete_corresponding(%{name: "member_added", channel: channel, user_id: user_id}) do
match = [
{{:_, %{channel: :"$1", name: "member_removed", user_id: :"$2"}},
[],
[{:andalso, {:==, {:const, user_id}, :"$2"}, {:==, {:const, channel}, :"$1"}}]}
]
select_delete(@table_name, match)
end
defp delete_corresponding(_), do: 0
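# Cancellation sketch: a queued "member_removed" is deleted (rather than a
# new row inserted) when the matching "member_added" arrives:
#
#   EventTable.insert(%{name: "member_removed", channel: "presence-c", user_id: "u1"}, 1_000)
#   EventTable.insert(%{name: "member_added", channel: "presence-c", user_id: "u1"})
#   EventTable.all() #=> [] (the two events cancelled each other out)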
defp time_ms, do: :erlang.system_time(:milli_seconds)
end
|
lib/poxa/web_hook/event_table.ex
| 0.757705
| 0.611179
|
event_table.ex
|
starcoder
|
defmodule ExWordNet.Synset do
@moduledoc """
Provides abstraction over a synset (or group of synonymous words) in WordNet.
Synsets are related to each other by various (and numerous!) relationships, including
Hypernym (x is a hypernym of y <=> x is a parent of y) and Hyponym (x is a child of y)
Struct members:
- `:part_of_speech`: A shorthand representation of the part of speech this synset represents.
- `:word_counts`: The list of words (and their frequencies within the WordNet graph) for this
`Synset`.
- `:gloss`: A string representation of this synset's gloss. "Gloss" is a human-readable
description of this concept, often with example usage.
"""
@enforce_keys ~w(part_of_speech word_counts gloss)a
defstruct @enforce_keys
@type t :: %__MODULE__{
part_of_speech: ExWordNet.Constants.PartsOfSpeech.atom_part_of_speech(),
word_counts: %{required(String.t()) => integer()},
gloss: String.t()
}
import ExWordNet.Constants.PartsOfSpeech
@doc """
Creates an `ExWordNet.Synset` struct by reading from the data file specified by `part_of_speech`,
at `offset` bytes into the file.
This is how the WordNet database is organized. You shouldn't be calling this function directly;
instead, use `ExWordNet.Lemma.synsets/1`.
"""
@spec new(ExWordNet.Constants.PartsOfSpeech.atom_part_of_speech(), integer()) ::
{:ok, __MODULE__.t()} | {:error, any()}
def new(part_of_speech, offset)
when is_atom_part_of_speech(part_of_speech) and is_integer(offset) do
path =
ExWordNet.Config.db()
|> Path.join("dict")
|> Path.join("data.#{part_of_speech}")
with {:ok, file} <- File.open(path),
{:ok, _} <- :file.position(file, offset),
{:ok, line} <- :file.read_line(file) do
result = process_line(line, part_of_speech)
{:ok, result}
else
{:error, reason} ->
{:error, reason}
end
end
@doc """
Gets a list of words included in this synset.
"""
@spec words(__MODULE__.t()) :: [String.t()]
def words(%__MODULE__{word_counts: word_counts}) when is_map(word_counts) do
Map.keys(word_counts)
end
defp process_line(line, part_of_speech)
when is_binary(line) and is_atom_part_of_speech(part_of_speech) do
[info_line, gloss] = line |> String.trim() |> String.split(" | ", parts: 2)
[_synset_offset, _lex_filenum, _synset_type, word_count | xs] = String.split(info_line, " ")
{word_count, _} = Integer.parse(word_count, 16)
{words_list, _} = Enum.split(xs, word_count * 2)
word_counts =
words_list
|> Enum.chunk_every(2)
|> Enum.reduce(%{}, fn [word, count], acc ->
{count, _} = Integer.parse(count, 16)
Map.put(acc, word, count)
end)
# TODO: Read pointers
%__MODULE__{part_of_speech: part_of_speech, word_counts: word_counts, gloss: gloss}
end
end
defimpl String.Chars, for: ExWordNet.Synset do
import ExWordNet.Constants.PartsOfSpeech
def to_string(synset = %ExWordNet.Synset{part_of_speech: part_of_speech, gloss: gloss})
when is_atom_part_of_speech(part_of_speech) and is_binary(gloss) do
words =
synset
|> ExWordNet.Synset.words()
|> Enum.map(&String.replace(&1, "_", " "))
|> Enum.join(", ")
short_part_of_speech = atom_to_short_part_of_speech(part_of_speech)
"(#{short_part_of_speech}) #{words} (#{gloss})"
end
end
|
lib/exwordnet/synset.ex
| 0.785432
| 0.636424
|
synset.ex
|
starcoder
|
defmodule Nanoid.NonSecure do
@moduledoc """
Generate an URL-friendly unique ID. This method use the non-secure, predictable random generator.
By default, the ID will have 21 symbols with a collision probability similar to UUID v4.
"""
alias Nanoid.Configuration
@doc """
Generates a non-secure NanoID using the default alphabet.
## Example
Generate a non-secure NanoID with the default size of 21 characters.
iex> Nanoid.NonSecure.generate()
"mJUHrGXZBZpNX50x2xkzf"
Generate a non-secure NanoID with a custom size of 64 characters.
iex> Nanoid.NonSecure.generate(64)
"wk9fsUrhK9k-MxY0hLazRKpcSlic8XYDFusks7Jb8FwCVnoQaKFSPsmmLHzP7qCX"
"""
@spec generate(non_neg_integer()) :: binary()
def generate(size \\ Configuration.default_size())
def generate(size) when is_integer(size) and size > 0,
do: generator(size, Configuration.default_alphabet())
def generate(_size),
do: generator(Configuration.default_size(), Configuration.default_alphabet())
@doc """
Generate a non-secure NanoID using a custom size and an individual alphabet.
## Example
Generate a non-secure NanoID with the default size of 21 characters and an individual alphabet.
iex> Nanoid.NonSecure.generate(21, "abcdef123")
"d1dcd2dee333cae1bfdea"
Generate a non-secure NanoID with custom size of 64 characters and an individual alphabet.
iex> Nanoid.NonSecure.generate(64, "abcdef123")
"aabbaca3c11accca213babed2bcd1213efb3e3fa1ad23ecbf11c2ffc123f3bbe"
"""
@spec generate(non_neg_integer(), binary() | list()) :: binary()
def generate(size, alphabet)
def generate(size, alphabet) when is_integer(size) and size > 0 and is_binary(alphabet),
do: generator(size, alphabet)
def generate(size, alphabet) when is_integer(size) and size > 0 and is_list(alphabet),
do: generator(size, alphabet)
def generate(size, _alphabet) when is_integer(size) and size > 0,
do: generate(size, Configuration.default_alphabet())
def generate(_size, _alphabet),
do: generate(Configuration.default_size(), Configuration.default_alphabet())
@spec generator(non_neg_integer(), binary() | list()) :: binary()
defp generator(size, alphabet)
defp generator(size, alphabet) when is_integer(size) and size > 0 and is_binary(alphabet),
do: generator(size, String.graphemes(alphabet))
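# Draw `size` uniformly random graphemes from the alphabet and join them.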
defp generator(size, alphabet) when is_integer(size) and size > 0 and is_list(alphabet) and length(alphabet) > 1 do
1..size
|> Enum.reduce([], fn _, acc -> [Enum.random(alphabet) | acc] end)
|> Enum.join()
end
defp generator(_size, _alphabet),
do: generator(Configuration.default_size(), Configuration.default_alphabet())
end
|
lib/nanoid/non_secure.ex
| 0.884769
| 0.551513
|
non_secure.ex
|
starcoder
|
defmodule Mix.Tasks.Autox.Phoenix.Migration do
use Mix.Task
@shortdoc "Generates an Ecto migration"
@moduledoc """
Code copy+pasted from https://github.com/phoenixframework/phoenix/blob/master/lib/mix/tasks/phoenix.gen.model.ex
"""
def run(args) do
switches = [migration: :boolean, binary_id: :boolean, instructions: :string]
{opts, parsed, _} = OptionParser.parse(args, switches: switches)
[xg, singular, plural | attrs] = validate_args!(parsed)
default_opts = Application.get_env(:phoenix, :generators, [])
opts = Keyword.merge(default_opts, opts)
uniques = Mix.Phoenix.uniques(attrs)
attrs = Mix.Phoenix.attrs(attrs)
binding = Mix.Phoenix.inflect(singular)
params = Mix.Phoenix.params(attrs)
path = binding[:path]
migration = String.replace(path, "/", "_")
{assocs, attrs} = partition_attrs_and_assocs(attrs)
create? = Enum.empty?(assocs)
binding = binding ++
[attrs: attrs, plural: plural, types: types(attrs), uniques: uniques,
assocs: assocs(assocs), indexes: indexes(plural, assocs, uniques),
defaults: defaults(attrs), params: params,
binary_id: opts[:binary_id], create?: create?]
files =
  if opts[:migration] != false do
    action = if create?, do: "create", else: "alter"
    [{:eex, "migration.exs", "priv/repo/migrations/#{timestamp(xg)}_#{action}_#{migration}.exs"}]
  else
    []
  end
Mix.Phoenix.copy_from Mix.Autox.paths, "priv/templates/autox.phoenix.migration", "", binding, files
# Print any extra instruction given by parent generators
Mix.shell.info opts[:instructions] || ""
if opts[:migration] != false do
Mix.shell.info """
Remember to update your repository by running migrations:
$ mix ecto.migrate
"""
end
end
defp validate_args!([_, _, plural | _] = args) do
cond do
String.contains?(plural, ":") ->
raise_with_help
plural != Phoenix.Naming.underscore(plural) ->
Mix.raise "expected the second argument, #{inspect plural}, to be all lowercase using snake_case convention"
true ->
args
end
end
defp validate_args!(_) do
raise_with_help
end
defp raise_with_help do
Mix.raise """
mix phoenix.gen.model expects both singular and plural names
of the generated resource followed by any number of attributes:
mix phoenix.gen.model User users name:string
"""
end
defp partition_attrs_and_assocs(attrs) do
Enum.split_with attrs, fn
{_, {:references, _}} ->
true
{key, :references} ->
Mix.raise """
Phoenix generators expect the table to be given to #{key}:references.
For example:
mix phoenix.gen.model Comment comments body:text post_id:references:posts
"""
_ ->
false
end
end
defp assocs(assocs) do
Enum.map assocs, fn {key_id, {:references, source}} ->
key = String.replace(Atom.to_string(key_id), "_id", "")
assoc = Mix.Phoenix.inflect key
{String.to_atom(key), key_id, assoc[:module], source}
end
end
defp indexes(plural, assocs, uniques) do
Enum.concat(
Enum.map(assocs, fn {key, _} ->
"create index(:#{plural}, [:#{key}])"
end),
Enum.map(uniques, fn key ->
"create unique_index(:#{plural}, [:#{key}])"
end))
end
defp timestamp(xg\\"") do
{{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
"#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}#{pad(xg)}"
end
defp pad(i) when i < 10, do: << ?0, ?0 + i >>
defp pad(i), do: to_string(i)
defp types(attrs) do
Enum.into attrs, %{}, fn
{k, {c, v}} -> {k, {c, value_to_type(v)}}
{k, v} -> {k, value_to_type(v)}
end
end
defp defaults(attrs) do
Enum.into attrs, %{}, fn
{k, :boolean} -> {k, ", default: false"}
{k, _} -> {k, ""}
end
end
defp value_to_type(:text), do: :string
defp value_to_type(:uuid), do: Ecto.UUID
defp value_to_type(:date), do: Ecto.Date
defp value_to_type(:time), do: Ecto.Time
defp value_to_type(:datetime), do: Ecto.DateTime
defp value_to_type(v) do
if Code.ensure_loaded?(Ecto.Type) and not Ecto.Type.primitive?(v) do
Mix.raise "Unknown type `#{v}` given to generator"
else
v
end
end
end
|
lib/mix/tasks/autox.phoenix.migration.ex
| 0.797636
| 0.422892
|
autox.phoenix.migration.ex
|
starcoder
|
defmodule Pigeon.LegacyFCM do
@moduledoc """
`Pigeon.Adapter` for Legacy Firebase Cloud Messaging (FCM) push notifications.
## Getting Started
1. Create a `LegacyFCM` dispatcher.
```
# lib/legacy_fcm.ex
defmodule YourApp.LegacyFCM do
use Pigeon.Dispatcher, otp_app: :your_app
end
```
2. (Optional) Add configuration to your `config.exs`.
```
# config.exs
config :your_app, YourApp.LegacyFCM,
adapter: Pigeon.LegacyFCM,
key: "your_fcm_key_here"
```
3. Start your dispatcher on application boot.
```
defmodule YourApp.Application do
@moduledoc false
use Application
@doc false
def start(_type, _args) do
children = [
YourApp.LegacyFCM
]
opts = [strategy: :one_for_one, name: YourApp.Supervisor]
Supervisor.start_link(children, opts)
end
end
```
If you skipped step two, include your configuration.
```
defmodule YourApp.Application do
@moduledoc false
use Application
@doc false
def start(_type, _args) do
children = [
{YourApp.LegacyFCM, legacy_fcm_opts()}
]
opts = [strategy: :one_for_one, name: YourApp.Supervisor]
Supervisor.start_link(children, opts)
end
defp legacy_fcm_opts do
[
adapter: Pigeon.LegacyFCM,
key: "your_fcm_key_here"
]
end
end
```
4. Create a notification.
```
msg = %{"body" => "your message"}
n = Pigeon.LegacyFCM.Notification.new("your device registration ID", msg)
```
5. Send the notification.
Pushes are synchronous and return the notification with
updated `:status` and `:response` keys. If `:status` is success, `:response`
will contain a keyword list of individual registration ID responses.
```
YourApp.LegacyFCM.push(n)
```
## Sending to Multiple Registration IDs
Pass in a list of registration IDs, as many as you want.
```
msg = %{"body" => "your message"}
n = Pigeon.LegacyFCM.Notification.new(["first ID", "second ID"], msg)
```
## Notification Struct
```
%Pigeon.LegacyFCM.Notification{
collapse_key: nil | String.t(),
dry_run: boolean,
message_id: nil | String.t(),
payload: %{...},
priority: :normal | :high,
registration_id: String.t() | [String.t(), ...],
response: [] | [{atom, String.t()}, ...] | atom,
restricted_package_name: nil | String.t(),
status: atom | nil,
time_to_live: non_neg_integer
}
```
## Notifications with Custom Data
FCM accepts both `notification` and `data` keys in its JSON payload. Set them like so:
```
notification = %{"body" => "your message"}
data = %{"key" => "value"}
Pigeon.LegacyFCM.Notification.new("registration ID", notification, data)
```
or
```
Pigeon.LegacyFCM.Notification.new("registration ID")
|> put_notification(%{"body" => "your message"})
|> put_data(%{"key" => "value"})
```
## Handling Push Responses
1. Pass an optional `on_response` callback in your options.
```
data = %{message: "your message"}
n = Pigeon.LegacyFCM.Notification.new("device registration ID", data)
YourApp.LegacyFCM.push(n, on_response: fn x -> IO.inspect(x) end)
```
2. Responses return the notification with an updated response.
```
on_response = fn(n) ->
case n.status do
:success ->
bad_regids = FCM.Notification.remove?(n)
to_retry = FCM.Notification.retry?(n)
# Handle updated regids, remove bad ones, etc
:unauthorized ->
# Bad FCM key
error ->
# Some other error
end
end
data = %{message: "your message"}
n = Pigeon.LegacyFCM.Notification.new("your device token", data)
YourApp.LegacyFCM.push(n, on_response: on_response)
```
## Error Responses
*Slightly modified from [FCM Server Reference](https://firebase.google.com/docs/cloud-messaging/http-server-ref#error-codes)*
| Reason | Description |
|----------------------------------|------------------------------|
| `:missing_registration` | Missing Registration Token |
| `:invalid_registration` | Invalid Registration Token |
| `:not_registered` | Unregistered Device |
| `:invalid_package_name` | Invalid Package Name |
| `:authentication_error` | Authentication Error |
| `:mismatch_sender_id` | Mismatched Sender |
| `:invalid_json` | Invalid JSON |
| `:message_too_big` | Message Too Big |
| `:invalid_data_key` | Invalid Data Key |
| `:invalid_ttl` | Invalid Time to Live |
| `:unavailable` | Timeout |
| `:internal_server_error` | Internal Server Error |
| `:device_message_rate_exceeded` | Message Rate Exceeded |
| `:topics_message_rate_exceeded` | Topics Message Rate Exceeded |
| `:unknown_error` | Unknown Error |
"""
defstruct queue: Pigeon.NotificationQueue.new(),
stream_id: 1,
socket: nil,
config: nil
@behaviour Pigeon.Adapter
alias Pigeon.{Configurable, NotificationQueue}
alias Pigeon.Http2.{Client, Stream}
@impl true
def init(opts) do
config = Pigeon.LegacyFCM.Config.new(opts)
Configurable.validate!(config)
state = %__MODULE__{config: config}
case connect_socket(config) do
{:ok, socket} ->
Configurable.schedule_ping(config)
{:ok, %{state | socket: socket}}
{:error, reason} ->
{:stop, reason}
end
end
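# Each push goes out on its own HTTP/2 stream; the notification is queued
# under the current stream id so the eventual end-of-stream response can be
# matched back to it in process_end_stream/2.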
@impl true
def handle_push(notification, %{config: config, queue: queue} = state) do
headers = Configurable.push_headers(config, notification, [])
payload = Configurable.push_payload(config, notification, [])
Client.default().send_request(state.socket, headers, payload)
new_q = NotificationQueue.add(queue, state.stream_id, notification)
state =
state
|> inc_stream_id()
|> Map.put(:queue, new_q)
{:noreply, state}
end
@impl true
def handle_info(:ping, state) do
Client.default().send_ping(state.socket)
Configurable.schedule_ping(state.config)
{:noreply, state}
end
def handle_info({:closed, _}, %{config: config} = state) do
case connect_socket(config) do
{:ok, socket} ->
Configurable.schedule_ping(config)
{:noreply, %{state | socket: socket}}
{:error, reason} ->
{:stop, reason}
end
end
def handle_info(msg, state) do
case Client.default().handle_end_stream(msg, state) do
{:ok, %Stream{} = stream} -> process_end_stream(stream, state)
_else -> {:noreply, state}
end
end
defp connect_socket(config), do: connect_socket(config, 0)
defp connect_socket(_config, 3), do: {:error, :timeout}
defp connect_socket(config, tries) do
case Configurable.connect(config) do
{:ok, socket} -> {:ok, socket}
{:error, _reason} -> connect_socket(config, tries + 1)
end
end
@doc false
def process_end_stream(%Stream{id: stream_id} = stream, state) do
%{queue: queue, config: config} = state
case NotificationQueue.pop(queue, stream_id) do
{nil, new_queue} ->
# Do nothing if no queued item for stream
{:noreply, %{state | queue: new_queue}}
{notif, new_queue} ->
Configurable.handle_end_stream(config, stream, notif)
{:noreply, %{state | queue: new_queue}}
end
end
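# Client-initiated HTTP/2 stream ids must be odd, hence the step of 2.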
@doc false
def inc_stream_id(%{stream_id: stream_id} = state) do
%{state | stream_id: stream_id + 2}
end
end
|
lib/pigeon/legacy_fcm.ex
| 0.876707
| 0.652428
|
legacy_fcm.ex
|
starcoder
|
defmodule Absinthe.IntegrationCase do
@moduledoc """
Integration tests consist of:
- A `.graphql` file containing a GraphQL document to execute
- A `.exs` file alongside with the same basename, containing the scenario(s) to execute
The files are located under the directory passed as the `:root`
option to `use Absinthe.IntegrationCase`.
## Setting the Schema
The schema for a GraphQL document can be set by adding a comment at
the beginning of the `.graphql` file, eg:
```
# Schema: ColorsSchema
```
The schema name provided must be under `Absinthe.Fixtures`. (For
example, the schema set in the example above would be
`Absinthe.Fixtures.ColorsSchema`.)
If no schema is set, the integration test will use the
`:default_schema` option passed to `use Absinthe.IntegrationCase`.
## Defining Scenarios
You can place one or more scenarios in the `.exs` file.
A normal scenario that checks the result of Absinthe's GraphQL
execution is a tuple of options for `Absinthe.run` (see
`Absinthe.run_opts`) and the expected result.
You can omit the options if you aren't setting any. For instance,
here's a simple result expectation:
```
{:ok, %{data: %{"defaultThing" => %{"name" => "Foo"}}}}
```
This could also have been written as:
```
{[], {:ok, %{data: %{"defaultThing" => %{"name" => "Foo"}}}}}
```
Here's another scenario example, this time making use of the options
to set a variable:
```
{[variables: %{"thingId" => "foo"}], {:ok, %{data: %{"thing" => %{"name" => "Foo"}}}}}
```
If you have more than one scenario, just wrap them in a list:
```
[
{:ok, %{data: %{"defaultThing" => %{"name" => "Foo"}}}},
{[variables: %{"thingId" => "foo"}], {:ok, %{data: %{"thing" => %{"name" => "Foo"}}}}}
]
```
Under normal circumstances, `assert_result/2` will be used to
compare the result of a scenario against the expectation. (Notably,
`assert_result` ignores error `:locations`, so they do not need to
be included in results.)
### Checking Exceptions
If a tuple containing `:raise` and a module name is provided as the
expected result for a scenario, `assert_raise/2` will be used
instead of the normal `Absinthe.Case.assert_result/2`; this can be
used to check scenarios with invalid resolvers, etc:
```
{:raise, Absinthe.ExecutionError}
```
Once again, with options for `Absinthe.run`, this would look like:
```
{[variables: %{"someVar" => "value}], {:raise, Absinthe.ExecutionError}}
```
### Complex Scenario Assertions
You can totally override the assertion logic and do your own
execution, just using the GraphQL reading and schema setting logic,
by defining a `run_scenario/2` function in your test module. It
should narrowly match the test definition (so that the rest of your
tests fall through to the normal `run_scenario/2` logic).
```
def run_scenario(%{name: "path/to/integration/name"} = definition, {options, expectation} = scenario) do
result = run(definition.graphql, definition.schema, options)
# Do something to check the expectation against the result, etc
end
```
(For more information on the values available in `definition` above,
see `Absinthe.IntegrationCase.Definition`.)
In the event that you don't care about the result value, set the
expectation to `:custom_assertion` (this is just a convention). For
example, here's a scenario using a variable that uses a custom
`run_scenario` match to provide its own custom assertion logic:
```
{[variables: %{"name" => "something"}], :custom_assertion}
```
"""
defp term_from_file!(filename) do
elem(Code.eval_file(filename), 0)
end
defp definitions(root, default_schema) do
for graphql_file <- Path.wildcard(Path.join(root, "**/*.graphql")) do
dirname = Path.dirname(graphql_file)
basename = Path.basename(graphql_file, ".graphql")
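# e.g. "<root>/foo/bar.graphql" -> "foo/bar"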
integration_name =
String.replace_leading(dirname, root, "")
|> Path.join(basename)
|> String.slice(1..-1)
graphql = File.read!(graphql_file)
raw_scenarios =
Path.join(dirname, basename <> ".exs")
|> term_from_file!
__MODULE__.Definition.create(
integration_name,
graphql,
default_schema,
raw_scenarios
)
end
end
def scenario_tests(definition) do
count = length(definition.scenarios)
for {scenario, index} <- Enum.with_index(definition.scenarios) do
quote do
test unquote(definition.name) <> ", scenario #{unquote(index) + 1} of #{unquote(count)}" do
assert_scenario(unquote(Macro.escape(definition)), unquote(Macro.escape(scenario)))
end
end
end
end
defmacro __using__(opts) do
root = Keyword.fetch!(opts, :root)
default_schema = Macro.expand(Keyword.fetch!(opts, :default_schema), __ENV__)
definitions = definitions(root, default_schema)
[
quote do
use Absinthe.Case, unquote(opts)
@before_compile unquote(__MODULE__)
end,
for definition <- definitions do
scenario_tests(definition)
end
]
end
defmacro __before_compile__(_env) do
quote do
def assert_scenario(definition, {options, {:raise, exception}}) when is_list(options) do
assert_raise(exception, fn -> run(definition.graphql, definition.schema, options) end)
end
def assert_scenario(definition, {options, result}) when is_list(options) do
assert_result(
result,
run(definition.graphql, definition.schema, options)
)
end
end
end
end
|
test/support/integration_case.ex
| 0.898273
| 0.963265
|
integration_case.ex
|
starcoder
|
defmodule Pow.Ecto.Schema.Migration do
@moduledoc """
Generates schema migration content.
## Configuration options
* `:repo` - the ecto repo to use. This value defaults to the derived
context base repo from the `context_base` argument in `gen/2`.
* `:table` - the ecto table name, defaults to "users".
* `:attrs` - list of attributes, defaults to the results from
`Pow.Ecto.Schema.Fields.attrs/1`.
* `:indexes` - list of indexes, defaults to the results from
`Pow.Ecto.Schema.Fields.indexes/1`.
"""
alias Pow.{Config, Ecto.Schema.Fields}
@template """
defmodule <%= inspect schema.repo %>.Migrations.<%= schema.migration_name %> do
use Ecto.Migration
def change do
create table(:<%= schema.table %><%= if schema.binary_id do %>, primary_key: false<% end %>) do
<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true
<% end %><%= for {k, v} <- schema.attrs do %> add <%= inspect k %>, <%= inspect v %><%= schema.migration_defaults[k] %>
<% end %><%= for {_, i, _, s} <- schema.assocs do %> add <%= if(String.ends_with?(inspect(i), "_id"), do: inspect(i), else: inspect(i) <> "_id") %>, references(<%= inspect(s) %>), on_delete: :nothing<%= if schema.binary_id do %>, type: :binary_id<% end %>
<% end %>
timestamps()
end
<%= for index <- schema.indexes do %>
<%= index %><% end %>
end
end
"""
@doc """
Generates migration file content.
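A hypothetical pipeline (context base and table name are illustrative):
    MyApp
    |> Pow.Ecto.Schema.Migration.new("users")
    |> Pow.Ecto.Schema.Migration.gen()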
"""
@spec gen(map()) :: binary()
def gen(schema) do
EEx.eval_string(unquote(@template), schema: schema)
end
@doc """
Generates migration schema map.
"""
@spec new(atom(), binary(), Config.t()) :: map()
def new(context_base, schema_plural, config \\ []) do
repo = Config.get(config, :repo, Module.concat([context_base, "Repo"]))
attrs = Config.get(config, :attrs, Fields.attrs(config))
indexes = Config.get(config, :indexes, Fields.indexes(config))
migration_name = name(schema_plural)
schema(context_base, repo, schema_plural, migration_name, attrs, indexes, binary_id: config[:binary_id])
end
defp name(schema_plural), do: "Create#{Macro.camelize(schema_plural)}"
@doc """
Generates a schema map to be used with the schema template.
"""
@spec schema(atom(), atom(), binary(), binary(), list(), list(), Keyword.t()) :: map()
def schema(context_base, repo, table, migration_name, attrs, indexes, opts) do
migration_attrs = migration_attrs(attrs)
binary_id = opts[:binary_id]
migration_defaults = defaults(migration_attrs)
{assocs, attrs} = partition_attrs(context_base, migration_attrs)
indexes = migration_indexes(indexes, table)
%{
migration_name: migration_name,
repo: repo,
table: table,
binary_id: binary_id,
attrs: attrs,
migration_defaults: migration_defaults,
assocs: assocs,
indexes: indexes
}
end
defp migration_attrs(attrs) do
attrs
|> Enum.reject(&is_virtual?/1)
|> Enum.map(&to_migration_attr/1)
end
defp is_virtual?({_name, _type}), do: false
defp is_virtual?({_name, _type, defaults}) do
Keyword.get(defaults, :virtual, false)
end
defp to_migration_attr({name, type}) do
{name, type, ""}
end
defp to_migration_attr({name, type, []}) do
to_migration_attr({name, type})
end
defp to_migration_attr({name, type, defaults}) do
defaults = Enum.map_join(defaults, ", ", fn {k, v} -> "#{k}: #{v}" end)
{name, type, ", #{defaults}"}
end
defp defaults(attrs) do
Enum.map(attrs, fn {key, _value, defaults} ->
{key, defaults}
end)
end
defp partition_attrs(context_base, attrs) do
{assocs, attrs} =
Enum.split_with(attrs, fn
{_, {:references, _}, _} -> true
_ -> false
end)
attrs = Enum.map(attrs, fn {key_id, type, _defaults} -> {key_id, type} end)
assocs =
Enum.map(assocs, fn {key_id, {:references, source}, _} ->
key = String.replace(Atom.to_string(key_id), "_id", "")
context = Macro.camelize(source)
schema = Macro.camelize(key)
module = Module.concat([context_base, context, schema])
{String.to_atom(key), key_id, inspect(module), source}
end)
{assocs, attrs}
end
defp migration_indexes(indexes, table) do
Enum.map(indexes, &to_migration_index(table, &1))
end
defp to_migration_index(table, {key_or_keys, true}),
do: "create unique_index(:#{table}, #{inspect(List.wrap(key_or_keys))})"
end
|
lib/pow/ecto/schema/migration.ex
| 0.667798
| 0.405743
|
migration.ex
|
starcoder
|
defmodule Philtre.Wrapper do
@moduledoc """
Defines a view and a set of utility functions to test how the editor
component interacts with the view.
Editor tests should only ever interact with the component via functions defined here.
"""
use Phoenix.LiveView
import Phoenix.LiveView.Helpers
import Phoenix.LiveViewTest
alias Editor.Block
alias Phoenix.LiveViewTest.View
@doc false
@impl Phoenix.LiveView
def mount(:not_mounted_at_router, _session, socket) do
{:ok, assign(socket, :editor, Editor.new())}
end
@doc false
@impl Phoenix.LiveView
def handle_info({:update, %Editor{} = editor}, socket) do
{:noreply, assign(socket, :editor, editor)}
end
@doc false
@impl Phoenix.LiveView
def render(assigns) do
~H"""
<.live_component module={Editor} id={@editor.id} editor={@editor} />
"""
end
@doc """
Sets the editor struct of the component to the specified value.
Convenient when we want to quickly get to a complex state of the editor
struct, without performing individual updates.
"""
def set_editor(%View{} = view, %Editor{} = editor) do
send(view.pid, {:update, editor})
end
@doc """
Retrieves the current editor from the component state
"""
def get_editor(%View{} = view) do
%{socket: %{assigns: %{editor: %Editor{} = editor}}} = :sys.get_state(view.pid)
editor
end
def flush(%View{} = view) do
:sys.get_state(view.pid)
end
@doc """
Retrieves block at specified index
"""
def block_at(%View{} = view, index) do
%Editor{blocks: blocks} = get_editor(view)
Enum.at(blocks, index)
end
@doc """
Sends a newline command at the given location
"""
def trigger_split_block(%View{} = view, :end_of_page) do
%Editor{} = editor = get_editor(view)
trigger_split_block(view, List.last(editor.blocks), :end)
end
@model %{selection: "[id^=editor__selection__]", history: "[id^=editor__history__]"}
@doc """
Sends a newline command at the given location
"""
def trigger_split_block(%View{} = view, %_{cells: _} = block, :end) do
end_cell = Enum.at(block.cells, -1)
trigger_split_block(view, block, %{
selection: %{
start_id: end_cell.id,
end_id: end_cell.id,
start_offset: String.length(end_cell.text),
end_offset: String.length(end_cell.text)
}
})
end
def trigger_split_block(%View{} = view, index, %{selection: selection})
when is_integer(index) do
trigger_split_block(view, block_at(view, index), %{selection: selection})
end
def trigger_split_block(%View{} = view, %_{} = block, %{selection: selection}) do
view
|> element("##{block.id}")
|> render_hook("split_block", %{"selection" => selection})
end
@doc """
Updates cell at specified location with specified value
"""
def trigger_update(%View{} = view, index, %{selection: selection, cells: cells})
when is_integer(index) do
trigger_update(view, block_at(view, index), %{selection: selection, cells: cells})
end
def trigger_update(%View{} = view, %_{} = block, %{selection: selection, cells: cells}) do
view
|> element("##{block.id}")
|> render_hook("update", %{"selection" => selection, "cells" => cells})
end
@doc """
Simulates downgrade of a block (pressing backspace from index 0 of the first cell)
"""
def trigger_backspace_from_start(%View{} = view, index) when is_integer(index) do
trigger_backspace_from_start(view, block_at(view, index))
end
def trigger_backspace_from_start(
%View{} = view,
%Block{cells: [%Block.Cell{} = cell | _]} = block
) do
view
|> element("##{block.id}")
|> render_hook("backspace_from_start", %{
start_id: cell.id,
end_id: cell.id,
start_offset: 0,
end_offset: 0
})
end
def trigger_undo(%View{} = view) do
view
|> element(@model.history)
|> render_hook("undo")
end
def trigger_redo(%View{} = view) do
view
|> element(@model.history)
|> render_hook("redo")
end
@doc """
Simulates selection of a block
"""
def select_blocks(%View{} = view, block_ids) when is_list(block_ids) do
view
|> element(@model.selection)
|> render_hook("select_blocks", %{"block_ids" => block_ids})
end
@doc """
Simulates copy action of selected blocks
"""
def copy_blocks(%View{} = view, block_ids) when is_list(block_ids) do
view
|> element(@model.selection)
|> render_hook("copy_blocks", %{"block_ids" => block_ids})
end
@doc """
Simulates paste action of selected blocks
"""
def paste_blocks(%View{} = view, index, %{selection: selection}) when is_integer(index) do
paste_blocks(view, block_at(view, index), %{selection: selection})
end
def paste_blocks(%View{} = view, %Block{} = block, %{selection: selection}) do
view
|> element("##{block.id}")
|> render_hook("paste_blocks", %{"selection" => selection})
end
def block_text(%View{} = view, index) when is_integer(index) do
%_{} = block = block_at(view, index)
Editor.text(block)
end
end
|
test/support/wrapper.ex
| 0.814533
| 0.624365
|
wrapper.ex
|
starcoder
|
defmodule Smoke.Metrics do
@moduledoc """
Creates Metrics from a list of events.
"""
def filter(events, {tag, value}) do
events
|> Stream.filter(fn
{_time, _measurement, metadata} ->
Enum.any?(metadata, fn
{^tag, ^value} -> true
_ -> false
end)
end)
end
def filter(events, [head | tail]) do
events
|> filter(head)
|> filter(tail)
end
def filter(events, []), do: events
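# Chunks a time-ordered event stream into buckets keyed by the timestamp
# truncated to the given precision (:month | :day | :hour | :minute | :second).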
def time_bucket(events, precision) do
Stream.chunk_by(events, fn {time, _measurement, _metadata} ->
truncate(time, precision)
end)
|> Stream.map(fn [{time, _measurement, _metadata} | _tail] = events ->
%{time: truncate(time, precision), events: events}
end)
end
def truncate(date_time, :month), do: %{truncate(date_time, :day) | day: 1}
def truncate(date_time, :day), do: %{truncate(date_time, :hour) | hour: 0}
def truncate(date_time, :hour), do: %{truncate(date_time, :minute) | minute: 0}
def truncate(date_time, :minute), do: %{truncate(date_time, :second) | second: 0}
def truncate(date_time, precision), do: DateTime.truncate(date_time, precision)
def tags(events) do
events
|> Enum.reduce(MapSet.new(), fn
{_time, _measurement, metadata}, acc ->
metadata |> Map.keys() |> Enum.into(acc)
end)
|> MapSet.to_list()
end
def apply_to_bucketed_events(bucketed_events, key, metrics_function) do
Stream.map(bucketed_events, fn %{time: time, events: events} ->
%{time: time, metric: metrics_function.(events, key)}
end)
end
def measurements(events) do
events
|> Enum.reduce(MapSet.new(), fn
{_time, measurement, _metadata}, acc ->
measurement |> Map.keys() |> Enum.into(acc)
end)
|> MapSet.to_list()
end
def tag_values(events, tag_name) do
events
|> Enum.reduce(MapSet.new(), fn
{_time, _measurement, metadata}, acc ->
value = Map.get(metadata, tag_name)
MapSet.put(acc, value)
end)
|> MapSet.to_list()
end
def counter(events, key) do
get_measure = measurement_value(key)
events
|> Stream.map(get_measure)
|> Enum.count(fn x -> not is_nil(x) end)
end
def sum(events, key) do
get_measure = measurement_value(key, 0)
events
|> Stream.map(get_measure)
|> Enum.sum()
end
def last_value([{_time, measurement, _metadata} = _head | tail], key) do
if Map.has_key?(measurement, key) do
Map.get(measurement, key)
else
last_value(tail, key)
end
end
def last_value(_events, _key), do: nil
def statistics(events, key) do
get_measure = measurement_value(key)
measurements =
events
|> Enum.map(get_measure)
|> Enum.filter(fn measure -> not is_nil(measure) end)
%{
median: Statistics.median(measurements),
mean: Statistics.mean(measurements),
max: Statistics.max(measurements),
min: Statistics.min(measurements),
mode: Statistics.mode(measurements),
variance: Statistics.variance(measurements),
p95: Statistics.percentile(measurements, 95),
p99: Statistics.percentile(measurements, 99)
}
end
def distribution(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.hist()
end
def max(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.max()
end
def mean(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.mean()
end
def median(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.median()
end
def min(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.min()
end
def mode(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.mode()
end
def p95(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.percentile(95)
end
def p99(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.percentile(99)
end
def variance(events, key) do
get_measure = measurement_value(key)
events
|> Enum.map(get_measure)
|> Statistics.variance()
end
def first_event_time([]), do: nil
def first_event_time(events) do
events |> List.last() |> elem(0)
end
defp measurement_value(key, default \\ nil) do
fn {_time, measurement, _metadata} ->
Map.get(measurement, key, default)
end
end
end
|
lib/smoke/metrics.ex
| 0.81899
| 0.507446
|
metrics.ex
|
starcoder
|
defmodule LruCache do
@moduledoc """
This modules implements a simple LRU cache, using 2 ets tables for it.
For using it, you need to start it:
iex> LruCache.start_link(:my_cache, 1000)
Or add it to your supervisor tree, like: `worker(LruCache, [:my_cache, 1000])`
## Using
iex> LruCache.start_link(:my_cache, 1000)
{:ok, #PID<0.60.0>}
iex> LruCache.put(:my_cache, "id", "value")
:ok
iex> LruCache.get(:my_cache, "id", _touch = false)
"value"
## Design
The first ETS table saves the key/value pairs; the second saves the insertion order of the elements.
"""
use GenServer
defstruct table: nil, ttl_table: nil, size: 0
@doc """
Creates an LRU of the given size as part of a supervision tree with a registered name
"""
def start_link(name, size) do
Agent.start_link(__MODULE__, :init, [name, size], [name: name])
end
@doc """
Stores the given `value` under `key` in `cache`. If `cache` already has `key`, the stored
`value` is replaced by the new one. This updates the order of LRU cache.
"""
def put(name, key, value), do: Agent.get(name, __MODULE__, :handle_put, [key, value])
@doc """
Updates a `value` in `cache`. If `key` is not present in `cache` then nothing is done.
`touch` defines whether the LRU order should be updated. The function assumes
that the element already exists in the cache.
"""
def update(name, key, value, touch \\ true) do
if :ets.update_element(name, key, {3, value}) do
touch && Agent.get(name, __MODULE__, :handle_touch, [key])
end
:ok
end
@doc """
Returns the `value` associated with `key` in `cache`. If `cache` does not contain `key`,
returns `nil`. `touch` defines whether the LRU order should be updated.
"""
def get(name, key, touch \\ true) do
case :ets.lookup(name, key) do
[{_, _, value}] ->
touch && Agent.get(name, __MODULE__, :handle_touch, [key])
value
[] ->
nil
end
end
@doc """
Removes the entry stored under the given `key` from cache.
"""
def delete(name, key), do: Agent.get(name, __MODULE__, :handle_delete, [key])
@doc false
def init(name, size) do
ttl_table = :"#{name}_ttl"
:ets.new(ttl_table, [:named_table, :ordered_set])
:ets.new(name, [:named_table, :public, {:read_concurrency, true}])
%LruCache{ttl_table: ttl_table, table: name, size: size}
end
@doc false
def handle_put(state = %{table: table}, key, value) do
delete_ttl(state, key)
uniq = insert_ttl(state, key)
:ets.insert(table, {key, uniq, value})
clean_oversize(state)
:ok
end
@doc false
def handle_touch(state = %{table: table}, key) do
delete_ttl(state, key)
uniq = insert_ttl(state, key)
:ets.update_element(table, key, [{2, uniq}])
:ok
end
@doc false
def handle_delete(state = %{table: table}, key) do
delete_ttl(state, key)
:ets.delete(table, key)
:ok
end
defp delete_ttl(%{ttl_table: ttl_table, table: table}, key) do
case :ets.lookup(table, key) do
[{_, old_uniq, _}] ->
:ets.delete(ttl_table, old_uniq)
_ ->
nil
end
end
defp insert_ttl(%{ttl_table: ttl_table}, key) do
uniq = :erlang.unique_integer([:monotonic])
:ets.insert(ttl_table, {uniq, key})
uniq
end
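# Evicts the least-recently-used entry: the smallest key in the ordered
# ttl table belongs to the oldest insertion/touch.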
defp clean_oversize(%{ttl_table: ttl_table, table: table, size: size}) do
if :ets.info(table, :size) > size do
oldest_tstamp = :ets.first(ttl_table)
[{_, old_key}] = :ets.lookup(ttl_table, oldest_tstamp)
:ets.delete(ttl_table, oldest_tstamp)
:ets.delete(table, old_key)
true
else
  nil
end
end
end
|
lib/lru_cache.ex
| 0.746971
| 0.533033
|
lru_cache.ex
|
starcoder
|
defmodule Phoenix.PubSub.ConduitAMQP do
use Supervisor
require Logger
@moduledoc """
Phoenix PubSub adapter based on Conduit and AMQP.
To use it as your PubSub adapter, simply add it to your Endpoint's config:
config :my_app, MyApp.Endpoint,
pubsub: [name: MyApp.PubSub,
adapter: Phoenix.PubSub.ConduitAMQP]
## Options
* `:name` - The registered name and optional node name for the PubSub
processes, for example: `MyApp.PubSub`, `{MyApp.PubSub, :node@host}`.
When only a server name is provided, the node name defaults to `node()`.
* `:pool_size` - Both the size of the local pubsub server pool and subscriber
shard size. Defaults to the number of schedulers (cores). A single pool is
often enough for most use-cases, but for high subscriber counts on a single
topic or greater than 1M clients, a pool size equal to the number of
schedulers (cores) is a well rounded size.
"""
def start_link(name, opts) do
supervisor_name = Module.concat(name, Supervisor)
Supervisor.start_link(__MODULE__, [name, opts], name: supervisor_name)
end
@doc false
def init([server_name, opts]) do
scheduler_count = :erlang.system_info(:schedulers)
pool_size = Keyword.get(opts, :pool_size, scheduler_count)
node_name = opts[:node_name]
broker = Keyword.fetch!(opts, :broker)
dispatch_rules = [
{:broadcast, __MODULE__, [opts[:fastlane], server_name, pool_size]},
{:direct_broadcast, __MODULE__, [opts[:fastlane], server_name, pool_size]},
{:node_name, __MODULE__, [node_name]}
]
table = :phoenix_pubsub_conduit_amqp
if :ets.info(table) == :undefined do
:ets.new(table, [:set, :public, :named_table, read_concurrency: true])
:ets.insert(
table,
{:local_state,
%{
server_name: server_name,
namespace: "phx",
node_ref: :crypto.strong_rand_bytes(24)
}}
)
:ets.insert(table, {:broker, broker})
end
children = [
supervisor(Phoenix.PubSub.LocalSupervisor, [server_name, pool_size, dispatch_rules])
]
supervise(children, strategy: :rest_for_one)
end
@doc false
def node_name(nil), do: node()
def node_name(configured_name), do: configured_name
@doc false
def direct_broadcast(fastlane, server_name, pool_size, node_name, from_pid, topic, msg) do
do_broadcast(fastlane, server_name, pool_size, node_name, from_pid, topic, msg)
end
@doc false
def broadcast(fastlane, server_name, pool_size, from_pid, topic, msg) do
do_broadcast(fastlane, server_name, pool_size, nil, from_pid, topic, msg)
end
@conduit_amqp_msg_vsn "v1.0"
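# Broadcasts travel as a versioned term tuple through the Conduit broker;
# a matching subscriber on the remote node presumably decodes it and fans
# the message out to its local pubsub pool.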
defp do_broadcast(fastlane, _server_name, pool_size, node_name, from_pid, topic, msg) do
import Conduit.Message
[broker: broker] = :ets.lookup(:phoenix_pubsub_conduit_amqp, :broker)
conduit_amqp_msg =
{@conduit_amqp_msg_vsn, node_name, fastlane, pool_size, from_pid, topic, msg}
message =
%Conduit.Message{}
|> put_content_type("application/x-erlang-binary")
|> put_body(conduit_amqp_msg)
broker.publish(:phoenix_pubsub_broadcast, message)
end
def queue_name(_ \\ nil) do
:inet.gethostname()
|> elem(1)
|> to_string()
|> String.replace("-", "_")
end
end
|
lib/phoenix/pubsub/conduit_amqp.ex
| 0.796767
| 0.40925
|
conduit_amqp.ex
|
starcoder
|
defmodule Stripe.Plans do
@moduledoc """
A subscription plan contains the pricing information for different products
and feature levels on your site. For example, you might have a $10/month
plan for basic features and a different $20/month plan for premium features.
"""
@endpoint "plans"
@doc """
You can create plans easily via the plan management page of the Stripe
dashboard. Plan creation is also accessible via the API if you need to
create plans on the fly.
## Arguments
- `id` - required - Unique string of your choice that will be used to
identify this plan when subscribing a customer. This could be an
identifier like "gold" or a primary key from your own database.
- `amount` - required - A positive integer in cents (or 0 for a free plan)
representing how much to charge (on a recurring basis).
- `currency` - required - 3-letter ISO code for currency.
- `interval` - required - Specifies billing frequency. Either week, month
or year.
- `interval_count` - optional - default is 1 - The number of intervals
between each subscription billing. For example, interval=month and
interval_count=3 bills every 3 months. Maximum of one year
- `interval` - allowed - (1 year, 12 months, or 52 weeks).
- `name` - required - Name of the plan, to be displayed on invoices and in
the web interface.
- `trial_period_days` - optional - Specifies a trial period in (an integer
number of) days. If you include a trial period, the customer won't be
billed for the first time until the trial period ends. If the customer
cancels before the trial period is over, she'll never be billed at all.
- `metadata` - optional - default is { } - A set of key/value pairs that
you can attach to a plan object. It can be useful for storing
additional information about the plan in a structured format.
- `statement_description` - optional - default is null - An arbitrary
string to be displayed on your customers' credit card statements
(alongside your company name) for charges created by this plan. This
may be up to 15 characters. As an example, if your website is RunClub
and you specify Silver Plan, the user will see RUNCLUB SILVER PLAN.
The statement description may not include `<>"'` characters. While
most banks display this information consistently, some may display
it incorrectly or not at all.
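## Example
A hypothetical call (values are illustrative; currency and interval fall back
to the defaults set in `create/1`):
    Stripe.Plans.create(
      id: "gold",
      amount: 1000,
      name: "Gold Plan"
    )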
"""
def create(params) do
# default the currency and interval
params = Keyword.put_new params, :currency, "USD"
params = Keyword.put_new params, :interval, "month"
Stripe.make_request(:post, @endpoint, params)
|> Stripe.Util.handle_stripe_response
end
def list(limit \\ 10) do
Stripe.make_request(:get, "#{@endpoint}?limit=#{limit}")
|> Stripe.Util.handle_stripe_response
end
def delete(id) do
Stripe.make_request(:delete, "#{@endpoint}/#{id}")
|> Stripe.Util.handle_stripe_response
end
def change(id, params) do
Stripe.make_request(:post, "#{@endpoint}/#{id}", params)
|> Stripe.Util.handle_stripe_response
end
end
|
lib/stripe/plans.ex
| 0.829354
| 0.570541
|
plans.ex
|
starcoder
|
defmodule Tensorflow.CreateSessionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
graph_def: Tensorflow.GraphDef.t() | nil,
config: Tensorflow.ConfigProto.t() | nil,
target: String.t()
}
defstruct [:graph_def, :config, :target]
field(:graph_def, 1, type: Tensorflow.GraphDef)
field(:config, 2, type: Tensorflow.ConfigProto)
field(:target, 3, type: :string)
end
defmodule Tensorflow.CreateSessionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
graph_version: integer
}
defstruct [:session_handle, :graph_version]
field(:session_handle, 1, type: :string)
field(:graph_version, 2, type: :int64)
end
defmodule Tensorflow.ExtendSessionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
graph_def: Tensorflow.GraphDef.t() | nil,
current_graph_version: integer
}
defstruct [:session_handle, :graph_def, :current_graph_version]
field(:session_handle, 1, type: :string)
field(:graph_def, 2, type: Tensorflow.GraphDef)
field(:current_graph_version, 3, type: :int64)
end
defmodule Tensorflow.ExtendSessionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
new_graph_version: integer
}
defstruct [:new_graph_version]
field(:new_graph_version, 4, type: :int64)
end
defmodule Tensorflow.RunStepRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
feed: [Tensorflow.NamedTensorProto.t()],
fetch: [String.t()],
target: [String.t()],
options: Tensorflow.RunOptions.t() | nil,
partial_run_handle: String.t(),
store_errors_in_response_body: boolean,
request_id: integer
}
defstruct [
:session_handle,
:feed,
:fetch,
:target,
:options,
:partial_run_handle,
:store_errors_in_response_body,
:request_id
]
field(:session_handle, 1, type: :string)
field(:feed, 2, repeated: true, type: Tensorflow.NamedTensorProto)
field(:fetch, 3, repeated: true, type: :string)
field(:target, 4, repeated: true, type: :string)
field(:options, 5, type: Tensorflow.RunOptions)
field(:partial_run_handle, 6, type: :string)
field(:store_errors_in_response_body, 7, type: :bool)
field(:request_id, 8, type: :int64)
end
defmodule Tensorflow.RunStepResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tensor: [Tensorflow.NamedTensorProto.t()],
metadata: Tensorflow.RunMetadata.t() | nil,
status_code: Tensorflow.Error.Code.t(),
status_error_message: String.t()
}
defstruct [:tensor, :metadata, :status_code, :status_error_message]
field(:tensor, 1, repeated: true, type: Tensorflow.NamedTensorProto)
field(:metadata, 2, type: Tensorflow.RunMetadata)
field(:status_code, 3, type: Tensorflow.Error.Code, enum: true)
field(:status_error_message, 4, type: :string)
end
defmodule Tensorflow.PartialRunSetupRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
feed: [String.t()],
fetch: [String.t()],
target: [String.t()],
request_id: integer
}
defstruct [:session_handle, :feed, :fetch, :target, :request_id]
field(:session_handle, 1, type: :string)
field(:feed, 2, repeated: true, type: :string)
field(:fetch, 3, repeated: true, type: :string)
field(:target, 4, repeated: true, type: :string)
field(:request_id, 5, type: :int64)
end
defmodule Tensorflow.PartialRunSetupResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
partial_run_handle: String.t()
}
defstruct [:partial_run_handle]
field(:partial_run_handle, 1, type: :string)
end
defmodule Tensorflow.CloseSessionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t()
}
defstruct [:session_handle]
field(:session_handle, 1, type: :string)
end
defmodule Tensorflow.CloseSessionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.ResetRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
container: [String.t()],
device_filters: [String.t()]
}
defstruct [:container, :device_filters]
field(:container, 1, repeated: true, type: :string)
field(:device_filters, 2, repeated: true, type: :string)
end
defmodule Tensorflow.ResetResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.ListDevicesRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t()
}
defstruct [:session_handle]
field(:session_handle, 1, type: :string)
end
defmodule Tensorflow.ListDevicesResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
local_device: [Tensorflow.DeviceAttributes.t()],
remote_device: [Tensorflow.DeviceAttributes.t()]
}
defstruct [:local_device, :remote_device]
field(:local_device, 1, repeated: true, type: Tensorflow.DeviceAttributes)
field(:remote_device, 2, repeated: true, type: Tensorflow.DeviceAttributes)
end
defmodule Tensorflow.MakeCallableRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
options: Tensorflow.CallableOptions.t() | nil,
request_id: integer
}
defstruct [:session_handle, :options, :request_id]
field(:session_handle, 1, type: :string)
field(:options, 2, type: Tensorflow.CallableOptions)
field(:request_id, 3, type: :int64)
end
defmodule Tensorflow.MakeCallableResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
handle: integer
}
defstruct [:handle]
field(:handle, 1, type: :int64)
end
defmodule Tensorflow.RunCallableRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
handle: integer,
feed: [Tensorflow.TensorProto.t()],
request_id: integer
}
defstruct [:session_handle, :handle, :feed, :request_id]
field(:session_handle, 1, type: :string)
field(:handle, 2, type: :int64)
field(:feed, 3, repeated: true, type: Tensorflow.TensorProto)
field(:request_id, 4, type: :int64)
end
defmodule Tensorflow.RunCallableResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
fetch: [Tensorflow.TensorProto.t()],
metadata: Tensorflow.RunMetadata.t() | nil
}
defstruct [:fetch, :metadata]
field(:fetch, 1, repeated: true, type: Tensorflow.TensorProto)
field(:metadata, 2, type: Tensorflow.RunMetadata)
end
defmodule Tensorflow.ReleaseCallableRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
session_handle: String.t(),
handle: integer
}
defstruct [:session_handle, :handle]
field(:session_handle, 1, type: :string)
field(:handle, 2, type: :int64)
end
defmodule Tensorflow.ReleaseCallableResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
|
lib/tensorflow/core/protobuf/master.pb.ex
| 0.780913
| 0.607838
|
master.pb.ex
|
starcoder
|
defmodule ESpec.Assertions.Be do
@moduledoc """
Defines 'be' assertion.
it do: expect(2).to be :>, 1
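Date and time subjects may also take a granularity delta (a hedged sketch;
`date1`/`date2` are hypothetical and the granularity atoms are assumed to
follow `ESpec.DatesTimes.Comparator`):
  it do: expect(date1).to be :>, date2, days: 5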
"""
use ESpec.Assertions.Interface
alias ESpec.DatesTimes.Comparator
defp match(subject, [op, val, [{granularity, delta}]]) do
actual_delta =
subject
|> Comparator.diff(val, granularity)
|> abs()
result = apply(Kernel, op, [subject, val]) && apply(Kernel, op, [actual_delta, delta])
{result, {granularity, actual_delta}}
end
defp match(subject, [op, val, {granularity, delta}]) do
actual_delta =
subject
|> Comparator.diff(val, granularity)
|> abs()
result = apply(Kernel, op, [subject, val]) && apply(Kernel, op, [actual_delta, delta])
{result, {granularity, actual_delta}}
end
defp match(%Date{} = subject, [op, %Date{} = val]), do: match_date_times(subject, [op, val])
defp match(%Time{} = subject, [op, %Time{} = val]), do: match_date_times(subject, [op, val])
defp match(%DateTime{} = subject, [op, %DateTime{} = val]),
do: match_date_times(subject, [op, val])
defp match(%NaiveDateTime{} = subject, [op, %NaiveDateTime{} = val]),
do: match_date_times(subject, [op, val])
defp match(subject, [op, val]) do
result = apply(Kernel, op, [subject, val])
{result, result}
end
defp match_date_times(subject, [op, val]) do
delta = subject |> Comparator.diff(val, :microseconds)
result = apply(Kernel, op, [delta, 0])
{result, result}
end
defp success_message(subject, [op, val, [{granularity, delta}]], _result, positive) do
to = if positive, do: "is true", else: "is false"
"`#{inspect(subject)} #{op} #{inspect(val)}` #{to} with delta `[#{granularity}: #{delta}]`."
end
defp success_message(subject, [op, val, {granularity, delta}], _result, positive) do
to = if positive, do: "is true", else: "is false"
"`#{inspect(subject)} #{op} #{inspect(val)}` #{to} with delta `{:#{granularity}, #{delta}}`."
end
defp success_message(subject, [op, val], _result, positive) do
to = if positive, do: "is true", else: "is false"
"`#{inspect(subject)} #{op} #{inspect(val)}` #{to}."
end
defp error_message(
subject,
[op, val, [{granularity, delta}]],
{actual_granularity, actual_delta},
positive
) do
"Expected `#{inspect(subject)} #{op} #{inspect(val)}` to be `#{positive}` but got `#{
!positive
}` with delta `[#{granularity}: #{delta}]`. The actual delta is [#{actual_granularity}: #{
actual_delta
}], with an inclusive boundary."
end
defp error_message(
subject,
[op, val, {granularity, delta}],
{actual_granularity, actual_delta},
positive
) do
"Expected `#{inspect(subject)} #{op} #{inspect(val)}` to be `#{positive}` but got `#{
!positive
}` with delta `{:#{granularity}, #{delta}}`. The actual delta is {:#{actual_granularity}, #{
actual_delta
}}, with an inclusive boundary."
end
defp error_message(subject, [op, val], _result, positive) do
"Expected `#{inspect(subject)} #{op} #{inspect(val)}` to be `#{positive}` but got `#{
!positive
}`."
end
end
|
lib/espec/assertions/be.ex
| 0.81231
| 0.786213
|
be.ex
|
starcoder
|
defmodule AshPolicyAuthorizer.Check do
@moduledoc """
A behaviour for declaring checks, which can be used to easily construct
authorization rules.
If a check can be expressed simply as a function of the actor, or the context of the request,
see `AshPolicyAuthorizer.SimpleCheck` for an easy way to write that check.
If a check can be expressed simply with a filter statement, see `AshPolicyAuthorizer.FilterCheck`
for an easy way to write that check.
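A minimal hand-written (`:manual`) check might look like this sketch (the
module name and actor shape are hypothetical):
    defmodule MyApp.Checks.ActorIsAdmin do
      use AshPolicyAuthorizer.Check
      def describe(_opts), do: "actor is an admin"
      def strict_check(%{admin?: true}, _authorizer, _opts), do: {:ok, true}
      def strict_check(_actor, _authorizer, _opts), do: {:ok, false}
    end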
"""
@type options :: Keyword.t()
@type authorizer :: AshPolicyAuthorizer.Authorizer.t()
@type check_type :: :simple | :filter | :manual
@doc """
Strict checks should be cheap, and should never result in external calls (like database or api)
It should return `{:ok, true}` if it can tell that the request is authorized, and `{:ok, false}` if
it can tell that it is not. If unsure, it should return `{:ok, :unknown}`
"""
@callback strict_check(Ash.actor(), authorizer(), options) :: {:ok, boolean | :unknown}
@doc """
An optional callback that allows the check to work with policies set to `access_type :filter`
Return a keyword list filter that will be applied to the query being made, and will scope the results to match the rule
"""
@callback auto_filter(Ash.actor(), authorizer(), options()) :: Keyword.t()
@doc """
An optional callback that allows the check to work with policies set to `access_type :runtime`
Takes a list of records, and returns `{:ok, true}` if they are all authorized, or `{:ok, list}` containing the list
of records that are authorized. You can also just return the whole list, `{:ok, true}` is just a shortcut.
Can also return `{:error, error}` if something goes wrong
"""
@callback check(Ash.actor(), list(Ash.record()), map, options) ::
{:ok, list(Ash.record()) | boolean} | {:error, Ash.error()}
@doc "Describe the check in human readable format, given the options"
@callback describe(options()) :: String.t()
@doc """
The type of the check.
`:manual` checks must be written by hand as standard check modules
`:filter` checks can use `AshPolicyAuthorizer.FilterCheck` for simplicity
`:simple` checks can use `AshPolicyAuthorizer.SimpleCheck` for simplicity
"""
@callback type() :: check_type()
@optional_callbacks check: 4, auto_filter: 3
def defines_check?(module) do
:erlang.function_exported(module, :check, 4)
end
def defines_auto_filter?(module) do
:erlang.function_exported(module, :auto_filter, 3)
end
defmacro __using__(_opts) do
quote do
@behaviour AshPolicyAuthorizer.Check
def type, do: :manual
end
end
end
|
lib/ash_policy_authorizer/check.ex
| 0.867191
| 0.546799
|
check.ex
|
starcoder
|
defmodule Breadboard.Pinout do
@moduledoc """
Manage the pinout for the supported platform.
Note that when accessing the GPIO pins through sysfs, in some cases (e.g. the ARM
SoC family from Allwinner Technology) the pinout number/label may differ from the
real pin reference number.
The real pin number can be checked via the 'name' key of `Circuits.GPIO.info/0`.
"""
@doc """
Get real pin reference from 'pinout label'.
Returns the real pin number.
## Example for Allwinner platform:
calling `Breadboard.Pinout.label_to_pin/1` with `"PG8"`, `:pg8` or `32`, the returned value is always `200` (the real sysfs reference number):
iex> if(Breadboard.get_platform()==:sunxi ) do
iex> 200 = Breadboard.Pinout.label_to_pin("PG8")
iex> 200 = Breadboard.Pinout.label_to_pin(:pg8)
iex> 200 = Breadboard.Pinout.label_to_pin(32)
iex> 200 = Breadboard.Pinout.label_to_pin(:pin32)
iex> nil
iex> end
nil
## Examples for the default 'stub' reference:
iex> if(Breadboard.get_platform()==:stub ) do
iex> 18 = Breadboard.Pinout.label_to_pin("GPIO18")
iex> 18 = Breadboard.Pinout.label_to_pin(:gpio18)
iex> 18 = Breadboard.Pinout.label_to_pin(18)
iex> 18 = Breadboard.Pinout.label_to_pin(:pin18)
iex> nil
iex> end
nil
"""
@spec label_to_pin(any()) :: non_neg_integer()
def label_to_pin(label) do
GenServer.call(Breadboard.GPIO.PinoutServer.server_name(), {:label_to_pin, label})
end
@doc """
Get pinout label from the pinout number.
Returns the pin label as atom.
## Examples (for the default 'stub' reference)
iex> if(Breadboard.get_platform()==:stub ) do
iex> :gpio18 = Breadboard.Pinout.pin_to_label(:gpio18)
iex> :gpio18 = Breadboard.Pinout.pin_to_label("GPIO18")
iex> :gpio18 = Breadboard.Pinout.pin_to_label(:pin18)
iex> :gpio18 = Breadboard.Pinout.pin_to_label(18)
iex> nil
iex> end
nil
"""
@spec pin_to_label(any()) :: atom()
def pin_to_label(pin) do
GenServer.call(Breadboard.GPIO.PinoutServer.server_name(), {:pin_to_label, pin})
end
end
# SPDX-License-Identifier: Apache-2.0
|
lib/breadboard/pinout.ex
| 0.754373
| 0.481332
|
pinout.ex
|
starcoder
|
defmodule Herd.Cluster do
@moduledoc """
Macro for generating a cluster manager. It will create, populate and refresh
a `Herd.Balancer` in an ets table by polling the configured `Herd.Discovery` implementation.
Usage:
```
defmodule MyHerdCluster do
use Herd.Cluster, otp_app: :myapp,
router: MyRouter,
discovery: MyDiscovery,
pool: MyPool
end
```
"""
require Logger
defmacro __using__(opts) do
app = Keyword.get(opts, :otp_app)
health_check = Keyword.get(opts, :health_check, 60_000)
router = Keyword.get(opts, :router, Herd.Router.HashRing)
discovery = Keyword.get(opts, :discovery)
pool = Keyword.get(opts, :pool)
table_name = :"#{app}_servers"
quote do
use GenServer
import Herd.Cluster
require Logger
@otp unquote(app)
@table_name unquote(table_name)
@health_check unquote(health_check)
@router unquote(router)
@discovery unquote(discovery)
@pool unquote(pool)
def start_link(options) do
GenServer.start_link(__MODULE__, options, name: __MODULE__)
end
def init(_) do
servers = @discovery.nodes()
Logger.info("starting cluster with servers: #{inspect(servers)}")
ring =
@router.new()
|> @router.add_nodes(servers)
table = :ets.new(@table_name, [:set, :protected, :named_table, read_concurrency: true])
:ets.insert(table, {:lb, ring})
@pool.initialize(servers)
schedule_healthcheck()
{:ok, table}
end
def servers(), do: servers(@table_name, @router)
def get_node(key), do: get_node(@table_name, @router, key)
def get_nodes(keys), do: get_nodes(@table_name, @router, keys)
def handle_info(:health_check, table) do
schedule_healthcheck()
health_check(table, @router, @pool, @discovery)
end
defp get_router(), do: get_router(@table_name)
defp schedule_healthcheck() do
Process.send_after(self(), :health_check, @health_check)
end
end
end
def get_node(table, router, key) do
with {:ok, lb} <- get_router(table), do: router.get_node(lb, key)
end
def get_nodes(table, router, keys) do
with {:ok, lb} <- get_router(table), do: router.get_nodes(lb, keys)
end
def health_check(table, router, pool, discovery) do
servers = discovery.nodes()
do_health_check(table, router, pool, servers)
end
def get_router(table) do
case :ets.lookup(table, :lb) do
[{:lb, ring}] -> {:ok, ring}
_ -> {:error, :not_found}
end
end
def servers(table, router) do
case get_router(table) do
{:ok, lb} -> router.nodes(lb)
_ -> []
end
end
# never drain the cluster to guard against bad service disco methods
defp do_health_check(table, _router, _pool, []), do: {:noreply, table}
defp do_health_check(table, router, pool, nodes) do
servers = MapSet.new(nodes)
{:ok, lb} = get_router(table)
current = router.nodes(lb) |> MapSet.new()
added = MapSet.difference(servers, current) |> MapSet.to_list()
removed = MapSet.difference(current, servers) |> MapSet.to_list()
handle_diff(added, removed, lb, router, pool, table)
end
defp handle_diff([], [], _, _, _, table), do: {:noreply, table}
defp handle_diff(add, remove, lb, router, pool, table) do
Logger.info "Added #{inspect(add)} servers to cluster"
Logger.info "Removed #{inspect(remove)} servers from cluster"
lb = router.add_nodes(lb, add) |> router.remove_nodes(remove)
:ets.insert(table, {:lb, lb})
pool.handle_diff(add, remove)
{:noreply, table}
end
end
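# A minimal sketch of the pluggable pieces the macro above expects
# (module names are hypothetical). The discovery module only needs
# nodes/0; the pool needs initialize/1 and handle_diff/2, matching the
# calls made in init/1 and do_health_check/4.
defmodule MyStaticDiscovery do
  @moduledoc false
  # A static server list; real implementations would query service discovery.
  def nodes(), do: [{"localhost", 6379}]
end

defmodule MyNoopPool do
  @moduledoc false
  def initialize(_servers), do: :ok
  def handle_diff(_added, _removed), do: :ok
end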
|
lib/herd/cluster.ex
| 0.781956
| 0.664652
|
cluster.ex
|
starcoder
|
defmodule Quadtreex.BoundingBox.Guards do
@moduledoc false
defguard is_within(lx, ly, rx, ry, x, y) when x >= lx and x <= rx and y >= ly and y <= ry
end
defmodule Quadtreex.BoundingBox do
@moduledoc """
Describes a box of 2-dimensional space
"""
@enforce_keys [:l, :r]
defstruct l: {nil, nil}, r: {nil, nil}, center: {nil, nil}, height: 0.0, width: 0.0
@type coordinate() :: {number(), number()}
@type quadrant :: :ne | :se | :sw | :nw
@type t() :: %__MODULE__{
center: {float(), float()},
height: number(),
l: coordinate(),
r: coordinate(),
width: number()
}
import Quadtreex.BoundingBox.Guards
@spec new(number(), number(), number(), number()) :: t()
def new(lx, ly, rx, ry) do
width = rx - lx
height = ry - ly
cx = lx + width * 0.5
cy = ly + height * 0.5
%__MODULE__{l: {lx, ly}, r: {rx, ry}, center: {cx, cy}, height: height, width: width}
end
@spec new(coordinate(), coordinate()) :: t()
def new({lx, ly}, {rx, ry}) do
new(lx, ly, rx, ry)
end
@spec for_quadrant(t(), quadrant()) :: t()
def for_quadrant(%__MODULE__{l: {lx, ly}, r: {rx, ry}, center: {cx, cy}}, quadrant) do
case quadrant do
:ne ->
new(cx, cy, rx, ry)
:se ->
new(cx, ly, rx, cy)
:sw ->
new(lx, ly, cx, cy)
:nw ->
new(lx, cy, cx, ry)
end
end
@spec contains?(t(), coordinate()) :: boolean()
def contains?(%__MODULE__{l: {lx, ly}, r: {rx, ry}}, {x, y})
when is_within(lx, ly, rx, ry, x, y),
do: true
def contains?(%__MODULE__{}, _coord), do: false
@spec find_quadrant(t(), coordinate()) :: {:ok, quadrant()} | {:error, :out_of_bounds}
def find_quadrant(%__MODULE__{l: {lx, ly}, r: {rx, ry}, center: {cx, cy}}, {x, y})
when is_within(lx, ly, rx, ry, x, y) do
quadrant =
if x < cx do
if y < cy do
:sw
else
:nw
end
else
if y < cy do
:se
else
:ne
end
end
{:ok, quadrant}
end
def find_quadrant(%__MODULE__{}, _coord), do: {:error, :out_of_bounds}
def distance_from(_bbox, _point, point \\ :l)
def distance_from(%__MODULE__{l: {lx, ly}}, {x, y}, :l) do
:math.sqrt(:math.pow(ly - y, 2) + :math.pow(lx - x, 2))
end
def distance_from(%__MODULE__{r: {rx, ry}}, {x, y}, :r) do
:math.sqrt(:math.pow(ry - y, 2) + :math.pow(rx - x, 2))
end
def distance_from(%__MODULE__{center: {cx, cy}}, {x, y}, :center) do
:math.sqrt(:math.pow(cy - y, 2) + :math.pow(cx - x, 2))
end
def distance_between({x1, y1}, {x2, y2}) do
:math.sqrt(:math.pow(y2 - y1, 2) + :math.pow(x2 - x1, 2))
end
end
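# Usage sketch for the helpers above:
#
#     box = Quadtreex.BoundingBox.new({0, 0}, {10, 10})  # center is {5.0, 5.0}
#     Quadtreex.BoundingBox.contains?(box, {3, 4})       #=> true
#     Quadtreex.BoundingBox.find_quadrant(box, {3, 4})   #=> {:ok, :sw}
#     Quadtreex.BoundingBox.for_quadrant(box, :sw).r     #=> {5.0, 5.0}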
|
lib/quadtreex/bounding_box.ex
| 0.916381
| 0.808521
|
bounding_box.ex
|
starcoder
|
defmodule ArtemisWeb.EventIntegrationView do
use ArtemisWeb, :view
# Data Table
def data_table_available_columns() do
[
{"Actions", "actions"},
{"Active", "active"},
{"Integration", "integration_type"},
{"Name", "name"},
{"Notification Type", "notification_type"},
{"Schedule", "schedule"},
{"Upcoming", "upcoming"}
]
end
def data_table_allowed_columns() do
%{
"actions" => [
label: fn _conn -> nil end,
value: fn _conn, _row -> nil end,
value_html: &data_table_actions_column_html/2
],
"active" => [
label: fn _conn -> "Active" end,
label_html: fn conn ->
sortable_table_header(conn, "active", "Active")
end,
value: fn _conn, row -> row.active end
],
"integration_type" => [
label: fn _conn -> "Integration" end,
label_html: fn conn ->
sortable_table_header(conn, "integration_type", "Integration")
end,
value: fn _conn, row -> row.name end,
value_html: fn conn, row ->
case has?(conn, "event-integrations:show") do
true -> link(row.integration_type, to: Routes.event_integration_path(conn, :show, row.event_template, row))
false -> row.integration_type
end
end
],
"name" => [
label: fn _conn -> "Name" end,
label_html: fn conn ->
sortable_table_header(conn, "name", "Name")
end,
value: fn _conn, row -> render_event_integration_name(row) end,
value_html: fn conn, row ->
label = render_event_integration_name(row)
case has?(conn, "event-integrations:show") do
true -> link(label, to: Routes.event_integration_path(conn, :show, row.event_template, row))
false -> label
end
end
],
"notification_type" => [
label: fn _conn -> "Notification Type" end,
label_html: fn conn ->
sortable_table_header(conn, "notification_type", "Notification Type")
end,
value: fn _conn, row -> row.name end,
value_html: fn conn, row ->
case has?(conn, "event-integrations:show") do
true -> link(row.notification_type, to: Routes.event_integration_path(conn, :show, row.event_template, row))
false -> row.notification_type
end
end
],
"schedule" => [
label: fn _conn -> "Schedule" end,
value: fn _conn, row -> get_schedule_summary(row.schedule) end
],
"upcoming" => [
label: fn _conn -> "Upcoming" end,
value: fn _conn, row -> get_schedule_occurrences(row.schedule, Timex.now(), 3) end
]
}
end
defp data_table_actions_column_html(conn, row) do
allowed_actions = [
[
verify: has?(conn, "event-integrations:show"),
link: link("Show", to: Routes.event_integration_path(conn, :show, row.event_template, row))
],
[
verify: has?(conn, "event-integrations:update"),
link: link("Edit", to: Routes.event_integration_path(conn, :edit, row.event_template, row))
]
]
content_tag(:div, class: "actions") do
Enum.reduce(allowed_actions, [], fn action, acc ->
case Keyword.get(action, :verify) do
true -> [acc | Keyword.get(action, :link)]
_ -> acc
end
end)
end
end
# Helpers
@doc """
Renders the name value, falling back on notification and integration type if omitted
"""
def render_event_integration_name(event_integration) do
fallback = "#{event_integration.notification_type} - #{event_integration.integration_type}"
event_integration.name || fallback
end
@doc """
Returns the value of a key in the event_integration.settings field
"""
def get_event_integration_setting(%Ecto.Changeset{} = changeset, key) when is_bitstring(key) do
changeset
|> Ecto.Changeset.get_field(:settings)
|> Kernel.||(%{})
|> Artemis.Helpers.keys_to_strings()
|> Map.get(key)
end
def render_show_link(_conn, nil), do: nil
def render_show_link(conn, record) do
link(record.name, to: Routes.event_integration_path(conn, :show, record.event_template, record))
end
end
|
apps/artemis_web/lib/artemis_web/views/event_integration_view.ex
| 0.588298
| 0.423786
|
event_integration_view.ex
|
starcoder
|
defmodule Ockam.Examples.Session.Routing do
@moduledoc """
Simple routing session example
Creates a spawner for sessions and establishes a routing session
with a simple forwarding data worker.
Usage:
```
{:ok, spawner} = Ockam.Examples.Session.Routing.create_spawner() # create a responder spawner
{:ok, initiator} = Ockam.Examples.Session.Routing.create_initiator([spawner]) # creates an initiator using a route to spawner
Ockam.Examples.Session.Routing.run_local() # creates local spawner and initiator and sends a message through
```
"""
alias Ockam.Session.Pluggable, as: Session
alias Ockam.Session.Spawner
alias Ockam.Examples.Session.Routing.DataWorker
def create_spawner() do
responder_options = [
worker_mod: DataWorker,
worker_options: [messages: [:message_from_options]]
]
spawner_options = [
worker_mod: Session.Responder,
worker_options: responder_options
]
Spawner.create(spawner_options)
end
def create_responder() do
responder_options = [
worker_mod: DataWorker,
worker_options: [messages: [:message_from_options]]
]
Session.Responder.create(responder_options)
end
def create_initiator(init_route) do
Session.Initiator.create(
worker_mod: DataWorker,
worker_options: [messages: [:message_from_options_initiator]],
init_route: init_route
)
end
def run_without_spawner() do
{:ok, responder} = create_responder()
{:ok, responder_inner} = Ockam.AsymmetricWorker.get_inner_address(responder)
{:ok, initiator} = create_initiator([responder_inner])
Session.Initiator.wait_for_session(initiator)
Ockam.Node.register_address("me")
Ockam.Router.route(%{
onward_route: [initiator, "me"],
return_route: ["me"],
payload: "Hi me!"
})
:sys.get_state(Ockam.Node.whereis(initiator))
end
def run() do
{:ok, spawner} = create_spawner()
{:ok, initiator} = create_initiator([spawner])
Session.Initiator.wait_for_session(initiator)
Ockam.Node.register_address("me")
Ockam.Router.route(%{
onward_route: [initiator, "me"],
return_route: ["me"],
payload: "Hi me!"
})
:sys.get_state(Ockam.Node.whereis(initiator))
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/examples/session/routing.ex
| 0.812161
| 0.836488
|
routing.ex
|
starcoder
|
defmodule WordSearch do
@moduledoc """
Generate a word search with various options.
## Examples
iex> WordSearch.generate(["word", "another", "yetanother", "food", "red", "car", "treetop"], 10)
[
["R", "A", "T", "F", "K", "Y", "K", "V", "K", "R"],
["Q", "N", "B", "I", "H", "M", "T", "C", "E", "G"],
["D", "O", "G", "W", "O", "R", "D", "H", "F", "N"],
["T", "T", "C", "A", "G", "D", "T", "A", "U", "H"],
["I", "H", "A", "F", "O", "O", "A", "K", "G", "Z"],
["N", "E", "R", "O", "N", "A", "L", "P", "B", "M"],
["P", "R", "F", "A", "W", "R", "E", "D", "K", "G"],
["R", "G", "T", "E", "J", "M", "R", "Q", "P", "I"],
["X", "E", "E", "S", "S", "E", "C", "T", "Z", "D"],
["Y", "S", "D", "L", "Q", "Y", "K", "R", "J", "F"]
]
## Parameters
words - list of words to be used in the word search
size - size of word search (a size of 10 will generate a 10x10 word search)
difficulty - easy or hard. easy will use the entire alphabet for filling spaces, hard will only use characters from the input words
directions - list of directions the words can be placed: forward, diagonal and backward. combining diagonal and backward will allow backward-diagonal words
language - which character set to use. right now only english is supported, however words with special characters will be added to the alphabet for the word search
"""
@doc """
Generate a word search from a list of words, with directions, difficulty and language set to a default.
directions will be set to ["forward", "diagonal"]
difficulty will be set to "easy"
language will be set to "english"
"""
def generate(words, size) do
generate(words, size, ["forward", "diagonal"], "easy", "english")
end
@doc """
Generate a word search from a list of words, with difficulty and language set to a default.
difficulty will be set to "easy"
language will be set to "english"
"""
def generate(words, size, directions) do
generate(words, size, directions, "easy", "english")
end
@doc """
Generate a word search from a list of words, with language set to a default.
language will be set to "english"
"""
def generate(words, size, directions, difficulty) do
generate(words, size, directions, difficulty, "english")
end
@doc """
Generate a word search from a list of words.
"""
def generate(words, size, directions, difficulty, language) do
WordSearch.Alphabet.list(words, difficulty, language)
|> WordSearch.Grid.build(words, size, directions)
|> Map.fetch!(:grid)
end
end
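# Usage sketch for the optional arguments described above (output varies,
# as placement and filler letters are random):
#
#     WordSearch.generate(["word", "food"], 8, ["forward", "backward"], "hard")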
|
lib/word_search.ex
| 0.772187
| 0.549278
|
word_search.ex
|
starcoder
|
defmodule Hunter.Notification do
@moduledoc """
Notification entity
This module defines a `Hunter.Notification` struct and the main functions
for working with Notifications.
## Fields
* `id` - The notification ID
* `type` - One of: "mention", "reblog", "favourite", "follow"
* `created_at` - The time the notification was created
* `account` - The `Hunter.Account` sending the notification to the user
* `status` - The `Hunter.Status` associated with the notification, if applicable
"""
@hunter_api Hunter.Config.hunter_api()
@type t :: %__MODULE__{
id: String.t(),
type: String.t(),
created_at: String.t(),
account: Hunter.Account.t(),
status: Hunter.Status.t()
}
@derive [Poison.Encoder]
defstruct [:id, :type, :created_at, :account, :status]
@doc """
Retrieve user's notifications
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of notifications with id less than or equal this value
* `since_id` - get a list of notifications with id greater than this value
* `limit` - maximum number of notifications to get, default: 15, max: 30
## Examples
Hunter.Notification.notifications(conn)
#=> [%Hunter.Notification{account: %{"acct" => "<EMAIL>", ...}]
"""
@spec notifications(Hunter.Client.t(), Keyword.t()) :: [Hunter.Notification.t()]
def notifications(conn, options \\ []) do
@hunter_api.notifications(conn, options)
end
@doc """
Retrieve single notification
## Parameters
* `conn` - connection credentials
* `id` - notification identifier
## Examples
Hunter.Notification.notification(conn, 17_476)
#=> %Hunter.Notification{account: %{"acct" => "<EMAIL>", ...}
"""
@spec notification(Hunter.Client.t(), non_neg_integer) :: Hunter.Notification.t()
def notification(conn, id) do
@hunter_api.notification(conn, id)
end
@doc """
Deletes all notifications from the Mastodon server for the authenticated user
## Parameters
* `conn` - connection credentials
"""
@spec clear_notifications(Hunter.Client.t()) :: boolean
def clear_notifications(conn) do
@hunter_api.clear_notifications(conn)
end
@doc """
Dismiss a single notification
## Parameters
* `conn` - connection credentials
* `id` - notification id
"""
@spec clear_notification(Hunter.Client.t(), non_neg_integer) :: boolean
def clear_notification(conn, id) do
@hunter_api.clear_notification(conn, id)
end
end
|
lib/hunter/notification.ex
| 0.858348
| 0.467879
|
notification.ex
|
starcoder
|
defmodule Sizeable do
@moduledoc """
A library to make file sizes human-readable.
Forked from https://github.com/arvidkahl/sizeable under MIT.
"""
@bytes ~w(B KB MB GB TB PB EB ZB YB)
@doc """
see `filesize(value, options)`
"""
def filesize(value) do
filesize(value, [])
end
def filesize(value, options) when is_bitstring(value) do
case Integer.parse(value) do
{parsed, _rem} -> filesize(parsed, options)
:error -> raise "Value is not a number"
end
end
def filesize(value, options) when is_integer(value) do
{parsed, _rem} = value |> Integer.to_string() |> Float.parse()
filesize(parsed, options)
end
def filesize(0.0, _options) do
{:ok, unit} = Enum.fetch(@bytes, 0)
filesize_output(0, unit)
end
@doc """
Returns a human-readable string for the given numeric value.
## Arguments:
- `value` (Integer/Float/String) representing the filesize to be converted.
- `options` (Struct) representing the options to determine base, rounding and units.
## Options
- `round`: the precision the result should be rounded to. Defaults to `2`.
- `base`: the base for exponent calculation; `2` for binary-based numbers, any other integer can be used. Defaults to `2`.
"""
def filesize(value, options) when is_float(value) and is_list(options) do
base = Keyword.get(options, :base, 2)
round = Keyword.get(options, :round, 2)
ceil =
if base > 2 do
1000
else
1024
end
neg = value < 0
value =
case neg do
true -> -value
false -> value
end
{exponent, _rem} =
(:math.log(value) / :math.log(ceil))
|> Float.floor()
|> Float.to_string()
|> Integer.parse()
result = Float.round(value / :math.pow(ceil, exponent), base)
result =
if Float.floor(result) == result do
round(result)
else
Float.round(result, round)
end
{:ok, unit} = Enum.fetch(@bytes, exponent)
result =
case neg do
true -> result * -1
false -> result
end
filesize_output(result, unit)
end
def filesize(_value, options) when is_list(options) do
raise "Invalid value"
end
def filesize(_value, _options) do
raise "Invalid options argument"
end
def filesize_output(result, unit) do
Enum.join([result, unit], " ")
end
end
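# Usage sketch (values follow from the code above; binary base 2 by default):
#
#     Sizeable.filesize(1024)                 #=> "1 KB"
#     Sizeable.filesize(1_000_000, base: 10)  #=> "1 MB"
#     Sizeable.filesize(0)                    #=> "0 B"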
|
lib/sizeable.ex
| 0.909075
| 0.609321
|
sizeable.ex
|
starcoder
|
defmodule Profiler do
@moduledoc """
This sampling profiler is intendend for shell and remote shell usage.
Most commands here print their results to the screen for human inspection.
Example usage:
```
iex(2)> Profiler.profile("<0.187.0>")
100% {:proc_lib, :init_p_do_apply, 3, [file: 'proc_lib.erl', line: 249]}
100% {IEx.Evaluator, :init, 4, [file: 'lib/iex/evaluator.ex', line: 27]}
100% {IEx.Evaluator, :loop, 1, [file: 'lib/iex/evaluator.ex', line: 103]}
100% {IEx.Evaluator, :eval, 3, [file: 'lib/iex/evaluator.ex', line: 217]}
100% {IEx.Evaluator, :do_eval, 3, [file: 'lib/iex/evaluator.ex', line: 239]}
100% {IEx.Evaluator, :handle_eval, 5, [file: 'lib/iex/evaluator.ex', line: 258]}
100% {:elixir, :eval_forms, 3, [file: 'src/elixir.erl', line: 263]}
100% {:elixir, :recur_eval, 3, [file: 'src/elixir.erl', line: 278]}
100% {:erl_eval, :do_apply, 6, [file: 'erl_eval.erl', line: 680]}
100% {Profiler, :profile, 2, [file: 'lib/profiler.ex', line: 120]}
100% {Enum, :reduce_range_inc, 4, [file: 'lib/enum.ex', line: 3371]}
100% {Profiler, :"-profile/2-fun-0-", 3, [file: 'lib/profiler.ex', line: 121]}
```
"""
@type task :: String.t() | atom() | pid() | fun()
@doc """
Times the given function and prints the result.
Example usage:
```
iex(1)> Profiler.time(fn() -> Process.sleep 1000 end)
timer: 1004ms
:ok
```
"""
@spec time(fun(), String.t()) :: any()
def time(fun, msg \\ "timer") do
t1 = Time.utc_now()
ret = fun.()
t2 = Time.utc_now()
IO.puts("#{msg}: #{Time.diff(t2, t1, :millisecond)}ms")
ret
end
@doc """
Lists all processes ordered by reductions within the given
timeout. It takes an initial snapshot, sleeps for the given timeout
and takes a second snapshot.
```
iex(1)> Profiler.processes
[<0.187.0>,{'Elixir.IEx.Evaluator',init,4},1339]
[<0.132.0>,tls_client_ticket_store,32]
[<0.182.0>,{'Elixir.Logger.Watcher',init,1},1]
[<0.181.0>,'Elixir.Logger.BackendSupervisor',1]
[<0.180.0>,{'Elixir.Logger.Watcher',init,1},1]
[<0.179.0>,'Elixir.Logger',1]
[<0.178.0>,'Elixir.Logger.Supervisor',1]
[<0.177.0>,{application_master,start_it,4},1]
[<0.176.0>,{application_master,init,4},1]
[<0.161.0>,'Elixir.Hex.UpdateChecker',1]
:ok
```
"""
@spec processes(non_neg_integer()) :: :ok
def processes(timeout \\ 5_000) do
pids = :erlang.processes()
info1 = Enum.map(pids, &:erlang.process_info/1)
Process.sleep(timeout)
info2 = Enum.map(pids, &:erlang.process_info/1)
info =
Enum.zip([pids, info1, info2])
|> Enum.reject(fn {_pid, info1, info2} -> info1 == :undefined or info2 == :undefined end)
|> Enum.map(fn {pid, info1, info2} ->
name =
if info2[:registered_name] == nil do
if info2[:dictionary] == nil or info2[:dictionary][:"$initial_call"] == nil do
info2[:initial_call]
else
info2[:dictionary][:"$initial_call"]
end
else
info2[:registered_name]
end
[
{:pid, pid},
{:reductionsd, info2[:reductions] - info1[:reductions]},
{:name, name}
| info2
]
end)
|> Enum.sort(&(&1[:reductionsd] > &2[:reductionsd]))
|> Enum.take(10)
for n <- info do
:io.format("~p~n", [[n[:pid], n[:name], n[:reductionsd]]])
end
:ok
end
@doc """
Arguments are the same as for profile() but this sampling profiler does not
analyze stacktrace but instead just samples the current function and prints
the result.
The first number shows the total number of samples that have been recorded
per function call.
For pid there are five different input formats allowed:
1. fun() which will then be spawned, called in a loop and killed after the test
2. Native pid()
3. An atom that is resolved using whereis(name)
4. A string of the format "<a.b.c>" or "0.b.c" or just "b" in which
case the pid is interpreted as "<0.b.0>"
5. An integer, in which case the pid is interpreted as "<0.\#{int}.0>"
```
iex(2)> Profiler.profile_simple 197
{10000, {Profiler, :"-profile_simple/2-fun-0-", 3}}
```
"""
@spec profile_simple(task(), non_neg_integer()) :: :ok
def profile_simple(pid, n \\ 10000)
def profile_simple(pid, n) when is_pid(pid) do
pid = to_pid(pid)
samples =
for _ <- 1..n do
{:current_function, what} = :erlang.process_info(pid, :current_function)
Process.sleep(1)
{Time.utc_now(), what}
end
ret =
Enum.reduce(samples, %{}, fn {_time, what}, map ->
Map.update(map, what, 1, fn n -> n + 1 end)
end)
ret = Enum.map(ret, fn {k, v} -> {v, k} end) |> Enum.sort()
for n <- ret, do: IO.puts("#{inspect(n)}")
:ok
end
def profile_simple(pid, msecs) do
with_pid(pid, fn pid -> profile_simple(pid, msecs) end)
end
@doc """
This runs the sampling profiler for the given amount of milliseconds or
10 seconds by default. The sampling profiler will collect stack traces
of the given process pid or process name and print the collected samples
based on frequency.
For pid there are five different input formats allowed:
1. fun() which will then be spawned, called in a loop and killed after the test
2. Native pid()
3. An atom that is resolved using whereis(name)
4. A string of the format "<a.b.c>" or "0.b.c" or just "b" in which
case the pid is interpreted as "<0.b.0>"
5. An integer, in which case the pid is interpreted as "<0.\#{int}.0>"
In this example the profiler is used to profile itself. The first percentage
number shows how many samples were found in the given function call.
Indentation indicates the call stack:
```
iex(2)> Profiler.profile(187)
100% {:proc_lib, :init_p_do_apply, 3, [file: 'proc_lib.erl', line: 249]}
100% {IEx.Evaluator, :init, 4, [file: 'lib/iex/evaluator.ex', line: 27]}
100% {IEx.Evaluator, :loop, 1, [file: 'lib/iex/evaluator.ex', line: 103]}
100% {IEx.Evaluator, :eval, 3, [file: 'lib/iex/evaluator.ex', line: 217]}
100% {IEx.Evaluator, :do_eval, 3, [file: 'lib/iex/evaluator.ex', line: 239]}
100% {IEx.Evaluator, :handle_eval, 5, [file: 'lib/iex/evaluator.ex', line: 258]}
100% {:elixir, :eval_forms, 3, [file: 'src/elixir.erl', line: 263]}
100% {:elixir, :recur_eval, 3, [file: 'src/elixir.erl', line: 278]}
100% {:erl_eval, :do_apply, 6, [file: 'erl_eval.erl', line: 680]}
100% {Profiler, :profile, 2, [file: 'lib/profiler.ex', line: 120]}
100% {Enum, :reduce_range_inc, 4, [file: 'lib/enum.ex', line: 3371]}
100% {Profiler, :"-profile/2-fun-0-", 3, [file: 'lib/profiler.ex', line: 121]}
```
"""
@spec profile(task(), non_neg_integer()) :: :ok
def profile(pid, n \\ 10_000) do
with_pid(pid, fn pid ->
for _ <- 1..n do
what = stacktrace(pid)
Process.sleep(1)
{Time.utc_now(), what}
end
end)
|> Enum.reduce(%{}, fn {_time, what}, map ->
Map.update(map, what, 1, fn n -> n + 1 end)
end)
|> Enum.map(fn {k, v} -> {v, Enum.reverse(k)} end)
|> Enum.sort()
|> Enum.reduce(%{}, &update/2)
|> Enum.sort_by(fn {_key, {count, _subtree}} -> count end)
|> print()
:ok
end
@doc """
This runs fprof for the given amount of milliseconds or 5 seconds by default.
When the run completes the code tries to open kcachegrind to show the resulting
kcachegrind file. Ensure that kcachegrind is installed. If kcachegrind is not found
the function will just return the name of the generated report file. It
can then be copied to another location for analysis.
For pid there are five different input formats allowed:
1. fun() which will then be spawned, called in a loop and killed after the test
2. Native pid()
3. An atom that is resolved using whereis(name)
4. A string of the format "<a.b.c>" or "0.b.c" or just "b" in which
case the pid is interpreted as "<0.b.0>"
5. An integer, in which case the pid is interpreted as "<0.\#{int}.0>"
In this example fprof is used to profile a demo fibonacci function:
```
iex(1)> Profiler.fprof(fn -> Profiler.demo_fib(30) end)
```
"""
@spec fprof(task(), non_neg_integer()) :: binary()
def fprof(pid, msecs \\ 5_000) do
prefix = "profile_#{:rand.uniform(999_999_999)}"
with_pid(pid, fn pid ->
:fprof.trace([:start, {:procs, pid}, {:cpu_time, false}])
Process.sleep(msecs)
:fprof.trace(:stop)
:fprof.profile()
:fprof.analyse({:dest, String.to_charlist(prefix <> ".fprof")})
end)
# convert(prefix, %Profiler)
convert(prefix)
end
@doc """
Returns current stacktrace for the given pid and allows setting the erlang
internal stacktrace depth.
"""
def stacktrace(pid \\ nil, depth \\ 30) do
:erlang.system_flag(:backtrace_depth, depth)
pid = pid || self()
{:current_stacktrace, what} = :erlang.process_info(pid, :current_stacktrace)
what
end
@doc """
Demo function for the documentation.
Never implement fibonacci like this. Never.
"""
@spec demo_fib(integer()) :: pos_integer
def demo_fib(n) do
if n < 2 do
1
else
demo_fib(n - 1) + demo_fib(n - 2)
end
end
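# Quick sketch tying time/2 and profile/2 together on the demo function
# above (timings vary by machine):
#
#     Profiler.time(fn -> Profiler.demo_fib(30) end)
#     Profiler.profile(fn -> Profiler.demo_fib(32) end, 1_000)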
# =========================== ===========================
# =========================== INTERNAL FUNCTIONS ===========================
# =========================== ===========================
def convert(prefix) do
destination = prefix <> ".cprof"
{:ok, file} = :file.open(String.to_charlist(destination), [:write])
{:ok, terms} = :file.consult(String.to_charlist(prefix <> ".fprof"))
:io.format(file, "events: Time~n", [])
process_terms(file, terms, [])
(System.find_executable("kcachegrind") || System.find_executable("qcachegrind"))
|> case do
nil ->
IO.puts("Run kcachegrind #{destination}")
bin ->
spawn(fn -> System.cmd(bin, [destination], stderr_to_stdout: true) end)
end
destination
end
defstruct [:pid, :in_file, :in_file_format, :out_file]
defp process_terms(file, [], _opts) do
:file.close(file)
end
defp process_terms(file, [{:analysis_options, _opt} | rest], opts) do
process_terms(file, rest, opts)
end
defp process_terms(file, [[{:totals, _count, acc, _own}] | rest], opts) do
:io.format(file, "summary: ~w~n", [:erlang.trunc(acc * 1000)])
process_terms(file, rest, opts)
end
defp process_terms(file, [[{pid, _count, _acc, _own} | _T] | rest], opts = %Profiler{pid: true})
when is_list(pid) do
:io.format(file, "ob=~s~n", [pid])
process_terms(file, rest, opts)
end
defp process_terms(file, [list | rest], opts) when is_list(list) do
process_terms(file, rest, opts)
end
defp process_terms(file, [entry | rest], opts) do
process_entry(file, entry)
process_terms(file, rest, opts)
end
defp process_entry(file, {_calling_list, actual, called_list}) do
process_actual(file, actual)
process_called_list(file, called_list)
end
defp process_actual(file, {func, _count, _acc, own}) do
file_name = get_file(func)
:io.format(file, "fl=~w~n", [file_name])
:io.format(file, "fn=~w~n", [func])
:io.format(file, "1 ~w~n", [trunc(own * 1000)])
end
defp process_called_list(_, []) do
:ok
end
defp process_called_list(file, [called | rest]) do
process_called(file, called)
process_called_list(file, rest)
end
defp process_called(file, {func, count, acc, _own}) do
file_name = get_file(func)
:io.format(file, "cfl=~w~n", [file_name])
:io.format(file, "cfn=~w~n", [func])
:io.format(file, "calls=~w 1~n", [count])
:io.format(file, "1 ~w~n", [trunc(acc * 1000)])
end
defp get_file({mod, _func, _arity}) do
mod
end
defp get_file(_func) do
:pseudo
end
defp print(tree) do
sum = Enum.reduce(tree, 0, fn {_, {count, _}}, sum -> sum + count end)
print(0, tree, sum / 20)
end
defp print(level, tree, min) do
prefix = level * 3
for {key, {count, subtree}} <- tree do
if count > min do
count = round(count * 5 / min) |> Integer.to_string()
IO.puts("#{count |> String.pad_leading(4 + prefix)}% #{inspect(key)}")
print(level + 1, subtree, min)
end
end
end
defp update({_count, []}, map) do
map
end
defp update({count, [head | list]}, map) do
{nil, map} =
Map.get_and_update(map, head, fn
{c, tree} ->
{nil, {c + count, update({count, list}, tree)}}
nil ->
{nil, {count, update({count, list}, %{})}}
end)
map
end
defp to_pid(pid) when is_binary(pid) do
case String.first(pid) do
"0" -> "<#{pid}>"
"<" -> pid
_ -> "<0.#{pid}.0>"
end
|> :erlang.binary_to_list()
|> :erlang.list_to_pid()
end
defp to_pid(pid) when is_atom(pid) do
:erlang.whereis(pid)
end
defp to_pid(pid) when is_integer(pid) do
to_pid("<0.#{pid}.0>")
end
defp to_pid(pid) do
pid
end
defp with_pid(pid, fun) when is_function(pid) do
pid = spawn_link(fn -> looper(pid) end)
ret = fun.(pid)
Process.unlink(pid)
Process.exit(pid, :kill)
ret
end
defp with_pid(pid, fun) do
fun.(to_pid(pid))
end
defp looper(fun) do
fun.()
looper(fun)
end
end
|
lib/profiler.ex
| 0.801159
| 0.745236
|
profiler.ex
|
starcoder
|
defmodule Bigtable.ReadRows do
@moduledoc """
Provides functionality for building and submitting a `Google.Bigtable.V2.ReadRowsRequest`.
"""
alias Bigtable.ChunkReader
alias Bigtable.{Request, Utils}
alias Google.Bigtable.V2
alias V2.Bigtable.Stub
@type response :: {:ok, ChunkReader.chunk_reader_result()} | {:error, any()}
@doc """
Builds a `Google.Bigtable.V2.ReadRowsRequest` given an optional table name.
Defaults to the configured table name if none is provided.
## Examples
iex> table_name = "projects/project-id/instances/instance-id/tables/table-name"
iex> Bigtable.ReadRows.build(table_name)
%Google.Bigtable.V2.ReadRowsRequest{
app_profile_id: "",
filter: nil,
rows: nil,
rows_limit: 0,
table_name: "projects/project-id/instances/instance-id/tables/table-name"
}
"""
@spec build(binary()) :: V2.ReadRowsRequest.t()
def build(table_name \\ Utils.configured_table_name()) when is_binary(table_name) do
V2.ReadRowsRequest.new(table_name: table_name, app_profile_id: "")
end
@doc """
Submits a `Google.Bigtable.V2.ReadRowsRequest` to Bigtable.
Can be called with either a `Google.Bigtable.V2.ReadRowsRequest` or an optional table name.
"""
@spec read(V2.ReadRowsRequest.t() | binary()) :: response()
def read(table_name \\ Utils.configured_table_name())
def read(%V2.ReadRowsRequest{} = request) do
request
|> Request.process_request(&Stub.read_rows/3, stream: true)
|> handle_response()
end
def read(table_name) when is_binary(table_name) do
table_name
|> build()
|> read()
end
defp handle_response({:error, _} = response), do: response
defp handle_response({:ok, response}) do
response
|> Enum.filter(&contains_chunks?/1)
|> Enum.flat_map(fn {:ok, resp} -> resp.chunks end)
|> process_chunks()
end
defp process_chunks(chunks) do
{:ok, cr} = ChunkReader.open()
chunks
|> process_chunks(nil, cr)
end
defp process_chunks([], _result, chunk_reader) do
ChunkReader.close(chunk_reader)
end
defp process_chunks(_chunks, {:error, _}, chunk_reader) do
ChunkReader.close(chunk_reader)
end
defp process_chunks([h | t], _result, chunk_reader) do
result =
chunk_reader
|> ChunkReader.process(h)
process_chunks(t, result, chunk_reader)
end
defp contains_chunks?({:ok, response}), do: !Enum.empty?(response.chunks)
end
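# Usage sketch (the table name is hypothetical and Bigtable credentials
# must be configured). read/1 accepts a request struct or a table name:
#
#     case Bigtable.ReadRows.read("projects/p/instances/i/tables/t") do
#       {:ok, chunk_reader_result} -> chunk_reader_result
#       {:error, reason} -> reason
#     end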
|
lib/data/read_rows.ex
| 0.892587
| 0.403097
|
read_rows.ex
|
starcoder
|
defmodule BubbleLib.MapUtil do
@moduledoc """
Map utility functions
"""
alias BubbleLib.MapUtil.AutoMap.ETS
def normalize(value) do
value
|> deep_keyword_to_map()
|> enum_materialize()
|> deatomize()
|> drop_nils()
end
def deatomize(%{__struct__: _} = struct) do
struct
|> Map.keys()
|> Enum.reduce(struct, fn k, s -> Map.put(s, k, deatomize(Map.get(struct, k))) end)
end
def deatomize(%{} = map) do
map
|> Enum.map(fn
{%{__struct__: _} = k, v} -> {k, deatomize(v)}
{k, v} -> {to_string(k), deatomize(v)}
end)
|> Map.new()
end
def deatomize(list) when is_list(list) do
list
|> Enum.map(&deatomize/1)
end
def deatomize(value) do
value
end
def drop_nils(%{__struct__: _} = struct) do
struct
|> Map.delete(:__struct__)
|> drop_nils()
end
def drop_nils(%{} = map) do
map
|> Enum.filter(fn {_, v} -> v != nil end)
|> Enum.map(fn {k, v} -> {k, drop_nils(v)} end)
|> Enum.into(%{})
end
def drop_nils(list) when is_list(list) do
list
|> Enum.map(&drop_nils/1)
end
def drop_nils(value) do
value
end
def deep_keyword_to_map(%{__struct__: _} = value) do
value
end
def deep_keyword_to_map(value) when is_map(value) do
Enum.map(value, fn {k, v} -> {k, deep_keyword_to_map(v)} end)
|> Enum.into(%{})
end
def deep_keyword_to_map([{_, _} | _] = value) do
Enum.map(value, fn {k, v} -> {k, deep_keyword_to_map(v)} end)
|> Enum.into(%{})
end
def deep_keyword_to_map(value) when is_list(value) do
Enum.map(value, fn v -> deep_keyword_to_map(v) end)
end
def deep_keyword_to_map(value), do: value
def enum_materialize(%ETS{} = value) do
Enum.to_list(value)
end
def enum_materialize(%{__struct__: _} = value) do
value
end
def enum_materialize(%{} = map) do
map
|> Enum.map(fn {k, v} -> {k, enum_materialize(v)} end)
|> Map.new()
end
def enum_materialize(list) when is_list(list) do
list
|> Enum.map(&enum_materialize/1)
end
def enum_materialize(value) do
value
end
end
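# Usage sketch for the normalize/1 pipeline above: keyword lists become
# string-keyed maps and nil values are dropped.
#
#     BubbleLib.MapUtil.normalize(foo: [a: 1, b: nil])
#     #=> %{"foo" => %{"a" => 1}}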
|
lib/bubble_lib/map_util.ex
| 0.707203
| 0.471527
|
map_util.ex
|
starcoder
|
defmodule Ockam.Messaging.PipeChannel do
@moduledoc """
Ockam channel using pipes to deliver messages
Can be used with different pipe implementations to get different delivery properties
See `Ockam.Messaging.PipeChannel.Initiator` and `Ockam.Messaging.PipeChannel.Responder` for usage
Session setup:
Initiator is started with a route to spawner
Initiator starts a local receiver
Initiator sends handshake to spawner route
handshake message return route contains receiver address
Spawner starts a Responder with:
return route from the handshake message
Initiator address from the handshake message metadata
Responder starts a local receiver
Responder starts a sender using the return route of the handshake
Responder sends handshake response to Initiator through local sender
Using route: [responder_sender, initiator]
Responder Sender forwards handshake response to Initiator Receiver
Initiator Receiver forwards handshake response to Initiator
Initiator takes receiver address and responder address from the handshake response metadata
Initiator creates a route to Responder Receiver using receiver address and spawner route
Initiator creates a local sender using this route
Message forwarding:
Each channel endpoint is using two addresses: INNER and OUTER.
INNER address is used to communicate with the pipes
OUTER address is used to communicate to other workers
On receiving a message from OUTER address with:
OR: [outer] ++ onward_route
RR: return_route
Channel endpoint sends a message with:
OR: [local_sender, remote_endpoint] ++ onward_route
RR: return_route
On receiving a message from INNER address with:
OR: [inner] ++ onward_route
RR: return_route
It forwards a message with:
OR: onward_route
RR: [outer] ++ return_route
"""
alias Ockam.Message
alias Ockam.Router
@doc false
## Inner message is forwarded with outer address in return route
def forward_inner(message, state) do
[_me | onward_route] = Message.onward_route(message)
return_route = Message.return_route(message)
payload = Message.payload(message)
Router.route(%{
onward_route: onward_route,
return_route: [state.address | return_route],
payload: payload
})
end
@doc false
## Outer message is forwarded through sender
## to other channel endpoints inner address
def forward_outer(message, state) do
channel_route = Map.get(state, :channel_route)
[_me | onward_route] = Message.onward_route(message)
return_route = Message.return_route(message)
payload = Message.payload(message)
sender = Map.fetch!(state, :sender)
Router.route(%{
onward_route: [sender | channel_route ++ onward_route],
return_route: return_route,
payload: payload
})
end
@doc false
def register_inner_address(state) do
{:ok, inner_address} = Ockam.Node.register_random_address()
Map.put(state, :inner_address, inner_address)
end
@doc false
def pipe_mod(options) do
case Keyword.fetch(options, :pipe_mod) do
{:ok, {sender_mod, receiver_mod}} ->
{:ok, {sender_mod, receiver_mod}}
{:ok, pipe_mod} when is_atom(pipe_mod) ->
{:ok, {pipe_mod.sender(), pipe_mod.receiver()}}
end
end
end
defmodule Ockam.Messaging.PipeChannel.Metadata do
@moduledoc """
Encodable data structure for pipechannel handshake metadata
"""
defstruct [:receiver_route, :channel_route]
@type t() :: %__MODULE__{}
## TODO: use proper address encoding
@schema {:struct, [receiver_route: {:array, :data}, channel_route: {:array, :data}]}
@spec encode(t()) :: binary()
def encode(meta) do
:bare.encode(meta, @schema)
end
@spec decode(binary()) :: t()
def decode(data) do
case :bare.decode(data, @schema) do
{:ok, meta, ""} ->
struct(__MODULE__, meta)
other ->
exit({:meta_decode_error, data, other})
end
end
end
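# Round-trip sketch for the handshake metadata above (the route entries
# are hypothetical addresses):
#
#     alias Ockam.Messaging.PipeChannel.Metadata
#     meta = %Metadata{receiver_route: ["recv"], channel_route: ["chan"]}
#     ^meta = meta |> Metadata.encode() |> Metadata.decode()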
defmodule Ockam.Messaging.PipeChannel.Simple do
@moduledoc """
Simple implementation of pipe channel.
Does not manage the session.
Requires a known address to the local pipe sender and remote channel end
Options:
`sender` - address of the sender worker
`channel_route` - route from remote receiver to remote channel end
"""
use Ockam.AsymmetricWorker
alias Ockam.Messaging.PipeChannel
@impl true
def inner_setup(options, state) do
sender = Keyword.fetch!(options, :sender)
channel_route = Keyword.fetch!(options, :channel_route)
{:ok, Map.merge(state, %{sender: sender, channel_route: channel_route})}
end
@impl true
def handle_inner_message(message, state) do
PipeChannel.forward_inner(message, state)
{:ok, state}
end
@impl true
def handle_outer_message(message, state) do
PipeChannel.forward_outer(message, state)
{:ok, state}
end
end
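# A minimal wiring sketch for the Simple channel above (addresses are
# hypothetical; the local sender and the remote channel end must already
# be running):
#
#     {:ok, _channel} = Ockam.Messaging.PipeChannel.Simple.create(
#       sender: "local_sender_address",
#       channel_route: ["remote_channel_inner_address"]
#     )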
defmodule Ockam.Messaging.PipeChannel.Initiator do
@moduledoc """
Pipe channel initiator.
Using two addresses for inner and outer communication.
Starts a local receiver and sends a handshake message to the remote spawner.
The handshake message is sent with the RECEIVER address in the return route.
In handshake stage:
buffers all messages received on outer address.
On handshake response creates a local sender using handshake metadata (receiver route) and spawner route.
In ready stage:
forwards messages from outer address to the sender and remote responder
forwards messages from inner address to the onward route and traces own outer address in the return route
Options:
`pipe_mod` - pipe modules to use, either {sender, receiver} or a module implementing `Ockam.Messaging.Pipe`
`spawner_route` - a route to responder spawner
"""
use Ockam.AsymmetricWorker
alias Ockam.Messaging.PipeChannel
alias Ockam.Messaging.PipeChannel.Metadata
alias Ockam.Message
alias Ockam.Router
@impl true
def inner_setup(options, state) do
spawner_route = Keyword.fetch!(options, :spawner_route)
sender_options = Keyword.get(options, :sender_options, [])
receiver_options = Keyword.get(options, :receiver_options, [])
{:ok, {sender_mod, receiver_mod}} = PipeChannel.pipe_mod(options)
{:ok, receiver} = receiver_mod.create(receiver_options)
send_handshake(spawner_route, receiver, state)
{:ok,
Map.merge(state, %{
receiver: receiver,
spawner_route: spawner_route,
state: :handshake,
sender_mod: sender_mod,
receiver_mod: receiver_mod,
sender_options: sender_options
})}
end
@impl true
def handle_inner_message(message, %{state: :handshake} = state) do
payload = Message.payload(message)
%Metadata{
channel_route: channel_route,
receiver_route: remote_receiver_route
} = Metadata.decode(payload)
spawner_route = Map.fetch!(state, :spawner_route)
receiver_route = make_receiver_route(spawner_route, remote_receiver_route)
sender_mod = Map.get(state, :sender_mod)
sender_options = Map.get(state, :sender_options, [])
{:ok, sender} =
sender_mod.create(Keyword.merge([receiver_route: receiver_route], sender_options))
process_buffer(
Map.merge(state, %{
sender: sender,
channel_route: channel_route,
state: :ready
})
)
end
def handle_inner_message(message, %{state: :ready} = state) do
PipeChannel.forward_inner(message, state)
{:ok, state}
end
@impl true
def handle_outer_message(message, %{state: :handshake} = state) do
## TODO: find a better solution than buffering
state = buffer_message(message, state)
{:ok, state}
end
def handle_outer_message(message, %{state: :ready} = state) do
PipeChannel.forward_outer(message, state)
{:ok, state}
end
defp process_buffer(state) do
buffer = Map.get(state, :buffer, [])
Enum.reduce(buffer, {:ok, state}, fn message, {:ok, state} ->
handle_outer_message(message, state)
end)
end
defp buffer_message(message, state) do
buffer = Map.get(state, :buffer, [])
Map.put(state, :buffer, buffer ++ [message])
end
defp make_receiver_route(spawner_route, remote_receiver_route) do
Enum.take(spawner_route, Enum.count(spawner_route) - 1) ++ remote_receiver_route
end
defp send_handshake(spawner_route, receiver, state) do
msg = %{
onward_route: spawner_route,
return_route: [receiver],
payload:
Metadata.encode(%Metadata{
channel_route: [state.inner_address],
receiver_route: [receiver]
})
}
Router.route(msg)
end
end
defmodule Ockam.Messaging.PipeChannel.Responder do
@moduledoc """
Pipe channel responder
Using two addresses for inner and outer communication.
Created with remote receiver route and channel route
On start:
creates a local receiver
creates a sender for a remote receiver route
sends a channel handshake confirmation through the sender
confirmation contains local receiver address and responder inner address
forwards messages from outer address through the sender and remote initiator
forwards messages from inner address and traces own outer address in the return route
Options:
`pipe_mod` - pipe modules to use, either {sender, receiver} or an atom namespace, which has .Sender and .Receiver (e.g. `Ockam.Messaging.Ordering.Monotonic.IndexPipe`)
`receiver_route` - route to the receiver on the initiator side, used to create a sender
`channel_route` - route from initiator receiver to initiator, used in forwarding
"""
use Ockam.AsymmetricWorker
alias Ockam.Messaging.PipeChannel
alias Ockam.Messaging.PipeChannel.Metadata
alias Ockam.Router
require Logger
@impl true
def inner_setup(options, state) do
receiver_route = Keyword.fetch!(options, :receiver_route)
channel_route = Keyword.fetch!(options, :channel_route)
sender_options = Keyword.get(options, :sender_options, [])
receiver_options = Keyword.get(options, :receiver_options, [])
{:ok, {sender_mod, receiver_mod}} = PipeChannel.pipe_mod(options)
{:ok, receiver} = receiver_mod.create(receiver_options)
{:ok, sender} =
sender_mod.create(Keyword.merge([receiver_route: receiver_route], sender_options))
send_handshake_response(receiver, sender, channel_route, state)
{:ok,
Map.merge(state, %{
receiver: receiver,
sender: sender,
channel_route: channel_route,
sender_mod: sender_mod,
receiver_mod: receiver_mod
})}
end
@impl true
def handle_inner_message(message, state) do
PipeChannel.forward_inner(message, state)
{:ok, state}
end
@impl true
def handle_outer_message(message, state) do
PipeChannel.forward_outer(message, state)
{:ok, state}
end
defp send_handshake_response(receiver, sender, channel_route, state) do
msg = %{
onward_route: [sender | channel_route],
return_route: [state.inner_address],
payload:
Metadata.encode(%Metadata{
channel_route: [state.inner_address],
receiver_route: [receiver]
})
}
# Logger.info("Handshake response #{inspect(msg)}")
Router.route(msg)
end
end
defmodule Ockam.Messaging.PipeChannel.Spawner do
@moduledoc """
Pipe channel receiver spawner
On message spawns a channel receiver
with remote route as a remote receiver route
and channel route taken from the message metadata
Options:
`responder_options` - additional options to pass to the responder
"""
use Ockam.Worker
alias Ockam.Messaging.PipeChannel.Metadata
alias Ockam.Messaging.PipeChannel.Responder
alias Ockam.Message
require Logger
@impl true
def setup(options, state) do
responder_options = Keyword.fetch!(options, :responder_options)
{:ok, Map.put(state, :responder_options, responder_options)}
end
@impl true
def handle_message(message, state) do
return_route = Message.return_route(message)
payload = Message.payload(message)
## We ignore receiver route here and rely on return route tracing
%Metadata{channel_route: channel_route} = Metadata.decode(payload)
responder_options = Map.get(state, :responder_options)
Responder.create(
Keyword.merge(responder_options, receiver_route: return_route, channel_route: channel_route)
)
{:ok, state}
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/messaging/pipe_channel.ex
| 0.880739
| 0.436322
|
pipe_channel.ex
|
starcoder
|
defmodule Plaid.Webhook do
@moduledoc """
Creates a Plaid Event from a webhook's payload if the signature is valid.
The verification flow follows the docs: plaid.com/docs/#webhook-verification
"""
import Plaid, only: [make_request_with_cred: 4, validate_cred: 1]
alias Plaid.Utils
@endpoint :webhook_verification_key
@type params :: %{required(atom) => String.t()}
@type config :: %{required(atom) => String.t()}
defmodule WebHookVerificationKey do
@type t :: %__MODULE__{
key: map,
request_id: String.t()
}
@derive Jason.Encoder
defstruct [
:key,
:request_id
]
end
defmodule Event do
@type t :: %__MODULE__{
type: String.t(),
data: map
}
@derive Jason.Encoder
defstruct [
:type,
:data
]
end
@doc """
Constructs a Plaid event after validating the jwt_string
Parameters
```
%{
body: "payload_received_from_webhook",
client_id: "client_identifier",
jwt_string: "plaid_verification_header",
secret: "plaid_env_secret"
}
```
"""
@spec construct_event(params, config | nil) ::
{:ok, map()} | {:error, any}
when params: %{
:body => String.t(),
:client_id => String.t(),
:jwt_string => String.t(),
:secret => String.t()
}
def construct_event(params, config \\ %{}) do
with {:ok, %{"alg" => "ES256", "kid" => kid}} <- Joken.peek_header(params.jwt_string),
{:ok, %Plaid.Webhook.WebHookVerificationKey{} = wvk} <-
retrieve_public_key(
%{client_id: params.client_id, secret: params.secret, key_id: kid},
config
),
{:ok,
%{
"iat" => iat,
"request_body_sha256" => body_sha256
}} <- validate_the_signature(params.jwt_string, wvk),
true <- less_than_five_minutes_ago(iat),
true <- bodies_match(params.body, body_sha256) do
create_event(params.body)
else
{:ok, %{"alg" => _alg}} ->
{:error, :unauthorized, reason: "incorrect alg"}
{:error, %Plaid.Error{}} ->
{:error, :unauthorized, reason: "invalid plaid credentials"}
false ->
{:error, :unauthorized, reason: "received too late"}
_error ->
{:error, :unauthorized}
end
end
defp retrieve_public_key(params, config) do
config = validate_cred(config)
endpoint = "#{@endpoint}/get"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
defp validate_the_signature(jwt_string, jwk) do
Joken.Signer.verify(jwt_string, Joken.Signer.create(jwk.key["alg"], jwk.key))
end
defp less_than_five_minutes_ago(iat) do
with now <- DateTime.utc_now(),
five_mins_ago <- DateTime.add(now, -300, :second),
res when res in [:eq, :lt] <-
DateTime.compare(five_mins_ago, DateTime.from_unix!(iat, :second)) do
true
else
_ ->
false
end
end
defp bodies_match(body, body_sha256) do
with hash <- :crypto.hash(:sha256, body),
encoded <- Base.encode16(hash),
^body_sha256 <- String.downcase(encoded) do
true
else
_ -> false
end
end
defp create_event(body) do
body = Jason.decode!(body)
# type = body["webhook_type"]
type_code = String.downcase("#{body["webhook_type"]}.#{body["webhook_code"]}")
data =
body
|> Map.drop(["webhook_type"])
|> Map.drop(["webhook_code"])
{:ok, %Event{type: type_code, data: data}}
end
end
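# Usage sketch inside a webhook endpoint (variable names and the
# handle_event/reject helpers are hypothetical; `raw_body` must be the
# unparsed request body):
#
#     params = %{body: raw_body, client_id: client_id, jwt_string: jwt_header, secret: secret}
#     case Plaid.Webhook.construct_event(params) do
#       {:ok, %Plaid.Webhook.Event{type: type, data: data}} -> handle_event(type, data)
#       {:error, :unauthorized, reason: reason} -> reject(reason)
#       {:error, :unauthorized} -> reject(:unknown)
#     end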
|
lib/plaid/webhook.ex
| 0.795301
| 0.58347
|
webhook.ex
|
starcoder
|
defmodule LruCache do
@moduledoc ~S"""
This modules implements a simple LRU cache, using 2 ets tables for it.
For using it, you need to start it:
iex> LruCache.start_link(:my_cache, 1000)
Or add it to your supervisor tree, like: `worker(LruCache, [:my_cache, 1000])`
## Using
iex> LruCache.start_link(:my_cache, 1000)
{:ok, #PID<0.60.0>}
iex> LruCache.put(:my_cache, "id", "value")
:ok
iex> LruCache.get(:my_cache, "id", touch = false)
"value"
To take some action when old keys are evicted from the cache when it is full,
you can pass an `:evict_fn` option to `LruCache.start_link/3`. This is
helpful for cleaning up processes that depend on values in the cache, or
logging, or instrumentation of cache evictions etc.
iex> evict = fn(key,value) -> IO.inspect("#{key}=#{value} evicted") end
iex> LruCache.start_link(:my_cache, 10, evict_fn: evict)
{:ok, #PID<0.60.0>}
## Design
The first ets table saves the key-value pairs; the second saves the insertion order of the elements.
"""
use GenServer
defstruct table: nil, ttl_table: nil, size: 0, evict_fn: nil
@doc """
Creates an LRU of the given size as part of a supervision tree with a registered name
## Options
* `:evict_fn` - function that accepts (key, value) and takes some action when keys are
evicted when the cache is full.
"""
def start_link(name, size, opts \\ []) do
Agent.start_link(__MODULE__, :init, [name, size, opts], name: name)
end
@doc """
Stores the given `value` under `key` in `cache`. If `cache` already has `key`, the stored
`value` is replaced by the new one. This updates the order of LRU cache.
"""
def put(name, key, value, timeout \\ 5000),
do: Agent.get(name, __MODULE__, :handle_put, [key, value], timeout)
@doc """
Updates a `value` in `cache`. If `key` is not present in `cache` then nothing is done.
`touch` defines whether the LRU order should be updated. The function assumes that
the element exists in the cache.
"""
def update(name, key, value, touch \\ true, timeout \\ 5000) do
if :ets.update_element(name, key, {3, value}) do
touch && Agent.get(name, __MODULE__, :handle_touch, [key], timeout)
end
:ok
end
@doc """
Returns the `value` associated with `key` in `cache`. If `cache` does not contain `key`,
returns nil. `touch` defines whether the LRU order should be updated.
"""
def get(name, key, touch \\ true, timeout \\ 5000) do
case :ets.lookup(name, key) do
[{_, _, value}] ->
touch && Agent.get(name, __MODULE__, :handle_touch, [key], timeout)
value
[] ->
nil
end
end
@doc """
Removes the entry stored under the given `key` from cache.
"""
def delete(name, key, timeout \\ 5000),
do: Agent.get(name, __MODULE__, :handle_delete, [key], timeout)
@doc false
def init(name, size, opts \\ []) do
ttl_table = :"#{name}_ttl"
:ets.new(ttl_table, [:named_table, :ordered_set])
:ets.new(name, [:named_table, :public, {:read_concurrency, true}])
evict_fn = Keyword.get(opts, :evict_fn)
%LruCache{ttl_table: ttl_table, table: name, size: size, evict_fn: evict_fn}
end
@doc false
def init({name, size, opts}) do
init(name, size, opts)
end
@doc false
def handle_put(state = %{table: table}, key, value) do
delete_ttl(state, key)
uniq = insert_ttl(state, key)
:ets.insert(table, {key, uniq, value})
clean_oversize(state)
:ok
end
@doc false
def handle_touch(state = %{table: table}, key) do
delete_ttl(state, key)
uniq = insert_ttl(state, key)
:ets.update_element(table, key, [{2, uniq}])
:ok
end
@doc false
def handle_delete(state = %{table: table}, key) do
delete_ttl(state, key)
:ets.delete(table, key)
:ok
end
defp delete_ttl(%{ttl_table: ttl_table, table: table}, key) do
case :ets.lookup(table, key) do
[{_, old_uniq, _}] ->
:ets.delete(ttl_table, old_uniq)
_ ->
nil
end
end
defp insert_ttl(%{ttl_table: ttl_table}, key) do
uniq = :erlang.unique_integer([:monotonic])
:ets.insert(ttl_table, {uniq, key})
uniq
end
defp clean_oversize(state = %{ttl_table: ttl_table, table: table, size: size}) do
if :ets.info(table, :size) > size do
oldest_tstamp = :ets.first(ttl_table)
[{_, old_key}] = :ets.lookup(ttl_table, oldest_tstamp)
:ets.delete(ttl_table, oldest_tstamp)
call_evict_fn(state, old_key)
:ets.delete(table, old_key)
true
else
nil
end
end
defp call_evict_fn(%{evict_fn: nil}, _old_key), do: nil
defp call_evict_fn(%{evict_fn: evict_fn, table: table}, key) do
[{_, _, value}] = :ets.lookup(table, key)
evict_fn.(key, value)
end
end
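# Eviction sketch tying the pieces above together: with size 2, inserting
# a third key evicts the least recently used one (get/3 with touch
# refreshes recency).
#
#     {:ok, _pid} = LruCache.start_link(:demo, 2)
#     LruCache.put(:demo, :a, 1)
#     LruCache.put(:demo, :b, 2)
#     LruCache.get(:demo, :a)    # touches :a, so :b is now oldest
#     LruCache.put(:demo, :c, 3) # evicts :b
#     LruCache.get(:demo, :b)    #=> nil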
|
lib/lru_cache.ex
| 0.769384
| 0.490907
|
lru_cache.ex
|
starcoder
|
defmodule Spellex do
@words Spellex.Dictionary.words()
@letters String.split("abcdefghijklmnopqrstuvwxyz", "")
@moduledoc """
Documentation for Spellex.
"""
@doc """
Returns a list of possible corrections for a `word`.
## Examples
iex> Spellex.corrections("wrod")
{:ok, ["word"]}
iex> Spellex.corrections("")
{:ok, []}
iex> Spellex.corrections(1)
{:error, :invalid_input}
"""
@spec corrections(binary) :: {:error, :invalid_input}
def corrections(word) when not is_binary(word), do: {:error, :invalid_input}
@spec corrections(binary) :: {:ok, List.t()}
def corrections(word) when word == "", do: {:ok, []}
@spec corrections(binary) :: {:ok, List.t()}
def corrections(word) when is_binary(word) do
{:ok, Enum.uniq(known([word]) ++ one_edit_away(word) ++ [word])}
end
@doc """
Returns the most probable correction for a `word`.
## Examples
iex> Spellex.correction("wrod")
{:ok, "word"}
iex> Spellex.correction("")
{:ok, ""}
iex> Spellex.correction(1)
{:error, :invalid_input}
"""
@spec correction(binary) :: {:error, :invalid_input}
def correction(word) when not is_binary(word), do: {:error, :invalid_input}
@spec correction(binary) :: {:ok, String.t()}
def correction(word) when word == "", do: {:ok, ""}
@spec correction(binary) :: {:ok, String.t()}
def correction(word) when is_binary(word) do
{:ok, corrections} = corrections(word)
{:ok, Enum.max_by(corrections, &probability/1)}
end
defp probability(word) do
case Enum.count(@words, fn el -> el == word end) do
0 -> 0
p -> p / Enum.count(@words)
end
end
defp known(words) do
for w <- words, Enum.find(@words, fn el -> el == w end), do: w
end
defp one_edit_away(word) do
Enum.uniq(deletes(word) ++ transposes(word) ++ replaces(word) ++ inserts(word))
end
def splits(word) do
for i <- 0..String.length(word), do: String.split_at(word, i)
end
defp deletes(word) do
splits(word)
|> Enum.filter(fn {_left, right} -> right != "" end)
|> Enum.map(fn {left, right} -> left <> elem(String.split_at(right, 1), 1) end)
end
defp transposes(word) do
splits(word)
|> Enum.filter(fn {_left, right} -> String.length(right) > 1 end)
|> Enum.map(fn {left, right} -> left <> String.at(right, 1) <> String.at(right, 0) <> elem(String.split_at(right, 2), 1) end)
end
defp replaces(word) do
splits(word)
|> Enum.filter(fn {_left, right} -> right != "" end)
|> Enum.map(fn {left, right} ->
Enum.map(@letters, fn letter -> left <> letter <> elem(String.split_at(right, 1), 1) end)
end)
|> List.flatten
|> Enum.uniq
end
defp inserts(word) do
splits(word)
|> Enum.filter(fn {_left, right} -> right != "" end)
|> Enum.map(fn {left, right} ->
Enum.map(@letters, fn letter -> left <> letter <> right end)
end)
|> List.flatten
|> Enum.uniq
end
end
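# Worked sketch of splits/1, the building block for the four edit
# operations above:
#
#     Spellex.splits("ab")
#     #=> [{"", "ab"}, {"a", "b"}, {"ab", ""}]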
|
lib/spellex.ex
| 0.903552
| 0.493226
|
spellex.ex
|
starcoder
|
defmodule Prog do
@moduledoc """
Documentation for `Prog`.
"""
@doc """
Day 12
"""
def solve do
{:ok, raw} = File.read("data/day_12")
# raw = "F10
# N3
# F7
# R90
# F11"
data = String.split(raw, "\n", trim: true)
|> Enum.map(&extract/1)
distance_to_end = find_path_to_ending_location(data, {0, 0}, :east)
{y, x} = distance_to_end
part_1 = Kernel.abs(y) + Kernel.abs(x)
IO.inspect part_1, label: "Part 1"
actual_distance_to_end = find_path_to_ending_location_waypoints(data, {0, 0}, {{:east, 10}, {:north, 1}})
{actual_y, actual_x} = actual_distance_to_end
part_2 = Kernel.abs(actual_y) + Kernel.abs(actual_x)
IO.inspect part_2, label: "Part 2"
end
def find_path_to_ending_location([], {y, x}, _direction), do: {y, x}
def find_path_to_ending_location([instruction | remaining], {y, x}, direction) do
case instruction do
%{cardinal: {dir, amount}} ->
find_path_to_ending_location(remaining, move(dir, {y, x}, amount), direction)
%{scalar: amount} ->
find_path_to_ending_location(remaining, move(direction, {y, x}, amount), direction)
%{turn: {:right, amount}} ->
turns = div(amount, 90)
starting = find_starting(direction)
after_turning = starting + (turns * 90)
new_direction = find_after(rem(after_turning, 360))
find_path_to_ending_location(remaining, {y, x}, new_direction)
%{turn: {:left, amount}} ->
turns = div(amount, 90)
starting = find_starting(direction)
after_turning = (360 + starting) - (turns * 90)
new_direction = find_after(rem(after_turning, 360))
find_path_to_ending_location(remaining, {y, x}, new_direction)
end
end
def move(:north, {y, x}, amount), do: {y + amount, x}
def move(:east, {y, x}, amount), do: {y, x + amount}
def move(:south, {y, x}, amount), do: {y - amount, x}
def move(:west, {y, x}, amount), do: {y, x - amount}
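# Headings are encoded as compass degrees (north = 0, east = 90, south = 180,
# west = 270) so turning reduces to modular arithmetic on multiples of 90.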
def find_starting(:north), do: 0
def find_starting(:east), do: 90
def find_starting(:south), do: 180
def find_starting(:west), do: 270
def find_after(0), do: :north
def find_after(90), do: :east
def find_after(180), do: :south
def find_after(270), do: :west
def find_path_to_ending_location_waypoints([], {y, x}, _waypoint), do: {y, x}
def find_path_to_ending_location_waypoints([instruction | remaining], {y, x}, {{horizontal, horizontal_amount}, {vertical, vertical_amount}} = waypoint ) do
case instruction do
%{cardinal: {:north, amount}} ->
updated_vertical = handle_waypoint_update({vertical, vertical_amount}, {:north, amount})
find_path_to_ending_location_waypoints(remaining, {y, x}, {{horizontal, horizontal_amount}, updated_vertical})
%{cardinal: {:east, amount}} ->
updated_horizontal = handle_waypoint_update({horizontal, horizontal_amount}, {:east, amount})
find_path_to_ending_location_waypoints(remaining, {y, x}, {updated_horizontal, {vertical, vertical_amount}})
%{cardinal: {:south, amount}} ->
updated_vertical = handle_waypoint_update({vertical, vertical_amount}, {:south, amount})
find_path_to_ending_location_waypoints(remaining, {y, x}, {{horizontal, horizontal_amount}, updated_vertical})
%{cardinal: {:west, amount}} ->
updated_horizontal = handle_waypoint_update({horizontal, horizontal_amount}, {:west, amount})
find_path_to_ending_location_waypoints(remaining, {y, x}, {updated_horizontal, {vertical, vertical_amount}})
%{scalar: amount} ->
updated_y = if vertical == :north do
y + (vertical_amount * amount)
else
y - (vertical_amount * amount)
end
updated_x = if horizontal == :east do
x + (horizontal_amount * amount)
else
x - (horizontal_amount * amount)
end
find_path_to_ending_location_waypoints(remaining, {updated_y, updated_x}, waypoint)
%{turn: {:right, amount}} ->
turns = div(amount, 90)
updated_waypoint = rotate_right(turns, waypoint)
find_path_to_ending_location_waypoints(remaining, {y, x}, updated_waypoint)
%{turn: {:left, amount}} ->
turns = div(amount, 90)
updated_waypoint = rotate_left(turns, waypoint)
find_path_to_ending_location_waypoints(remaining, {y, x}, updated_waypoint)
end
end
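# Rotating the waypoint 90 degrees clockwise exchanges the horizontal and
# vertical amounts and steps each direction clockwise, e.g.
# {{:east, 10}, {:north, 4}} becomes {{:east, 4}, {:south, 10}}.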
def rotate_right(0, waypoint), do: waypoint
def rotate_right(turns, {{:east, horizontal_amount}, {:north, vertical_amount}}), do: rotate_right(turns - 1, {{:east, vertical_amount}, {:south, horizontal_amount}})
def rotate_right(turns, {{:east, horizontal_amount}, {:south, vertical_amount}}), do: rotate_right(turns - 1, {{:west, vertical_amount}, {:south, horizontal_amount}})
def rotate_right(turns, {{:west, horizontal_amount}, {:south, vertical_amount}}), do: rotate_right(turns - 1, {{:west, vertical_amount}, {:north, horizontal_amount}})
def rotate_right(turns, {{:west, horizontal_amount}, {:north, vertical_amount}}), do: rotate_right(turns - 1, {{:east, vertical_amount}, {:north, horizontal_amount}})
def rotate_left(0, waypoint), do: waypoint
def rotate_left(turns, {{:east, horizontal_amount}, {:north, vertical_amount}}), do: rotate_left(turns - 1, {{:west, vertical_amount}, {:north, horizontal_amount}})
def rotate_left(turns, {{:west, horizontal_amount}, {:north, vertical_amount}}), do: rotate_left(turns - 1, {{:west, vertical_amount}, {:south, horizontal_amount}})
def rotate_left(turns, {{:west, horizontal_amount}, {:south, vertical_amount}}), do: rotate_left(turns - 1, {{:east, vertical_amount}, {:south, horizontal_amount}})
def rotate_left(turns, {{:east, horizontal_amount}, {:south, vertical_amount}}), do: rotate_left(turns - 1, {{:east, vertical_amount}, {:north, horizontal_amount}})
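# Moving the waypoint along an axis either extends it in its current
# direction or, when the move overshoots zero, flips it to the opposite
# direction with the remaining amount.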
def handle_waypoint_update({current_direction, current_amount}, {current_direction, update_amount}), do: {current_direction, current_amount + update_amount}
def handle_waypoint_update({current_direction, current_amount}, {reverse_direction, update_amount}) do
if current_amount - update_amount < 0 do
{reverse_direction, update_amount - current_amount}
else
{current_direction, current_amount - update_amount}
end
end
def extract(row) do
[action | amount] = String.split(row, "", trim: true)
amount = Enum.join(amount, "")
|> String.to_integer()
case action do
"N" ->
%{cardinal: {:north, amount}}
"E" ->
%{cardinal: {:east, amount}}
"S" ->
%{cardinal: {:south, amount}}
"W" ->
%{cardinal: {:west, amount}}
"F" ->
%{scalar: amount}
"R" ->
%{turn: {:right, amount}}
"L" ->
%{turn: {:left, amount}}
end
end
end
Prog.solve
|
lib/days/day_12.ex
| 0.850562
| 0.561696
|
day_12.ex
|
starcoder
|
defmodule SecretGrinch.Matches do
@moduledoc """
The Matches context.
"""
import Ecto.Query, warn: false
alias SecretGrinch.Repo
alias SecretGrinch.Matches.Match
@doc """
Returns the list of matches.
## Examples
iex> list_matches()
[%Match{}, ...]
"""
def list_matches do
Repo.all(Match)
end
@doc """
Returns the list of matches for a user.
## Examples
iex> list_matches_for_user(user)
[%Match{}, ...]
"""
def list_matches_for_user(user) do
Repo.preload(user, :matches).matches
end
@doc """
Returns the matches a user has not yet subscribed to.
## Examples
iex> list_matches_for_user_to_subscribe(user)
[%Match{}, ...]
"""
def list_matches_for_user_to_subscribe(user) do
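# List difference: every match minus the ones the user is already in.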
Repo.all(Match) -- Repo.preload(user, :matches).matches
end
@doc """
Gets a single match.
Raises `Ecto.NoResultsError` if the Match does not exist.
## Examples
iex> get_match!(123)
%Match{}
iex> get_match!(456)
** (Ecto.NoResultsError)
"""
def get_match!(id), do: Repo.get!(Match, id)
@doc """
Creates a match.
## Examples
iex> create_match(%{field: value})
{:ok, %Match{}}
iex> create_match(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_match(attrs \\ %{}) do
%Match{}
|> Match.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a match.
## Examples
iex> update_match(match, %{field: new_value})
{:ok, %Match{}}
iex> update_match(match, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_match(%Match{} = match, attrs) do
match
|> Match.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Match.
## Examples
iex> delete_match(match)
{:ok, %Match{}}
iex> delete_match(match)
{:error, %Ecto.Changeset{}}
"""
def delete_match(%Match{} = match) do
Repo.delete(match)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking match changes.
## Examples
iex> change_match(match)
%Ecto.Changeset{source: %Match{}}
"""
def change_match(%Match{} = match) do
Match.changeset(match, %{})
end
end
|
lib/secret_grinch/matches/matches.ex
| 0.820469
| 0.411584
|
matches.ex
|
starcoder
|
defmodule CouchjitsuTrack.ActivityHistory do
@moduledoc """
A collection of queries for activities.
"""
import Ecto.Query
alias CouchjitsuTrack.Record
alias CouchjitsuTrack.Activity
@doc """
Gets all activities for a specified user id.
Will return a list of %{date, name, time, note, activity_id}
"""
def get_history_for_user(user_id) do
query = from r in Record,
join: a in Activity, on: r.activity_id == a.id,
where: a.user_id == ^user_id,
select: %{date: r.date,
name: a.name,
time: r.duration,
note: r.note,
activity_id: a.id,
id: r.id
},
order_by: [desc: r.date]
CouchjitsuTrack.Repo.all(query)
end
@doc """
Gets the history for a user, limited to the last `number_of_months` months.
"""
def get_history_for_user_and_span(user_id, number_of_months) do
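# Postgres interval arithmetic keeps only records from the last
# `number_of_months` months.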
query = from r in Record,
join: a in Activity, on: r.activity_id == a.id,
where: a.user_id == ^user_id and
fragment("? > now() - ? * Interval '1 month'", r.date, ^number_of_months),
select: %{date: r.date,
name: a.name,
time: r.duration,
default_duration: a.default_duration,
note: r.note,
activity_id: a.id,
id: r.id
},
order_by: [desc: r.date]
CouchjitsuTrack.Repo.all(query)
end
@doc """
Gets the history for a specific date and user. This can be used to find what activities a user did on a specific day.
"""
def get_history_for_user_and_date(user_id, date) do
query = from r in Record,
join: a in Activity, on: r.activity_id == a.id,
where: a.user_id == ^user_id and r.date == ^date,
select: %{date: r.date,
name: a.name,
time: r.duration,
note: r.note,
activity_id: a.id,
id: r.id
},
order_by: fragment("lower(?)", a.name)
CouchjitsuTrack.Repo.all(query)
end
@doc """
Gets all the history for a specific activity id.
Will return a list of activities grouped by month & year, and a sum of all the activities for that month.
"""
def get_history_for_id(activity_id) do
query = from r in Record,
where: r.activity_id == ^activity_id,
select: %{month: fragment("date_part('month', ?)::integer", r.date),
year: fragment("date_part('year', ?)::integer", r.date),
hours: sum(r.duration)
},
group_by: fragment("date_part('month', ?), date_part('year', ?)", r.date, r.date),
order_by: fragment("date_part('year', ?), date_part('month', ?)", r.date, r.date)
CouchjitsuTrack.Repo.all(query)
end
@doc """
Takes a list of activity maps (each with an `:hours` key) and flags every entry with `:max` and `:min` booleans marking whether it has the largest or smallest total.
"""
def set_statistics(activities) do
max_val = Enum.max_by(activities, &get_hours/1)
min_val = Enum.min_by(activities, &get_hours/1)
Enum.map(activities, fn(a) ->
a
|> Map.put_new(:max, a.hours == max_val.hours)
|> Map.put_new(:min, a.hours == min_val.hours)
end)
end
defp get_hours(activity) do
activity.hours
end
@doc """
Gets the name for the specified activity id
"""
def get_name(activity_id) do
CouchjitsuTrack.Repo.get(Activity, activity_id).name
end
@doc """
Gets a distinct list of dates for a specified user id.
"""
def get_dates_for_user(user_id) do
query = from r in Record,
join: a in Activity, on: r.activity_id == a.id,
where: a.user_id == ^user_id,
select: r.date,
distinct: true,
order_by: [desc: r.date]
CouchjitsuTrack.Repo.all(query)
end
end
|
lib/couchjitsu_track/activity_history.ex
| 0.738386
| 0.412678
|
activity_history.ex
|
starcoder
|
defmodule FileSize do
@moduledoc """
A file size calculator, parser and formatter.
## Usage
You can build your own file size by creating it with a number and a unit using
the `new/2` function. See the "Supported Units" section for a list of possible
unit atoms.
iex> FileSize.new(16, :gb)
#FileSize<"16.0 GB">
iex> FileSize.new(16, "GB")
#FileSize<"16.0 GB">
### Sigil
There is also a sigil defined that you can use to quickly build file sizes
from a number and unit symbol. Import the `FileSize.Sigil` module and you are
ready to go. See the "Supported Units" section for a list of possible unit
symbols.
iex> import FileSize.Sigil
...>
...> ~F(16 GB)
#FileSize<"16.0 GB">
### From File
With `from_file/1` it is also possible to retrieve the size of an actual file.
iex> FileSize.from_file("path/to/my/file.txt")
{:ok, #FileSize<"127.3 kB">}
### Conversions
You can convert file sizes between different units or unit systems by using
the `convert/2` function.
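For example (see `convert/2` below):
iex> FileSize.convert(FileSize.new(2, :kb), system: :iec)
#FileSize<"1.953125 KiB">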
### Calculations
You can calculate with file sizes. The particular units don't need to be the
same for that.
* `add/2` - Add two file sizes.
* `subtract/2` - Subtracts two file sizes.
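For example, converting the sum to bytes via `add/3`:
iex> FileSize.add(FileSize.new(1, :kb), FileSize.new(2, :kb), :b)
#FileSize<"3000 B">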
### Comparison
For comparison the units of the particular file sizes don't need to be the
same.
* `compare/2` - Compares two file sizes and returns a value indicating whether
one file size is greater than or less than the other.
* `equals?/2` - Determines whether two file sizes are equal.
* `lt?/2` - Determines whether file size a < b.
* `lte?/2` - Determines whether file size a <= b.
* `gt?/2` - Determines whether file size a > b.
* `gte?/2` - Determines whether file size a >= b.
To sort a collection of file sizes from smallest to greatest, you can use
`lte?/2` as sort function. To sort descending use `gte?/2`.
iex> sizes = [~F(16 GB), ~F(100 Mbit), ~F(27.4 MB), ~F(16 Gbit)]
...> Enum.sort(sizes, &FileSize.lte?/2)
[#FileSize<"100.0 Mbit">, #FileSize<"27.4 MB">, #FileSize<"16.0 Gbit">, #FileSize<"16.0 GB">]
## Supported Units
### Bit-based
#### SI (Système international d'unités)
| Atom | Symbol | Name | Factor |
|----------|--------|------------|--------|
| `:bit` | bit | Bits | 1 |
| `:kbit` | kbit | Kilobits | 1000 |
| `:mbit` | Mbit | Megabits | 1000^2 |
| `:gbit`  | Gbit   | Gigabits   | 1000^3 |
| `:tbit`  | Tbit   | Terabits   | 1000^4 |
| `:pbit`  | Pbit   | Petabits   | 1000^5 |
| `:ebit`  | Ebit   | Exabits    | 1000^6 |
| `:zbit`  | Zbit   | Zettabits  | 1000^7 |
| `:ybit`  | Ybit   | Yottabits  | 1000^8 |
#### IEC (International Electrotechnical Commission)
| Atom | Symbol | Name | Factor |
|----------|--------|------------|--------|
| `:bit`   | bit    | Bits       | 1      |
| `:kibit` | Kibit | Kibibits | 1024 |
| `:mibit` | Mibit | Mebibits | 1024^2 |
| `:gibit` | Gibit | Gibibits | 1024^3 |
| `:tibit` | Tibit | Tebibits | 1024^4 |
| `:pibit` | Pibit | Pebibits | 1024^5 |
| `:eibit` | Eibit | Exbibits | 1024^6 |
| `:zibit` | Zibit | Zebibits | 1024^7 |
| `:yibit` | Yibit | Yobibits | 1024^8 |
### Byte-based
The most common unit of digital information. A single Byte represents 8 Bits.
#### SI (Système international d'unités)
| Atom | Symbol | Name | Factor |
|----------|--------|------------|--------|
| `:b` | B | Bytes | 1 |
| `:kb` | kB | Kilobytes | 1000 |
| `:mb` | MB | Megabytes | 1000^2 |
| `:gb` | GB | Gigabytes | 1000^3 |
| `:tb` | TB | Terabytes | 1000^4 |
| `:pb` | PB | Petabytes | 1000^5 |
| `:eb` | EB | Exabytes | 1000^6 |
| `:zb`    | ZB     | Zettabytes | 1000^7 |
| `:yb` | YB | Yottabytes | 1000^8 |
#### IEC (International Electrotechnical Commission)
| Atom | Symbol | Name | Factor |
|----------|--------|------------|--------|
| `:b` | B | Bytes | 1 |
| `:kib` | KiB | Kibibytes | 1024 |
| `:mib` | MiB | Mebibytes | 1024^2 |
| `:gib` | GiB | Gibibytes | 1024^3 |
| `:tib` | TiB | Tebibytes | 1024^4 |
| `:pib` | PiB | Pebibytes | 1024^5 |
| `:eib` | EiB | Exbibytes | 1024^6 |
| `:zib` | ZiB | Zebibytes | 1024^7 |
| `:yib` | YiB | Yobibytes | 1024^8 |
"""
alias FileSize.Bit
alias FileSize.Byte
alias FileSize.Calculable
alias FileSize.Comparable
alias FileSize.Convertible
alias FileSize.Units
alias FileSize.Units.Info, as: UnitInfo
@typedoc """
A type that defines the IEC bit and byte units.
"""
@type iec_unit :: Bit.iec_unit() | Byte.iec_unit()
@typedoc """
A type that defines the SI bit and byte units.
"""
@type si_unit :: Bit.si_unit() | Byte.si_unit()
@typedoc """
A type that is a union of the bit and byte types.
"""
@type t :: Bit.t() | Byte.t()
@typedoc """
A type that is a union of the bit and byte unit types and
`t:FileSize.Units.Info.t/0`.
"""
@type unit :: iec_unit | si_unit | UnitInfo.t() | unit_symbol
@typedoc """
A type that represents a unit symbol.
"""
@type unit_symbol :: String.t()
@typedoc """
A type that contains the available unit systems.
"""
@type unit_system :: :iec | :si
@typedoc """
A type that defines the value used to create a new file size.
"""
@type value :: number | String.t() | Decimal.t()
@doc false
defmacro __using__(_) do
quote do
import FileSize.Sigil
end
end
@doc """
Gets the configuration.
"""
@spec __config__() :: Keyword.t()
def __config__ do
Application.get_all_env(:file_size)
end
@doc """
Builds a new file size. Raises when the given unit could not be found.
## Examples
iex> FileSize.new(2.5, :mb)
#FileSize<"2.5 MB">
iex> FileSize.new(214, :kib)
#FileSize<"214 KiB">
iex> FileSize.new(3, :bit)
#FileSize<"3 bit">
iex> FileSize.new("214", "KiB")
#FileSize<"214 KiB">
"""
@spec new(value, unit) :: t | no_return
def new(value, symbol_or_unit_or_unit_info \\ :b)
def new(value, %UnitInfo{mod: mod} = unit_info) do
mod.new(value, unit_info)
end
def new(value, symbol_or_unit) do
new(value, Units.fetch!(symbol_or_unit))
end
@doc """
Builds a new file size from the given number of bytes.
## Example
iex> FileSize.from_bytes(2000)
#FileSize<"2000 B">
"""
@spec from_bytes(value) :: t
def from_bytes(bytes), do: FileSize.new(bytes, :b)
@doc """
Builds a new file size from the given number of bytes, allowing conversion
in the same step.
## Options
When a keyword list is given, you must specify one of the following options.
* `:convert` - Converts the file size to the given `t:unit/0`.
* `:scale` - Scales and converts the file size to an appropriate unit in the
specified `t:unit_system/0`.
## Examples
iex> FileSize.from_bytes(2000, scale: :iec)
#FileSize<"1.953125 KiB">
iex> FileSize.from_bytes(16, scale: :unknown)
** (FileSize.InvalidUnitSystemError) Invalid unit system: :unknown
iex> FileSize.from_bytes(2, convert: :bit)
#FileSize<"16 bit">
iex> FileSize.from_bytes(1600, :kb)
#FileSize<"1.6 kB">
iex> FileSize.from_bytes(16, convert: :unknown)
** (FileSize.InvalidUnitError) Invalid unit: :unknown
"""
@spec from_bytes(value, unit | Keyword.t()) :: t
def from_bytes(bytes, symbol_or_unit_or_unit_info_or_opts)
def from_bytes(bytes, opts) when is_list(opts) do
do_from_bytes(bytes, opts)
end
def from_bytes(bytes, unit_or_unit_info) do
do_from_bytes(bytes, convert: unit_or_unit_info)
end
defp do_from_bytes(bytes, convert: unit_or_unit_info) do
bytes
|> from_bytes()
|> convert(unit_or_unit_info)
end
defp do_from_bytes(bytes, scale: unit_system) do
bytes
|> from_bytes()
|> scale(unit_system)
end
@doc """
Builds a new file size from the given number of bits.
## Example
iex> FileSize.from_bits(2000)
#FileSize<"2000 bit">
"""
@spec from_bits(value) :: t
def from_bits(bits), do: FileSize.new(bits, :bit)
@doc """
Builds a new file size from the given number of bits, allowing conversion
in the same step.
## Options
When a keyword list is given, you must specify one of the following options.
* `:convert` - Converts the file size to the given `t:unit/0`.
* `:scale` - Scales and converts the file size to an appropriate unit in the
specified `t:unit_system/0`.
## Examples
iex> FileSize.from_bits(2000, scale: :iec)
#FileSize<"1.953125 Kibit">
iex> FileSize.from_bits(16, scale: :unknown)
** (FileSize.InvalidUnitSystemError) Invalid unit system: :unknown
iex> FileSize.from_bits(16, convert: :b)
#FileSize<"2 B">
iex> FileSize.from_bits(1600, :kbit)
#FileSize<"1.6 kbit">
iex> FileSize.from_bits(16, convert: :unknown)
** (FileSize.InvalidUnitError) Invalid unit: :unknown
"""
@spec from_bits(value, unit | Keyword.t()) :: t
def from_bits(bits, symbol_or_unit_or_unit_info_or_opts)
def from_bits(bits, opts) when is_list(opts) do
do_from_bits(bits, opts)
end
def from_bits(bits, unit_or_unit_info) do
do_from_bits(bits, convert: unit_or_unit_info)
end
defp do_from_bits(bits, convert: unit_or_unit_info) do
bits
|> from_bits()
|> convert(unit_or_unit_info)
end
defp do_from_bits(bits, scale: unit_system) do
bits
|> from_bits()
|> scale(unit_system)
end
@doc """
Determines the size of the file at the given path.
## Options
When a keyword list is given, you must specify one of the following options.
* `:convert` - Converts the file size to the given `t:unit/0`.
* `:scale` - Scales and converts the file size to an appropriate unit in the
specified `t:unit_system/0`.
## Examples
iex> FileSize.from_file("path/to/my/file.txt")
{:ok, #FileSize<"133.7 kB">}
iex> FileSize.from_file("path/to/my/file.txt", :mb)
{:ok, #FileSize<"0.13 MB">}
iex> FileSize.from_file("path/to/my/file.txt", unit: :mb)
{:ok, #FileSize<"0.13 MB">}
iex> FileSize.from_file("path/to/my/file.txt", scale: :iec)
{:ok, #FileSize<"133.7 KiB">}
iex> FileSize.from_file("not/existing/file.txt")
{:error, :enoent}
"""
@spec from_file(Path.t(), unit | Keyword.t()) ::
{:ok, t} | {:error, File.posix()}
def from_file(path, symbol_or_unit_or_unit_info_or_opts \\ :b) do
with {:ok, %{size: value}} <- File.stat(path) do
{:ok, from_bytes(value, symbol_or_unit_or_unit_info_or_opts)}
end
end
@doc """
Determines the size of the file at the given path. Raises when the file could
not be found.
## Options
When a keyword list is given, you must specify one of the following options.
* `:convert` - Converts the file size to the given `t:unit/0`.
* `:scale` - Scales and converts the file size to an appropriate unit in the
specified `t:unit_system/0`.
## Examples
iex> FileSize.from_file!("path/to/my/file.txt")
#FileSize<"133.7 kB">
iex> FileSize.from_file!("path/to/my/file.txt", :mb)
#FileSize<"0.13 MB">
iex> FileSize.from_file!("path/to/my/file.txt", unit: :mb)
#FileSize<"0.13 MB">
iex> FileSize.from_file!("path/to/my/file.txt", unit: "KiB")
#FileSize<"133.7 KiB">
iex> FileSize.from_file!("path/to/my/file.txt", system: :iec)
#FileSize<"133.7 KiB">
iex> FileSize.from_file!("not/existing/file.txt")
** (File.Error) could not read file stats "not/existing/file.txt": no such file or directory
"""
@spec from_file!(Path.t(), unit | Keyword.t()) ::
t | no_return
def from_file!(path, symbol_or_unit_or_unit_info_or_opts \\ :b) do
path
|> File.stat!()
|> Map.fetch!(:size)
|> from_bytes(symbol_or_unit_or_unit_info_or_opts)
end
@doc """
Converts the given value into a value of type `t:FileSize.t/0`. Returns a
tuple containing the status and value or error.
"""
@spec parse(any) :: {:ok, t} | {:error, FileSize.ParseError.t()}
defdelegate parse(value), to: FileSize.Parser
@doc """
Converts the given value into a value of type `t:FileSize.t/0`. Returns the
value on success or raises `FileSize.ParseError` on error.
"""
@spec parse!(any) :: t | no_return
defdelegate parse!(value), to: FileSize.Parser
@doc """
Formats a file size in a human-readable format, allowing customization of the
formatting.
## Options
* `:symbols` - Allows using your own unit symbols. Must be a map that contains
the unit names as keys (as defined by `t:FileSize.unit/0`) and the unit
symbol strings as values. Missing entries in the map are filled with the
internal unit symbols from `FileSize.Units.list/0`.
Other options customize the number format and are forwarded to
`Number.Delimit.number_to_delimited/2`. The default precision for numbers is
0.
## Global Configuration
You can also define your custom symbols globally.
config :file_size, :symbols, %{b: "Byte", kb: "KB"}
The same is possible for number formatting.
config :file_size, :number_format, precision: 2, delimiter: ",", separator: "."
Or globally for the number library.
config :number, delimit: [precision: 2, delimiter: ",", separator: "."]
## Examples
iex> FileSize.format(FileSize.new(32, :kb))
"32 kB"
iex> FileSize.format(FileSize.new(2048.2, :mb))
"2,048 MB"
"""
@spec format(t, Keyword.t()) :: String.t()
defdelegate format(size, opts \\ []), to: FileSize.Formatter
@doc """
Formats the given size ignoring all user configuration. The result of this
function can be passed back to `FileSize.parse/1` and is also used by the
implementations of the `Inspect` and `String.Chars` protocols.
## Example
iex> FileSize.to_string(FileSize.new(32.2, :kb))
"32.2 kB"
"""
@spec to_string(t) :: String.t()
defdelegate to_string(size), to: FileSize.Formatter, as: :format_simple
@doc """
Converts the given file size to a given unit or unit system.
## Options
When a keyword list is given, you must specify one of the following options.
* `:unit` - Converts the file size to the given `t:unit/0`.
* `:system` - Converts the file size to the given `t:unit_system/0`.
## Examples
iex> FileSize.convert(FileSize.new(2, :kb), :b)
#FileSize<"2000 B">
iex> FileSize.convert(FileSize.new(2000, :b), unit: :kb)
#FileSize<"2 kB">
iex> FileSize.convert(FileSize.new(20, :kb), :kbit)
#FileSize<"160 kbit">
iex> FileSize.convert(FileSize.new(2, :kb), system: :iec)
#FileSize<"1.953125 KiB">
iex> FileSize.convert(FileSize.new(2, :kib), system: :si)
#FileSize<"2.048 kB">
iex> FileSize.convert(FileSize.new(2000, :b), unit: :unknown)
** (FileSize.InvalidUnitError) Invalid unit: :unknown
iex> FileSize.convert(FileSize.new(2, :b), system: :unknown)
** (FileSize.InvalidUnitSystemError) Invalid unit system: :unknown
"""
@spec convert(t, unit | Keyword.t()) :: t
def convert(size, symbol_or_unit_or_unit_info_or_opts)
def convert(size, opts) when is_list(opts) do
do_convert(size, opts)
end
def convert(size, unit_or_unit_info) do
do_convert(size, unit: unit_or_unit_info)
end
defp do_convert(size, unit: unit) do
Convertible.convert(size, Units.fetch!(unit))
end
defp do_convert(size, system: unit_system) do
convert(size, Units.equivalent_unit_for_system!(size.unit, unit_system))
end
@doc """
Converts the given file size to the most appropriate unit. When no unit system
is specified, the unit system of the source file size is used. If no unit
system could be inferred from the size, the SI unit system is used.
## Examples
iex> FileSize.scale(FileSize.new(2000, :b))
#FileSize<"2 kB">
iex> FileSize.scale(FileSize.new(2_000_000, :kb))
#FileSize<"2 GB">
iex> FileSize.scale(FileSize.new(2_000_000, :kb), :iec)
#FileSize<"1.862645149230957 GiB">
iex> FileSize.scale(FileSize.new(2000, :b), :unknown)
** (FileSize.InvalidUnitSystemError) Invalid unit system: :unknown
"""
@doc since: "1.1.0"
@spec scale(t, nil | unit_system) :: t
def scale(size, unit_system \\ nil) do
convert(size, Units.appropriate_unit_for_size!(size, unit_system))
end
@doc """
Compares two file sizes and returns an atom indicating whether the first value
is less than, greater than or equal to the second one.
## Example
iex> FileSize.compare(FileSize.new(2, :b), FileSize.new(16, :bit))
:eq
iex> FileSize.compare(FileSize.new(1, :b), FileSize.new(16, :bit))
:lt
iex> FileSize.compare(FileSize.new(3, :b), FileSize.new(16, :bit))
:gt
"""
@spec compare(t | String.t(), t | String.t()) :: :lt | :eq | :gt
def compare(size, other_size) do
size = parse!(size)
other_size = parse!(other_size)
Comparable.compare(size, other_size)
end
@doc """
Determines whether two file sizes are equal.
## Examples
iex> FileSize.equals?(FileSize.new(2, :b), FileSize.new(16, :bit))
true
iex> FileSize.equals?(FileSize.new(2, :b), FileSize.new(2, :b))
true
iex> FileSize.equals?(FileSize.new(1, :b), FileSize.new(2, :b))
false
"""
@spec equals?(t, t) :: boolean
def equals?(size, other_size) do
compare(size, other_size) == :eq
end
@doc """
Determines whether the first file size is less than the second one.
## Examples
iex> FileSize.lt?(FileSize.new(1, :b), FileSize.new(2, :b))
true
iex> FileSize.lt?(FileSize.new(2, :b), FileSize.new(1, :b))
false
"""
@doc since: "1.2.0"
@spec lt?(t, t) :: boolean
def lt?(size, other_size) do
compare(size, other_size) == :lt
end
@doc """
Determines whether the first file size is less than or equal to the second
one.
## Examples
iex> FileSize.lte?(FileSize.new(1, :b), FileSize.new(2, :b))
true
iex> FileSize.lte?(FileSize.new(1, :b), FileSize.new(1, :b))
true
iex> FileSize.lte?(FileSize.new(2, :b), FileSize.new(1, :b))
false
"""
@doc since: "2.0.0"
@spec lte?(t, t) :: boolean
def lte?(size, other_size) do
compare(size, other_size) in [:lt, :eq]
end
@doc """
Determines whether the first file size is greater than the second one.
## Examples
iex> FileSize.gt?(FileSize.new(2, :b), FileSize.new(1, :b))
true
iex> FileSize.gt?(FileSize.new(1, :b), FileSize.new(2, :b))
false
"""
@doc since: "1.2.0"
@spec gt?(t, t) :: boolean
def gt?(size, other_size) do
compare(size, other_size) == :gt
end
@doc """
Determines whether the first file size is greater than or equal to the second
one.
## Examples
iex> FileSize.gte?(FileSize.new(2, :b), FileSize.new(1, :b))
true
iex> FileSize.gte?(FileSize.new(1, :b), FileSize.new(1, :b))
true
iex> FileSize.gte?(FileSize.new(1, :b), FileSize.new(2, :b))
false
"""
@doc since: "2.0.0"
@spec gte?(t, t) :: boolean
def gte?(size, other_size) do
compare(size, other_size) in [:eq, :gt]
end
@doc """
Adds two file sizes.
"""
defdelegate add(size, other_size), to: Calculable
@doc """
Adds two file sizes like `add/2` and converts the result to the specified
unit.
## Options
When a keyword list is given, you must specify one of the following options.
* `:unit` - Converts the file size to the given `t:unit/0`.
* `:system` - Converts the file size to the given `t:unit_system/0`.
## Examples
iex> FileSize.add(FileSize.new(1, :kb), FileSize.new(2, :kb), :b)
#FileSize<"3000 B">
iex> FileSize.add(FileSize.new(1, :kb), FileSize.new(2, :kb), unit: :b)
#FileSize<"3000 B">
iex> FileSize.add(FileSize.new(1, :kb), FileSize.new(2, :kb), system: :iec)
#FileSize<"2.9296875 KiB">
"""
@spec add(t, t, unit | Keyword.t()) :: t
def add(size, other_size, symbol_or_unit_or_unit_info_or_opts) do
size
|> add(other_size)
|> convert(symbol_or_unit_or_unit_info_or_opts)
end
@doc """
Subtracts the second file size from the first.
"""
defdelegate subtract(size, other_size), to: Calculable
@doc """
Subtracts two file sizes like `subtract/2` and converts the result to the
specified unit.
## Options
When a keyword list is given, you must specify one of the following options.
* `:unit` - Converts the file size to the given `t:unit/0`.
* `:system` - Converts the file size to the given `t:unit_system/0`.
## Examples
iex> FileSize.subtract(FileSize.new(2, :b), FileSize.new(6, :bit), :bit)
#FileSize<"10 bit">
iex> FileSize.subtract(FileSize.new(2, :b), FileSize.new(6, :bit), unit: :bit)
#FileSize<"10 bit">
iex> FileSize.subtract(FileSize.new(3, :kb), FileSize.new(1, :kb), system: :iec)
#FileSize<"1.953125 KiB">
"""
@spec subtract(t, t, unit | Keyword.t()) :: t
def subtract(size, other_size, symbol_or_unit_or_unit_info_or_opts) do
size
|> subtract(other_size)
|> convert(symbol_or_unit_or_unit_info_or_opts)
end
@doc """
Gets the normalized size from the given file size as integer.
## Example
iex> FileSize.to_integer(FileSize.new(2, :kbit))
2000
"""
@doc since: "2.0.0"
@spec to_integer(t) :: integer
def to_integer(size) do
size
|> Convertible.normalized_value()
|> trunc()
end
@doc """
Gets the value from the file size as float.
## Examples
iex> FileSize.value_to_float(FileSize.new(2, :kbit))
2.0
iex> FileSize.value_to_float(FileSize.new(2.3, :kbit))
2.3
"""
@doc since: "2.1.0"
@spec value_to_float(t) :: float
def value_to_float(size) do
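# `/` in Elixir always returns a float, so dividing by 1 coerces an integer
# value to its float equivalent.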
size.value / 1
end
end
|
lib/file_size.ex
| 0.799168
| 0.614943
|
file_size.ex
|
starcoder
|
defmodule Specify.Provider.SystemEnv do
@moduledoc """
A Configuration Provider source based on `System.get_env/2`
Values will be loaded based on `\#{prefix}_\#{capitalized_field_name}`.
`prefix` defaults to the capitalized name of the configuration specification module.
`capitalized_field_name` is in `CONSTANT_CASE` (all-caps, with underscores as word separators).
### Examples
The following examples use the following specification for reference:
defmodule Elixir.Pet do
require Specify
Specify.defconfig do
@doc "The name of the pet"
field :name, :string
@doc "is it a dog or a cat?"
field :kind, :atom, system_env_name: "TYPE"
end
end
Note that if a field has a different name than the environment variable you want to read from,
you can add the `system_env_name:` option when specifying the field, as has been done for the `:kind` field
in the example module above.
iex> System.put_env("PET_NAME", "Timmy")
iex> System.put_env("PET_TYPE", "cat")
iex> Pet.load(sources: [Specify.Provider.SystemEnv.new()])
%Pet{name: "Timmy", kind: :cat}
iex> Pet.load(sources: [Specify.Provider.SystemEnv.new("PET")])
%Pet{name: "Timmy", kind: :cat}
iex> System.put_env("SECOND_PET_NAME", "John")
iex> System.put_env("SECOND_PET_TYPE", "dog")
iex> Pet.load(sources: [Specify.Provider.SystemEnv.new("SECOND_PET")])
%Pet{name: "John", kind: :dog}
"""
defstruct [:prefix, optional: false]
@doc """
"""
def new(prefix \\ nil, options \\ []) do
optional = options[:optional] || false
%__MODULE__{prefix: prefix, optional: optional}
end
defimpl Specify.Provider do
def load(provider = %Specify.Provider.SystemEnv{prefix: nil}, module) do
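# No prefix was configured, so derive one from the module name
# (e.g. `Pet` -> "PET") and retry.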
capitalized_prefix =
module
|> Macro.to_string()
|> String.upcase()
load(%Specify.Provider.SystemEnv{provider | prefix: capitalized_prefix}, module)
end
def load(%Specify.Provider.SystemEnv{prefix: prefix, optional: optional}, module) do
full_env = System.get_env()
res =
Enum.reduce(module.__specify__(:field_options), %{}, fn {name, options}, acc ->
capitalized_field_name = options[:system_env_name] || String.upcase(to_string(name))
full_field_name = "#{prefix}_#{capitalized_field_name}"
if Map.has_key?(full_env, full_field_name) do
Map.put(acc, name, full_env[full_field_name])
else
acc
end
end)
if res == %{} do
if optional do
{:ok, %{}}
else
{:error, :not_found}
end
else
{:ok, res}
end
end
end
end
|
lib/specify/provider/system_env.ex
| 0.840995
| 0.406096
|
system_env.ex
|
starcoder
|
defmodule EQ do
@moduledoc """
A simple wrapper around the Erlang Queue library that follows the idiomatic
pattern of expecting the module target first to take advantage of the pipeline
operator.
Queues are double ended. The mental picture of a queue is a line of people
(items) waiting for their turn. The queue front is the end with the item that
has waited the longest. The queue rear is the end an item enters when it
starts to wait. If instead using the mental picture of a list, the front is
called head and the rear is called tail.
Entering at the front and exiting at the rear are reverse operations on the queue.
"""
"""
defstruct data: :queue.new
@type t :: %EQ{data: {[any],[any]} }
@doc """
Returns an empty queue
## Example
iex> EQ.new
#EQ<[]>
"""
@spec new :: EQ.t
def new(), do: %EQ{}
@doc """
Calculates and returns the length of given queue
## Example
iex> EQ.from_list([:a, :b, :c]) |> EQ.length
3
"""
@spec length(EQ.t) :: non_neg_integer()
def length(%EQ{data: queue}), do: :queue.len(queue)
@doc """
Adds an item to the end of the queue, returns the resulting queue
## Example
iex> EQ.new |> EQ.push(:a)
#EQ<[:a]>
"""
@spec push(EQ.t, any) :: EQ.t
def push(%EQ{data: queue}, item), do: :queue.in(item, queue) |> wrap
@doc """
Removes the item at the front of queue. Returns the tuple `{{:value, item}, Q2}`,
where item is the item removed and Q2 is the resulting queue. If Q1 is empty,
the tuple `{:empty, Q1}` is returned.
## Examples
iex> EQ.from_list([:a, :b]) |> EQ.pop
{{:value, :a}, %EQ{data: {[], [:b]} }}
iex> EQ.new |> EQ.pop
{:empty, EQ.new}
"""
@spec pop(EQ.t) :: {{:value, any}, EQ.t}
| {:empty, EQ.t}
def pop(%EQ{data: queue}) do
{val, rest} = :queue.out(queue)
{val, wrap(rest)}
end
@doc """
Returns a list of the items in the queue in the same order;
the front item of the queue will become the head of the list.
## Example
iex> EQ.from_list([1, 2, 3, 4, 5]) |> EQ.to_list
[1, 2, 3, 4, 5]
"""
@spec to_list(EQ.t) :: [any]
def to_list(%EQ{data: queue}), do: :queue.to_list(queue)
@doc """
Returns a queue containing the items in L in the same order;
the head item of the list will become the front item of the queue.
## Example
iex> EQ.from_list [1, 2, 3, 4, 5]
#EQ<[1, 2, 3, 4, 5]>
"""
@spec from_list([any]) :: EQ.t
def from_list(list), do: :queue.from_list(list) |> wrap
@doc """
Returns a new queue with the items for the given queue in reverse order
## Example
iex> EQ.from_list([1, 2, 3, 4, 5]) |> EQ.reverse
#EQ<[5, 4, 3, 2, 1]>
"""
@spec reverse(EQ.t) :: EQ.t
def reverse(%EQ{data: queue}), do: :queue.reverse(queue) |> wrap
@doc """
With a given queue and an amount it returns {Q2, Q3}, where Q2 contains
the first `amount` items and Q3 holds the rest. Attempting to split an
empty queue or past the queue's length raises an ArgumentError.
## Examples
iex> EQ.from_list([1, 2, 3, 4, 5]) |> EQ.split(3)
{EQ.from_list([1,2,3]), EQ.from_list([4,5])}
iex> EQ.from_list([1, 2, 3, 4, 5]) |> EQ.split(12)
** (ArgumentError) argument error
"""
@spec split(EQ.t, pos_integer()) :: {EQ.t, EQ.t}
def split(%EQ{data: queue}, amount) do
{left, right} = :queue.split(amount, queue)
{wrap(left), wrap(right)}
end
@doc """
Given two queues, an new queue is returned with the second appended to
the end of the first queue given
## Example
iex> EQ.from_list([1]) |> EQ.join(EQ.from_list([2]))
#EQ<[1, 2]>
"""
@spec join(EQ.t, EQ.t) :: EQ.t
def join(%EQ{data: front}, %EQ{data: back}) do
:queue.join(front, back) |> wrap
end
@doc """
With a given queue and function, a new queue is returned in the same
order as the one given where the function returns true for an element
## Example
iex> [1, 2, 3, 4, 5] |> EQ.from_list |> EQ.filter(fn x -> rem(x, 2) == 0 end)
#EQ<[2, 4]>
"""
@spec filter(EQ.t, (any -> boolean)) :: EQ.t
def filter(%EQ{data: queue}, fun), do: :queue.filter(fun, queue) |> wrap
@doc """
Returns true if the given element is in the queue, false otherwise
## Examples
iex> EQ.from_list([1, 2, 3]) |> EQ.member?(2)
true
iex> EQ.from_list([1, 2, 3]) |> EQ.member?(9)
false
"""
@spec member?(EQ.t, any) :: true | false
def member?(%EQ{data: queue}, item), do: :queue.member(item, queue)
@doc """
Returns true if the given queue is empty, false otherwise
## Examples
iex> EQ.from_list([1, 2, 3]) |> EQ.empty?
false
iex> EQ.new |> EQ.empty?
true
"""
@spec empty?(EQ.t) :: true | false
def empty?(%EQ{data: queue}), do: :queue.is_empty(queue)
@doc """
Returns true if the given item is a queue, false otherwise
## Examples
iex> EQ.new |> EQ.is_queue?
true
iex> {:a_queue?, [], []} |> EQ.is_queue?
false
"""
@spec is_queue?(any) :: true | false
def is_queue?(%EQ{data: queue}), do: :queue.is_queue(queue)
def is_queue?(_), do: false
@doc """
Returns the item at the front of the queue.
Returns the tuple `{:value, item}``.
If Q1 is empty, the tuple `{:empty, Q1}` is returned.
## Examples
iex> EQ.from_list([1, 2]) |> EQ.head
{:value, 1}
iex> EQ.from_list([1]) |> EQ.head
{:value, 1}
iex> EQ.new |> EQ.head
{:empty, EQ.new}
"""
@spec head(EQ.t) :: {:value, any}
| {:empty, EQ.t}
def head(q = %EQ{data: data}) do
if q |> empty? do
{:empty, q}
else
{:value, :queue.head(data)}
end
end
@doc """
Returns the item at the end of the queue.
Returns the tuple `{:value, item}`.
If Q1 is empty, the tuple `{:empty, Q1}` is returned.
## Examples
iex> EQ.from_list([1, 2]) |> EQ.last
{:value, 2}
iex> EQ.from_list([1]) |> EQ.last
{:value, 1}
iex> EQ.new |> EQ.last
{:empty, EQ.new}
"""
@spec last(EQ.t) :: {:value, any}
| {:empty, EQ.t}
def last(q = %EQ{data: data}) do
if q |> empty? do
{:empty, q}
else
{:value, :queue.last(data)}
end
end
@doc """
Removes the item at the front of the queue, returns the resulting queue.
## Example
iex> EQ.from_list([1,2,3]) |> EQ.tail
#EQ<[2, 3]>
iex> EQ.from_list([1,2]) |> EQ.tail
#EQ<[2]>
iex> EQ.from_list([1]) |> EQ.tail
#EQ<[]>
iex> EQ.new |> EQ.tail
#EQ<[]>
"""
@spec tail(EQ.t) :: EQ.t
def tail(q = %EQ{data: queue}) do
if q |> empty? do
q
else
:queue.tail(queue) |> wrap
end
end
@doc false
@spec wrap({[any], [any]}) :: EQ.t
defp wrap(data), do: %EQ{data: data}
end
|
lib/e_q.ex
| 0.857872
| 0.655115
|
e_q.ex
|
starcoder
|