| code (string, 114–1.05M chars) | path (string, 3–312 chars) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, 3–168 chars) | kind (1 class) |
|---|---|---|---|---|---|
defmodule Instream.Connection do
@moduledoc """
Defines a connection to an InfluxDB instance.
## Connection Definition
defmodule MyConnection do
use Instream.Connection, otp_app: :my_app
end
This connection will fetch its configuration from the application environment
as defined by `:otp_app`. As an alternative you can define the configuration
in the module definition itself:
defmodule MyConnection do
use Instream.Connection,
config: [
version: :v1,
host: "influxdb.example.com",
scheme: "http"
]
end
Both inline and `:otp_app` configuration can be mixed. In this case the
application configuration will overwrite any inline values.
For more information on how to configure your connection please refer to
the documentation of `Instream.Connection.Config`.
### InfluxDB version
By default a connection module will expect to communicate with an
`InfluxDB 1.x` server (`version: :v1`). Configure `version: :v2` if you
are running an `InfluxDB 2.x` server.
"""
alias Instream.Log
@type log_entry ::
Log.PingEntry.t()
| Log.QueryEntry.t()
| Log.StatusEntry.t()
| Log.WriteEntry.t()
@type precision ::
:hour | :minute | :second | :millisecond | :microsecond | :nanosecond | :rfc3339
@type e_version_mismatch :: {:error, :version_mismatch}
defmacro __using__(opts) do
quote bind_quoted: [opts: opts], location: :keep do
alias Instream.Connection
alias Instream.Connection.Config
alias Instream.Connection.QueryRunnerV1
alias Instream.Connection.QueryRunnerV2
alias Instream.Connection.Supervisor
alias Instream.Data
@behaviour Connection
@otp_app opts[:otp_app]
@config opts[:config] || []
@impl Connection
def child_spec(_) do
%{
id: __MODULE__,
start: {Supervisor, :start_link, [__MODULE__]}
}
end
@impl Connection
def config(key \\ nil), do: Config.get(@otp_app, __MODULE__, key, @config)
@impl Connection
def ping(opts \\ []) do
case config(:version) do
:v2 -> {:error, :version_mismatch}
_ -> QueryRunnerV1.ping(opts, __MODULE__)
end
end
@impl Connection
def query(query, opts \\ []) do
case config(:version) do
:v2 -> QueryRunnerV2.read(query, opts, __MODULE__)
_ -> QueryRunnerV1.read(query, opts, __MODULE__)
end
end
@impl Connection
def status(opts \\ []) do
case config(:version) do
:v2 -> {:error, :version_mismatch}
_ -> QueryRunnerV1.status(opts, __MODULE__)
end
end
@impl Connection
def version(opts \\ []) do
case config(:version) do
:v2 -> {:error, :version_mismatch}
_ -> QueryRunnerV1.version(opts, __MODULE__)
end
end
@impl Connection
def write(points, opts \\ []) do
case config(:version) do
:v2 -> QueryRunnerV2.write(points, opts, __MODULE__)
_ -> QueryRunnerV1.write(points, opts, __MODULE__)
end
end
end
end
@doc """
Returns a supervisable connection child_spec.
"""
@callback child_spec(_ignored :: term) :: Supervisor.child_spec()
@doc """
Returns the connection configuration.
"""
@callback config(key :: atom | nil) :: Keyword.t() | term
@doc """
Pings the connection server.
*Only available with InfluxDB v1.x connections.*
"""
@callback ping(opts :: Keyword.t()) :: :pong | :error | e_version_mismatch
@doc """
Executes a reading query.
Options:
- `database`: use a database differing from the connection config for reading
- `method`: whether to use a `:get` or `:post` request
- `org`: use an organization differing from the connection config for reading
- `precision`: return data with a "precision" other than `:rfc3339`
"""
@callback query(query :: String.t(), opts :: Keyword.t()) :: any
@doc """
Checks the status of the connection server.
*Only available with InfluxDB v1.x connections.*
"""
@callback status(opts :: Keyword.t()) :: :ok | :error | e_version_mismatch
@doc """
Determines the version of the connection server.
*Only available with InfluxDB v1.x connections.*
If the version is undetectable (no header returned) it will be
reported as `"unknown"`. If the host is unreachable or an error occurs
the response will be `:error`.
"""
@callback version(opts :: Keyword.t()) :: String.t() | :error | e_version_mismatch
@doc """
Executes a writing query.
Usable options depend on the writer module configured.
"""
@callback write(payload :: map | [map], opts :: Keyword.t()) :: any
end
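A minimal usage sketch of such a connection, assuming the `MyConnection` module and configuration from the moduledoc above. The callbacks only require points to be `map | [map]`, so the exact point-map keys below are illustrative:

```elixir
defmodule MyConnection do
  use Instream.Connection, otp_app: :my_app
end

# Start the connection supervisor (usually done in your application tree).
{:ok, _pid} = Supervisor.start_link([MyConnection], strategy: :one_for_one)

# Write a point and read it back; map keys are illustrative.
MyConnection.write(%{measurement: "cpu", fields: %{value: 0.42}})
result = MyConnection.query("SELECT * FROM cpu")
```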
|
lib/instream/connection.ex
| 0.892756
| 0.455441
|
connection.ex
|
starcoder
|
defmodule Rlp do
@type rlp() :: binary() | [rlp()]
@spec encode!(nil | binary() | maybe_improper_list() | non_neg_integer() | tuple()) :: binary()
def encode!(<<x>>) when x < 0x80, do: <<x>>
def encode!(x) when is_binary(x) do
with_length!(0x80, x)
end
def encode!(list) when is_list(list) do
with_length!(0xC0, Enum.map(list, &encode!/1))
end
def encode!(other) do
encode!(do_encode!(other))
end
defp with_length!(offset, data) do
size = :erlang.iolist_size(data)
if size <= 55 do
[offset + size, data]
else
bin = :binary.encode_unsigned(size)
[byte_size(bin) + offset + 55, bin, data]
end
|> :erlang.iolist_to_binary()
end
@spec decode!(binary()) :: rlp()
def decode!(bin) do
{term, ""} = do_decode!(bin)
term
end
defp do_encode!(nil) do
""
end
defp do_encode!(struct) when is_struct(struct) do
Map.from_struct(struct)
|> Enum.map(fn {key, value} -> [Atom.to_string(key), value] end)
end
defp do_encode!(map) when is_map(map) do
Map.to_list(map)
|> Enum.map(fn {key, value} ->
[if(is_atom(key), do: Atom.to_string(key), else: key), value]
end)
end
defp do_encode!(tuple) when is_tuple(tuple) do
:erlang.tuple_to_list(tuple)
end
defp do_encode!(bits) when is_bitstring(bits) do
for <<x::size(1) <- bits>>, do: if(x == 1, do: "1", else: "0"), into: ""
end
defp do_encode!(0) do
# Unfortunate, but encoding 0 as "" is the quasi-standard followed by
# the Go and Node.js implementations. This is why we have bin2num
""
end
defp do_encode!(num) when is_integer(num) do
:binary.encode_unsigned(num)
end
defp do_decode!(<<x::unsigned-size(8), rest::binary>>) when x <= 0x7F do
{<<x::unsigned>>, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xB7 do
size = head - 0x80
<<item::binary-size(size), rest::binary>> = rest
{item, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xBF do
length_size = (head - 0xB7) * 8
<<size::unsigned-size(length_size), item::binary-size(size), rest::binary>> = rest
{item, rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xF7 do
size = head - 0xC0
<<list::binary-size(size), rest::binary>> = rest
{do_decode_list!([], list), rest}
end
defp do_decode!(<<head::unsigned-size(8), rest::binary>>) when head <= 0xFF do
length_size = (head - 0xF7) * 8
<<size::unsigned-size(length_size), list::binary-size(size), rest::binary>> = rest
{do_decode_list!([], list), rest}
end
defp do_decode_list!(list, "") do
Enum.reverse(list)
end
defp do_decode_list!(list, rest) do
{item, rest} = do_decode!(rest)
do_decode_list!([item | list], rest)
end
end
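A quick round-trip sketch of the encoder and decoder above (values are illustrative):

```elixir
encoded = Rlp.encode!(["cat", "dog"])
# => <<0xC8, 0x83, "cat", 0x83, "dog">>

["cat", "dog"] = Rlp.decode!(encoded)

# Integers are encoded as unsigned big-endian binaries and therefore
# decode back to binaries, not integers (hence a bin2num step elsewhere):
<<42>> = Rlp.decode!(Rlp.encode!(42))
```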
|
lib/rlp.ex
| 0.733547
| 0.535341
|
rlp.ex
|
starcoder
|
defmodule YelpEx.Client do
@moduledoc """
Client to interact with Yelp's Fusion API.
"""
use YelpEx.Client.Base
@doc """
Issues a GET request to the `/businesses/search` endpoint.
GET https://api.yelp.com/v3/businesses/search
This endpoint performs a search of businesses
based on the options submitted.
## Options:
* `:params` See Yelp
[docs](https://www.yelp.com/developers/documentation/v3/business_search)
for full list of `params`
* For all other `options` see:
[`HTTPoison.request/5`](https://hexdocs.pm/httpoison/0.10.0/HTTPoison.html#request/5)
*Note:* A **location** option is mandatory: pass either `location`,
or both `latitude` and `longitude`.
## Examples:
iex> options = [params: [location: "Philadelphia, PA 19106"]]
iex> YelpEx.Client.search(options)
{:ok, {<RESPONSE>}}
iex> options = [params: [longitude: -75.145101, latitude: 39.54364]]
iex> YelpEx.Client.search!(options)
{<RESPONSE>}
"""
@spec search(Keyword.t) :: {:ok, %{}} | {:error, HTTPoison.Error.t}
def search(options) do
get("businesses/search", [], options)
end
@doc """
Same as `search/1` but raises `HTTPoison.Error` if an error occurs.
"""
@spec search!(Keyword.t) :: %{}
def search!(options) do
get!("businesses/search", [], options)
end
@doc """
Issues a GET request to the `/businesses/search/phone`
endpoint.
GET https://api.yelp.com/v3/businesses/search/phone
This endpoint performs a search of businesses
based on a phone number.
## Options:
* `:params` This endpoint takes one param, `phone`.
The phone number of the business you want to search for
as a string. It must start with + and include the
country code. See Yelp
[docs](https://www.yelp.com/developers/documentation/v3/business_search_phone)
for more.
* For all other `options` see:
[`HTTPoison.request/5`](https://hexdocs.pm/httpoison/0.10.0/HTTPoison.html#request/5)
## Examples:
iex> options = [params: [phone: "+14159083801"]]
iex> YelpEx.Client.search_phone(options)
{:ok, {<RESPONSE>}}
iex> YelpEx.Client.search_phone!(options)
{<RESPONSE>}
"""
@spec search_phone(Keyword.t) :: {:ok, %{}} | {:error, HTTPoison.Error.t}
def search_phone(options) do
get("businesses/search/phone", [], options)
end
@doc """
Same as `search_phone/1` but raises `HTTPoison.Error`
if an error occurs.
"""
@spec search_phone!(Keyword.t) :: %{}
def search_phone!(options) do
get!("businesses/search/phone", [], options)
end
end
|
lib/yelp_ex/client.ex
| 0.881749
| 0.42922
|
client.ex
|
starcoder
|
defmodule Kalevala.Event.ItemDrop do
@moduledoc """
Events to drop an item in a room
In order to drop an item, send an `ItemDrop.Request` event with the
item instance. The room will call the `item_request_drop` callback
on the room module.
Depending on the response, an `Abort` or `Commit` event will be sent.
"""
end
defmodule Kalevala.Event.ItemDrop.Request do
@moduledoc """
Request to drop an item in the room
"""
defstruct [:item_instance]
end
defmodule Kalevala.Event.ItemDrop.Abort do
@moduledoc """
The request to drop an item was aborted by the room
The item should be kept in the character's inventory.
"""
defstruct [:from, :item_instance, :reason]
end
defmodule Kalevala.Event.ItemDrop.Commit do
@moduledoc """
The request to drop an item was committed by the room
The item should be removed from the character's inventory. The item is
already in the room.
"""
defstruct [:from, :item_instance]
end
defmodule Kalevala.Event.ItemPickUp do
@moduledoc """
Events to pick up an item in a room
In order to pick up an item, send an `ItemPickUp.Request` event with the
item name. The room will try to find the matching item(s) based on the
`matches?/2` callback on the item after loading them from instances in
the room.
After finding a matching item, the `item_request_pickup` callback is
called on the room module.
Depending on the response, an `Abort` or `Commit` event will be sent.
"""
end
defmodule Kalevala.Event.ItemPickUp.Request do
@moduledoc """
Request to pick up an item from the room
"""
defstruct [:item_name]
end
defmodule Kalevala.Event.ItemPickUp.Abort do
@moduledoc """
The request to pick up an item was aborted by the room
The item cannot be added to the character's inventory.
"""
defstruct [:from, :item_instance, :item_name, :reason]
end
defmodule Kalevala.Event.ItemPickUp.Commit do
@moduledoc """
The request to pick up an item was committed by the room
The item should be added to the character's inventory. The item instance
is no longer in the room.
"""
defstruct [:from, :item_name, :item_instance]
end
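A short sketch of building these event structs (values are illustrative; routing the events to the room goes through Kalevala's event system, which is not shown here):

```elixir
# Request dropping a held item instance; the room answers with an
# ItemDrop.Abort or ItemDrop.Commit event.
drop = %Kalevala.Event.ItemDrop.Request{item_instance: item_instance}

# Request picking up an item by name; the room resolves matching
# instances via the item's `matches?/2` callback.
pickup = %Kalevala.Event.ItemPickUp.Request{item_name: "sword"}
```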
|
lib/kalevala/event/item.ex
| 0.807726
| 0.40754
|
item.ex
|
starcoder
|
defmodule Score.Server do
@moduledoc """
Score.Server is a GenServer that holds 2 elements as state:
`max_number` (a random number between 0..100) and a timestamp
(defaults to `nil` for the first query).
This GenServer runs every minute and when it runs:
- Should update every user's points in the database
(using a random number generator [0-100] for each) and refresh `max_number`
with a new random number.
- Should accept a handle_call that:
- Queries the database for all users with more points than `max_number` but
only retrieve a max of 2 users, updates `timestamp` with the current
timestamp and returns the users just retrieved, as well as the timestamp
of the previous **`handle_call`**.
"""
use GenServer
require Logger
alias Score.Services.UserService
# Genserver API
def start_link(name) do
GenServer.start_link(__MODULE__, name, name: name)
end
def users_with_points_greater_then(name \\ __MODULE__) do
GenServer.call(name, :get)
end
def child_spec(name) do
%{id: name, start: {__MODULE__, :start_link, [name]}}
end
# Callbacks
@doc """
GenServer Init - build the state with:
- `max_number`: random number (between 0..100)
- `timestamp`: which indicates the last time someone queried the genserver,
defaults to nil for the first query
"""
@impl true
def init(_name) do
call_repeatedly()
state = %{max_number: Enum.random(0..100), timestamp: nil}
Logger.info("Starting ScoreServer with->
max_number:#{state.max_number},
timestamp: #{state.timestamp}")
{:ok, state}
end
@doc """
GenServer `handle_info` that runs every minute (Step 0) and when it runs:
- Step 1: Should update every user's points in the database
(using a random number generator (0..100) for each).
- Step 2: Refresh the `max_number` of the genserver state with a new random number.
"""
@impl true
def handle_info(:schedule, old_state) do
# Step 0: Should run every minute
call_repeatedly()
# Step 1: Should update every user's points in the database
{:ok, _number_of_users_updated} = UserService.update_every_users_point()
# Step 2: Refresh the `max_number` of the state with a new random number.
%{timestamp: timestamp} = old_state
new_max_number = Enum.random(0..100)
new_state = %{
max_number: new_max_number,
timestamp: timestamp
}
Logger.info("Score.Server: Performing `handle_info` with ->
max_number: #{new_max_number} and timestamp: #{timestamp}")
{:noreply, new_state}
end
@doc """
GenServer `handle_call` that:
- Step 1: Queries the database for all users with more points than `max_number`
but only retrieve a max of 2 users.
- Step 2: Updates the genserver state `timestamp` with the current timestamp
- Step 3: Returns the users just retrieved from the database, as well as the timestamp
of the **previous `handle_call`**.
"""
@impl true
def handle_call(:get, _from, old_state) do
# Step 1 - Queries the Database and build the `response`
limit = 2
%{max_number: max_number, timestamp: old_timestamp} = old_state
{:ok, users} = UserService.get_users(%{max_number: max_number, limit: limit})
response = %{
users: users,
timestamp: old_timestamp
}
# Step 2 - Build a new state with a new `timestamp`
next_state = %{
max_number: max_number,
timestamp:
NaiveDateTime.truncate(NaiveDateTime.utc_now(), :second)
|> NaiveDateTime.to_string()
}
Logger.info("Score.Server: Performing `handle_call` with ->
max_number: #{max_number} and timestamp: #{response.timestamp}")
# Step 3 - Returns the users with the `old_timestamp` and the `new_state`
{:reply, response, next_state}
end
defp call_repeatedly() do
# In 1 minute
Process.send_after(self(), :schedule, :timer.minutes(1))
end
end
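A brief usage sketch, assuming the `UserService` database plumbing referenced above is in place (supervision-tree placement is illustrative):

```elixir
# Typically started under a supervisor; child_spec/1 takes the name.
children = [{Score.Server, Score.Server}]
Supervisor.start_link(children, strategy: :one_for_one)

# The first call returns a nil timestamp; later calls return the
# timestamp of the previous call alongside up to two users.
%{users: users, timestamp: previous_call_at} =
  Score.Server.users_with_points_greater_then(Score.Server)
```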
|
lib/score/server.ex
| 0.892517
| 0.675678
|
server.ex
|
starcoder
|
defmodule SimpleSecrets.Primatives do
def nonce() do
:crypto.strong_rand_bytes(16)
end
def derive(master, role) do
:crypto.hash(:sha256, [master, role])
end
def derive_sender_hmac(master) do
derive(master, "simple-crypto/sender-hmac-key")
end
def derive_sender_key(master) do
derive(master, "simple-crypto/sender-cipher-key")
end
def derive_receiver_hmac(master) do
derive(master, "simple-crypto/receiver-hmac-key")
end
def derive_receiver_key(master) do
derive(master, "simple-crypto/receiver-cipher-key")
end
def encrypt(buffer, key) do
iv = nonce()
cipher = :crypto.block_encrypt(:aes_cbc256, key, iv, PKCS7.pad(buffer))
iv <> cipher
end
def decrypt(buffer, key, iv) do
:crypto.block_decrypt(:aes_cbc256, key, iv, buffer)
|> PKCS7.unpad()
end
def identify(buffer) do
input = [buffer_size(buffer), buffer]
<<prefix :: binary-size(6), _ :: binary>> = :crypto.hash(:sha256, input)
prefix
end
defp buffer_size(buffer) do
buffer
|> byte_size()
|> :binary.encode_unsigned()
end
def mac(buffer, hmac_key) do
:crypto.hmac(:sha256, hmac_key, buffer)
end
for n <- 1..32 do
def equals?(a, b) when byte_size(a) == unquote(n) and byte_size(b) == unquote(n) do
:crypto.exor(a, b) == unquote(Stream.repeatedly(fn -> 0 end) |> Enum.take(n) |> :erlang.iolist_to_binary)
end
end
def binify(string) do
string
|> pad()
|> Base.url_decode64!()
end
def stringify(buffer) do
buffer
|> Base.url_encode64()
|> unpad()
end
def serialize(object) do
object
|> Msgpax.pack!()
|> :erlang.iolist_to_binary()
end
def deserialize(binary) do
Msgpax.unpack!(binary)
end
defp pad(buffer) do
case buffer |> byte_size |> rem(4) do
0 ->
buffer
diff ->
pad(buffer, 4 - diff)
end
end
for n <- 0..3 do
def pad(buffer, unquote(n)) do
buffer <> unquote(Stream.repeatedly(fn -> "=" end) |> Enum.take(n) |> Enum.join())
end
end
defp unpad(string) do
string
|> String.replace("=", "")
end
end
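A short round-trip sketch of the websafe-base64 and MessagePack helpers above (illustrative values; the cipher/HMAC functions assume an OTP release that still provides `:crypto.block_encrypt/4` and `:crypto.hmac/3`):

```elixir
alias SimpleSecrets.Primatives

s = Primatives.stringify(<<1, 2, 3, 4, 5>>)   # "=" padding stripped
<<1, 2, 3, 4, 5>> = Primatives.binify(s)      # padding restored on decode

packed = Primatives.serialize(%{"a" => 1})
%{"a" => 1} = Primatives.deserialize(packed)
```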
|
lib/simple_secrets/primatives.ex
| 0.584745
| 0.561395
|
primatives.ex
|
starcoder
|
defmodule SanbaseWeb.Graphql.AbsintheBeforeSend do
@moduledoc ~s"""
Cache & Persist API Call Data right before sending the response.
This module is responsible for persisting the API Call data and for
caching the whole result of some queries right before it is sent to the client.
All queries that did not raise exceptions and were successfully handled
by the GraphQL layer pass through this module. The data for them is exported
to Kafka. See `export_api_call_data` for more info.
The Blueprint's `result` field contains the final result as a single map.
This result is made up of the top-level resolver and all custom resolvers.
Caching the end result instead of each resolver separately allows resolving
the whole query with a single cache call - some queries could have
thousands of custom resolver invocations.
In order to cache a result all of the following conditions must be true:
- All queries must be present in the `@cached_queries` list
- The resolved value must not be an error
- During resolving there must not be any `:nocache` returned.
Most of the simple queries use 1 cache call and won't benefit from this approach.
Only queries with many resolvers are included in the list of allowed queries.
"""
alias SanbaseWeb.Graphql.Cache
@compile :inline_list_funcs
@compile inline: [
construct_query_name: 1,
cache_result: 2,
queries_in_request: 1,
extract_caller_data: 1,
export_api_call_data: 3,
remote_ip: 1,
has_graphql_errors?: 1,
maybe_create_or_drop_session: 2
]
@cached_queries [
"allProjects",
"allErc20Projects",
"allCurrencyProjects",
"projectsListHistoryStats",
"projectsListStats",
"allProjectsByFunction"
]
def cached_queries(), do: @cached_queries
def before_send(conn, %Absinthe.Blueprint{} = blueprint) do
# Do not cache in case of:
# - `:nocache` returned from a resolver
# - the result is taken from the cache and should not be stored again. Storing
#   it again `touch`es it and the TTL timer is restarted. This can lead
#   to storing the same value indefinitely if there are enough requests
queries = queries_in_request(blueprint)
export_api_call_data(queries, conn, blueprint)
do_not_cache? = not is_nil(Process.get(:do_not_cache_query))
case do_not_cache? or has_graphql_errors?(blueprint) do
true -> :ok
false -> cache_result(queries, blueprint)
end
conn
|> maybe_create_or_drop_session(blueprint.execution.context)
end
defp cache_result(queries, blueprint) do
all_queries_cachable? = queries |> Enum.all?(&Enum.member?(@cached_queries, &1))
if all_queries_cachable? do
Cache.store(
blueprint.execution.context.query_cache_key,
blueprint.result
)
end
end
defp maybe_create_or_drop_session(conn, %{create_session: true, auth_token: auth_token}) do
Plug.Conn.put_session(conn, :auth_token, auth_token)
end
defp maybe_create_or_drop_session(conn, %{delete_session: true}) do
Plug.Conn.configure_session(conn, drop: true)
end
defp maybe_create_or_drop_session(conn, _), do: conn
defp queries_in_request(%{operations: operations}) do
operations
|> Enum.flat_map(fn %{selections: selections} ->
selections
|> Enum.map(fn %{name: name} -> Inflex.camelize(name, :lower) end)
end)
end
# API Call exporting functions
# Create an API Call event for every query in a Document separately.
defp export_api_call_data(queries, conn, blueprint) do
now = DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
duration_ms = div(now - blueprint.telemetry.start_time, 1_000_000)
user_agent = Plug.Conn.get_req_header(conn, "user-agent") |> List.first()
{user_id, san_tokens, auth_method, api_token} =
extract_caller_data(blueprint.execution.context)
# Replace all occurrences of getMetric and getAnomaly with names where
# the metric or anomaly argument is also included
queries =
Map.get(blueprint.execution.context, :__get_query_name_arg__, []) ++
Enum.reject(queries, &(&1 == "getMetric" or &1 == "getAnomaly"))
id =
Logger.metadata() |> Keyword.get(:request_id) ||
"gen_" <> (:crypto.strong_rand_bytes(16) |> Base.encode64())
Enum.map(queries, fn query ->
%{
timestamp: div(now, 1_000_000_000),
id: id,
query: query |> construct_query_name(),
status_code: 200,
has_graphql_errors: has_graphql_errors?(blueprint),
user_id: user_id,
auth_method: auth_method,
api_token: api_token,
remote_ip: remote_ip(blueprint),
user_agent: user_agent,
duration_ms: duration_ms,
san_tokens: san_tokens
}
end)
|> Sanbase.Kafka.ApiCall.json_kv_tuple()
|> Sanbase.KafkaExporter.persist(:api_call_exporter)
end
defp construct_query_name({:get_metric, metric}), do: "getMetric|#{metric}"
defp construct_query_name({:get_anomaly, anomaly}), do: "getAnomaly|#{anomaly}"
defp construct_query_name(query), do: query
defp remote_ip(blueprint) do
blueprint.execution.context.remote_ip |> :inet_parse.ntoa() |> to_string()
end
defp extract_caller_data(%{
auth: %{auth_method: :user_token, current_user: user, san_balance: san_balance}
}) do
{user.id, san_balance, :jwt, nil}
end
defp extract_caller_data(%{
auth: %{auth_method: :apikey, current_user: user, token: token, san_balance: san_balance}
}) do
{user.id, san_balance, :apikey, token}
end
defp extract_caller_data(%{
auth: %{auth_method: :basic, san_balance: san_balance}
}) do
{nil, san_balance, :basic, nil}
end
defp extract_caller_data(_), do: {nil, nil, nil, nil}
defp has_graphql_errors?(%Absinthe.Blueprint{result: %{errors: _}}), do: true
defp has_graphql_errors?(_), do: false
end
|
lib/sanbase_web/graphql/absinthe_before_send.ex
| 0.773943
| 0.441492
|
absinthe_before_send.ex
|
starcoder
|
defmodule Aoc2021.Day8 do
@moduledoc """
See https://adventofcode.com/2021/day/8
"""
@spec solve_part1() :: non_neg_integer()
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day8/input.txt") do
path
|> read_input()
|> Stream.map(fn {_, v} -> v end)
|> Stream.map(fn v -> Enum.count(v, &is_easy_digit/1) end)
|> Enum.sum()
end
@spec solve_part2() :: non_neg_integer()
@spec solve_part2(Path.t()) :: non_neg_integer()
def solve_part2(path \\ "priv/day8/input.txt") do
path
|> read_input()
|> Stream.map(&determine_number/1)
|> Enum.sum()
end
defp determine_number({a, b}) do
map = mappings(a)
b
|> Enum.map(fn x -> Map.get(map, x) end)
|> Integer.undigits()
end
defp mappings(list) do
{easy, complex} = Enum.split_with(list, &is_easy_digit/1)
easy_digits =
Enum.reduce(easy, %{}, fn d, acc ->
dd = map_easy_digit(d)
Map.put(acc, dd, d)
end)
complex
|> Enum.reduce(easy_digits, fn d, acc ->
dd = map_hard_digit(d, acc)
Map.put(acc, dd, d)
end)
|> swap_key_value()
end
defp swap_key_value(map) do
map
|> Map.to_list()
|> Enum.map(fn {k, v} -> {v, k} end)
|> Map.new()
end
defp map_easy_digit(d) do
case MapSet.size(d) do
2 -> 1
3 -> 7
4 -> 4
7 -> 8
end
end
defp map_hard_digit(d, map) do
case MapSet.size(d) do
5 ->
map_2_3_5(d, map)
6 ->
map_0_6_9(d, map)
end
end
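# Five-segment candidates are 2, 3 and 5; six-segment candidates are 0, 6
# and 9. Each is identified by how many of its segments fall outside the
# already-known patterns for 1 and 4 (the MapSet differences below).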
defp map_2_3_5(d, map) do
diff_to_1 = MapSet.size(MapSet.difference(d, Map.get(map, 1)))
diff_to_4 = MapSet.size(MapSet.difference(d, Map.get(map, 4)))
case {diff_to_1, diff_to_4} do
{3, 2} -> 3
{4, 2} -> 5
{4, 3} -> 2
end
end
defp map_0_6_9(d, map) do
diff_to_1 = MapSet.size(MapSet.difference(d, Map.get(map, 1)))
diff_to_4 = MapSet.size(MapSet.difference(d, Map.get(map, 4)))
case {diff_to_1, diff_to_4} do
{4, 2} -> 9
{4, 3} -> 0
{5, 3} -> 6
end
end
defp read_input(path) do
path
|> File.stream!()
|> Stream.map(&String.trim/1)
|> Stream.reject(&empty_line?/1)
|> Stream.map(&parse_line/1)
end
defp empty_line?(""), do: true
defp empty_line?(_), do: false
def alphabet do
MapSet.new([:a, :b, :c, :d, :e, :f, :g])
end
defp parse_line(line) do
[a, b] =
line
|> String.split(" | ")
|> Enum.map(&String.split/1)
sa = Enum.map(a, &parse_segment_set/1)
sb = Enum.map(b, &parse_segment_set/1)
{sa, sb}
end
defp parse_segment_set(word) do
word
|> String.graphemes()
|> Enum.map(&String.to_existing_atom/1)
|> MapSet.new()
end
defp is_easy_digit(s) do
MapSet.size(s) in [2, 3, 4, 7]
end
end
|
lib/aoc2021/day8.ex
| 0.773772
| 0.553626
|
day8.ex
|
starcoder
|
defmodule Phoenix.HTML.SimplifiedHelpers.TimeAgoInWords do
import Phoenix.HTML.SimplifiedHelpers.Gettext
@minutes_in_year 525_600
@minutes_in_quarter_year 131_400
@minutes_in_three_quarters_year 394_200
def time_ago_in_words(from_time),
do: distance_of_time_in_words(from_time, :os.system_time(:seconds))
def distance_of_time_in_words_to_now(from_time), do: time_ago_in_words(from_time)
def distance_of_time_in_words(from_time) when is_integer(from_time),
do: distance_of_time_in_words(from_time, 0)
def distance_of_time_in_words(%DateTime{} = from_time),
do: distance_of_time_in_words(Timex.to_unix(from_time), 0)
def distance_of_time_in_words(%NaiveDateTime{} = from_time),
do: distance_of_time_in_words(Timex.to_unix(from_time), 0)
if Code.ensure_loaded?(Ecto.DateTime) do
def distance_of_time_in_words(%Ecto.DateTime{} = from_time) do
from = Ecto.DateTime.to_erl(from_time)
distance_of_time_in_words(Timex.to_unix(from), 0)
end
end
def distance_of_time_in_words(%DateTime{} = from_time, to_time) when is_integer(to_time) do
distance_of_time_in_words(Timex.to_unix(from_time), to_time)
end
def distance_of_time_in_words(%NaiveDateTime{} = from_time, to_time) when is_integer(to_time) do
distance_of_time_in_words(Timex.to_unix(from_time), to_time)
end
if Code.ensure_loaded?(Ecto.DateTime) do
def distance_of_time_in_words(%Ecto.DateTime{} = from_time, to_time)
when is_integer(to_time) do
from = Ecto.DateTime.to_erl(from_time)
distance_of_time_in_words(Timex.to_unix(from), to_time)
end
end
def distance_of_time_in_words(%DateTime{} = from_time, %DateTime{} = to_time) do
distance_of_time_in_words(Timex.to_unix(from_time), Timex.to_unix(to_time))
end
def distance_of_time_in_words(%NaiveDateTime{} = from_time, %NaiveDateTime{} = to_time) do
distance_of_time_in_words(Timex.to_unix(from_time), Timex.to_unix(to_time))
end
if Code.ensure_loaded?(Ecto.DateTime) do
def distance_of_time_in_words(%Ecto.DateTime{} = from_time, %Ecto.DateTime{} = to_time) do
from = Ecto.DateTime.to_erl(from_time)
to = Ecto.DateTime.to_erl(to_time)
distance_of_time_in_words(Timex.to_unix(from), Timex.to_unix(to))
end
end
@spec distance_of_time_in_words(Integer.t(), Integer.t()) :: String.t()
def distance_of_time_in_words(from_time, to_time)
when is_integer(from_time) and is_integer(to_time) do
from_time = Enum.min([from_time, to_time])
distance_in_minutes = round((to_time - from_time) / 60.0)
distance_in_seconds = round(to_time - from_time)
case distance_in_minutes do
x when x in 0..1 ->
case distance_in_seconds do
x when x in 0..4 -> gettext("less than %{count} seconds", count: 5)
x when x in 5..9 -> gettext("less than %{count} seconds", count: 10)
x when x in 10..19 -> gettext("less than %{count} seconds", count: 20)
x when x in 20..39 -> gettext("half a minute")
x when x in 40..59 -> gettext("less than %{count} minute", count: 1)
_ -> gettext("%{count} minute", count: 1)
end
x when x in 2..44 ->
gettext("%{count} minutes", count: distance_in_minutes)
x when x in 45..89 ->
gettext("about %{count} hour", count: 1)
# 90 mins up to 24 hours
x when x in 90..1439 ->
gettext("about %{count} hours", count: round(distance_in_minutes / 60.0))
# 24 hours up to 42 hours
x when x in 1440..2519 ->
gettext("%{count} day", count: 1)
# 42 hours up to 30 days
x when x in 2520..43199 ->
gettext("%{count} days", count: round(distance_in_minutes / 1440.0))
# 30 days up to 60 days
x when x in 43200..86399 ->
gettext("about %{count} months", count: round(distance_in_minutes / 43200.0))
# 60 days up to 365 days
x when x in 86400..525_599 ->
gettext("%{count} months", count: round(distance_in_minutes / 43200.0))
_ ->
remainder = rem(distance_in_minutes, @minutes_in_year)
distance_in_years = div(distance_in_minutes, @minutes_in_year)
cond do
remainder < @minutes_in_quarter_year ->
case distance_in_years do
1 -> gettext("about %{count} year", count: distance_in_years)
_ -> gettext("about %{count} years", count: distance_in_years)
end
remainder < @minutes_in_three_quarters_year ->
case distance_in_years do
1 -> gettext("over %{count} year", count: distance_in_years)
_ -> gettext("over %{count} years", count: distance_in_years)
end
true ->
case distance_in_years do
1 -> gettext("almost %{count} year", count: distance_in_years + 1)
_ -> gettext("almost %{count} years", count: distance_in_years + 1)
end
end
end
end
end
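A few illustrative calls, assuming Unix timestamps in seconds and a compiled gettext backend (outputs shown for the default English locale):

```elixir
import Phoenix.HTML.SimplifiedHelpers.TimeAgoInWords

now = :os.system_time(:seconds)

distance_of_time_in_words(now - 30, now)      # "half a minute"
distance_of_time_in_words(now - 600, now)     # "10 minutes"
distance_of_time_in_words(now - 86_400, now)  # "1 day"
```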
|
lib/phoenix_html_simplified_helpers/time_ago_in_words.ex
| 0.624294
| 0.477006
|
time_ago_in_words.ex
|
starcoder
|
defmodule Throttlex.Bucket.Counter do
@moduledoc """
This module defines a gen_server that owns a named public ETS table.
The table contains counters aggregated by time slots (in seconds).
These counters allow counting/tracking the rate for errors, requests,
and any other variable we are interested in.
Periodically, the server will run the garbage collector removing entries
older than the current time slot (calculated when the GC starts).
"""
defmodule State do
@moduledoc false
@type t :: %__MODULE__{}
defstruct [
# Server name
:name,
# Starting time
:start_time,
# Garbage collector timer ref
:gc_timer,
# Garbage collector interval in seconds (Defaults to 15 min)
gc_interval: 900,
# Time slot size in seconds (Defaults to 1 min)
slot_size: 60,
# Gathered stats
stats: %{}
]
end
use GenServer
import Throttlex.Utils
alias Throttlex.Bucket.Counter.State
@type t :: atom
@type counter :: atom | binary
@type timestamp :: integer
@type slot_size :: pos_integer
## API
@doc """
Starts a new server for the time-bucket defined by the given options `opts`.
## Options
* `:name` - An atom defining the name of the server (Required).
* `:gc_interval` - Garbage collector interval in seconds (Defaults to
`900` - 15 min).
* `:slot_size` - Time slot size in seconds (Defaults to `60` - 1 min).
## Example
Throttlex.Bucket.Counter.start_link(name: :my_bucket)
"""
@spec start_link(Keyword.t()) :: GenServer.on_start()
def start_link(opts \\ []) do
name = opts[:name] || raise "expected name: to be given as argument"
GenServer.start_link(__MODULE__, opts, name: name)
end
@doc """
Increments the value for `counter` into the time-slot given by `timestamp`
and `slot_size`.
## Example
Throttlex.Bucket.Counter.incr(:my_bucket, :my_counter)
"""
@spec incr(t, counter, timestamp, slot_size | nil) :: integer
def incr(bucket, counter, timestamp \\ now(), slot_size \\ nil)
def incr(bucket, counter, timestamp, nil) do
incr(bucket, counter, timestamp, slot_size(bucket))
end
def incr(bucket, counter, timestamp, slot_size) do
counter_k = {time_slot(slot_size, timestamp), assert_counter(counter)}
:ets.update_counter(bucket, counter_k, 1, {counter_k, 0})
end
@doc """
Returns the value for `counter` into the time-slot given by `timestamp`
and `slot_size`.
## Example
Throttlex.Bucket.Counter.value(:my_bucket, :my_counter)
"""
@spec value(t, counter, timestamp, slot_size | nil) :: non_neg_integer
def value(bucket, counter, timestamp \\ now(), slot_size \\ nil)
def value(bucket, counter, timestamp, nil) do
value(bucket, counter, timestamp, slot_size(bucket))
end
def value(bucket, counter, timestamp, slot_size) do
case :ets.lookup(bucket, {time_slot(slot_size, timestamp), assert_counter(counter)}) do
[{_, value}] -> value
[] -> 0
end
end
@doc """
Returns the configured slot size.
## Example
Throttlex.Bucket.Counter.slot_size(:my_bucket)
"""
@spec slot_size(t) :: pos_integer
def slot_size(bucket) do
:ets.lookup_element(bucket, :"$slot_size", 2)
end
@doc """
Returns the gathered stats for the given `bucket`.
## Example
Throttlex.Bucket.Counter.stats(:my_bucket)
"""
@spec stats(t) :: map
def stats(bucket) do
GenServer.call(bucket, :stats)
end
@doc """
Resets or sets to `0` all counters for the bucket linked to the given
`bucket`.
## Example
Throttlex.Bucket.Counter.reset(:my_bucket)
"""
@spec reset(t) :: :ok
def reset(bucket) do
GenServer.call(bucket, :reset)
end
@doc """
Returns a list of all objects in bucket `bucket`.
## Example
Throttlex.Bucket.Counter.to_list(:my_bucket)
"""
@spec to_list(t) :: [term]
defdelegate to_list(bucket), to: :ets, as: :tab2list
@doc """
Returns the time-slot given by `timestamp` and `slot_size`.
## Example
Throttlex.Bucket.Counter.time_slot(10)
"""
@spec time_slot(slot_size, timestamp) :: timestamp
def time_slot(slot_size, timestamp \\ now()) do
trunc(timestamp / slot_size) * slot_size
end
## GenServer Callbacks
@impl true
def init(opts) do
name = Keyword.fetch!(opts, :name)
^name =
:ets.new(name, [
:named_table,
:public,
:set,
read_concurrency: true,
write_concurrency: true
])
state = struct(State, :maps.from_list(opts))
state = %{
state
| gc_timer: gc_reset(state.gc_interval),
start_time: now(),
name: name
}
true = :ets.insert(name, {:"$slot_size", state.slot_size})
{:ok, state}
end
@impl true
def handle_call(:stats, _from, %State{stats: stats} = state) do
{:reply, stats, state}
end
def handle_call(
:reset,
_from,
%State{name: name, gc_interval: interval, slot_size: slot_size} = state
) do
true = :ets.delete_all_objects(name)
true = :ets.insert(name, {:"$slot_size", slot_size})
{:reply, :ok, %{state | gc_timer: gc_reset(interval)}}
end
@impl true
def handle_info(:gc_timeout, %State{gc_interval: interval} = state) do
state =
interval
|> time_slot()
|> gc_run(state)
{:noreply, %{state | gc_timer: gc_reset(interval)}}
end
def handle_info(_message, state) do
{:noreply, state}
end
## Private Functions
defp gc_run(current_slot, %State{name: name, stats: stats} = state) do
true = :ets.safe_fixtable(name, true)
stats =
:ets.foldl(
fn
{{slot, counter} = key, value}, acc when slot < current_slot ->
true = :ets.delete(name, key)
Map.update(acc, counter, value, &(&1 + value))
_, acc ->
acc
end,
stats,
name
)
true = :ets.safe_fixtable(name, false)
%{state | stats: stats}
end
defp gc_reset(timeout) do
{:ok, timer_ref} = :timer.send_after(timeout * 1000, :gc_timeout)
timer_ref
end
defp assert_counter(counter) when is_atom(counter) or is_binary(counter), do: counter
defp assert_counter(counter) do
raise ArgumentError, "expected counter to be an atom, got: #{inspect(counter)}"
end
end
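A combined usage sketch of the API above (bucket and counter names are illustrative):

```elixir
{:ok, _pid} = Throttlex.Bucket.Counter.start_link(name: :my_bucket, slot_size: 60)

# Assuming both calls land in the same 60-second slot:
Throttlex.Bucket.Counter.incr(:my_bucket, :requests)   # => 1
Throttlex.Bucket.Counter.incr(:my_bucket, :requests)   # => 2

Throttlex.Bucket.Counter.value(:my_bucket, :requests)  # => 2
```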
|
lib/throttlex/bucket/counter.ex
| 0.932745
| 0.482185
|
counter.ex
|
starcoder
|
defmodule Postgrex.Interval do
@moduledoc """
Struct for Postgres interval.
## Fields
* `months`
* `days`
* `secs`
"""
@type t :: %__MODULE__{months: integer, days: integer, secs: integer}
defstruct months: 0, days: 0, secs: 0
end
defmodule Postgrex.Range do
@moduledoc """
Struct for Postgres range.
## Fields
* `lower`
* `upper`
* `lower_inclusive`
* `upper_inclusive`
"""
@type t :: %__MODULE__{
lower: term | :empty | :unbound,
upper: term | :empty | :unbound,
lower_inclusive: boolean,
upper_inclusive: boolean
}
defstruct lower: nil, upper: nil, lower_inclusive: true, upper_inclusive: true
end
defmodule Postgrex.INET do
@moduledoc """
Struct for Postgres inet/cidr.
## Fields
* `address`
* `netmask`
"""
@type t :: %__MODULE__{address: :inet.ip_address(), netmask: 0..128}
defstruct address: nil, netmask: nil
end
defmodule Postgrex.MACADDR do
@moduledoc """
Struct for Postgres macaddr.
## Fields
* `address`
"""
@type macaddr :: {0..255, 0..255, 0..255, 0..255, 0..255, 0..255}
@type t :: %__MODULE__{address: macaddr}
defstruct address: nil
end
defmodule Postgrex.Point do
@moduledoc """
Struct for Postgres point.
## Fields
* `x`
* `y`
"""
@type t :: %__MODULE__{x: float, y: float}
defstruct x: nil, y: nil
end
defmodule Postgrex.Polygon do
@moduledoc """
Struct for Postgres polygon.
## Fields
* `vertices`
"""
@type t :: %__MODULE__{vertices: [Postgrex.Point.t()]}
defstruct vertices: nil
end
defmodule Postgrex.Line do
@moduledoc """
Struct for Postgres line.
Note, lines are stored in Postgres in the form `{a, b, c}`, which
parameterizes a line as `a*x + b*y + c = 0`.
## Fields
* `a`
* `b`
* `c`
"""
@type t :: %__MODULE__{a: float, b: float, c: float}
defstruct a: nil, b: nil, c: nil
end
defmodule Postgrex.LineSegment do
@moduledoc """
Struct for Postgres line segment.
## Fields
* `point1`
* `point2`
"""
@type t :: %__MODULE__{point1: Postgrex.Point.t(), point2: Postgrex.Point.t()}
defstruct point1: nil, point2: nil
end
defmodule Postgrex.Box do
@moduledoc """
Struct for Postgres rectangular box.
## Fields
* `upper_right`
* `bottom_left`
"""
@type t :: %__MODULE__{
upper_right: Postgrex.Point.t(),
bottom_left: Postgrex.Point.t()
}
defstruct upper_right: nil, bottom_left: nil
end
defmodule Postgrex.Path do
@moduledoc """
Struct for Postgres path.
## Fields
* `open`
* `points`
"""
@type t :: %__MODULE__{points: [Postgrex.Point.t()], open: boolean}
defstruct points: nil, open: nil
end
defmodule Postgrex.Circle do
@moduledoc """
Struct for Postgres circle.
## Fields
* `center`
* `radius`
"""
@type t :: %__MODULE__{center: Postgrex.Point.t(), radius: number}
defstruct center: nil, radius: nil
end
defmodule Postgrex.Lexeme do
@moduledoc """
Struct for Postgres lexeme (a tsvector is composed of multiple lexemes).
## Fields
* `word`
* `positions`
"""
@type t :: %__MODULE__{word: String.t(), positions: [{pos_integer, :A | :B | :C | nil}]}
defstruct word: nil, positions: nil
end
|
lib/postgrex/builtins.ex
| 0.911888
| 0.618608
|
builtins.ex
|
starcoder
|
defmodule InflexDB do
@moduledoc """
Client for [InfluxDB](https://www.influxdata.com/products/influxdb-overview/)
Checkout `InflexDB.Client` on how to instantiate and configure a client.
"""
alias InflexDB.{
Authentication,
Client,
Point,
HTTPRequest,
HTTPResponse,
HTTPClient,
LineProtocol
}
@type error_response :: {:error, HTTPResponse.t()} | {:error, :econnrefused} | {:error, term()}
@doc """
Check the status of your InfluxDB instance.
## Example
```elixir
client = %InflexDB.Client{}
InflexDB.ping(client)
```
"""
@spec ping(client :: Client.t()) :: :ok | error_response()
def ping(%Client{} = client) do
request = %HTTPRequest{
method: :get,
base_url: client.url,
path: "/ping"
}
request
|> HTTPClient.request()
|> handle_response()
end
@doc """
Creates a new database.
## Example
```elixir
client = %InflexDB.Client{}
InflexDB.create_database(client, "mydb")
```
"""
@spec create_database(client :: Client.t(), name :: String.t()) :: :ok | error_response()
def create_database(%Client{} = client, name) when is_binary(name) do
request = %HTTPRequest{
method: :post,
base_url: client.url,
path: "/query",
body: %{"q" => "CREATE DATABASE \"#{name}\";"},
content_type: :urlencoded
}
request
|> Authentication.with_credentials(client)
|> HTTPClient.request()
|> handle_response()
end
@doc """
Deletes all of the data, measurements, series, continuous queries, and retention policies from the specified database.
If you attempt to drop a database that does not exist, InfluxDB does not return an error.
## Example
```elixir
client = %InflexDB.Client{}
InflexDB.delete_database(client, "mydb")
```
"""
@spec delete_database(client :: Client.t(), name :: String.t()) :: :ok | error_response()
def delete_database(%Client{} = client, name) when is_binary(name) do
request = %HTTPRequest{
method: :post,
base_url: client.url,
path: "/query",
body: %{"q" => "DROP DATABASE \"#{name}\";"},
content_type: :urlencoded
}
request
|> Authentication.with_credentials(client)
|> HTTPClient.request()
|> handle_response()
end
@doc """
Returns a list of all databases on your instance.
```elixir
client = %InflexDB.Client{}
InflexDB.list_databases(client)
# {:ok, ["_internal", "mydb"]}
```
"""
@spec list_databases(client :: Client.t()) :: {:ok, [String.t()]} | error_response()
def list_databases(%Client{} = client) do
request = %HTTPRequest{
method: :post,
base_url: client.url,
path: "/query",
body: %{"q" => "SHOW DATABASES;"},
content_type: :urlencoded
}
request
|> Authentication.with_credentials(client)
|> HTTPClient.request()
|> handle_list_databases()
end
@doc """
Write points to a pre-existing database.
## Example
```elixir
client = %InflexDB.Client{}
points = [
%Point{
measurement: "weather",
tag_set: %{location: "us-midwest"},
field_set: %{temperature: 82}
},
%Point{
measurement: "weather",
tag_set: %{location: "us-midwest"},
field_set: %{temperature: 76}
}
]
InflexDB.write_points(client, "mydb", points)
```
"""
@spec write_points(client :: Client.t(), db :: String.t(), points :: [Point.t()]) ::
:ok | error_response()
def write_points(%Client{} = client, db, points) when is_binary(db) and is_list(points) do
body = LineProtocol.encode(points)
request = %HTTPRequest{
method: :post,
base_url: client.url,
path: "/write",
query: %{"db" => db},
body: body,
content_type: :text
}
request
|> Authentication.with_credentials(client)
|> HTTPClient.request()
|> handle_response()
end
@doc """
Write a single point to a pre-existing database.
## Example
```elixir
client = %InflexDB.Client{}
point = %Point{
measurement: "weather",
tag_set: %{location: "us-midwest"},
field_set: %{temperature: 82}
}
InflexDB.write_point(client, "mydb", point)
```
"""
@spec write_point(client :: Client.t(), db :: String.t(), point :: Point.t()) ::
:ok | error_response()
def write_point(%Client{} = client, db, %Point{} = point) when is_binary(db) do
write_points(client, db, [point])
end
@doc """
Query data with a SELECT statement.
## Example
```elixir
client = %InflexDB.Client{}
InflexDB.query(client, "mydb", "SELECT * FROM weather")
# {:ok,
# [
# %{
# name: "weather",
# statement_id: 0,
# tags: nil,
# values: [
# %{
# "location" => "us-midwest",
# "season" => "summer",
# "temperature" => 82,
# "time" => "2020-03-29T20:34:46.725338219Z"
# },
# %{
# "season" => "summer",
# "location" => "us-east",
# "temperature" => 879,
# "time" => "2020-03-29T20:40:46.790074049Z"
# }
# ]
# }
# ]}
```
Multiple `SELECT` statements are also supported:
```elixir
client = %InflexDB.Client{}
statement = "SELECT * FROM weather group by location; SELECT * from weather2 group by season"
InflexDB.query(client, "mydb", statement)
# {:ok,
# [
# %{
# name: "weather",
# statement_id: 0,
# tags: %{"location" => "us-east"},
# values: [
# %{
# "season" => "summer",
# "temperature" => 879,
# "time" => "2020-03-29T20:40:46.790074049Z"
# },
# %{
# "season" => "winter",
# "temperature" => 8096,
# "time" => "2020-03-29T20:40:46.790074049Z"
# }
# ]
# },
# %{
# name: "weather",
# statement_id: 0,
# tags: %{"location" => "us-midwest"},
# values: [
# %{
# "season" => "summer",
# "temperature" => 82,
# "time" => "2020-03-29T20:34:46.725338219Z"
# },
# %{
# "season" => "summer",
# "temperature" => 82,
# "time" => "2020-03-29T20:35:15.531091771Z"
# }
# ]
# },
# %{
# name: "weather2",
# statement_id: 1,
# tags: %{"season" => "summer"},
# values: [
# %{
# "location" => "us-east",
# "temperature" => 842,
# "time" => "2020-03-29T20:59:41.755035346Z"
# },
# %{
# "location" => "us-midwest",
# "temperature" => 2342,
# "time" => "2020-03-29T20:59:41.755035346Z"
# }
# ]
# },
# %{
# name: "weather2",
# statement_id: 1,
# tags: %{"season" => "winter"},
# values: [
# %{
# "location" => "us-east",
# "temperature" => 7554,
# "time" => "2020-03-29T20:59:41.755035346Z"
# },
# %{
# "location" => "us-midwest",
# "temperature" => 5473,
# "time" => "2020-03-29T20:59:41.755035346Z"
# }
# ]
# }
# ]}
```
"""
@spec query(client :: Client.t(), db :: String.t(), query :: String.t()) ::
{:ok, map()} | error_response()
def query(%Client{} = client, db, query) when is_binary(db) and is_binary(query) do
request = %HTTPRequest{
method: :get,
base_url: client.url,
path: "/query",
query: %{"db" => db, "q" => query}
}
request
|> Authentication.with_credentials(client)
|> HTTPClient.request()
|> handle_query()
end
defp handle_response({:ok, %{status: 204}}), do: :ok
defp handle_response({:ok, %{status: 200}}), do: :ok
defp handle_response({:ok, response}), do: {:error, response}
defp handle_response({:error, reason}), do: {:error, reason}
defp handle_query({:ok, %{status: 200, body: body}}) do
result =
body
|> Map.get("results")
|> Enum.map(fn result ->
result
|> Map.get("series")
|> Enum.map(fn series ->
columns = Map.get(series, "columns")
%{
statement_id: Map.get(result, "statement_id"),
name: Map.get(series, "name"),
tags: Map.get(series, "tags"),
values:
series
|> Map.get("values")
|> Enum.map(fn value ->
columns
|> Enum.with_index()
|> Enum.map(fn {k, index} ->
{k, Enum.at(value, index)}
end)
|> Map.new()
end)
}
end)
end)
|> List.flatten()
{:ok, result}
end
defp handle_query({:ok, response}), do: {:error, response}
defp handle_query({:error, reason}), do: {:error, reason}
defp handle_list_databases({:ok, %{status: 200, body: body}}) do
result =
body
|> Map.get("results")
|> List.first()
|> Map.get("series")
|> List.first()
|> Map.get("values", [])
|> List.flatten()
{:ok, result}
end
defp handle_list_databases({:ok, response}), do: {:error, response}
defp handle_list_databases({:error, reason}), do: {:error, reason}
end
|
lib/inflex_db.ex
| 0.870336
| 0.577436
|
inflex_db.ex
|
starcoder
|
defmodule FalconPlusApi.Api.Strategy do
alias Maxwell.Conn
alias FalconPlusApi.{Util, Sig, Api}
@doc """
* [Session](#/authentication) Required
### Request
```{
"tpl_id": 221,
"tags": "",
"run_end": "24:00",
"run_begin": "00:00",
"right_value": "1",
"priority": 1,
"op": "==",
"note": "this is a test",
"metric": "agent.alive",
"max_step": 3,
"func": "all(#3)"
}```
### Response
```Status: 200```
```{"message":"stragtegy created"}```
"""
def create(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/strategy>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
* ex. /api/v1/strategy/904
### Response
```Status: 200```
```{"message":"strategy:904 has been deleted"}```
"""
def delete(strategy_id, sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/strategy/#{strategy_id}>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.delete
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
* ex. /api/v1/strategy/904
### Response
```Status: 200```
```{
"id": 904,
"metric": "agent.alive",
"tags": "",
"max_step": 3,
"priority": 1,
"func": "all(#3)",
"op": "==",
"right_value": "1",
"note": "this is a test",
"run_begin": "00:00",
"run_end": "24:00",
"tpl_id": 221
}```
"""
def info_by_id(strategy_id, sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/strategy/#{strategy_id}>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Response
```Status: 200```
```[
{
"id": 893,
"metric": "process.num",
"tags": "name=redis",
"max_step": 3,
"priority": 2,
"func": "all(#2)",
"op": "<",
"right_value": "1",
"note": "Redis异常",
"run_begin": "",
"run_end": "",
"tpl_id": 221
},
{
"id": 894,
"metric": "process.num",
"tags": "name=smtp",
"max_step": 3,
"priority": 2,
"func": "all(#3)",
"op": "<",
"right_value": "1",
"note": "Smtp异常",
"run_begin": "",
"run_end": "",
"tpl_id": 221
},
{
"id": 895,
"metric": "process.num",
"tags": "cmdline=logger",
"max_step": 3,
"priority": 3,
"func": "all(#5)",
"op": "<",
"right_value": "2",
"note": "logger异常",
"run_begin": "",
"run_end": "",
"tpl_id": 221
}
]```
"""
def list(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/strategy>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.get
|> Api.get_result
end
@doc """
* [Session](#/authentication) Required
### Request
```{
"tags": "",
"run_end": "",
"run_begin": "",
"right_value": "1",
"priority": 2,
"op": "==",
"note": "this is a test",
"metric": "agent.alive",
"max_step": 3,
"id": 904,
"func": "all(#3)"
}```
### Response
```Status: 200```
```{"message":"stragtegy:904 has been updated"}```
"""
def update(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/strategy>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.put
|> Api.get_result
end
end
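A hedged usage sketch of the shared request pipeline above (`sig` and `addr` come from your Falcon+ session and API address; the exact result shape depends on `Api.get_result`, which is not shown here):

```elixir
# List all strategies, then inspect one by id (id is illustrative).
strategies = FalconPlusApi.Api.Strategy.list(sig, addr)
strategy = FalconPlusApi.Api.Strategy.info_by_id(904, sig, addr)
```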
|
lib/falcon_plus_api/api/strategy.ex
| 0.595375
| 0.748145
|
strategy.ex
|
starcoder
|
defimpl ExPlasma.TypedData, for: ExPlasma.Transaction do
import ExPlasma.Encoding, only: [to_binary: 1, keccak_hash: 1]
import ABI.TypeEncoder, only: [encode_raw: 2]
alias ExPlasma.Configuration
alias ExPlasma.Output
alias ExPlasma.TypedData
# Prefix and version byte motivated by http://eips.ethereum.org/EIPS/eip-191
@eip_191_prefix <<0x19, 0x01>>
@domain_signature "EIP712Domain(string name,string version,address verifyingContract,bytes32 salt)"
@signature "Transaction(uint256 txType,Input input0,Input input1,Input input2,Input input3,Output output0,Output output1,Output output2,Output output3,uint256 txData,bytes32 metadata)"
@output_signature "Output(uint256 outputType,bytes20 outputGuard,address currency,uint256 amount)"
@input_signature "Input(uint256 blknum,uint256 txindex,uint256 oindex)"
# The full encoded signature for the transaction
@encoded_signature @signature <> @input_signature <> @output_signature
# NB: Currently we only support 1 type of transaction: Payment.
@max_output_count 4
@empty_input Output.decode_id(<<0>>)
@empty_input_hash TypedData.hash(@empty_input, as: :input)
@empty_output %Output{
output_type: 0,
output_data: %{output_guard: <<0::160>>, token: <<0::160>>, amount: 0}
}
@empty_output_hash TypedData.hash(@empty_output, as: :output)
def encode(%{} = transaction, _options) do
encoded_inputs = Enum.map(transaction.inputs, &encode_as_input/1)
encoded_outputs = Enum.map(transaction.outputs, &encode_as_output/1)
encoded_transaction_type = encode_raw([transaction.tx_type], [{:uint, 256}])
encoded_transaction_data = encode_raw([transaction.tx_data], [{:uint, 256}])
encoded_metadata = encode_raw([transaction.metadata], [{:bytes, 32}])
[
@eip_191_prefix,
domain_separator(),
@encoded_signature,
encoded_transaction_type,
encoded_inputs,
encoded_outputs,
encoded_transaction_data,
encoded_metadata
]
end
def hash(%{} = transaction, options), do: transaction |> encode(options) |> hash(options)
def hash([prefix, domain_separator | encoded_transaction], _options) do
keccak_hash(prefix <> hash_domain(domain_separator) <> hash_encoded(encoded_transaction))
end
defp domain_separator() do
domain = Configuration.eip_712_domain()
[
@domain_signature,
domain.name,
domain.version,
domain.verifying_contract,
domain.salt
]
end
defp hash_domain([signature, name, version, verifying_contract, salt]) do
[
keccak_hash(signature),
keccak_hash(name),
keccak_hash(version),
encode_raw([to_binary(verifying_contract)], [:address]),
encode_raw([to_binary(salt)], [{:bytes, 32}])
]
|> Enum.join()
|> keccak_hash()
end
defp hash_encoded([signature, transaction_type, inputs, outputs, transaction_data, metadata]) do
[
keccak_hash(signature),
transaction_type,
hash_inputs(inputs),
hash_outputs(outputs),
transaction_data,
metadata
]
|> List.flatten()
|> Enum.join()
|> keccak_hash()
end
defp encode_as_input(output), do: TypedData.encode(output, as: :input)
defp encode_as_output(output), do: TypedData.encode(output, as: :output)
defp hash_inputs(inputs) do
inputs
|> Stream.map(&hash_output/1)
|> Stream.concat(Stream.cycle([@empty_input_hash]))
|> Enum.take(@max_output_count)
end
defp hash_outputs(outputs) do
outputs
|> Stream.map(&hash_output/1)
|> Stream.concat(Stream.cycle([@empty_output_hash]))
|> Enum.take(@max_output_count)
end
defp hash_output([signature | encoded_list]) do
data = [keccak_hash(signature) | encoded_list]
data
|> Enum.join()
|> keccak_hash()
end
end
|
lib/ex_plasma/typed_data/transaction.ex
| 0.67854
| 0.456168
|
transaction.ex
|
starcoder
|
defmodule Geometry.LineString do
@moduledoc """
A line-string struct, representing a 2D line.
A non-empty line-string requires at least two points.
"""
alias Geometry.{GeoJson, LineString, Point, WKB, WKT}
defstruct points: []
@type t :: %LineString{points: Geometry.coordinates()}
@doc """
Creates an empty `LineString`.
## Examples
iex> LineString.new()
%LineString{points: []}
"""
@spec new :: t()
def new, do: %LineString{}
@doc """
Creates a `LineString` from the given `Geometry.Point`s.
## Examples
iex> LineString.new([Point.new(1, 2), Point.new(3, 4)])
%LineString{points: [[1, 2], [3, 4]]}
"""
@spec new([Point.t()]) :: t()
def new([]), do: %LineString{}
def new([_, _ | _] = points) do
%LineString{points: Enum.map(points, fn point -> point.coordinate end)}
end
@doc """
Returns `true` if the given `LineString` is empty.
## Examples
iex> LineString.empty?(LineString.new())
true
iex> LineString.empty?(
...> LineString.new(
...> [Point.new(1, 2), Point.new(3, 4)]
...> )
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%LineString{} = line_string), do: Enum.empty?(line_string.points)
@doc """
Creates a `LineString` from the given coordinates.
## Examples
iex> LineString.from_coordinates(
...> [[-1, 1], [-2, 2], [-3, 3]]
...> )
%LineString{
points: [
[-1, 1],
[-2, 2],
[-3, 3]
]
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(coordinates), do: %LineString{points: coordinates}
@doc """
Returns an `:ok` tuple with the `LineString` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "LineString",
...> "coordinates": [
...> [1.1, 1.2],
...> [20.1, 20.2]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> LineString.from_geo_json()
{:ok, %LineString{points: [
[1.1, 1.2],
[20.1, 20.2]
]}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_line_string(json, LineString)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_line_string(json, LineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `LineString`.
## Examples
iex> LineString.to_geo_json(
...> LineString.new([
...> Point.new(-1.1, -2.2),
...> Point.new(1.1, 2.2)
...> ])
...> )
%{
"type" => "LineString",
"coordinates" => [
[-1.1, -2.2],
[1.1, 2.2]
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%LineString{points: points}) do
%{
"type" => "LineString",
"coordinates" => points
}
end
@doc """
Returns an `:ok` tuple with the `LineString` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> LineString.from_wkt(
...> "LineString (-5.1 7.8, 0.1 0.2)"
...> )
{:ok, %LineString{
points: [
[-5.1, 7.8],
[0.1, 0.2]
]
}}
iex> LineString.from_wkt(
...> "SRID=7219;LineString (-5.1 7.8, 0.1 0.2)"
...> )
{:ok, {
%LineString{
points: [
[-5.1, 7.8],
[0.1, 0.2]
]
},
7219
}}
iex> LineString.from_wkt("LineString EMPTY")
{:ok, %LineString{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t()} | {:ok, t(), Geometry.srid()} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, LineString)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, LineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `LineString`. With option `:srid` an
EWKT representation with the SRID is returned.
## Examples
iex> LineString.to_wkt(LineString.new())
"LineString EMPTY"
iex> LineString.to_wkt(
...> LineString.new([
...> Point.new(7.1, 8.1),
...> Point.new(9.2, 5.2)
...> ])
...> )
"LineString (7.1 8.1, 9.2 5.2)"
iex> LineString.to_wkt(
...> LineString.new([
...> Point.new(7.1, 8.1),
...> Point.new(9.2, 5.2)
...> ]),
...> srid: 123
...> )
"SRID=123;LineString (7.1 8.1, 9.2 5.2)"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%LineString{points: points}, opts \\ []) do
WKT.to_ewkt(<<"LineString ", to_wkt_points(points)::binary()>>, opts)
end
@doc """
Returns the WKB representation for a `LineString`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `:endian` indicates whether an `:xdr` big endian or an `:ndr`
little endian representation is returned. The default is `:xdr`.
The option `:mode` determines whether a hex-string or a binary is returned.
The default is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.to_wkb/1` function.
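## Examples
A minimal sketch, derived from the encoding implemented further down in
this module (byte order, then type code, then the point count):
iex> LineString.to_wkb(LineString.new(), mode: :hex)
"000000020000000000"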
"""
@spec to_wkb(line_string, opts) :: wkb
when line_string: t() | Geometry.coordinates(),
opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()],
wkb: Geometry.wkb()
def to_wkb(%LineString{points: points}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(points, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `LineString` from the given WKB string.
Otherwise returns an `:error` tuple.
If the geometry contains an SRID, the ID is added to the tuple.
The optional second argument determines whether a `:hex`-string or a
`:binary` input is expected. The default is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.Point.from_wkb/2` function.
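## Examples
A minimal sketch, assuming the hex representation produced by `to_wkb/2`
above:
iex> LineString.from_wkb("000000020000000000", :hex)
{:ok, %LineString{}}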
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, LineString)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, LineString) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc false
@compile {:inline, to_wkt_points: 1}
@spec to_wkt_points(Geometry.coordinates()) :: Geometry.wkt()
def to_wkt_points([]), do: "EMPTY"
def to_wkt_points([coordinate | points]) do
<<"(",
Enum.reduce(points, Point.to_wkt_coordinate(coordinate), fn coordinate, acc ->
<<acc::binary(), ", ", Point.to_wkt_coordinate(coordinate)::binary()>>
end)::binary(), ")">>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(coordinates, srid, endian, mode) :: wkb
when coordinates: Geometry.coordinates(),
srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(points, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_points(points, endian, mode)::binary()
>>
end
@doc false
@compile {:inline, to_wkb_points: 3}
@spec to_wkb_points(coordinates, endian, mode) :: wkb
when coordinates: Geometry.coordinates(),
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb_points(points, endian, mode) do
Enum.reduce(points, WKB.length(points, endian, mode), fn coordinate, acc ->
<<acc::binary(), Point.to_wkb_coordinate(coordinate, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "00000002"
{:ndr, false} -> "02000000"
{:xdr, true} -> "20000002"
{:ndr, true} -> "02000020"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x00000002::big-integer-size(32)>>
{:ndr, false} -> <<0x00000002::little-integer-size(32)>>
{:xdr, true} -> <<0x20000002::big-integer-size(32)>>
{:ndr, true} -> <<0x20000002::little-integer-size(32)>>
end
end
end
# source: lib/geometry/line_string.ex
defmodule MafiaEngine.Players do
@moduledoc """
This module defines the type for a player list and functions to handle it.
## Examples
iex> p = MafiaEngine.Players.new()
[]
iex> {:ok, p} = MafiaEngine.Players.add(p, "Abed")
{:ok, [%MafiaEngine.Player{alive: true, name: "Abed", role: :unknown}]}
iex> {:ok, p} = MafiaEngine.Players.add(p, "Jeff")
{:ok,
[%MafiaEngine.Player{alive: true, name: "Jeff", role: :unknown},
%MafiaEngine.Player{alive: true, name: "Abed", role: :unknown}]
}
iex> MafiaEngine.Players.names(p)
["Jeff", "Abed"]
iex> p = MafiaEngine.Players.remove(p, "Abed")
[%MafiaEngine.Player{alive: true, name: "Jeff", role: :unknown}]
iex> MafiaEngine.Players.set_roles(p, [:townie])
[%MafiaEngine.Player{alive: true, name: "Jeff", role: :townie}]
"""
alias MafiaEngine.Player
@type t :: list(MafiaEngine.Player.t())
@doc """
Creates a new player list.
"""
@spec new() :: t
def new(), do: []
@doc """
Adds a new player with the given `name` unless `name` is taken.
## Examples
iex> p = MafiaEngine.Players.new()
[]
iex> {:ok, p} = MafiaEngine.Players.add(p, "Abed")
{:ok, [%MafiaEngine.Player{alive: true, name: "Abed", role: :unknown}]}
iex> MafiaEngine.Players.add(p, "Abed")
{:error, :name_already_taken}
"""
@spec add(t, String.t()) :: {:ok, t} | {:error, :name_already_taken}
def add(players, name) when is_binary(name) do
if name in names(players) do
{:error, :name_already_taken}
else
{:ok, [Player.new(name) | players]}
end
end
@doc """
Removes the player with the given `name` from the list if it exists.
"""
@spec remove(t, String.t()) :: t
def remove(players, name) do
Enum.reject(players, fn p -> p.name == name end)
end
@doc """
Returns the player with the given `name` from the list.
If the player does not exist, `:none` is returned instead.
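## Examples
iex> {:ok, p} = MafiaEngine.Players.new() |> MafiaEngine.Players.add("Abed")
iex> MafiaEngine.Players.get(p, "Abed")
%MafiaEngine.Player{alive: true, name: "Abed", role: :unknown}
iex> MafiaEngine.Players.get(p, "Britta")
:none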
"""
@spec get(t, String.t()) :: MafiaEngine.Player.t() | :none
def get(players, name) do
Enum.find(players, :none, fn p -> p.name == name end)
end
@doc """
Assigns a role from `role_list` at random to each player.
The `role_list` should have the same length as the player list.
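## Examples
A doctest sketch; with a single player the shuffle is deterministic:
iex> {:ok, p} = MafiaEngine.Players.new() |> MafiaEngine.Players.add("Abed")
iex> MafiaEngine.Players.set_roles(p, [:townie])
[%MafiaEngine.Player{alive: true, name: "Abed", role: :townie}]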
"""
@spec set_roles(t, list(MafiaEngine.Role.t())) :: t
def set_roles(players, role_list) do
  role_list
  |> Enum.shuffle()
  |> Enum.zip(players)
  |> Enum.map(fn {role, player} -> Player.set_role(player, role) end)
end
@doc """
Sets the role of the player with the given `name` to `role`.
"""
@spec set_role(t, String.t(), MafiaEngine.Role.t()) :: t
def set_role(players, name, role) do
Enum.map(
players,
fn
p when p.name == name -> Player.set_role(p, role)
p -> p
end
)
end
@doc """
Sets the `alive` field of the player with the given `name` to `false`.
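## Examples
A doctest sketch, assuming `Player.kill/1` only flips the `alive` flag:
iex> {:ok, p} = MafiaEngine.Players.new() |> MafiaEngine.Players.add("Jeff")
iex> MafiaEngine.Players.kill(p, "Jeff")
[%MafiaEngine.Player{alive: false, name: "Jeff", role: :unknown}]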
"""
@spec kill(t, String.t()) :: t
def kill(players, name) do
Enum.map(
players,
fn
p when p.name == name -> Player.kill(p)
p -> p
end
)
end
@doc """
Returns a list with the player names.
"""
@spec names(t) :: list(String.t())
def names(players) do
Enum.map(players, fn p -> p.name end)
end
end
# source: lib/mafia_engine/players.ex
defmodule AWS.CloudFront do
@moduledoc """
Amazon CloudFront
This is the *Amazon CloudFront API Reference*. This guide is for developers
who need detailed information about CloudFront API actions, data types, and
errors. For detailed information about CloudFront features, see the *Amazon
CloudFront Developer Guide*.
"""
@doc """
Creates a cache policy.
After you create a cache policy, you can attach it to one or more cache
behaviors. When it’s attached to a cache behavior, the cache policy
determines the following:
<ul> <li> The values that CloudFront includes in the *cache key*. These
values can include HTTP headers, cookies, and URL query strings. CloudFront
uses the cache key to find an object in its cache that it can return to the
viewer.
</li> <li> The default, minimum, and maximum time to live (TTL) values that
you want objects to stay in the CloudFront cache.
</li> </ul> The headers, cookies, and query strings that are included in
the cache key are automatically included in requests that CloudFront sends
to the origin. CloudFront sends a request when it can’t find an object in
its cache that matches the request’s cache key. If you want to send values
to the origin but *not* include them in the cache key, use
`OriginRequestPolicy`.
For more information about cache policies, see [Controlling the cache
key](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
in the *Amazon CloudFront Developer Guide*.
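## Example
A hedged usage sketch; `client` is assumed to be a configured `AWS.Client`
struct, and the `"CachePolicyConfig"` shape follows the CloudFront API
reference rather than anything defined in this module:

    AWS.CloudFront.create_cache_policy(client, %{
      "CachePolicyConfig" => %{
        "Name" => "example-policy",
        "MinTTL" => 0,
        "DefaultTTL" => 86_400,
        "MaxTTL" => 31_536_000
      }
    })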
"""
def create_cache_policy(client, input, options \\ []) do
path_ = "/2020-05-31/cache-policy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new origin access identity. If you're using Amazon S3 for your
origin, you can use an origin access identity to require users to access
your content using a CloudFront URL instead of the Amazon S3 URL. For more
information about how to use origin access identities, see [Serving Private
Content through
CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_cloud_front_origin_access_identity(client, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new web distribution. You create a CloudFront distribution to
tell CloudFront where you want content to be delivered from, and the
details about how to track and manage content delivery. Send a `POST`
request to the `/*CloudFront API version*/distribution`/`distribution ID`
resource.
<important> When you update a distribution, there are more required fields
than when you create a distribution. When you update your distribution by
using
[UpdateDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html),
follow the steps included in the documentation to get the current
configuration and then make your updates. This helps to make sure that you
include all of the required fields. To view a summary, see [Required Fields
for Create Distribution and Update
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html)
in the *Amazon CloudFront Developer Guide*.
</important>
"""
def create_distribution(client, input, options \\ []) do
path_ = "/2020-05-31/distribution"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new distribution with tags.
"""
def create_distribution_with_tags(client, input, options \\ []) do
path_ = "/2020-05-31/distribution?WithTags"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new field-level encryption configuration.
"""
def create_field_level_encryption_config(client, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a field-level encryption profile.
"""
def create_field_level_encryption_profile(client, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new invalidation.
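## Example
A hedged usage sketch; the distribution ID and the `"InvalidationBatch"`
shape are illustrative assumptions based on the CloudFront API reference:

    AWS.CloudFront.create_invalidation(client, "EDFDVBD6EXAMPLE", %{
      "InvalidationBatch" => %{
        "CallerReference" => "2021-01-01-unique-ref",
        "Paths" => %{"Quantity" => 1, "Items" => ["/index.html"]}
      }
    })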
"""
def create_invalidation(client, distribution_id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a key group that you can use with [CloudFront signed URLs and
signed
cookies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html).
To create a key group, you must specify at least one public key for the key
group. After you create a key group, you can reference it from one or more
cache behaviors. When you reference a key group in a cache behavior,
CloudFront requires signed URLs or signed cookies for all requests that
match the cache behavior. The URLs or cookies must be signed with a private
key whose corresponding public key is in the key group. The signed URL or
cookie contains information about which public key CloudFront should use to
verify the signature. For more information, see [Serving private
content](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_key_group(client, input, options \\ []) do
path_ = "/2020-05-31/key-group"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Enables additional CloudWatch metrics for the specified CloudFront
distribution. The additional metrics incur an additional cost.
For more information, see [Viewing additional CloudFront distribution
metrics](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/viewing-cloudfront-metrics.html#monitoring-console.distributions-additional)
in the *Amazon CloudFront Developer Guide*.
"""
def create_monitoring_subscription(client, distribution_id, input, options \\ []) do
path_ = "/2020-05-31/distributions/#{URI.encode(distribution_id)}/monitoring-subscription"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Creates an origin request policy.
After you create an origin request policy, you can attach it to one or more
cache behaviors. When it’s attached to a cache behavior, the origin request
policy determines the values that CloudFront includes in requests that it
sends to the origin. Each request that CloudFront sends to the origin
includes the following:
<ul> <li> The request body and the URL path (without the domain name) from
the viewer request.
</li> <li> The headers that CloudFront automatically includes in every
origin request, including `Host`, `User-Agent`, and `X-Amz-Cf-Id`.
</li> <li> All HTTP headers, cookies, and URL query strings that are
specified in the cache policy or the origin request policy. These can
include items from the viewer request and, in the case of headers,
additional ones that are added by CloudFront.
</li> </ul> CloudFront sends a request when it can’t find a valid object in
its cache that matches the request. If you want to send values to the
origin and also include them in the cache key, use `CachePolicy`.
For more information about origin request policies, see [Controlling origin
requests](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
in the *Amazon CloudFront Developer Guide*.
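## Example
A hedged usage sketch; the `"OriginRequestPolicyConfig"` field names are
assumptions based on the CloudFront API reference:

    AWS.CloudFront.create_origin_request_policy(client, %{
      "OriginRequestPolicyConfig" => %{
        "Name" => "forward-viewer-headers",
        "HeadersConfig" => %{"HeaderBehavior" => "allViewer"},
        "CookiesConfig" => %{"CookieBehavior" => "none"},
        "QueryStringsConfig" => %{"QueryStringBehavior" => "none"}
      }
    })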
"""
def create_origin_request_policy(client, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Uploads a public key to CloudFront that you can use with [signed URLs and
signed
cookies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html),
or with [field-level
encryption](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/field-level-encryption.html).
"""
def create_public_key(client, input, options \\ []) do
path_ = "/2020-05-31/public-key"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a real-time log configuration.
After you create a real-time log configuration, you can attach it to one or
more cache behaviors to send real-time log data to the specified Amazon
Kinesis data stream.
For more information about real-time log configurations, see [Real-time
logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_realtime_log_config(client, input, options \\ []) do
path_ = "/2020-05-31/realtime-log-config"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 201)
end
@doc """
Creates a new RTMP distribution. An RTMP distribution is similar to a web
distribution, but an RTMP distribution streams media files using the Adobe
Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP.
To create a new distribution, submit a `POST` request to the *CloudFront
API version*/distribution resource. The request body must include a
document with a *StreamingDistributionConfig* element. The response echoes
the `StreamingDistributionConfig` element and returns other information
about the RTMP distribution.
To get the status of your request, use the *GET StreamingDistribution* API
action. When the value of `Enabled` is `true` and the value of `Status` is
`Deployed`, your distribution is ready. A distribution usually deploys in
less than 15 minutes.
For more information about web distributions, see [Working with RTMP
Distributions](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html)
in the *Amazon CloudFront Developer Guide*.
<important> Beginning with the 2012-05-05 version of the CloudFront API, we
made substantial changes to the format of the XML document that you include
in the request body when you create or update a web distribution or an RTMP
distribution, and when you invalidate objects. With previous versions of
the API, we discovered that it was too easy to accidentally delete one or
more values for an element that accepts multiple values, for example,
CNAMEs and trusted signers. Our changes for the 2012-05-05 release are
intended to prevent these accidental deletions and to notify you when
there's a mismatch between the number of values you say you're specifying
in the `Quantity` element and the number of values specified.
</important>
"""
def create_streaming_distribution(client, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new streaming distribution with tags.
"""
def create_streaming_distribution_with_tags(client, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution?WithTags"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Deletes a cache policy.
You cannot delete a cache policy if it’s attached to a cache behavior.
First update your distributions to remove the cache policy from all cache
behaviors, then delete the cache policy.
To delete a cache policy, you must provide the policy’s identifier and
version. To get these values, you can use `ListCachePolicies` or
`GetCachePolicy`.
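## Example
A sketch of the delete flow; `get_cache_policy/3` injects the `"ETag"`
response header into the body, which supplies the required version:

    {:ok, policy, _response} = AWS.CloudFront.get_cache_policy(client, id)
    AWS.CloudFront.delete_cache_policy(client, id, %{"IfMatch" => policy["ETag"]})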
"""
def delete_cache_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Delete an origin access identity.
"""
def delete_cloud_front_origin_access_identity(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Delete a distribution.
"""
def delete_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a field-level encryption configuration.
"""
def delete_field_level_encryption_config(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a field-level encryption profile.
"""
def delete_field_level_encryption_profile(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes a key group.
You cannot delete a key group that is referenced in a cache behavior. First
update your distributions to remove the key group from all cache behaviors,
then delete the key group.
To delete a key group, you must provide the key group’s identifier and
version. To get these values, use `ListKeyGroups` followed by `GetKeyGroup`
or `GetKeyGroupConfig`.
"""
def delete_key_group(client, id, input, options \\ []) do
path_ = "/2020-05-31/key-group/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Disables additional CloudWatch metrics for the specified CloudFront
distribution.
"""
def delete_monitoring_subscription(client, distribution_id, input, options \\ []) do
path_ = "/2020-05-31/distributions/#{URI.encode(distribution_id)}/monitoring-subscription"
headers = []
query_ = []
request(client, :delete, path_, query_, headers, input, options, nil)
end
@doc """
Deletes an origin request policy.
You cannot delete an origin request policy if it’s attached to any cache
behaviors. First update your distributions to remove the origin request
policy from all cache behaviors, then delete the origin request policy.
To delete an origin request policy, you must provide the policy’s
identifier and version. To get the identifier, you can use
`ListOriginRequestPolicies` or `GetOriginRequestPolicy`.
"""
def delete_origin_request_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a public key you previously added to CloudFront.
"""
def delete_public_key(client, id, input, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes a real-time log configuration.
You cannot delete a real-time log configuration if it’s attached to a cache
behavior. First update your distributions to remove the real-time log
configuration from all cache behaviors, then delete the real-time log
configuration.
To delete a real-time log configuration, you can provide the
configuration’s name or its Amazon Resource Name (ARN). You must provide at
least one. If you provide both, CloudFront uses the name to identify the
real-time log configuration to delete.
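## Example
A hedged sketch; the input carries either `"Name"` or `"ARN"`:

    AWS.CloudFront.delete_realtime_log_config(client, %{"Name" => "my-log-config"})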
"""
def delete_realtime_log_config(client, input, options \\ []) do
path_ = "/2020-05-31/delete-realtime-log-config/"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Delete a streaming distribution. To delete an RTMP distribution using the
CloudFront API, perform the following steps.
**To delete an RTMP distribution using the CloudFront API**:
<ol> <li> Disable the RTMP distribution.
</li> <li> Submit a `GET Streaming Distribution Config` request to get the
current configuration and the `Etag` header for the distribution.
</li> <li> Update the XML document that was returned in the response to
your `GET Streaming Distribution Config` request to change the value of
`Enabled` to `false`.
</li> <li> Submit a `PUT Streaming Distribution Config` request to update
the configuration for your distribution. In the request body, include the
XML document that you updated in Step 3. Then set the value of the HTTP
`If-Match` header to the value of the `ETag` header that CloudFront
returned when you submitted the `GET Streaming Distribution Config` request
in Step 2.
</li> <li> Review the response to the `PUT Streaming Distribution Config`
request to confirm that the distribution was successfully disabled.
</li> <li> Submit a `GET Streaming Distribution Config` request to confirm
that your changes have propagated. When propagation is complete, the value
of `Status` is `Deployed`.
</li> <li> Submit a `DELETE Streaming Distribution` request. Set the value
of the HTTP `If-Match` header to the value of the `ETag` header that
CloudFront returned when you submitted the `GET Streaming Distribution
Config` request in Step 2.
</li> <li> Review the response to your `DELETE Streaming Distribution`
request to confirm that the distribution was successfully deleted.
</li> </ol> For information about deleting a distribution using the
CloudFront console, see [Deleting a
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html)
in the *Amazon CloudFront Developer Guide*.
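## Example
A hedged sketch of the disable-then-delete flow above;
`update_streaming_distribution/4` is assumed to be defined alongside the
other update functions in this module:

    {:ok, body, _} = AWS.CloudFront.get_streaming_distribution_config(client, id)
    config = Map.put(body["StreamingDistributionConfig"], "Enabled", false)
    input = %{"StreamingDistributionConfig" => config, "IfMatch" => body["ETag"]}
    {:ok, updated, _} = AWS.CloudFront.update_streaming_distribution(client, id, input)
    # Wait until the distribution reports a "Deployed" status (steps 5-6),
    # then delete it with the ETag returned by the update.
    AWS.CloudFront.delete_streaming_distribution(client, id, %{"IfMatch" => updated["ETag"]})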
"""
def delete_streaming_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Gets a cache policy, including the following metadata:
<ul> <li> The policy’s identifier.
</li> <li> The date and time when the policy was last modified.
</li> </ul> To get a cache policy, you must provide the policy’s
identifier. If the cache policy is attached to a distribution’s cache
behavior, you can get the policy’s identifier using `ListDistributions` or
`GetDistribution`. If the cache policy is not attached to a cache behavior,
you can get the identifier using `ListCachePolicies`.
"""
def get_cache_policy(client, id, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a cache policy configuration.
To get a cache policy configuration, you must provide the policy’s
identifier. If the cache policy is attached to a distribution’s cache
behavior, you can get the policy’s identifier using `ListDistributions` or
`GetDistribution`. If the cache policy is not attached to a cache behavior,
you can get the identifier using `ListCachePolicies`.
"""
def get_cache_policy_config(client, id, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about an origin access identity.
"""
def get_cloud_front_origin_access_identity(client, id, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about an origin access identity.
"""
def get_cloud_front_origin_access_identity_config(client, id, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about a distribution.
"""
def get_distribution(client, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about a distribution.
"""
def get_distribution_config(client, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption configuration information.
"""
def get_field_level_encryption(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption configuration information.
"""
def get_field_level_encryption_config(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption profile information.
"""
def get_field_level_encryption_profile(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption profile configuration information.
"""
def get_field_level_encryption_profile_config(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about an invalidation.
"""
def get_invalidation(client, distribution_id, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a key group, including the date and time when the key group was last
modified.
To get a key group, you must provide the key group’s identifier. If the key
group is referenced in a distribution’s cache behavior, you can get the key
group’s identifier using `ListDistributions` or `GetDistribution`. If the
key group is not referenced in a cache behavior, you can get the identifier
using `ListKeyGroups`.
"""
def get_key_group(client, id, options \\ []) do
path_ = "/2020-05-31/key-group/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a key group configuration.
To get a key group configuration, you must provide the key group’s
identifier. If the key group is referenced in a distribution’s cache
behavior, you can get the key group’s identifier using `ListDistributions`
or `GetDistribution`. If the key group is not referenced in a cache
behavior, you can get the identifier using `ListKeyGroups`.
"""
def get_key_group_config(client, id, options \\ []) do
path_ = "/2020-05-31/key-group/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets information about whether additional CloudWatch metrics are enabled
for the specified CloudFront distribution.
"""
def get_monitoring_subscription(client, distribution_id, options \\ []) do
path_ = "/2020-05-31/distributions/#{URI.encode(distribution_id)}/monitoring-subscription"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets an origin request policy, including the following metadata:
<ul> <li> The policy’s identifier.
</li> <li> The date and time when the policy was last modified.
</li> </ul> To get an origin request policy, you must provide the policy’s
identifier. If the origin request policy is attached to a distribution’s
cache behavior, you can get the policy’s identifier using
`ListDistributions` or `GetDistribution`. If the origin request policy is
not attached to a cache behavior, you can get the identifier using
`ListOriginRequestPolicies`.
"""
def get_origin_request_policy(client, id, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets an origin request policy configuration.
To get an origin request policy configuration, you must provide the
policy’s identifier. If the origin request policy is attached to a
distribution’s cache behavior, you can get the policy’s identifier using
`ListDistributions` or `GetDistribution`. If the origin request policy is
not attached to a cache behavior, you can get the identifier using
`ListOriginRequestPolicies`.
"""
def get_origin_request_policy_config(client, id, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a public key.
"""
def get_public_key(client, id, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a public key configuration.
"""
def get_public_key_config(client, id, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a real-time log configuration.
To get a real-time log configuration, you can provide the configuration’s
name or its Amazon Resource Name (ARN). You must provide at least one. If
you provide both, CloudFront uses the name to identify the real-time log
configuration to get.
"""
def get_realtime_log_config(client, input, options \\ []) do
path_ = "/2020-05-31/get-realtime-log-config/"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
Gets information about a specified RTMP distribution, including the
distribution configuration.
"""
def get_streaming_distribution(client, id, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about a streaming distribution.
"""
def get_streaming_distribution_config(client, id, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a list of cache policies.
You can optionally apply a filter to return only the managed policies
created by AWS, or only the custom policies created in your AWS account.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
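## Example
A hedged pagination sketch; the `"CachePolicyList"` response shape is an
assumption based on the CloudFront API reference:

    {:ok, %{"CachePolicyList" => %{"NextMarker" => marker}}, _} =
      AWS.CloudFront.list_cache_policies(client, nil, "10")
    {:ok, _next_page, _} = AWS.CloudFront.list_cache_policies(client, marker, "10")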
"""
def list_cache_policies(client, marker \\ nil, max_items \\ nil, type \\ nil, options \\ []) do
path_ = "/2020-05-31/cache-policy"
headers = []
query_ = []
query_ = if !is_nil(type) do
[{"Type", type} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists origin access identities.
"""
def list_cloud_front_origin_access_identities(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List CloudFront distributions.
"""
def list_distributions(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distribution"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distribution IDs for distributions that have a cache
behavior that’s associated with the specified cache policy.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_cache_policy_id(client, cache_policy_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByCachePolicyId/#{URI.encode(cache_policy_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distribution IDs for distributions that have a cache
behavior that references the specified key group.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_key_group(client, key_group_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByKeyGroupId/#{URI.encode(key_group_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distribution IDs for distributions that have a cache
behavior that’s associated with the specified origin request policy.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_origin_request_policy_id(client, origin_request_policy_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByOriginRequestPolicyId/#{URI.encode(origin_request_policy_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distributions that have a cache behavior that’s associated
with the specified real-time log configuration.
You can specify the real-time log configuration by its name or its Amazon
Resource Name (ARN). You must provide at least one. If you provide both,
CloudFront uses the name to identify the real-time log configuration to
list distributions for.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_realtime_log_config(client, input, options \\ []) do
path_ = "/2020-05-31/distributionsByRealtimeLogConfig/"
headers = []
query_ = []
request(client, :post, path_, query_, headers, input, options, nil)
end
@doc """
List the distributions that are associated with a specified AWS WAF web
ACL.
"""
def list_distributions_by_web_a_c_l_id(client, web_a_c_l_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByWebACLId/#{URI.encode(web_a_c_l_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all field-level encryption configurations that have been created in
CloudFront for this account.
"""
def list_field_level_encryption_configs(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/field-level-encryption"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Request a list of field-level encryption profiles that have been created in
CloudFront for this account.
"""
def list_field_level_encryption_profiles(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists invalidation batches.
"""
def list_invalidations(client, distribution_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of key groups.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_key_groups(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/key-group"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of origin request policies.
You can optionally apply a filter to return only the managed policies
created by AWS, or only the custom policies created in your AWS account.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_origin_request_policies(client, marker \\ nil, max_items \\ nil, type \\ nil, options \\ []) do
path_ = "/2020-05-31/origin-request-policy"
headers = []
query_ = []
query_ = if !is_nil(type) do
[{"Type", type} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all public keys that have been added to CloudFront for this account.
"""
def list_public_keys(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/public-key"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of real-time log configurations.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_realtime_log_configs(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/realtime-log-config"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List streaming distributions.
"""
def list_streaming_distributions(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/streaming-distribution"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List tags for a CloudFront resource.
"""
def list_tags_for_resource(client, resource, options \\ []) do
path_ = "/2020-05-31/tagging"
headers = []
query_ = []
query_ = if !is_nil(resource) do
[{"Resource", resource} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Add tags to a CloudFront resource.
"""
def tag_resource(client, input, options \\ []) do
path_ = "/2020-05-31/tagging?Operation=Tag"
headers = []
{query_, input} =
[
{"Resource", "Resource"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Remove tags from a CloudFront resource.
"""
def untag_resource(client, input, options \\ []) do
path_ = "/2020-05-31/tagging?Operation=Untag"
headers = []
{query_, input} =
[
{"Resource", "Resource"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Updates a cache policy configuration.
When you update a cache policy configuration, all the fields are updated
with the values provided in the request. You cannot update some fields
independent of others. To update a cache policy configuration:
<ol> <li> Use `GetCachePolicyConfig` to get the current configuration.
</li> <li> Locally modify the fields in the cache policy configuration that
you want to update.
</li> <li> Call `UpdateCachePolicy` by providing the entire cache policy
configuration, including the fields that you modified and those that you
didn’t.
</li> </ol>
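## Example
A hedged sketch of the read-modify-write flow above; the `"Comment"`
field is illustrative:

    {:ok, body, _} = AWS.CloudFront.get_cache_policy_config(client, id)
    config = Map.put(body["CachePolicyConfig"], "Comment", "updated comment")
    AWS.CloudFront.update_cache_policy(client, id, %{
      "CachePolicyConfig" => config,
      "IfMatch" => body["ETag"]
    })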
"""
def update_cache_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update an origin access identity.
"""
def update_cloud_front_origin_access_identity(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates the configuration for a web distribution.
**Important:** When you update a distribution, there are more required
fields than when you create a distribution. When you update your
distribution by using this API action, follow the steps here to get the
current configuration and then make your updates, to make sure that you
include all of the required fields. To view a summary, see [Required Fields
for Create Distribution and Update
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html)
in the *Amazon CloudFront Developer Guide*.

The update process includes getting the current distribution configuration,
updating the XML document that is returned to make your changes, and then
submitting an `UpdateDistribution` request to make the updates.

For information about updating a distribution using the CloudFront console
instead, see [Creating a
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html)
in the *Amazon CloudFront Developer Guide*.

**To update a web distribution using the CloudFront API**

1. Submit a
[GetDistributionConfig](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html)
request to get the current configuration and an `Etag` header for the
distribution. Note: if you update the distribution again, you must get a
new `Etag` header.

2. Update the XML document that was returned in the response to your
`GetDistributionConfig` request to include your changes. When you edit the
XML file, be aware of the following:

   * You must strip out the `ETag` parameter that is returned.
   * Additional fields are required when you update a distribution. There
   may be fields included in the XML file for features that you haven't
   configured for your distribution. This is expected and required to
   successfully update the distribution.
   * You can't change the value of `CallerReference`. If you try to change
   this value, CloudFront returns an `IllegalUpdate` error.
   * The new configuration replaces the existing configuration; the values
   that you specify in an `UpdateDistribution` request are not merged into
   your existing configuration. When you add, delete, or replace values in
   an element that allows multiple values (for example, `CNAME`), you must
   specify all of the values that you want to appear in the updated
   distribution. In addition, you must update the corresponding `Quantity`
   element.

3. Submit an `UpdateDistribution` request to update the configuration for
your distribution:

   * In the request body, include the XML document that you updated in
   Step 2. The request body must include an XML document with a
   `DistributionConfig` element.
   * Set the value of the HTTP `If-Match` header to the value of the `ETag`
   header that CloudFront returned when you submitted the
   `GetDistributionConfig` request in Step 1.

4. Review the response to the `UpdateDistribution` request to confirm that
the configuration was successfully updated.

5. Optional: Submit a
[GetDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html)
request to confirm that your changes have propagated. When propagation is
complete, the value of `Status` is `Deployed`.
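
A minimal sketch of steps 1-3 using this module (the body shape and the
`"Comment"` field are illustrative; they depend on the decoded XML):

    {:ok, %{"DistributionConfig" => config, "ETag" => etag}, _resp} =
      get_distribution_config(client, id)

    updated = Map.put(config, "Comment", "updated comment")

    {:ok, _body, _resp} =
      update_distribution(client, id, %{"DistributionConfig" => updated, "IfMatch" => etag})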
"""
def update_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update a field-level encryption configuration.
"""
def update_field_level_encryption_config(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update a field-level encryption profile.
"""
def update_field_level_encryption_profile(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates a key group.
When you update a key group, all the fields are updated with the values
provided in the request. You cannot update some fields independent of
others. To update a key group:
1. Get the current key group with `GetKeyGroup` or `GetKeyGroupConfig`.
2. Locally modify the fields in the key group that you want to update. For
example, add or remove public key IDs.
3. Call `UpdateKeyGroup` with the entire key group object, including the
fields that you modified and those that you didn’t.
"""
def update_key_group(client, id, input, options \\ []) do
path_ = "/2020-05-31/key-group/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates an origin request policy configuration.
When you update an origin request policy configuration, all the fields are
updated with the values provided in the request. You cannot update some
fields independent of others. To update an origin request policy
configuration:
1. Use `GetOriginRequestPolicyConfig` to get the current configuration.
2. Locally modify the fields in the origin request policy configuration
that you want to update.
3. Call `UpdateOriginRequestPolicy` by providing the entire origin request
policy configuration, including the fields that you modified and those that
you didn’t.
"""
def update_origin_request_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update public key information. Note that the only value you can change is
the comment.
"""
def update_public_key(client, id, input, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates a real-time log configuration.
When you update a real-time log configuration, all the parameters are
updated with the values provided in the request. You cannot update some
parameters independent of others. To update a real-time log configuration:
1. Call `GetRealtimeLogConfig` to get the current real-time log
configuration.
2. Locally modify the parameters in the real-time log configuration that
you want to update.
3. Call this API (`UpdateRealtimeLogConfig`) by providing the entire
real-time log configuration, including the parameters that you modified and
those that you didn’t.

You cannot update a real-time log configuration’s `Name` or `ARN`.
"""
def update_realtime_log_config(client, input, options \\ []) do
path_ = "/2020-05-31/realtime-log-config/"
headers = []
query_ = []
request(client, :put, path_, query_, headers, input, options, nil)
end
@doc """
Update a streaming distribution.
"""
def update_streaming_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} when not is_nil(body) ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "cloudfront",
region: "us-east-1"}
host = build_host("cloudfront", client)
url = host
|> build_url(path, client)
|> add_query(query, client)
additional_headers = [{"Host", host}, {"Content-Type", "text/xml"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(client, method, url, payload, headers, options, success_status_code)
end
defp perform_request(client, method, url, payload, headers, options, success_status_code) do
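  # When no explicit success status code is given, any of 200/202/204 is
  # treated as success; otherwise only the exact expected status code is.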
case AWS.Client.request(client, method, url, payload, headers, options) do
{:ok, %{status_code: status_code, body: body} = response}
when is_nil(success_status_code) and status_code in [200, 202, 204]
when status_code == success_status_code ->
body = if(body != "", do: decode!(client, body))
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
"#{endpoint_prefix}.#{endpoint}"
end
defp build_url(host, path, %{proto: proto, port: port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, [], _client) do
url
end
defp add_query(url, query, client) do
querystring = encode!(client, query, :query)
"#{url}?#{querystring}"
end
defp encode!(client, payload, format \\ :xml) do
AWS.Client.encode!(client, payload, format)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :xml)
end
end
# file: lib/aws/generated/cloud_front.ex
defmodule Rampart.Controller do
@moduledoc ~S"""
The Controller module provides functions that are
to be used by controllers in your application. These
may not strictly be controllers, but they are for
the part of your application that will handle the
request, and trigger the authorisation.
"""
@typedoc """
A resource can be anything and is application specific,
but in most scenarios it will most likely be a module,
or a struct.
"""
@type resource :: any
alias Rampart.Authorize, as: AuthPlug
@doc false
defmacro __using__(_opts) do
quote location: :keep do
import Rampart.Controller
end
end
@doc """
Authorizes the supplied resource. This should be called
in the controller action, as Rampart will automatically
determine which policy to use, and what action was invoked.
If you want to specify a different policy action, see
`authorize!/3`
"""
@spec authorize!(Plug.Conn.t, resource) :: none()
defmacro authorize!(conn, resource) do
do_authorize(conn, resource, policy_action(__CALLER__))
end
@doc """
Authorizes the supplied resource. Unlike `authorize!/2`,
this function allows you to specify which policy action
should be used, rather than having it determined by
Rampart. This is useful if you have a number of
actions that all require the same permission.
For example, if you had a photo controller, which had
an `edit` and a `resize` action, both of these actions
are forms of editing. So your `resize` action may call
photo = Repo.get(MyApp.Photo, id)
authorize!(conn, photo, :edit?)
And your policy would not need a `resize/2` function
defined.
"""
@spec authorize!(Plug.Conn.t, resource, atom()) :: none()
defmacro authorize!(conn, resource, action) do
do_authorize(conn, resource, action)
end
# Hands off the authorisation logic to the
# main authorisation plug.
defp do_authorize(conn, resource, action) do
# plug Rampart.Authorize, resource: resource, action: action
quote do
opts =
[resource: unquote(resource), action: unquote(action)]
|> AuthPlug.init()
AuthPlug.call(unquote(conn), opts)
end
end
# Given the caller, derives the policy action name from the
# controller function that invoked authorize!, e.g. a call
# inside `show/2` resolves to the `:show?` policy action.
defp policy_action(caller) do
{ func, _arity } = caller.function
:"#{func}?"
end
end
# file: lib/rampart/controller.ex
defmodule AwsCredentials do
@moduledoc """
GenServer that catches the fetched credendials and re-fetches them when they expire
Usage:
```
# Start the application with env provider
{:ok, _pid} = AwsCredentials.start_link(provider: AwsCredentials.Providers.Environment)
# Or start the application with EC2 provider
{:ok, _pid} = AwsCredentials.start_link(provider: AwsCredentials.Providers.Environment, expiration_threshold: 600)
# Fetch credentials whenever they are required by the application
credentials = AwsCredentials.fetch()
"""
use GenServer
require Logger
alias AwsCredentials.Credentials
@default_provider AwsCredentials.Providers.Environment
@default_expiration_threshold_sec 600
def start_link(opts \\ []) do
provider = Keyword.get(opts, :provider, @default_provider)
expiration_threshold =
Keyword.get(opts, :expiration_threshold, @default_expiration_threshold_sec)
GenServer.start_link(
__MODULE__,
[provider: provider, expiration_threshold: expiration_threshold],
name: __MODULE__
)
end
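  # `use GenServer` provides a default child_spec/1, so this server can also
  # be placed directly under a supervisor, e.g.
  #   children = [{AwsCredentials, provider: AwsCredentials.Providers.Environment}]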
@impl true
def init(opts) do
provider = Keyword.get(opts, :provider)
expiration_threshold = Keyword.get(opts, :expiration_threshold)
case fetch_new(provider) do
{:ok, credentials} ->
state = %{
provider: provider,
credentials: credentials,
expiration_threshold: expiration_threshold
}
{:ok, state}
error ->
log_error(error)
{:stop, :shutdown}
end
end
def fetch() do
  case GenServer.call(__MODULE__, :fetch) do
    {:ok, credentials} ->
      {:ok, credentials}
    error ->
      log_error(error)
      # Stop the server, but still surface the error to the caller
      GenServer.stop(__MODULE__, :shutdown)
      error
  end
end
defp fetch_new(provider) do
provider.fetch()
end
@impl true
def handle_call(:fetch, _, %{provider: provider, credentials: credentials} = state) do
if expired?(credentials, state) do
case fetch_new(provider) do
{:ok, new_credentials} ->
new_state = Map.merge(state, %{credentials: new_credentials})
{:reply, {:ok, new_credentials}, new_state}
error ->
{:reply, error, state}
end
else
{:reply, {:ok, credentials}, state}
end
end
defp log_error({:error, reason}) do
Logger.error("Failed to fetch AWS credentials", reason: reason)
end
defp log_error({:error, status_code, reason}) do
Logger.error("Failed to fetch AWS credentials", status_code: status_code, reason: reason)
end
defp expired?(%Credentials{Expiration: exp}, %{expiration_threshold: expiration_threshold}) do
cond do
is_nil(exp) ->
false
DateTime.diff(exp, DateTime.utc_now(), :second) <= expiration_threshold ->
true
true ->
false
end
end
end
# file: lib/aws_credentials.ex
defmodule Chaperon.Action do
@moduledoc """
Helper functions to be used with `Chaperon.Actionable`.
"""
@doc """
Retries `action` within `session` by calling `Chaperon.Actionable.abort/2`
followed by `Chaperon.Actionable.run/2`.
"""
def retry(action, session) do
with {:ok, action, session} <- Chaperon.Actionable.abort(action, session) do
Chaperon.Actionable.run(action, session)
end
end
@doc """
Returns a `Chaperon.Action.Error` for the given arguments.
"""
def error(action, session, reason) do
%Chaperon.Action.Error{
reason: reason,
action: action,
session: session
}
end
@doc """
Every `Chaperon.Actionable` can expose a `callback` field.
`callback` can be either:
- a callback function or atom naming the callback function inside the session's current scenario module:
`atom | ((Chaperon.Session.t, any | {:error, any}) -> any)`
- a map containing callback and error functions:
```
%{
ok: atom | ((Chaperon.Session.t, any) -> any),
error: atom | ((Chaperon.Session.t, any) -> any)
}
```
When defining just a single callback function, it will be called in both
the success and error cases (in the error case, the second argument is
`{:error, reason}`).
To handle each case individually, you can just use pattern matching:
session
|> post("/greet", json: [hello: "world!"], with_result: fn
(session, {:error, reason}) ->
# handle error case here
session
|> log_error("Failed to greet")
(session, %HTTPoison.Response{body: response}) ->
# do something with successful response here
session
|> log_info("Greeted successfully!")
end)
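Alternatively, to handle each case with a separate function, you can pass
the map form (a sketch of the same request as above):
    session
    |> post("/greet", json: [hello: "world!"], with_result: %{
      ok: fn session, _response -> log_info(session, "Greeted successfully!") end,
      error: fn session, _reason -> log_error(session, "Failed to greet") end
    })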
"""
def callback(%{callback: %{ok: cb}}), do: cb
def callback(%{callback: cb}), do: cb
def error_callback(%{callback: %{error: cb}}), do: cb
def error_callback(%{callback: cb}),
do: fn session, resp ->
session
|> Chaperon.Session.call_callback(cb, {:error, resp})
end
def error_callback(_), do: nil
end
# file: lib/chaperon/action.ex
defmodule P1.Parser do
use Combine
import Combine.Parsers.Base
import Combine.Parsers.Text
alias P1.Channel, as: Channel
alias P1.Tags, as: Tags
@moduledoc """
Understands the P1 format of Smartmeters and translates them to elixir types
As the specification says that all lines with OBIS codes are optional and the order in which they appear is free,
this parser works with the choice parser from the combine library.
This means all parsers that can parse a line with an OBIS code will be consulted,
and hopefully only one will return a valid result.
"""
# credo:disable-for-this-file Credo.Check.Refactor.PipeChainStart
# credo:disable-for-this-file Credo.Check.Refactor.CyclomaticComplexity
@doc false
def parse(line) do
if String.trim(line) == "" do
{:ok, []}
else
case Combine.parse(line, line_parser(nil)) do
{:error, reason} -> {:error, reason}
result -> {:ok, result}
end
end
end
@doc false
def parse!(line) do
case parse(line) do
{:error, reason} -> raise reason
{:ok, result} -> result
end
end
@doc false
def parse_telegram(telegram) do
case Combine.parse(telegram, telegram_parser(nil)) do
{:error, reason} -> {:error, reason}
result -> {:ok, result}
end
end
@doc false
def parse_telegram!(telegram) do
case parse_telegram(telegram) do
{:error, reason} -> raise reason
{:ok, result} -> result
end
end
# Parsers
defp telegram_parser(previous) do
previous
|> pipe([word_of(~r/[^!]*/), char("!")], &Enum.join(&1))
|> hex(4)
end
defp line_parser(previous) do
previous
|> choice([header_parser(), obis_parser()])
end
defp obis_parser(previous \\ nil) do
previous
|> medium_channel_parser()
|> ignore(char(":"))
|> measurement_type_parser()
|> values_parser()
|> ignore(option(newline()))
end
defp medium_channel_parser(previous) do
previous
|> pipe([integer(), char("-"), integer()], fn [t, _, c] -> Channel.construct(t, c) end)
end
defp measurement_type_parser(previous) do
previous
|> pipe([integer(), ignore(char(".")), integer(), ignore(char(".")), integer()], &to_tags(&1))
end
defp values_parser(previous) do
previous
|> many1(parens(value_parser()))
end
defp value_parser(previous \\ nil) do
previous
|> choice([
timestamp_parser(),
integer_with_unit_parser(),
float_with_unit_parser(),
word_of(~r/[\w\*\:\-\.]*/)
])
end
defp float_with_unit_parser(previous \\ nil) do
previous |> pipe([float(), ignore(char("*")), unit_parser()], &to_value(&1))
end
defp integer_with_unit_parser(previous \\ nil) do
previous |> pipe([integer(), ignore(char("*")), unit_parser()], &to_value(&1))
end
defp unit_parser(previous \\ nil) do
previous |> word_of(~r/s|m3|V|A|kWh|kW/)
end
# defp hexadecimal_parser(previous \\ nil) do
# previous |> map(word_of(~r/[0-9a-f]/i), fn txt -> Hexate.decode(txt) end)
# end
defp header_parser(previous \\ nil) do
previous |> pipe([ignore(char("/")), word_of(~r/\w{3}/), ignore(char("5")), word_of(~r/.+/)],
fn [m, n] -> %P1.Header{manufacturer: m, model: n} end)
end
# Helper functions
defp timestamp_parser(previous \\ nil) do
previous |> map(word_of(~r/\d+[SW]/), &(timestamp_to_utc(&1)))
end
defp timestamp_to_utc(text) do
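# e.g. timestamp_to_utc("101209113020W") #=> "2010-12-09T11:30:20+01:00"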
# As this format is only used in the Netherlands, we can map the DST
# marker directly to a fixed UTC offset
tz_offset = case String.last(text) do
  "S" -> "+02:00"
  "W" -> "+01:00"
end
# Drop the trailing S/W marker, then split into [YY, MM, DD] and [hh, mm, ss]
[date | time] = text |> String.slice(0..(String.length(text) - 2)) |> String.codepoints()
|> Enum.chunk_every(2) |> Enum.map(&Enum.join/1) |> Enum.chunk_every(3)
"20#{Enum.join(date, "-")}T#{Enum.join(hd(time), ":")}#{tz_offset}"
end
defp hex(size) when is_integer(size), do: word_of(~r/[0-9a-f]{#{size}}/i)
defp hex(previous, size), do: previous |> word_of(~r/[0-9a-f]{#{size}}/i)
defp parens(parser), do: between(char("("), parser, char(")"))
defp to_value([value, unit]), do: %P1.Value{value: value, unit: unit}
defp to_tags([0, 2, 8]), do: %Tags{tags: [general: :version]}
defp to_tags([1, 0, 0]), do: %Tags{tags: [general: :timestamp]}
defp to_tags([96, 1, 1]), do: %Tags{tags: [general: :equipment_identifier]}
defp to_tags([96, 14, 0]), do: %Tags{tags: [general: :tariff_indicator]}
defp to_tags([1, 8, 1]), do: %Tags{tags: [energy: :total, direction: :consume, tariff: :low]}
defp to_tags([1, 8, 2]), do: %Tags{tags: [energy: :total, direction: :consume, tariff: :normal]}
defp to_tags([2, 8, 1]), do: %Tags{tags: [energy: :total, direction: :produce, tariff: :low]}
defp to_tags([2, 8, 2]), do: %Tags{tags: [energy: :total, direction: :produce, tariff: :normal]}
defp to_tags([1, 7, 0]), do: %Tags{tags: [power: :active, phase: :all, direction: :consume]}
defp to_tags([2, 7, 0]), do: %Tags{tags: [power: :active, phase: :all, direction: :produce]}
defp to_tags([21, 7, 0]), do: %Tags{tags: [power: :active, phase: :l1, direction: :consume]}
defp to_tags([41, 7, 0]), do: %Tags{tags: [power: :active, phase: :l2, direction: :consume]}
defp to_tags([61, 7, 0]), do: %Tags{tags: [power: :active, phase: :l3, direction: :consume]}
defp to_tags([22, 7, 0]), do: %Tags{tags: [power: :active, phase: :l1, direction: :produce]}
defp to_tags([42, 7, 0]), do: %Tags{tags: [power: :active, phase: :l2, direction: :produce]}
defp to_tags([62, 7, 0]), do: %Tags{tags: [power: :active, phase: :l3, direction: :produce]}
defp to_tags([31, 7, 0]), do: %Tags{tags: [amperage: :active, phase: :l1]}
defp to_tags([51, 7, 0]), do: %Tags{tags: [amperage: :active, phase: :l2]}
defp to_tags([71, 7, 0]), do: %Tags{tags: [amperage: :active, phase: :l3]}
defp to_tags([32, 7, 0]), do: %Tags{tags: [voltage: :active, phase: :l1]}
defp to_tags([52, 7, 0]), do: %Tags{tags: [voltage: :active, phase: :l2]}
defp to_tags([72, 7, 0]), do: %Tags{tags: [voltage: :active, phase: :l3]}
defp to_tags([96, 7, 9]), do: %Tags{tags: [power_failures: :long]}
defp to_tags([96, 7, 21]), do: %Tags{tags: [power_failures: :short]}
defp to_tags([99, 97, 0]), do: %Tags{tags: [power_failures: :event_log]}
defp to_tags([32, 32, 0]), do: %Tags{tags: [voltage: :sags, phase: :l1]}
defp to_tags([52, 32, 0]), do: %Tags{tags: [voltage: :sags, phase: :l2]}
defp to_tags([72, 32, 0]), do: %Tags{tags: [voltage: :sags, phase: :l3]}
defp to_tags([32, 36, 0]), do: %Tags{tags: [voltage: :swells, phase: :l1]}
defp to_tags([52, 36, 0]), do: %Tags{tags: [voltage: :swells, phase: :l2]}
defp to_tags([72, 36, 0]), do: %Tags{tags: [voltage: :swells, phase: :l3]}
defp to_tags([96, 13, 0]), do: %Tags{tags: [message: :text]}
defp to_tags([96, 13, 1]), do: %Tags{tags: [message: :code]}
defp to_tags([24, 1, 0]), do: %Tags{tags: [mbus: :device_type]}
defp to_tags([96, 1, 0]), do: %Tags{tags: [mbus: :equipment_identifier]}
defp to_tags([24, 2, 1]), do: %Tags{tags: [mbus: :measurement]}
end
# file: lib/p1/parser.ex
defmodule NimblePool do
@external_resource "README.md"
@moduledoc "README.md"
|> File.read!()
|> String.split("<!-- MDOC !-->")
|> Enum.fetch!(1)
use GenServer
require Logger
@type from :: {pid, reference}
@type init_arg :: term
@type pool_state :: term
@type worker_state :: term
@type client_state :: term
@type user_reason :: term
@doc """
Initializes the worker.
It receives the worker argument passed to `start_link/1`. It must
return `{:ok, worker_state}` or `{:async, fun}`, where the `fun`
is a zero-arity function that must return the worker state.
Note this callback is synchronous and therefore will block the pool.
If you need to perform long initialization, consider using the
`{:async, fun}` return type.
"""
@callback init_worker(pool_state) ::
{:ok, worker_state, pool_state} | {:async, (() -> worker_state), pool_state}
@doc """
Initializes the pool.
It receives the worker argument passed to `start_link/1` and must
return `{:ok, pool_state}` upon successful initialization,
`:ignore` to exit normally, or `{:stop, reason}` to exit with `reason`
and return `{:error, reason}`.
This is a good place to perform a registration for example.
It must return the `pool_state`. The `pool_state` is given to
`init_worker`. By default, it simply returns the arguments given.
This callback is optional.
"""
@callback init_pool(init_arg) :: {:ok, pool_state} | :ignore | {:stop, reason :: any()}
@doc """
Checks a worker out.
It receives `maybe_wrapped_command`. The `command` is given to the `checkout!/4`
call and may optionally be wrapped by `c:handle_enqueue/2`. It must return either
`{:ok, client_state, worker_state}`, `{:remove, reason, pool_state}`, or
`{:skip, Exception.t(), pool_state}`.
If `:remove` is returned, `NimblePool` will attempt to checkout another
worker.
If `:skip` is returned, `NimblePool` will skip the checkout, the client will
raise the returned exception, and the worker will be left ready for the next
checkout attempt.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here, instead do as much work as
possible on the client.
Once the connection is checked out, the worker won't receive any
messages targeted to `c:handle_info/2`.
"""
@callback handle_checkout(maybe_wrapped_command :: term, from, worker_state, pool_state) ::
{:ok, client_state, worker_state, pool_state}
| {:remove, user_reason, pool_state}
| {:skip, Exception.t(), pool_state}
@doc """
Checks a worker in.
It receives the `client_state`, returned by the `checkout!/4`
anonymous function and it must return either `{:ok, worker_state}`
or `{:remove, reason, pool_state}`.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here, instead do as much work as
possible on the client.
Once the connection is checked in, it may immediately be handed
to another client, without traversing any of the messages in the
pool inbox.
This callback is optional.
"""
@callback handle_checkin(client_state, from, worker_state, pool_state) ::
{:ok, worker_state, pool_state} | {:remove, user_reason, pool_state}
@doc """
Receives a message in the worker.
It receives the `message` and it must return either
`{:ok, worker_state}` or `{:remove, reason}`.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here.
This callback is optional.
"""
@callback handle_info(message :: term, worker_state) ::
{:ok, worker_state} | {:remove, user_reason}
@doc """
Executed by the pool, whenever a request to checkout a worker is enqueued.
The `command` argument should be treated as an opaque value, but it can be
wrapped with some data to be used in `c:handle_checkout/4`.
It must return either `{:ok, maybe_wrapped_command, pool_state}` or
`{:skip, Exception.t(), pool_state}` if checkout is to be skipped.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here.
This callback is optional.
"""
@callback handle_enqueue(command :: term, pool_state) ::
{:ok, maybe_wrapped_command :: term, pool_state}
| {:skip, Exception.t(), pool_state}
@doc """
Terminates a worker.
This callback is invoked with `:DOWN` whenever the client
link breaks, with `:timeout` whenever the client times out,
with one of `:throw`, `:error`, `:exit` whenever the client
crashes with one of the reasons above.
If at any point you return `{:remove, reason}`, the `reason`
will also be given to `terminate`. If any callback raises,
the raised exception will be given as `reason`.
It receives the latest known `worker_state`, which may not
be the latest state. For example, if a client checks out the
state and crashes, we don't fully know the `client_state`,
so the `terminate` callback needs to take such scenarios
into account.
This callback is optional.
"""
@callback terminate_worker(
:DOWN | :timeout | :throw | :error | :exit | user_reason,
worker_state,
pool_state
) ::
{:ok, pool_state}
@optional_callbacks init_pool: 1,
handle_checkin: 4,
handle_info: 2,
handle_enqueue: 2,
terminate_worker: 3
@doc """
Defines a pool to be started under the supervision tree.
It accepts the same options as `start_link/1` with the
addition of `:restart` and `:shutdown`, which control the
"Child Specification".
"""
def child_spec(opts)
def child_spec(opts) do
{worker, _} = Keyword.fetch!(opts, :worker)
{restart, opts} = Keyword.pop(opts, :restart, :permanent)
{shutdown, opts} = Keyword.pop(opts, :shutdown, 5_000)
%{
id: worker,
start: {__MODULE__, :start_link, [opts]},
shutdown: shutdown,
restart: restart
}
end
@doc """
Starts a pool.
## Options
* `:worker` - a `{worker_mod, worker_init_arg}` tuple with the worker
module that implements the `NimblePool` behaviour and the worker
initial argument. This argument is required.
* `:pool_size` - how many workers in the pool. Defaults to 10.
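
For example (`MyPoolWorker` is a placeholder worker module):

    NimblePool.start_link(worker: {MyPoolWorker, :some_arg}, pool_size: 5)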
"""
def start_link(opts) do
{{worker, arg}, opts} = Keyword.pop(opts, :worker)
{pool_size, opts} = Keyword.pop(opts, :pool_size, 10)
unless is_atom(worker) do
raise ArgumentError, "worker must be an atom, got: #{inspect(worker)}"
end
unless pool_size > 0 do
raise ArgumentError, "pool_size must be more than 0, got: #{inspect(pool_size)}"
end
GenServer.start_link(__MODULE__, {worker, arg, pool_size}, opts)
end
@doc """
Stops a pool.
"""
def stop(pool, reason \\ :normal, timeout \\ :infinity) do
GenServer.stop(pool, reason, timeout)
end
@doc """
Checks out from the pool.
It expects a command, which will be passed to the `c:handle_checkout/4`
callback. The `c:handle_checkout/4` callback will return a client state,
which is given to the `function`.
The `function` receives two arguments, the pool reference and must return
a two-element tuple, where the first element is the function return value,
and the second element is the updated `client_state`, which will be given
as the first argument to `c:handle_checkin/4`.
`checkout!` also accepts an optional `timeout` value, which applies to the
checkout operation itself; the check-in happens asynchronously.
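
A minimal usage sketch (the `:checkout` command and `do_something/1` are
placeholders for whatever your worker's `c:handle_checkout/4` expects):

    NimblePool.checkout!(pool, :checkout, fn _from, client_state ->
      {do_something(client_state), client_state}
    end)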
"""
def checkout!(pool, command, function, timeout \\ 5_000) when is_function(function, 2) do
# Reimplementation of gen.erl call to avoid multiple monitors.
pid = GenServer.whereis(pool)
unless pid do
exit!(:noproc, :checkout, [pool])
end
ref = Process.monitor(pid)
send_call(pid, ref, {:checkout, command, deadline(timeout)})
receive do
{^ref, {:skipped, exception}} ->
raise exception
{^ref, client_state} ->
Process.demonitor(ref, [:flush])
try do
function.({pid, ref}, client_state)
catch
kind, reason ->
send(pid, {__MODULE__, :cancel, ref, kind})
:erlang.raise(kind, reason, __STACKTRACE__)
else
{result, client_state} ->
send(pid, {__MODULE__, :checkin, ref, client_state})
result
end
{:DOWN, ^ref, _, _, :noconnection} ->
exit!({:nodedown, get_node(pid)}, :checkout, [pool])
{:DOWN, ^ref, _, _, reason} ->
exit!(reason, :checkout, [pool])
after
timeout ->
Process.demonitor(ref, [:flush])
exit!(:timeout, :checkout, [pool])
end
end
@doc """
Sends an `update` instruction to the pool about the checked out worker.
This must be called inside the `checkout!` callback with
the `from` value given to `checkout`.
This is useful to update the pool state before effectively
checking the state in, which is handy when transferring
resources that requires two steps.
"""
def update({pid, ref}, command) do
send(pid, {__MODULE__, :update, ref, command})
end
defp deadline(timeout) when is_integer(timeout) do
System.monotonic_time() + System.convert_time_unit(timeout, :millisecond, :native)
end
defp deadline(:infinity), do: :infinity
defp get_node({_, node}), do: node
defp get_node(pid) when is_pid(pid), do: node(pid)
defp send_call(pid, ref, message) do
# Auto-connect is asynchronous. But we still use :noconnect to make sure
# we send on the monitored connection, and not trigger a new auto-connect.
Process.send(pid, {:"$gen_call", {self(), ref}, message}, [:noconnect])
end
defp exit!(reason, fun, args) do
exit({reason, {__MODULE__, fun, args}})
end
## Callbacks
@impl true
def init({worker, arg, pool_size}) do
Process.flag(:trap_exit, true)
_ = Code.ensure_loaded(worker)
with {:ok, pool_state} <- do_init_pool(worker, arg) do
{pool_state, resources, async} =
Enum.reduce(1..pool_size, {pool_state, :queue.new(), %{}}, fn
_, {pool_state, resources, async} ->
init_worker(worker, pool_state, resources, async)
end)
state = %{
worker: worker,
queue: :queue.new(),
requests: %{},
monitors: %{},
resources: resources,
async: async,
state: pool_state
}
{:ok, state}
end
end
@impl true
def handle_call({:checkout, command, deadline}, {pid, ref} = from, state) do
%{requests: requests, monitors: monitors, worker: worker, state: pool_state} = state
mon_ref = Process.monitor(pid)
requests = Map.put(requests, ref, {pid, mon_ref, :command, command, deadline})
monitors = Map.put(monitors, mon_ref, ref)
state = %{state | requests: requests, monitors: monitors}
case handle_enqueue(worker, command, pool_state) do
{:ok, command, pool_state} ->
{:noreply, maybe_checkout(command, mon_ref, deadline, from, %{state | state: pool_state})}
{:skip, exception, pool_state} ->
state = remove_request(%{state | state: pool_state}, ref, mon_ref)
{:reply, {:skipped, exception}, state}
end
end
@impl true
def handle_info({__MODULE__, :update, ref, command}, state) do
%{requests: requests, state: pool_state, worker: worker} = state
case requests do
%{^ref => {pid, mon_ref, :state, worker_state}} ->
{:ok, worker_state, pool_state} = worker.handle_update(command, worker_state, pool_state)
requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_state})
{:noreply, %{state | requests: requests, state: pool_state}}
%{} ->
exit(:unexpected_precheckin)
end
end
@impl true
def handle_info({__MODULE__, :checkin, ref, worker_client_state}, state) do
%{requests: requests, resources: resources, worker: worker, state: pool_state} = state
case requests do
%{^ref => {pid, mon_ref, :state, worker_server_state}} ->
checkin =
if function_exported?(worker, :handle_checkin, 4) do
args = [worker_client_state, {pid, ref}, worker_server_state, pool_state]
apply_worker_callback(pool_state, worker, :handle_checkin, args)
else
{:ok, worker_server_state, pool_state}
end
{resources, state} =
case checkin do
{:ok, worker_server_state, pool_state} ->
{:queue.in(worker_server_state, resources), %{state | state: pool_state}}
{:remove, reason, pool_state} ->
{resources,
remove_worker(reason, worker_server_state, %{state | state: pool_state})}
end
state = remove_request(state, ref, mon_ref)
{:noreply, maybe_checkout(%{state | resources: resources})}
%{} ->
exit(:unexpected_checkin)
end
end
@impl true
def handle_info({__MODULE__, :cancel, ref, reason}, state) do
cancel_request_ref(ref, reason, state)
end
@impl true
def handle_info({__MODULE__, :init_worker}, state) do
%{async: async, resources: resources, worker: worker, state: pool_state} = state
{pool_state, resources, async} = init_worker(worker, pool_state, resources, async)
{:noreply, maybe_checkout(%{state | async: async, resources: resources, state: pool_state})}
end
@impl true
def handle_info({:DOWN, ref, _, _, _} = down, state) do
%{monitors: monitors, async: async} = state
case monitors do
%{^ref => request_ref} ->
cancel_request_ref(request_ref, :DOWN, state)
%{} ->
case async do
%{^ref => _} -> remove_async_ref(ref, state)
%{} -> maybe_handle_info(down, state)
end
end
end
@impl true
def handle_info({:EXIT, pid, _reason} = exit, state) do
%{async: async} = state
case async do
%{^pid => _} -> {:noreply, %{state | async: Map.delete(async, pid)}}
%{} -> maybe_handle_info(exit, state)
end
end
@impl true
def handle_info({ref, worker_state} = reply, state) when is_reference(ref) do
%{async: async, resources: resources} = state
case async do
%{^ref => _} ->
Process.demonitor(ref, [:flush])
resources = :queue.in(worker_state, resources)
async = Map.delete(async, ref)
state = %{state | async: async, resources: resources}
{:noreply, maybe_checkout(state)}
%{} ->
maybe_handle_info(reply, state)
end
end
@impl true
def handle_info(msg, state) do
maybe_handle_info(msg, state)
end
@impl true
def terminate(reason, %{resources: resources} = state) do
for worker_server_state <- :queue.to_list(resources) do
maybe_terminate_worker(reason, worker_server_state, state)
end
:ok
end
defp do_init_pool(worker, arg) do
if function_exported?(worker, :init_pool, 1) do
worker.init_pool(arg)
else
{:ok, arg}
end
end
defp remove_async_ref(ref, state) do
%{async: async, resources: resources, worker: worker, state: pool_state} = state
{pool_state, resources, async} =
init_worker(worker, pool_state, resources, Map.delete(async, ref))
{:noreply, %{state | resources: resources, async: async, state: pool_state}}
end
defp cancel_request_ref(ref, reason, %{requests: requests} = state) do
case requests do
# Exited or timed out before we could serve it
%{^ref => {_, mon_ref, :command, _command, _deadline}} ->
{:noreply, remove_request(state, ref, mon_ref)}
# Exited or errored during client processing
%{^ref => {_, mon_ref, :state, worker_server_state}} ->
state = remove_request(state, ref, mon_ref)
{:noreply, remove_worker(reason, worker_server_state, state)}
%{} ->
exit(:unexpected_remove)
end
end
defp maybe_handle_info(msg, state) do
%{resources: resources, worker: worker} = state
if function_exported?(worker, :handle_info, 2) do
{resources, state} =
Enum.reduce(:queue.to_list(resources), {:queue.new(), state}, fn
worker_server_state, {resources, state} ->
case apply_worker_callback(worker, :handle_info, [msg, worker_server_state]) do
{:ok, worker_server_state} ->
{:queue.in(worker_server_state, resources), state}
{:remove, reason} ->
{resources, remove_worker(reason, worker_server_state, state)}
end
end)
{:noreply, %{state | resources: resources}}
else
{:noreply, state}
end
end
defp maybe_checkout(%{queue: queue, requests: requests} = state) do
case :queue.out(queue) do
{{:value, {pid, ref}}, queue} ->
case requests do
# The request still exists, so we are good to go
%{^ref => {^pid, mon_ref, :command, command, deadline}} ->
maybe_checkout(command, mon_ref, deadline, {pid, ref}, %{state | queue: queue})
# It should never happen
%{^ref => _} ->
exit(:unexpected_checkout)
# The request is no longer active, do nothing
%{} ->
maybe_checkout(%{state | queue: queue})
end
{:empty, _queue} ->
state
end
end
defp maybe_checkout(command, mon_ref, deadline, {pid, ref} = from, state) do
%{resources: resources, requests: requests, worker: worker, queue: queue, state: pool_state} =
state
if past_deadline?(deadline) do
state = remove_request(state, ref, mon_ref)
maybe_checkout(state)
else
case :queue.out(resources) do
{{:value, worker_server_state}, resources} ->
args = [command, from, worker_server_state, pool_state]
case apply_worker_callback(pool_state, worker, :handle_checkout, args) do
{:ok, worker_client_state, worker_server_state, pool_state} ->
GenServer.reply({pid, ref}, worker_client_state)
requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_server_state})
%{state | resources: resources, requests: requests, state: pool_state}
{:remove, reason, pool_state} ->
state = remove_worker(reason, worker_server_state, %{state | state: pool_state})
maybe_checkout(command, mon_ref, deadline, from, %{state | resources: resources})
{:skip, exception, pool_state} ->
GenServer.reply({pid, ref}, {:skipped, exception})
remove_request(%{state | state: pool_state}, ref, mon_ref)
other ->
raise """
unexpected return from #{inspect(worker)}.handle_checkout/4.
Expected: {:ok, client_state, server_state, pool_state} | {:remove, reason, pool_state} | {:skip, Exception.t(), pool_state}
Got: #{inspect(other)}
"""
end
{:empty, _} ->
%{state | queue: :queue.in(from, queue)}
end
end
end
defp past_deadline?(deadline) when is_integer(deadline) do
System.monotonic_time() >= deadline
end
defp past_deadline?(:infinity), do: false
defp remove_worker(reason, worker_server_state, state) do
state = maybe_terminate_worker(reason, worker_server_state, state)
schedule_init()
state
end
defp maybe_terminate_worker(reason, worker_server_state, state) do
%{worker: worker, state: pool_state} = state
if function_exported?(worker, :terminate_worker, 3) do
args = [reason, worker_server_state, pool_state]
case apply_worker_callback(worker, :terminate_worker, args) do
{:ok, pool_state} ->
%{state | state: pool_state}
{:remove, _reason} ->
state
other ->
raise """
unexpected return from #{inspect(worker)}.terminate_worker/3.
Expected:
{:ok, pool_state}
Got: #{inspect(other)}
"""
end
else
state
end
end
defp init_worker(worker, pool_state, resources, async) do
case apply_worker_callback(worker, :init_worker, [pool_state]) do
{:ok, worker_state, pool_state} ->
{pool_state, :queue.in(worker_state, resources), async}
{:async, fun, pool_state} when is_function(fun, 0) ->
%{ref: ref, pid: pid} = Task.Supervisor.async(NimblePool.TaskSupervisor, fun)
{pool_state, resources, async |> Map.put(ref, pid) |> Map.put(pid, ref)}
{:remove, _reason} ->
send(self(), {__MODULE__, :init_worker})
{pool_state, resources, async}
other ->
raise """
unexpected return from #{inspect(worker)}.init_worker/1.
Expected:
{:ok, worker_state, pool_state}
| {:async, (() -> worker_state), pool_state}
Got: #{inspect(other)}
"""
end
end
defp schedule_init() do
send(self(), {__MODULE__, :init_worker})
end
defp apply_worker_callback(worker, fun, args) do
do_apply_worker_callback(worker, fun, args, &{:remove, &1})
end
defp apply_worker_callback(pool_state, worker, fun, args) do
do_apply_worker_callback(worker, fun, args, &{:remove, &1, pool_state})
end
defp do_apply_worker_callback(worker, fun, args, catch_fun) do
try do
apply(worker, fun, args)
catch
kind, reason ->
reason = Exception.normalize(kind, reason, __STACKTRACE__)
Logger.error(
[
"Error during #{inspect(worker)}.#{fun}/#{length(args)} callback:\n"
| Exception.format(kind, reason, __STACKTRACE__)
],
crash_reason: {crash_reason(kind, reason), __STACKTRACE__}
)
catch_fun.(reason)
end
end
defp crash_reason(:throw, value), do: {:nocatch, value}
defp crash_reason(_, value), do: value
defp remove_request(pool_state, ref, mon_ref) do
requests = Map.delete(pool_state.requests, ref)
monitors = Map.delete(pool_state.monitors, mon_ref)
Process.demonitor(mon_ref, [:flush])
%{pool_state | requests: requests, monitors: monitors}
end
defp handle_enqueue(worker, command, pool_state) do
if function_exported?(worker, :handle_enqueue, 2) do
worker.handle_enqueue(command, pool_state)
else
{:ok, command, pool_state}
end
end
end
# file: lib/nimble_pool.ex
defmodule ResxJSON.Partial do
@moduledoc """
Functions that can be used to build partials for a partial stream that will
be processed by `ResxJSON.Encoder`.
"""
defstruct [literal: "", separator: "", element: true, prefix: "", suffix: "", end: false]
@doc """
Create part of a JSON string value.
A stream containing the following list of partials will result in the
string `"\"abcd\""`.
[ResxJSON.Partial.value("a"), ResxJSON.Partial.value(["b", "c"]), ResxJSON.Partial.value("d", :end)] #=> "\"abcd\""
"""
def value(data), do: %__MODULE__{ literal: to_string(data), prefix: "\"", suffix: "\"" }
def value(data, :end), do: %__MODULE__{ literal: to_string(data), separator: ",", prefix: "\"", suffix: "\"", end: true }
@doc """
Create part of a JSON object key.
A stream containing the following list of partials will result in the
key `"\"abcd\":"`.
[ResxJSON.Partial.key("a"), ResxJSON.Partial.key(["b", "c"]), ResxJSON.Partial.key("d", :end)] #=> "\"abcd\":"
This should be used inside an object (`ResxJSON.Partial.object/0`) and should
be followed by a value (literal or partial) that will become the value for
that key.
"""
def key(data), do: %__MODULE__{ literal: to_string(data), prefix: "\"", suffix: "\":" }
def key(data, :end), do: %__MODULE__{ literal: to_string(data), prefix: "\"", suffix: "\":", end: true }
@doc """
Create part of a JSON array.
A stream containing the following list of partials will result in the
array `"[]"`.
[ResxJSON.Partial.array(), ResxJSON.Partial.array(:end)] #=> "[]"
Any elements between the two array functions will be put inside the resulting
array.
"""
def array(), do: %__MODULE__{ literal: "[", end: true }
def array(:end), do: %__MODULE__{ literal: "]", separator: ",", element: false, end: true }
@doc """
Create part of a JSON object.
A stream containing the following list of partials will result in the
object `"{}"`.
[ResxJSON.Partial.object(), ResxJSON.Partial.object(:end)] #=> "{}"
Any key/value pairs between the two object functions will be put inside the
resulting object. Keys should be referenced with by `ResxJSON.Partial.key/1`,
while values may be partials or literals.
"""
def object(), do: %__MODULE__{ literal: "{", end: true }
def object(:end), do: %__MODULE__{ literal: "}", separator: ",", element: false, end: true }
end
# file: lib/resx_json/partial.ex
defmodule Ash.Filter do
@moduledoc """
The representation of a filter in Ash.
Ash filters are stored as nested `%Ash.Filter.Expression{}` and `%Ash.Filter.Not{}` structs,
terminating in a `%Ash.Filter.Predicate{}` struct. An expression is simply a boolean operator
and the left and right hand side of that operator.
## Filter Templates
Filter templates are simplified filter statements (they only support atom keys) that have substitutions in them.
Currently, the substitutions are `{:_actor, :field}` and `{:_actor, :_primary_key}`.
You can pass a filter template to `build_filter_from_template/2` with an actor, and it will return the filter with the substitutions filled in.
Additionally, you can ask if the filter template contains an actor reference via `template_references_actor?/1`.
"""
alias Ash.Actions.SideLoad
alias Ash.Engine.Request
alias Ash.Error.Query.{
AggregatesNotSupported,
InvalidFilterValue,
NoSuchAttributeOrRelationship,
NoSuchFilterPredicate,
ReadActionRequired
}
alias Ash.Filter.Predicate.{Eq, GreaterThan, In, IsNil, LessThan}
alias Ash.Filter.{Expression, Not, Predicate}
alias Ash.Query.Aggregate
@built_in_predicates [
eq: Eq,
equals: Eq,
in: In,
lt: LessThan,
gt: GreaterThan,
less_than: LessThan,
greater_than: GreaterThan,
is_nil: IsNil
]
@string_builtin_predicates Enum.into(@built_in_predicates, %{}, fn {key, value} ->
{to_string(key), value}
end)
defstruct [:resource, :expression]
@type t :: %__MODULE__{}
defmodule Simple do
@moduledoc "Represents a simplified filter, with a simple list of predicates"
defstruct [:resource, :predicates]
defmodule Not do
@moduledoc "A negated predicate"
defstruct [:predicate]
end
end
def parse!(resource, statement, aggregates \\ %{}) do
case parse(resource, statement, aggregates) do
{:ok, filter} ->
filter
{:error, error} ->
raise error
end
end
def parse(resource, statement, aggregates \\ %{}) do
context = %{
resource: resource,
relationship_path: [],
aggregates: aggregates
}
case parse_expression(statement, context) do
{:ok, expression} ->
{:ok, %__MODULE__{expression: expression, resource: resource}}
{:error, error} ->
{:error, error}
end
end
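# Example (illustrative resource and statement):
#   Ash.Filter.parse(MyApp.Post, title: [eq: "hello"])
#   #=> {:ok, %Ash.Filter{resource: MyApp.Post, expression: ...}}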
@doc "transform an expression based filter to a simple filter, which is just a list of predicates"
def to_simple_filter(%{resource: resource, expression: expression}) do
predicates = get_predicates(expression)
%Simple{resource: resource, predicates: predicates}
end
@doc "Replace any actor value references in a template with the values from a given actor"
def build_filter_from_template(template, actor) do
walk_filter_template(template, fn
{:_actor, :_primary_key} ->
if actor do
Map.take(actor, Ash.Resource.primary_key(actor.__struct__))
else
false
end
{:_actor, field} ->
Map.get(actor || %{}, field)
other ->
other
end)
end
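# Example: build_filter_from_template([owner_id: {:_actor, :id}], actor)
# returns [owner_id: actor.id] (assuming the actor struct has an :id field).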
@doc "Whether or not a given template contains an actor reference"
def template_references_actor?({:_actor, _}), do: true
def template_references_actor?(filter) when is_list(filter) do
Enum.any?(filter, &template_references_actor?/1)
end
def template_references_actor?(filter) when is_map(filter) do
  Enum.any?(filter, fn {key, value} ->
    template_references_actor?(key) || template_references_actor?(value)
  end)
end
def template_references_actor?(tuple) when is_tuple(tuple) do
Enum.any?(Tuple.to_list(tuple), &template_references_actor?/1)
end
def template_references_actor?(_), do: false
defp walk_filter_template(filter, mapper) when is_list(filter) do
case mapper.(filter) do
^filter ->
Enum.map(filter, &walk_filter_template(&1, mapper))
other ->
walk_filter_template(other, mapper)
end
end
defp walk_filter_template(filter, mapper) when is_map(filter) do
case mapper.(filter) do
^filter ->
Enum.into(filter, %{}, &walk_filter_template(&1, mapper))
other ->
walk_filter_template(other, mapper)
end
end
defp walk_filter_template(tuple, mapper) when is_tuple(tuple) do
case mapper.(tuple) do
^tuple ->
tuple
|> Tuple.to_list()
|> Enum.map(&walk_filter_template(&1, mapper))
|> List.to_tuple()
other ->
walk_filter_template(other, mapper)
end
end
defp walk_filter_template(value, mapper), do: mapper.(value)
defp get_predicates(expr, acc \\ [])
defp get_predicates(true, acc), do: acc
defp get_predicates(false, _), do: false
defp get_predicates(_, false), do: false
defp get_predicates(%Expression{op: :and, left: left, right: right}, acc) do
acc = get_predicates(left, acc)
get_predicates(right, acc)
end
defp get_predicates(%Not{expression: expression}, acc) do
expression
|> get_predicates()
|> Enum.reduce(acc, fn predicate, acc ->
[%Simple.Not{predicate: predicate} | acc]
end)
end
defp get_predicates(%Predicate{} = predicate, acc), do: [predicate | acc]
def used_aggregates(filter) do
reduce(filter, [], fn
%Predicate{attribute: %Aggregate{} = aggregate}, acc ->
[aggregate | acc]
_, acc ->
acc
end)
end
def run_other_data_layer_filters(api, _resource, filter) do
reduce(filter, {:ok, filter}, fn
%Expression{op: :or}, {:ok, filter} ->
{:halt, {:ok, filter}}
%Predicate{} = expression, {:ok, filter} ->
expression
|> relationship_paths(:ands_only)
|> filter_paths_that_change_data_layers(filter.resource)
|> Enum.reduce_while({:halt, {:ok, filter}}, fn path, {:halt, {:ok, filter}} ->
{for_path, without_path} = split_expression_by_relationship_path(filter, path)
relationship = Ash.Resource.relationship(filter.resource, path)
query =
relationship.destination
|> Ash.Query.new(api)
|> Map.put(:filter, for_path)
add_other_data_layer_read_results(query, relationship, path, without_path)
end)
%Expression{op: :and} = expression, {:ok, filter} ->
expression
|> relationship_paths(:ands_only)
|> filter_paths_that_change_data_layers(filter.resource)
|> Enum.reduce_while({:halt, {:ok, filter}}, fn path, {:halt, {:ok, filter}} ->
{for_path, without_path} = split_expression_by_relationship_path(filter, path)
relationship = Ash.Resource.relationship(filter.resource, path)
query =
relationship.destination
|> Ash.Query.new(api)
|> Map.put(:filter, for_path)
add_other_data_layer_read_results(query, relationship, path, without_path)
end)
_, {:ok, filter} ->
{:ok, filter}
end)
end
defp add_other_data_layer_read_results(query, relationship, path, filter_without_path) do
case query.api.read(query) do
{:ok, results} ->
new_filter =
case relationship.type do
:many_to_many ->
many_to_many_read_results(results, relationship, query, path)
_ ->
results
|> Enum.map(&Map.get(&1, relationship.destination_field))
|> Enum.reject(&is_nil/1)
|> record_filters_or_false(relationship)
|> put_at_path(:lists.droplast(path))
end
case add_to_filter(filter_without_path, new_filter) do
{:ok, filter} -> {:cont, {:halt, {:ok, filter}}}
{:error, error} -> {:halt, {:return, {:error, error}}}
end
{:error, error} ->
{:halt, {:return, {:error, error}}}
end
end
defp record_filters_or_false(records, relationship) do
case records do
[] ->
false
[value] ->
[{relationship.source_field, value}]
values ->
[{relationship.source_field, [in: values]}]
end
end
defp many_to_many_read_results(results, relationship, query, path) do
destination_values =
results
|> Enum.map(&Map.get(&1, relationship.destination_field))
|> Enum.reject(&is_nil/1)
join_query =
relationship.through
|> Ash.Query.new(query.api)
|> Ash.Query.filter([
{relationship.destination_field_on_join_table, [in: destination_values]}
])
case query.api.read(join_query) do
{:ok, results} ->
results
|> Enum.map(&Map.get(&1, relationship.source_field_on_join_table))
|> Enum.reject(&is_nil/1)
|> case do
[] ->
false
[value] ->
[{relationship.source_field, value}]
values ->
[{relationship.source_field, [in: values]}]
end
|> put_at_path(:lists.droplast(path))
{:error, error} ->
{:error, error}
end
end
defp filter_paths_that_change_data_layers(paths, resource, acc \\ [])
defp filter_paths_that_change_data_layers([], _resource, acc), do: acc
defp filter_paths_that_change_data_layers([path | rest], resource, acc) do
case shortest_path_to_changed_data_layer(resource, path) do
{:ok, path} ->
new_rest = Enum.reject(rest, &List.starts_with?(&1, path))
filter_paths_that_change_data_layers(new_rest, resource, [path | acc])
:error ->
filter_paths_that_change_data_layers(rest, resource, acc)
end
end
defp shortest_path_to_changed_data_layer(resource, path, acc \\ [])
defp shortest_path_to_changed_data_layer(_resource, [], _acc), do: :error
defp shortest_path_to_changed_data_layer(resource, [relationship | rest], acc) do
relationship = Ash.Resource.relationship(resource, relationship)
if relationship.type == :many_to_many do
if Ash.Resource.data_layer_can?(resource, {:join, relationship.through}) do
shortest_path_to_changed_data_layer(relationship.destination, rest, [
relationship.name | acc
])
else
{:ok, Enum.reverse([relationship.name | acc])}
end
else
if Ash.Resource.data_layer_can?(resource, {:join, relationship.destination}) do
shortest_path_to_changed_data_layer(relationship.destination, rest, [
relationship.name | acc
])
else
{:ok, Enum.reverse([relationship.name | acc])}
end
end
end
def put_at_path(value, []), do: value
def put_at_path(value, [key | rest]), do: [{key, put_at_path(value, rest)}]
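# e.g. put_at_path(true, [:a, :b]) #=> [a: [b: true]]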
def relationship_paths(filter_or_expression, kind \\ :all)
def relationship_paths(nil, _), do: []
def relationship_paths(%{expression: nil}, _), do: []
def relationship_paths(%__MODULE__{expression: expression}, kind),
do: relationship_paths(expression, kind)
def relationship_paths(expression, kind) do
expression
|> do_relationship_paths(kind)
|> List.wrap()
|> List.flatten()
|> Enum.uniq()
|> Enum.map(fn {path} -> path end)
end
def add_to_filter!(base, addition, op \\ :and, aggregates \\ %{}) do
case add_to_filter(base, addition, op, aggregates) do
{:ok, value} ->
value
{:error, error} ->
raise Ash.Error.to_ash_error(error)
end
end
def add_to_filter(base, addition, op \\ :and, aggregates \\ %{})
def add_to_filter(nil, %__MODULE__{} = addition, _, _), do: {:ok, addition}
def add_to_filter(
%__MODULE__{} = base,
%__MODULE__{} = addition,
op,
_
) do
{:ok, %{base | expression: Expression.new(op, base.expression, addition.expression)}}
end
def add_to_filter(%__MODULE__{} = base, statement, op, aggregates) do
case parse(base.resource, statement, aggregates) do
{:ok, filter} -> add_to_filter(base, filter, op, aggregates)
{:error, error} -> {:error, error}
end
end
@doc """
Returns true if the second argument is a strict subset (always returns the same or less data) of the first
"""
def strict_subset_of(nil, _), do: true
def strict_subset_of(_, nil), do: false
def strict_subset_of(%{resource: resource}, %{resource: other_resource})
when resource != other_resource,
do: false
def strict_subset_of(filter, candidate) do
Ash.SatSolver.strict_filter_subset(filter, candidate)
end
def strict_subset_of?(filter, candidate) do
strict_subset_of(filter, candidate) == true
end
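# Illustrative sketch (hypothetical resource and predicate names): given two
# parsed filters over the same resource,
#
#     {:ok, base} = parse(Post, points: [greater_than: 1])
#     {:ok, candidate} = parse(Post, points: [greater_than: 2])
#     strict_subset_of?(base, candidate)
#
# this returns true only when the SAT solver can prove that every record
# matching `candidate` also matches `base`.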
def relationship_filter_request_paths(filter) do
filter
|> relationship_paths()
|> Enum.map(&[:filter, &1])
end
def read_requests(_, nil), do: {:ok, []}
def read_requests(api, filter) do
filter
|> Ash.Filter.relationship_paths()
|> Enum.map(fn path ->
{path, filter_expression_by_relationship_path(filter, path, true)}
end)
|> Enum.reduce_while({:ok, []}, fn {path, scoped_filter}, {:ok, requests} ->
%{resource: resource} = scoped_filter
with %{errors: []} = query <- Ash.Query.new(resource, api),
%{errors: []} = query <- Ash.Query.filter(query, scoped_filter),
{:action, action} when not is_nil(action) <-
{:action, Ash.Resource.primary_action(resource, :read)} do
request =
Request.new(
resource: resource,
api: api,
query:
Request.resolve(
[[:data, :authorization_filter]],
fn %{
data: %{
authorization_filter: authorization_filter
}
} ->
if authorization_filter do
relationship =
Ash.Resource.relationship(
resource,
List.first(path)
)
case SideLoad.reverse_relationship_path(
relationship,
tl(path)
) do
:error ->
{:ok, query}
{:ok, reverse_relationship} ->
filter = put_at_path(authorization_filter, reverse_relationship)
{:ok, Ash.Query.filter(query, filter)}
end
else
{:ok, query}
end
end
),
async?: false,
path: [:filter, path],
strict_check_only?: true,
action: action,
name: "authorize filter #{Enum.join(path, ".")}",
data: []
)
{:cont, {:ok, [request | requests]}}
else
{:error, error} -> {:halt, {:error, error}}
%{errors: errors} -> {:halt, {:error, errors}}
{:action, nil} -> {:halt, {:error, ReadActionRequired.exception(resource: resource)}}
end
end)
end
def map(%__MODULE__{expression: nil} = filter, _) do
filter
end
def map(%__MODULE__{expression: expression} = filter, func) do
%{filter | expression: do_map(func.(expression), func)}
end
def map(expression, func) do
do_map(func.(expression), func)
end
def do_map(expression, func) do
case expression do
{:halt, expr} ->
expr
%Expression{left: left, right: right} = expr ->
%{expr | left: do_map(left, func), right: do_map(right, func)}
%Not{expression: not_expr} = expr ->
%{expr | expression: do_map(not_expr, func)}
other ->
func.(other)
end
end
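# The reducer passed to `reduce/3` below may return a plain accumulator to
# continue, `{:halt, acc}` to stop the traversal with `acc`, or
# `{:return, value}` to abort and make `reduce/3` itself return `value`.
# A sketch that collects every predicate in a filter:
#
#     reduce(filter, [], fn
#       %Predicate{} = pred, acc -> [pred | acc]
#       _other, acc -> acc
#     end)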
def reduce(filter, acc \\ nil, func)
def reduce(%__MODULE__{expression: nil}, acc, _), do: acc
def reduce(%__MODULE__{expression: expression}, acc, func) do
case func.(expression, acc) do
{:halt, acc} ->
acc
{:return, value} ->
value
acc ->
case do_reduce(expression, acc, func) do
{:halt, acc} -> acc
{:return, value} -> value
acc -> acc
end
end
end
def reduce(expression, acc, func) do
case func.(expression, acc) do
{:halt, acc} ->
acc
{:return, value} ->
value
acc ->
case do_reduce(expression, acc, func) do
{:halt, acc} -> acc
{:return, value} -> value
acc -> acc
end
end
end
def do_reduce(expression, acc, func) do
case expression do
%Expression{} = expression ->
do_reduce_expression(expression, acc, func)
%Not{expression: not_expr} ->
case func.(not_expr, acc) do
{:halt, acc} ->
acc
{:return, value} ->
{:return, value}
acc ->
do_reduce(not_expr, acc, func)
end
{:return, value} ->
{:return, value}
{:halt, value} ->
{:halt, value}
other ->
func.(other, acc)
end
end
defp do_reduce_expression(%Expression{left: left, right: right}, acc, func) do
case func.(right, acc) do
{:halt, acc} ->
case func.(left, acc) do
{:return, value} ->
{:return, value}
{:halt, acc} ->
acc
acc ->
do_reduce(left, acc, func)
end
{:return, value} ->
{:return, value}
acc ->
continue_reduce(left, right, acc, func)
end
end
defp continue_reduce(left, right, acc, func) do
case func.(left, acc) do
{:halt, acc} ->
do_reduce(right, acc, func)
{:return, value} ->
{:return, value}
acc ->
case do_reduce(left, acc, func) do
{:halt, acc} ->
{:halt, acc}
{:return, acc} ->
{:return, acc}
acc ->
do_reduce(right, acc, func)
end
end
end
defp split_expression_by_relationship_path(%{expression: expression} = filter, _path)
when expression in [nil, true, false] do
{filter, filter}
end
defp split_expression_by_relationship_path(filter, path) do
{for_path, without_path} = do_split_expression_by_relationship_path(filter.expression, path)
{%__MODULE__{
resource: Ash.Resource.related(filter.resource, path),
expression: for_path
},
%__MODULE__{
resource: filter.resource,
expression: without_path
}}
end
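# Example: splitting a filter like `[author: [name: "fred"], title: "foo"]` on
# the path `[:author]` yields a filter against the author resource (the
# `name == "fred"` predicate with its path prefix dropped) and a remainder
# filter against the original resource (`title == "foo"`).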
def filter_expression_by_relationship_path(filter, path, scope? \\ false) do
%__MODULE__{
resource: Ash.Resource.related(filter.resource, path),
expression: do_filter_expression_by_relationship_path(filter.expression, path, scope?)
}
end
defp do_split_expression_by_relationship_path(
%Expression{op: op, left: left, right: right},
path
) do
{new_for_path_left, new_without_path_left} =
do_split_expression_by_relationship_path(left, path)
{new_for_path_right, new_without_path_right} =
do_split_expression_by_relationship_path(right, path)
{Expression.new(op, new_for_path_left, new_for_path_right),
Expression.new(op, new_without_path_left, new_without_path_right)}
end
defp do_split_expression_by_relationship_path(%Not{expression: expression}, path) do
{new_for_path, new_without_path} = do_split_expression_by_relationship_path(expression, path)
{Not.new(new_for_path), Not.new(new_without_path)}
end
defp do_split_expression_by_relationship_path(
%Predicate{relationship_path: predicate_path} = predicate,
path
) do
if List.starts_with?(predicate_path, path) do
{%{predicate | relationship_path: Enum.drop(predicate_path, length(path))}, nil}
else
{nil, predicate}
end
end
defp do_filter_expression_by_relationship_path(
%Expression{op: op, left: left, right: right},
path,
scope?
) do
new_left = do_filter_expression_by_relationship_path(left, path, scope?)
new_right = do_filter_expression_by_relationship_path(right, path, scope?)
Expression.new(op, new_left, new_right)
end
defp do_filter_expression_by_relationship_path(%Not{expression: expression}, path, scope?) do
new_expression = do_filter_expression_by_relationship_path(expression, path, scope?)
Not.new(new_expression)
end
defp do_filter_expression_by_relationship_path(
%Predicate{relationship_path: predicate_path} = predicate,
path,
scope?
) do
if List.starts_with?(predicate_path, path) do
if scope? do
%{predicate | relationship_path: Enum.drop(predicate_path, Enum.count(path))}
else
predicate
end
else
nil
end
end
def remove_aggregates(%__MODULE__{expression: expression} = filter) do
%{filter | expression: remove_aggregates(expression)}
end
def remove_aggregates(%Expression{op: op, left: left, right: right}) do
Expression.new(op, remove_aggregates(left), remove_aggregates(right))
end
def remove_aggregates(%Not{expression: expression}) do
Not.new(remove_aggregates(expression))
end
def remove_aggregates(%Predicate{attribute: %Ash.Query.Aggregate{}}), do: nil
def remove_aggregates(other), do: other
defp do_relationship_paths(%Predicate{relationship_path: []}, _) do
[]
end
defp do_relationship_paths(%Predicate{relationship_path: path}, _) do
{path}
end
defp do_relationship_paths(%Expression{op: :or}, :ands_only) do
[]
end
defp do_relationship_paths(%Expression{left: left, right: right}, kind) do
[do_relationship_paths(left, kind), do_relationship_paths(right, kind)]
end
defp do_relationship_paths(%Not{expression: expression}, kind) do
do_relationship_paths(expression, kind)
end
defp do_relationship_paths(_, _), do: []
defp parse_expression(%__MODULE__{expression: expression}, context),
do: {:ok, add_to_predicate_path(expression, context)}
defp parse_expression(statement, context) when is_map(statement) or is_list(statement) do
Enum.reduce_while(statement, {:ok, nil}, fn expression_part, {:ok, expression} ->
case add_expression_part(expression_part, context, expression) do
{:ok, new_expression} ->
{:cont, {:ok, new_expression}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
end
defp parse_expression(statement, context) do
parse_expression([statement], context)
end
defp add_expression_part(boolean, _context, expression) when is_boolean(boolean),
do: {:ok, Expression.new(:and, expression, boolean)}
defp add_expression_part(%__MODULE__{expression: adding_expression}, context, expression) do
{:ok, Expression.new(:and, expression, add_to_predicate_path(adding_expression, context))}
end
defp add_expression_part(%resource{} = record, context, expression) do
if resource == context.resource do
pkey_filter = record |> Map.take(Ash.Resource.primary_key(resource)) |> Map.to_list()
add_expression_part(pkey_filter, context, expression)
else
{:error,
InvalidFilterValue.exception(
value: record,
message: "Records must match the resource being filtered"
)}
end
end
defp add_expression_part({not_key, nested_statement}, context, expression)
when not_key in [:not, "not"] do
case parse_expression(nested_statement, context) do
{:ok, nested_expression} ->
{:ok, Expression.new(:and, expression, Not.new(nested_expression))}
{:error, error} ->
{:error, error}
end
end
defp add_expression_part({is_nil_key, field}, context, expression)
when is_nil_key in ["is_nil", :is_nil] do
case IsNil.new(context.resource, %{name: field}, true) do
{:ok, is_nil} -> {:ok, Expression.new(:and, expression, is_nil)}
{:error, reason} -> {:error, reason}
end
end
defp add_expression_part({or_key, nested_statements}, context, expression)
when or_key in [:or, "or"] do
with {:ok, nested_expression} <- parse_and_join(nested_statements, :or, context),
:ok <- validate_data_layers_support_boolean_filters(nested_expression) do
{:ok, Expression.new(:and, expression, nested_expression)}
end
end
defp add_expression_part({and_key, nested_statements}, context, expression)
when and_key in [:and, "and"] do
case parse_and_join(nested_statements, :and, context) do
{:ok, nested_expression} ->
{:ok, Expression.new(:and, expression, nested_expression)}
{:error, error} ->
{:error, error}
end
end
defp add_expression_part({field, nested_statement}, context, expression)
when is_atom(field) or is_binary(field) do
aggregates =
Enum.flat_map(context.aggregates, fn {key, _} ->
[key, to_string(key)]
end)
cond do
attr = Ash.Resource.attribute(context.resource, field) ->
case parse_predicates(nested_statement, attr, context) do
{:ok, nested_statement} ->
{:ok, Expression.new(:and, expression, nested_statement)}
{:error, error} ->
{:error, error}
end
rel = Ash.Resource.relationship(context.resource, field) ->
context =
context
|> Map.update!(:relationship_path, fn path -> path ++ [rel.name] end)
|> Map.put(:resource, rel.destination)
if is_list(nested_statement) || is_map(nested_statement) do
case parse_expression(nested_statement, context) do
{:ok, nested_expression} ->
{:ok, Expression.new(:and, expression, nested_expression)}
{:error, error} ->
{:error, error}
end
else
with [field] <- Ash.Resource.primary_key(context.resource),
attribute <- Ash.Resource.attribute(context.resource, field),
{:ok, casted} <-
Ash.Type.cast_input(attribute.type, nested_statement) do
add_expression_part({field, casted}, context, expression)
else
_other ->
{:error,
InvalidFilterValue.exception(
value: inspect(nested_statement),
message:
"A single value must be castable to the primary key of the resource: #{
inspect(context.resource)
}"
)}
end
end
field in aggregates ->
field =
if is_binary(field) do
String.to_existing_atom(field)
else
field
end
add_aggregate_expression(context, nested_statement, field, expression)
true ->
{:error,
NoSuchAttributeOrRelationship.exception(
attribute_or_relationship: field,
resource: context.resource
)}
end
end
defp add_expression_part(value, context, expression) when is_map(value) do
# Can't call `parse_expression/2` here because it will loop
value
|> Map.to_list()
|> Enum.reduce_while({:ok, nil}, fn {key, value}, {:ok, expression} ->
case add_expression_part({key, value}, context, expression) do
{:ok, new_expression} ->
{:cont, {:ok, new_expression}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
|> case do
{:ok, new_expression} -> {:ok, Expression.new(:and, expression, new_expression)}
{:error, error} -> {:error, error}
end
end
defp add_expression_part(value, context, expression) when is_list(value) do
Enum.reduce_while(value, {:ok, expression}, fn value, {:ok, expression} ->
case add_expression_part(value, context, expression) do
{:ok, expression} -> {:cont, {:ok, expression}}
{:error, error} -> {:halt, {:error, error}}
end
end)
end
defp add_expression_part(value, _, _) do
{:error, InvalidFilterValue.exception(value: value)}
end
defp add_aggregate_expression(context, nested_statement, field, expression) do
if Ash.Resource.data_layer_can?(context.resource, :aggregate_filter) do
case parse_predicates(nested_statement, Map.get(context.aggregates, field), context) do
{:ok, nested_statement} ->
{:ok, Expression.new(:and, expression, nested_statement)}
{:error, error} ->
{:error, error}
end
else
{:error, AggregatesNotSupported.exception(resource: context.resource, feature: "filtering")}
end
end
defp validate_data_layers_support_boolean_filters(%Expression{op: :or, left: left, right: right}) do
left_resources =
left
|> reduce([], fn
%Predicate{} = pred, acc ->
[pred.resource | acc]
_, acc ->
acc
end)
|> Enum.uniq()
right_resources =
right
|> reduce([], fn
%Predicate{} = pred, acc ->
[pred.resource | acc]
_, acc ->
acc
end)
|> Enum.uniq()
left_resources
|> Enum.filter(&(&1 in right_resources))
|> Enum.reduce_while(:ok, fn resource, :ok ->
if Ash.Resource.data_layer_can?(resource, :boolean_filter) do
{:cont, :ok}
else
{:halt, {:error, "Data layer for #{resource} does not support boolean filters"}}
end
end)
end
defp validate_data_layers_support_boolean_filters(_), do: :ok
defp add_to_predicate_path(expression, context) do
case expression do
%Not{expression: expression} = not_expr ->
%{not_expr | expression: add_to_predicate_path(expression, context)}
%Expression{left: left, right: right} = expression ->
%{
expression
| left: add_to_predicate_path(left, context),
right: add_to_predicate_path(right, context)
}
%Predicate{relationship_path: relationship_path} = pred ->
%{pred | relationship_path: context.relationship_path ++ relationship_path}
other ->
other
end
end
defp parse_and_join(statements, op, context) do
Enum.reduce_while(statements, {:ok, nil}, fn statement, {:ok, expression} ->
case parse_expression(statement, context) do
{:ok, nested_expression} ->
{:cont, {:ok, Expression.new(op, expression, nested_expression)}}
{:error, error} ->
{:halt, {:error, error}}
end
end)
end
defp parse_predicates(value, field, context) when not is_list(value) and not is_map(value) do
parse_predicates([eq: value], field, context)
end
defp parse_predicates(values, attr, context) do
data_layer_predicates =
Map.get(
Ash.Resource.data_layer_filters(context.resource),
Ash.Type.storage_type(attr.type),
[]
)
if is_map(values) || Keyword.keyword?(values) do
Enum.reduce_while(values, {:ok, nil}, fn {key, value}, {:ok, expression} ->
case get_predicate(key, data_layer_predicates) do
value when value in [nil, []] ->
error = NoSuchFilterPredicate.exception(key: key, resource: context.resource)
{:halt, {:error, error}}
predicate_module ->
case Predicate.new(
context.resource,
attr,
predicate_module,
value,
context.relationship_path
) do
{:ok, predicate} ->
{:cont, {:ok, Expression.new(:and, expression, predicate)}}
{:error, error} ->
{:halt, {:error, error}}
end
end
end)
else
error = InvalidFilterValue.exception(value: values)
{:error, error}
end
end
defp get_predicate(key, data_layer_predicates) when is_atom(key) do
@built_in_predicates[key] || data_layer_predicates[key]
end
defp get_predicate(key, data_layer_predicates) when is_binary(key) do
Map.get(@string_builtin_predicates, key) ||
Enum.find_value(data_layer_predicates, fn {pred, value} ->
if to_string(pred) == key do
value
else
false
end
end)
end
defp get_predicate(_, _), do: nil
defimpl Inspect do
import Inspect.Algebra
@custom_colors [
number: :cyan
]
def inspect(
%{expression: expression},
opts
) do
opts = %{opts | syntax_colors: Keyword.merge(opts.syntax_colors, @custom_colors)}
concat(["#Ash.Filter<", to_doc(expression, opts), ">"])
end
end
end
# File: lib/ash/filter/filter.ex
defmodule ExCell.LiveView do
@moduledoc """
Cell helpers used to render the live view cells in both Views and Cells
"""
@view_adapter ExCell.config(:view_adapter, Phoenix.LiveView)
@doc """
Renders a cell in the view.
### Examples
iex(0)> safe_to_string(AppWeb.AvatarView.live_cell(AvatarLiveCell, socket))
"<div class=\"AvatarLiveCell\" ...>"
"""
def live_cell(cell, conn_or_socket) do
render_cell(cell, conn_or_socket, [])
end
@doc """
Renders a cell in the view with children.
### Examples
iex(0)> safe_to_string(AppWeb.AvatarView.live_cell(AvatarLiveCell, socket, do: "Foo"))
"<div class=\"AvatarLiveCell\" ...>Foo</div>"
"""
def live_cell(cell, conn_or_socket, do: children) do
render_cell(cell, conn_or_socket, children: children)
end
@doc """
Renders a cell in the view with assigns.
### Examples
iex(0)> safe_to_string(AppWeb.AvatarView.live_cell(AvatarLiveCell, socket, user: %User{name: "Bar"}))
"<div class=\"AvatarLiveCell\" ...>Bar</div>"
"""
def live_cell(cell, conn_or_socket, assigns) when is_list(assigns) do
render_cell(cell, conn_or_socket, assigns)
end
@doc """
Renders a cell in the view with children without a block.
### Examples
iex(0)> safe_to_string(AppWeb.AvatarView.live_cell(AvatarLiveCell, socket, "Hello"))
"<div class=\"AvatarLiveCell\" ...>Hello</div>"
"""
def live_cell(cell, conn_or_socket, children) do
render_cell(cell, conn_or_socket, children: children)
end
def live_cell(cell, conn_or_socket, assigns, do: children) when is_list(assigns) do
render_cell(cell, conn_or_socket, [children: children] ++ assigns)
end
def live_cell(cell, conn_or_socket, children, assigns) when is_list(assigns) do
render_cell(cell, conn_or_socket, [children: children] ++ assigns)
end
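# All `live_cell` variants funnel into `render_cell/3`, which packs children
# and assigns into the LiveView session under the `:assigns` key and delegates
# to the configured view adapter (`Phoenix.LiveView.live_render/3` by default).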
defp render_cell(cell, conn_or_socket, assigns) do
assigns = Map.new(assigns)
@view_adapter.live_render(conn_or_socket, cell, session: %{assigns: assigns})
end
end
# File: lib/ex_cell/live_view.ex
defmodule Mmo.Collision do
alias Mmo.{World, Player}
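# `check/2` answers "what does this entity run into at its position?".
# Possible results, derived from the clauses below:
#
#   {:collision, :static_object}            - out of bounds or a solid tile
#   {:collision, {:enemy, enemy}}           - another entity occupies the tile
#   {:collision, {:item, item}}             - an item occupies the tile (%Player{} only)
#   {:no_collision, {:damage_object, tile}} - walkable, but the tile deals damage
#   :no_collision                           - free to move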
def check(%{x: x, y: y}, %World{width: width, height: height})
when x < 0 or y < 0 or x >= width or y >= height do
{:collision, :static_object}
end
def check(%Player{} = player, %World{} = world) do
if collision_check(player, world) do
{:collision, :static_object}
else
case damage_check(player, world) do
false ->
case enemy_check(player, world) do
false ->
case item_check(player, world) do
false ->
:no_collision
{true, item} ->
{:collision, {:item, item}}
end
{true, enemy} ->
{:collision, {:enemy, enemy}}
end
tile_number ->
{:no_collision, {:damage_object, tile_number}}
end
end
end
def check(%{} = player, %World{} = world) do
if collision_check(player, world) do
{:collision, :static_object}
else
case damage_check(player, world) do
false ->
case enemy_check(player, world) do
false ->
:no_collision
{true, enemy} ->
{:collision, {:enemy, enemy}}
end
tile_number ->
{:no_collision, {:damage_object, tile_number}}
end
end
end
def check(_obj, _checking_objs) do
:no_collision
end
def collision_check(%{} = player, %World{collision: collision}) do
get_tile_data(player, collision) > 0
end
def damage_check(%{} = player, %World{damage: damage_tiles}) do
tile_number = get_tile_data(player, damage_tiles)
if tile_number > 0 do
{true, tile_number}
else
false
end
end
def enemy_check(%{} = player, %World{enemies: enemies}) do
enemy_check(player, Map.values(enemies))
end
def enemy_check(_player, []) do
false
end
def enemy_check(%{x: x, y: y}, [%{x: x, y: y} = enemy | _enemies]) do
{true, enemy}
end
def enemy_check(player, [_enemy | enemies]) do
enemy_check(player, enemies)
end
def item_check(%{} = player, %World{items: items}) do
item_check(player, Map.values(items))
end
def item_check(_player, []) do
false
end
def item_check(%{x: x, y: y}, [%{x: x, y: y} = item | _items]) do
{true, item}
end
def item_check(player, [_item | items]) do
item_check(player, items)
end
def get_tile_data(%{x: x, y: y}, tiles) when is_list(tiles) do
tiles
|> Enum.at(y)
|> Enum.at(x)
end
end
# File: lib/mmo/collision.ex
defprotocol Dict do
@only [Record]
@moduledoc """
This module provides the Dict protocol
with the goal of being a common API
to work with dictionaries.
"""
@doc """
Returns a list containing all dict's keys.
The keys are not guaranteed to be sorted, unless
the underlying dict implementation defines so.
## Examples
Dict.keys [a: 1, b: 2] #=> [:a, :b]
"""
def keys(dict)
@doc """
Returns a list containing all dict's values.
## Examples
Dict.values [a: 1, b: 2] #=> [1, 2]
"""
def values(dict)
@doc """
Returns the number of elements in `dict`.
## Examples
Dict.size [a: 1, b: 2] #=> 2
"""
def size(dict)
@doc """
Returns whether the given key exists in the given dict.
## Examples
Dict.has_key?([a: 1], :a) #=> true
Dict.has_key?([a: 1], :b) #=> false
"""
def has_key?(dict, key)
@doc """
Returns the value associated with `key` in `dict`. If `dict` does not
contain `key`, returns `default` (or nil if not provided).
## Examples
Dict.get [a: 1], :a #=> 1
Dict.get [a: 1], :b #=> nil
Dict.get [a: 1], :b, 3 #=> 3
"""
def get(dict, key)
def get(dict, key, default)
@doc """
Stores the given `value` under `key` in `dict`.
If `dict` already has `key`, the stored value is replaced by the new one.
## Examples
Dict.put [a: 1, b: 2], :a, 3
#=> [a: 3, b: 2]
"""
def put(dict, key, val)
@doc """
Removes the entry stored under the given key from `dict`.
If `dict` does not contain `key`, returns the dictionary unchanged.
## Examples
Dict.delete [a: 1, b: 2], :a #=> [b: 2]
Dict.delete [b: 2], :a #=> [b: 2]
"""
def delete(dict, key)
@doc """
Merges two dicts into one. If the dicts have duplicated entries, the one
given as second argument wins.
## Examples
Dict.merge [a: 1, b: 2], [a: 3, d: 4]
#=> [a: 3, b: 2, d: 4]
"""
def merge(dict1, dict2)
@doc """
Merges two dicts into one. If the dicts have duplicated entries, the given
function is invoked to solve conflicts.
## Examples
Dict.merge [a: 1, b: 2], [a: 3, d: 4], fn _k, v1, v2 ->
v1 + v2
end
#=> [a: 4, b: 2, d: 4]
"""
def merge(dict1, dict2, fun)
@doc """
Update a value in `dict` by calling `fun` on the value to get a new
value. An exception is generated if `key` is not present in the dict.
## Examples
Dict.update [a: 1, b: 2], :a, fn val -> -val end
#=> [a: -1, b: 2]
"""
def update(dict, key, fun)
@doc """
Update a value in `dict` by calling `fun` on the value to get a new value. If
`key` is not present in `dict` then `initial` will be stored as the first
value.
## Examples
Dict.update [a: 1, b: 2], :c, 3, fn val -> -val end
#=> [a: 1, b: 2, c: 3]
"""
def update(dict, key, initial, fun)
@doc """
Returns an empty dict of the same type as `dict`.
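## Examples
Dict.empty [a: 1] #=> []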
"""
def empty(dict)
@doc """
Returns a list of key-value pairs stored in `dict`.
No particular order is enforced.
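## Examples
Dict.to_list [a: 1, b: 2] #=> [a: 1, b: 2]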
"""
def to_list(dict)
end
# File: lib/elixir/lib/dict.ex
defmodule Faker.Cat.En do
import Faker, only: [sampler: 2]
@moduledoc """
Functions for Cat names, breeds and registries in English
"""
@doc """
Returns a Cat name string
## Examples
iex> Faker.Cat.En.name()
"Daisy"
iex> Faker.Cat.En.name()
"Lily"
iex> Faker.Cat.En.name()
"Felix"
iex> Faker.Cat.En.name()
"Max"
"""
@spec name() :: String.t()
sampler(:name, [
"Alfie",
"Angel",
"Bella",
"Charlie",
"Chloe",
"Coco",
"Daisy",
"Felix",
"Jasper",
"Lily",
"Lucky",
"Lucy",
"Max",
"Millie",
"Milo",
"Missy",
"Misty",
"Molly",
"Oliver",
"Oscar",
"Poppy",
"Sam",
"Shadow",
"Simba",
"Smokey",
"Smudge",
"Sooty",
"Tiger"
])
@doc """
Returns a Cat breed string
## Examples
iex> Faker.Cat.En.breed()
"Mekong Bobtail"
iex> Faker.Cat.En.breed()
"Suphalak"
iex> Faker.Cat.En.breed()
"Russian White, Black and Tabby"
iex> Faker.Cat.En.breed()
"Asian Semi-longhair"
"""
@spec breed() :: String.t()
sampler(:breed, [
"Abyssinian",
"Aegean",
"American Bobtail",
"American Curl",
"American Shorthair",
"American Wirehair",
"Arabian Mau",
"Asian",
"Asian Semi-longhair",
"Australian Mist",
"Balinese",
"Bambino",
"Bengal",
"Birman",
"Bombay",
"Brazilian Shorthair",
"British Longhair",
"British Semipi-longhair",
"British Shorthair",
"Burmese",
"Burmilla",
"California Spangled",
"Chantilly-Tiffany",
"Chartreux",
"Chausie",
"Cheetoh",
"Colorpoint Shorthair",
"Cornish Rex",
"Cymric, or Manx Longhair",
"Cyprus",
"Devon Rex",
"Donskoy, or Don Sphynx",
"Dragon Li",
"Dwarf cat, or Dwelf",
"Egyptian Mau",
"European Shorthair",
"Exotic Shorthair",
"Foldex Cat",
"German Rex",
"Havana Brown",
"Highlander",
"Himalayan, or Colorpoint Persian",
"Japanese Bobtail",
"Javanese",
"Khao Manee",
"Korat",
"Korean Bobtail",
"Korn Ja",
"Kurilian Bobtail",
"Kurilian Bobtail, or Kuril Islands Bobtail",
"LaPerm",
"Lykoi",
"Maine Coon",
"Manx",
"Mekong Bobtail",
"Minskin",
"Munchkin",
"Napoleon",
"Nebelung",
"Norwegian Forest Cat",
"Ocicat",
"Ojos Azules",
"Oregon Rex",
"Oriental Bicolor",
"Oriental Longhair",
"Oriental Shorthair",
"PerFold Cat (Experimental Breed - WCF)",
"Persian (Modern Persian Cat)",
"Persian (Traditional Persian Cat)",
"Peterbald",
"Pixie-bob",
"Raas",
"Ragamuffin",
"Ragdoll",
"Russian Blue",
"Russian White, Black and Tabby",
"<NAME>",
"Savannah",
"Scottish Fold",
"Selkirk Rex",
"Serengeti",
"Serrade petit",
"Siamese",
"Siberian",
"Singapura",
"Snowshoe",
"Sokoke",
"Somali",
"Sphynx",
"Suphalak",
"Thai",
"Tonkinese",
"Toyger",
"Turkish Angora",
"Turkish Van",
"Ukrainian Levkoy"
])
@doc """
Returns a cat registry string
## Examples
iex> Faker.Cat.En.registry()
"Cat Aficionado Association"
iex> Faker.Cat.En.registry()
"Fédération Internationale Féline"
iex> Faker.Cat.En.registry()
"Fédération Internationale Féline"
iex> Faker.Cat.En.registry()
"Fédération Internationale Féline"
"""
@spec registry() :: String.t()
sampler(:registry, [
"American Cat Fanciers Association",
"Associazione Nazionale Felina Italiana",
"Canadian Cat Association",
"Cat Aficionado Association",
"Cat Fanciers' Association",
"Emirates Feline Federation",
"Fédération Internationale Féline",
"Felis Britannica",
"Governing Council of the Cat",
"Fancy Southern Africa Cat Council",
"The International Cat Association"
])
end
# File: lib/faker/cat/en.ex
defmodule Site.TripPlan.ItineraryRowList do
@moduledoc """
Information about an Itinerary that's used for rendering.
Optional `:to` and `:from` names can be passed in.
"""
alias Site.TripPlan.ItineraryRow
alias TripPlan.Itinerary
alias Stops.Stop
@typep destination :: {String.t(), Stop.id_t(), DateTime.t(), [Alerts.Alert.t()]}
defstruct rows: [],
destination: nil,
accessible?: false,
alerts?: false
@type t :: %__MODULE__{
rows: [ItineraryRow.t()],
destination: destination,
accessible?: boolean,
alerts?: boolean
}
@type opts :: [to: String.t() | nil, from: String.t() | nil]
@doc """
Builds an ItineraryRowList from the given itinerary.
"""
@spec from_itinerary(Itinerary.t(), ItineraryRow.Dependencies.t(), opts) :: t
def from_itinerary(
%Itinerary{legs: legs, accessible?: accessible?} = itinerary,
deps,
opts \\ []
) do
alerts = get_alerts(itinerary, deps)
rows = get_rows(itinerary, deps, opts, alerts)
%__MODULE__{
rows: rows,
destination: get_destination(legs, opts, alerts),
accessible?: accessible?,
alerts?: Enum.any?(rows, fn row -> !Enum.empty?(row.alerts) end)
}
end
@spec get_rows(Itinerary.t(), ItineraryRow.Dependencies.t(), opts, [Alerts.Alert.t()]) :: [
ItineraryRow.t()
]
defp get_rows(itinerary, deps, opts, alerts) do
rows =
for {leg, index} <- Enum.with_index(itinerary.legs) do
leg
|> ItineraryRow.from_leg(deps, Enum.at(itinerary.legs, index + 1))
|> ItineraryRow.fetch_alerts(alerts)
end
update_from_name(rows, opts[:from])
end
@spec get_alerts(Itinerary.t(), ItineraryRow.Dependencies.t()) :: [Alerts.Alert.t()]
defp get_alerts(itinerary, deps) do
itinerary.start
|> deps.alerts_repo.()
|> Site.TripPlan.Alerts.filter_for_itinerary(
itinerary,
route_by_id: deps.route_mapper,
trip_by_id: deps.trip_mapper
)
end
@spec get_destination([TripPlan.Leg.t()], Keyword.t(), [Alerts.Alert.t()]) :: destination
defp get_destination(legs, opts, alerts) do
last_leg = List.last(legs)
{name, stop_id} =
last_leg |> Map.get(:to) |> ItineraryRow.name_from_position(&Stops.Repo.get_parent/1)
alerts = Alerts.Stop.match(alerts, stop_id)
{destination_name(name, opts[:to]), stop_id, last_leg.stop, alerts}
end
@spec destination_name(String.t(), String.t() | nil) :: String.t()
defp destination_name(default_name, nil), do: default_name
defp destination_name(_default_name, to_name), do: to_name
@spec update_from_name([ItineraryRow.t()], String.t() | nil) :: [ItineraryRow.t()]
defp update_from_name(rows, nil), do: rows
defp update_from_name([first_row | rest_rows], from_name) do
{_default_name, stop_id} = first_row.stop
[%{first_row | stop: {from_name, stop_id}} | rest_rows]
end
end
defimpl Enumerable, for: Site.TripPlan.ItineraryRowList do
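# Returning `{:error, __MODULE__}` from `count/1`, `member?/2` and `slice/1`
# makes `Enum` fall back to its reduce-based default algorithms, so the row
# list enumerates as its underlying `rows`.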
def count(_itinerary_row_list) do
{:error, __MODULE__}
end
def member?(_itinerary_row_list, %Site.TripPlan.ItineraryRow{}) do
{:error, __MODULE__}
end
def member?(_itinerary_row_list, _other) do
{:ok, false}
end
def reduce(%{rows: rows}, acc, fun) do
Enumerable.reduce(rows, acc, fun)
end
def slice(_itinerary_row_list) do
{:error, __MODULE__}
end
end
# File: apps/site/lib/site/trip_plan/itinerary_row_list.ex
defmodule Hocon do
@moduledoc"""
This module parses and decodes a [hocon](https://github.com/lightbend/config/blob/master/HOCON.md) configuration string.
## Example
iex(1)> conf = ~s(animal { favorite : "dog" }, key : \"\"\"${animal.favorite} is my favorite animal\"\"\")
iex(2)> Hocon.decode(conf)
{:ok,
%{"animal" => %{"favorite" => "dog"}, "key" => "dog is my favorite animal"}}
## Units format
The parser returns a map, because in Elixir it is a common use case to use pattern matching on maps to
extract specific values and keys. Therefore the `Hocon.decode/2` function returns a map. To support
interpreting a value with some family of units, you can call some conversion functions like `as_bytes/1`.
## Example
iex> conf = ~s(limit : "512KB")
iex> {:ok, %{"limit" => limit}} = Hocon.decode(conf)
iex> Hocon.as_bytes(limit)
524288
It is possible to access the unit formats by a keypath, as well:
## Example
iex> conf = ~s(a { b { c { limit : "512KB" } } })
iex> {:ok, map} = Hocon.decode(conf)
iex> Hocon.get_bytes(map, "a.b.c.limit")
524288
iex> Hocon.get_size(map, "a.b.c.limit")
512000
## Include
HOCON supports including other configuration files. The default implementation uses the file system, which
is the most common use case. For other use cases you can implement the `Hocon.Resolver` behaviour and
call the `decode/2` function with `file_resolver: MyResolver` as an option.
## Example
The file `include-1.conf` exists and has the following content:
{ x : 10, y : ${a.x} }
In the case we use the `Hocon.FileResolver` (which is the default as well):
iex> conf = ~s({ a : { include "./test/data/include-1" } })
iex> Hocon.decode(conf, file_resolver: Hocon.FileResolver)
{:ok, %{"a" => %{"x" => 10, "y" => 10}}}
To minimize dependencies on other packages, you need to include the `HoconUrlResolver` package if you want to load
configuration from the internet:
def deps do
[
{:hocon_url_resolver, "~> 0.1.0"}
]
end
or just implement a resolver like:
## URL-Resolver with HTTPoison
defmodule HoconUrlResolver do
@behaviour Hocon.Resolver
@spec exists?(Path.t()) :: boolean
def exists?(url) do
case HTTPoison.head(url) do
{:ok, %HTTPoison.Response{status_code: 200}} -> true
{:ok, %HTTPoison.Response{status_code: 404}} -> false
{:error, _} -> false
end
end
def load(url) do
case HTTPoison.get(url) do
{:ok, %HTTPoison.Response{status_code: 200, body: body}} -> {:ok, body}
{:ok, %HTTPoison.Response{status_code: 404}} -> {:error, "not found"}
{:error, %HTTPoison.Error{reason: reason}} -> {:error, reason}
end
end
end
"""
alias Hocon.Parser
alias Hocon.Period
## size units, power of 2
@kb 1024
@mb @kb * 1024
@gb @mb * 1024
@tb @gb * 1024
@pb @tb * 1024
@eb @pb * 1024
@zb @eb * 1024
@yb @zb * 1024
## size units, power of 10
@kb_10 1000
@mb_10 @kb_10 * 1000
@gb_10 @mb_10 * 1000
@tb_10 @gb_10 * 1000
@pb_10 @tb_10 * 1000
@eb_10 @pb_10 * 1000
@zb_10 @eb_10 * 1000
@yb_10 @zb_10 * 1000
## time units, the base is milliseconds
@ns 0.000001
@us 0.001
@ms 1
@s @ms * 1000
@m @s * 60
@h @m * 60
@d @h * 24
@doc"""
Parses and decodes a hocon string and returns a map
## options
* `:convert_numerically_indexed` - if set to true then numerically-indexed objects are converted to arrays
* `:strict_conversion` - if set to `true` then numerically-indexed objects are only converted to arrays
if all keys are numbers
* `:file_resolver` - set to the module, which is responsible for loading the file resources. The default is `Hocon.FileResolver`
* `:url_resolver` - set to the module, which is responsible for loading the url resources. The default is `Hocon.FileResolver`
## Example
iex> conf = ~s(animal { favorite : "dog" }, key : \"\"\"${animal.favorite} is my favorite animal\"\"\")
iex> Hocon.decode(conf)
{:ok,
%{"animal" => %{"favorite" => "dog"}, "key" => "dog is my favorite animal"}}
## Runtime-Configuration with HOCON
Use can use the HOCON-Parser as a `Config.Provider` to load configuration during boot:
defmodule HOCONConfigProvider do
@behaviour Config.Provider
require Logger
# Let's pass the path to the HOCON file as config
def init(path) when is_binary(path), do: path
def load(config, path) do
# We need to start any app we may depend on.
{:ok, _} = Application.ensure_all_started(:hocon)
{:ok, _} = Application.ensure_all_started(:logger)
Logger.info("Reading runtime config from \#{path}")
conf = path |> File.read!() |> Hocon.decode!()
runtime = [mailer_config(conf)] |> filter_nils()
Config.Reader.merge(config, runtime)
end
defp mailer_config(%{"mailer" => %{"server" => server, "port" => port}}) do
{JobsKliniken.Mailer, [server: server, port: port]}
end
defp mailer_config(%{}) do
{JobsKliniken.Mailer, nil}
end
defp filter_nils(keyword) do
Enum.reject(keyword, fn {_key, value} -> is_nil(value) end)
end
end
"""
def decode(string, opts \\ []) do
Parser.decode(string, opts)
end
@doc"""
Similar to `decode/2` except it will unwrap the error tuple and raise
in case of errors.
"""
def decode!(string, opts \\ []) do
Parser.decode!(string, opts)
end
@doc """
Returns a value for the `keypath` from a map or a successfull parse HOCON string.
## Example
iex> conf = Hocon.decode!(~s(a { b { c : "10kb" } }))
%{"a" => %{"b" => %{"c" => "10kb"}}}
iex> Hocon.get(conf, "a.b.c")
"10kb"
iex> Hocon.get(conf, "a.b.d")
nil
iex> Hocon.get(conf, "a.b.d", "1kb")
"1kb"
"""
def get(root, keypath, default \\ nil) do
keypath = keypath
|> String.split(".")
|> Enum.map(fn str -> String.trim(str) end)
case get_in(root, keypath) do
nil -> default
other -> other
end
end
@doc """
Same a `get/3` but the value is interpreted like a number by using the power of 2.
## Example
iex> conf = Hocon.decode!(~s(a { b { c : "10kb" } }))
%{"a" => %{"b" => %{"c" => "10kb"}}}
iex> Hocon.get_bytes(conf, "a.b.c")
10240
iex> Hocon.get_bytes(conf, "a.b.d")
nil
iex> Hocon.get_bytes(conf, "a.b.d", 1024)
1024
"""
def get_bytes(root, keypath, default \\ nil) do
keypath = keypath
|> String.split(".")
|> Enum.map(fn str -> String.trim(str) end)
case get_in(root, keypath) do
nil -> default
other -> as_bytes(other)
end
end
@doc """
Same a `get/3` but the value is interpreted like a number by using the power of 10.
## Example
iex> conf = Hocon.decode!(~s(a { b { c : "10kb" } }))
%{"a" => %{"b" => %{"c" => "10kb"}}}
iex> Hocon.get_size(conf, "a.b.c")
10000
iex> Hocon.get_size(conf, "a.b.d")
nil
iex> Hocon.get_size(conf, "a.b.d", 1000)
1000
"""
def get_size(root, keypath, default \\ nil) do
keypath = keypath
|> String.split(".")
|> Enum.map(fn str -> String.trim(str) end)
case get_in(root, keypath) do
nil -> default
other -> as_size(other)
end
end
@doc """
Same a `get/3` but the value is interpreted like a duration format in milliseconds.
## Example
iex> conf = Hocon.decode!(~s(a { b { c : "30s" } }))
%{"a" => %{"b" => %{"c" => "30s"}}}
iex> Hocon.get_milliseconds(conf, "a.b.c")
30000
iex> Hocon.get_milliseconds(conf, "a.b.d")
nil
iex> Hocon.get_milliseconds(conf, "a.b.d", 1000)
1000
"""
def get_milliseconds(root, keypath, default \\ nil) do
keypath = keypath
|> String.split(".")
|> Enum.map(fn str -> String.trim(str) end)
case get_in(root, keypath) do
nil -> default
other -> as_milliseconds(other)
end
end
@doc """
Same a `get/3` but the value is interpreted like a duration format in `Hocon.Period`.
## Example
iex> conf = Hocon.decode!(~s(a { b { c : "3 weeks" } }))
%{"a" => %{"b" => %{"c" => "30s"}}}
iex> Hocon.get_period(conf, "a.b.c")
%Hocon.Period{days: 21, months: 0, years: 0}
iex> Hocon.get_period(conf, "a.b.d")
nil
iex> Hocon.get_period(conf, "a.b.d", 7)
7
"""
def get_period(root, keypath, default \\ nil) do
keypath = keypath
|> String.split(".")
|> Enum.map(fn str -> String.trim(str) end)
case get_in(root, keypath) do
nil -> default
other -> as_period(other)
end
end
@doc """
Returns the size of the `string` by using the power of 2.
## Example
iex> Hocon.as_bytes("512kb")
524288
iex> Hocon.as_bytes("125 gigabytes")
134217728000
"""
def as_bytes(value) when is_number(value), do: value
def as_bytes(string) when is_binary(string) do
as_bytes(Regex.named_captures(~r/(?<value>\d+)(\W)?(?<unit>[[:alpha:]]+)?/, String.downcase(string)))
end
def as_bytes(%{"unit" => "", "value" => value}), do: parse_integer(value, 1)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(b byte bytes), do: parse_integer(value, 1)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(k kb kilobyte kilobytes), do: parse_integer(value, @kb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(m mb megabyte megabytes), do: parse_integer(value, @mb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(g gb gigabyte gigabytes), do: parse_integer(value, @gb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(t tb terabyte terabytes), do: parse_integer(value, @tb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(p pb petabyte petabytes), do: parse_integer(value, @pb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(e eb exabyte exabytes), do: parse_integer(value, @eb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(z zb zettabyte zettabytes), do: parse_integer(value, @zb)
def as_bytes(%{"unit" => unit, "value" => value}) when unit in ~w(y yb yottabyte yottabytes), do: parse_integer(value, @yb)
@doc """
Returns the size of the `string` by using the power of 10.
## Example
iex> Hocon.as_size("512kb")
512000
iex> Hocon.as_size("125 gigabytes")
125000000000
"""
def as_size(value) when is_number(value), do: value
def as_size(string) when is_binary(string) do
as_size(Regex.named_captures(~r/(?<value>\d+)(\W)?(?<unit>[[:alpha:]]+)?/, String.downcase(string)))
end
def as_size(%{"unit" => "", "value" => value}), do: parse_integer(value, 1)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(b byte bytes), do: parse_integer(value, 1)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(k kb kilobyte kilobytes), do: parse_integer(value, @kb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(m mb megabyte megabytes), do: parse_integer(value, @mb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(g gb gigabyte gigabytes), do: parse_integer(value, @gb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(t tb terabyte terabytes), do: parse_integer(value, @tb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(p pb petabyte petabytes), do: parse_integer(value, @pb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(e eb exabyte exabytes), do: parse_integer(value, @eb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(z zb zettabyte zettabytes), do: parse_integer(value, @zb_10)
def as_size(%{"unit" => unit, "value" => value}) when unit in ~w(y yb yottabyte yottabytes), do: parse_integer(value, @yb_10)
@doc """
Returns the time of the `string` as milliseconds.
## Example
iex> Hocon.as_milliseconds("30s")
30000
iex> Hocon.as_milliseconds("10us")
0.01
"""
def as_milliseconds(value) when is_number(value), do: value
def as_milliseconds(string) when is_binary(string) do
as_milliseconds(Regex.named_captures(~r/(?<value>\d+)(\W)?(?<unit>[[:alpha:]]+)?/, String.downcase(string)))
end
def as_milliseconds(%{"unit" => "", "value" => value}), do: parse_integer(value, 1)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(ns nano nanos nanosecond nanoseconds), do: parse_integer(value, @ns)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(us micro micros microsecond microseconds), do: parse_integer(value, @us)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(ms milli millis millisecond millisecond), do: parse_integer(value, @ms)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(s second seconds), do: parse_integer(value, @s)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(m minute minutes), do: parse_integer(value, @m)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(h hour hours), do: parse_integer(value, @h)
def as_milliseconds(%{"unit" => unit, "value" => value}) when unit in ~w(d day days), do: parse_integer(value, @d)
@doc """
Returns the duration of the `string` as `Hocon.Period`.
## Example
iex> Hocon.as_period("3 weeks")
%Hocon.Period{days: 21, months: 0, years: 0}
iex> Hocon.as_period("14d")
%Hocon.Period{days: 14, months: 0, years: 0}
"""
def as_period(value) when is_number(value), do: Period.days(value)
def as_period(string) when is_binary(string) do
as_period(Regex.named_captures(~r/(?<value>\d+)(\W)?(?<unit>[[:alpha:]]+)?/, String.downcase(string)))
end
def as_period(%{"unit" => "", "value" => value}), do: value |> parse_integer() |> Period.days()
def as_period(%{"unit" => unit, "value" => value}) when unit in ~w(d day days), do: value |> parse_integer() |> Period.days()
def as_period(%{"unit" => unit, "value" => value}) when unit in ~w(w week weeks), do: value |> parse_integer() |> Period.weeks()
def as_period(%{"unit" => unit, "value" => value}) when unit in ~w(m mo month months), do: value |> parse_integer() |> Period.months()
def as_period(%{"unit" => unit, "value" => value}) when unit in ~w(y year years), do: value |> parse_integer() |> Period.years()
defp parse_integer(string) do
with {result, ""} <- Integer.parse(string) do
result
end
end
defp parse_integer(string, factor) do
with {result, ""} <- Integer.parse(string) do
result * factor
end
end
end
# File: lib/hocon.ex
defmodule TeslaOAuth2ClientAuth.ClientSecretJWT do
@moduledoc """
Tesla middleware that implements the `"client_secret_jwt"` authentication scheme for
[OpenID Connect clients](https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication)
The client configuration must contain a `"client_secret"` member whose value is the
client secret (a `String.t()`) or a JWK in its `"jwks"` attribute that is suited for signature
and has a `"kty"` of `"oct"`.
To determine the MAC algorithm to use, this middleware:
- uses the client's `"token_endpoint_auth_signing_alg"` value if present, and checks it against
the server metadata `"token_endpoint_auth_signing_alg_values_supported"`
- otherwise uses the `"token_endpoint_auth_signing_alg_values_supported"` server metadata and
picks one algorithm that is suitable for MACing
- otherwise raises
Note that the body of the `Tesla.Env` must be a map to be later serialized with
the `Tesla.Middleware.FormUrlencoded`.
The options of this middleware are:
- `:jwt_lifetime`: the lifetime of the JWT in seconds. Defaults to `30`
- `:jwt_jti_callback`: a `(TeslaOAuth2ClientAuth.opts() -> String.t())` function that returns
the `"jti"` field of the JWT. Defaults to a random 16-bytes base64 encoded string
- `:jwt_additional_claims`: claims added to the JWT. They have precedence over the default
claims
"""
@behaviour Tesla.Middleware
@assertion_type "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
@impl true
def call(%Tesla.Env{body: %{}} = env, next, opts) do
client_id = opts[:client_config]["client_id"] || raise "Missing client id"
body =
env.body
|> Map.put("client_id", client_id)
|> Map.put("client_assertion_type", @assertion_type)
|> Map.put("client_assertion", build_assertion(opts))
%Tesla.Env{env | body: body}
|> Tesla.run(next)
end
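# A minimal wiring sketch (client id, secret and endpoint values are made up):
#
#     middleware = [
#       {Tesla.Middleware.BaseUrl, "https://issuer.example"},
#       {TeslaOAuth2ClientAuth.ClientSecretJWT,
#        client_config: %{"client_id" => "my-client", "client_secret" => "top-secret"},
#        server_metadata: %{
#          "token_endpoint" => "https://issuer.example/token",
#          "token_endpoint_auth_signing_alg_values_supported" => ["HS256"]
#        }},
#       Tesla.Middleware.FormUrlencoded
#     ]
#
#     client = Tesla.client(middleware)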
defp build_assertion(opts) do
client_id = opts[:client_config]["client_id"] || raise "Missing client id"
issuer =
opts[:server_metadata]["token_endpoint"] ||
raise "Missing token endpoint to be used as the audience from server metadata"
lifetime = opts[:jwt_lifetime] || 30
mac_alg = mac_alg(opts[:client_config], opts[:server_metadata])
jti =
case opts[:jwt_jti_callback] do
callback when is_function(callback, 1) ->
callback.(opts)
nil ->
gen_jti()
end
message =
%{
iss: client_id,
sub: client_id,
aud: issuer,
jti: jti,
exp: now() + lifetime,
iat: now()
}
|> Map.merge(opts[:jwt_additional_claims] || %{})
|> Jason.encode!()
client_jwk(opts[:client_config])
|> JOSE.JWS.sign(message, %{"alg" => mac_alg})
|> JOSE.JWS.compact()
|> elem(1)
end
defp mac_alg(client_config, server_metadata) do
case client_config do
%{"token_endpoint_auth_signing_alg" => "none"} ->
raise "illegal `token_endpoint_auth_signing_alg` in client configuration: `none`"
%{"token_endpoint_auth_signing_alg" => alg} ->
if alg in (server_metadata["token_endpoint_auth_signing_alg_values_supported"] || []) do
alg
else
raise "client's token endpoint auth algorithm not supported by the authorization server"
end
_ ->
server_metadata["token_endpoint_auth_signing_alg_values_supported"]
|> Enum.find(fn alg -> alg in ["HS256", "HS384", "HS512"] end)
|> case do
alg when is_binary(alg) ->
alg
nil ->
raise "no suitable MAC algorithm supported by the authorization server"
end
end
end
defp client_jwk(%{"client_secret" => client_secret}) do
JOSE.JWK.from(%{"k" => Base.url_encode64(client_secret, padding: false), "kty" => "oct"})
end
defp client_jwk(%{"jwks" => jwks}) do
jwks["keys"]
|> JOSEUtils.JWKS.signature_keys()
|> Enum.filter(fn jwk -> jwk["kty"] == "oct" end)
|> List.first()
end
defp client_jwk(_) do
raise "missing client secret or jwks"
end
defp gen_jti(), do: :crypto.strong_rand_bytes(16) |> Base.encode64(padding: false)
defp now(), do: System.system_time(:second)
end
# File: lib/tesla_oauth2_client_auth/client_secret_jwt.ex
defmodule Mix.Tasks.Materialize.Install do
@moduledoc """
Install materialize package
```shell
$ mix materialize.install
```
Comment out or delete the contents of the file **assets/css/phoenix.css**
If you are using a brunch, change the file **assets/brunch-config.js**:
```Elixir
#{Materialize.if_use_branch}
```
"""
# @shortdoc "Install materialize-css"
use Mix.Task
@compile if Mix.env == :test, do: :export_all
@cmd_npm "npm install materialize-css --save-dev"
@doc "start task"
def run(_) do
IO.puts "Install materialize-css v #{Materialize.Mixfile.get_version}"
do_run()
end
defp do_run do
npm_install() |> do_assets()
finish()
end
defp npm_install do
cmd("cd #{Path.absname("assets")} && #{@cmd_npm}")
cmd("cd ../")
Path.join(~w(assets node_modules materialize-css dist))
end
defp do_assets(npm_dist_path) do
check_path(npm_dist_path, "\nTry running: #{@cmd_npm}")
web_vendor_path = Path.join(~w(assets vendor materialize))
priv_static_path = Path.join(~w(assets static))
File.mkdir_p web_vendor_path
copy_dir_r(npm_dist_path, web_vendor_path, "css")
copy_dir_r(npm_dist_path, web_vendor_path, "js")
copy_dir_r(npm_dist_path, priv_static_path, "fonts")
Mix.shell.info [:white, "* New files copied:"]
Mix.shell.info [:white, "\n#{Materialize.assets_struct}"]
end
defp finish do
Mix.shell.info [:green, "* The materialize-css installed successful!"]
Mix.shell.info [:white, "\n* If you are using a brunch, change the file assets/brunch-config.js:"]
Mix.shell.info [:white, "\n#{Materialize.if_use_branch}"]
end
defp cmd(cmd) do
Mix.shell.info [:green, "* running ", :reset, cmd]
case Mix.shell.cmd(cmd, quiet: true) do
0 -> []
_ -> ["$ #{cmd}"]
end
end
defp copy_dir_r(source_path, dist_path, dir) do
res_dist_path = Path.join([dist_path, dir])
File.cp_r(Path.join([source_path, dir]), res_dist_path)
check_path res_dist_path
end
defp check_path(path) do
unless File.exists? path do
Mix.raise """
Can't find "#{path}"
"""
end
end
defp check_path(path, text) do
unless File.exists? path do
Mix.raise """
Can't find "#{path}" #{text}
"""
end
end
end
# File: lib/mix/tasks/install.ex
defmodule ReIntegrations.Orulo.Mapper do
@moduledoc """
Module to map external structures into persistable internal structures.
"""
alias ReIntegrations.{
Orulo.BuildingPayload
}
@development_attributes ~w(name description developer number_of_floors apts_per_floor status id)
def building_payload_into_development_params(%BuildingPayload{} = %{payload: payload}) do
payload
|> Map.take(@development_attributes)
|> Enum.reduce(%{}, &convert_development_attribute(&1, &2))
end
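# Illustrative round trip (payload values are made up):
#
#     %BuildingPayload{payload: %{"name" => "Edifício X", "status" => "Em construção", "id" => 42}}
#     |> building_payload_into_development_params()
#     #=> %{name: "Edifício X", phase: "building", orulo_id: 42}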
defp convert_development_attribute({"name", name}, acc), do: Map.put(acc, :name, name)
defp convert_development_attribute({"description", description}, acc),
do: Map.put(acc, :description, description)
defp convert_development_attribute({"developer", %{"name" => name}}, acc) do
Map.put(acc, :builder, name)
end
defp convert_development_attribute({"number_of_floors", floor_count}, acc) do
Map.put(acc, :floor_count, floor_count)
end
defp convert_development_attribute({"apts_per_floor", units_per_floor}, acc) do
Map.put(acc, :units_per_floor, units_per_floor)
end
@phase_map %{
"Em construção" => "building",
"Pronto novo" => "delivered",
"Pronto usado" => "delivered"
}
defp convert_development_attribute({"status", status}, acc) do
phase = Map.get(@phase_map, status)
Map.put(acc, :phase, phase)
end
defp convert_development_attribute({"id", orulo_id}, acc) do
Map.put(acc, :orulo_id, orulo_id)
end
defp convert_development_attribute(_, acc), do: acc
@address_attributes ~w(street area city state zip_code latitude longitude number)
def building_payload_into_address_params(
%BuildingPayload{} = %{payload: %{"address" => address}}
) do
address
|> Map.take(@address_attributes)
|> Enum.reduce(%{}, &convert_address_attribute(&1, &2))
end
defp convert_address_attribute({"street", street}, acc) do
Map.put(acc, :street, street)
end
defp convert_address_attribute({"area", neighborhood}, acc) do
Map.put(acc, :neighborhood, neighborhood)
end
defp convert_address_attribute({"city", city}, acc) do
Map.put(acc, :city, city)
end
defp convert_address_attribute({"state", state}, acc) do
Map.put(acc, :state, state)
end
defp convert_address_attribute({"zip_code", postal_code}, acc) do
Map.put(acc, :postal_code, postal_code)
end
defp convert_address_attribute({"latitude", lat}, acc) do
Map.put(acc, :lat, lat)
end
defp convert_address_attribute({"longitude", lng}, acc) do
Map.put(acc, :lng, lng)
end
defp convert_address_attribute({"number", number}, acc) do
Map.put(acc, :street_number, Integer.to_string(number))
end
defp convert_address_attribute(_, acc), do: acc
end
# File: apps/re_integrations/lib/orulo/mapper.ex
defmodule Resonator.Helpers do
@moduledoc """
require Resonator.Helpers, as: H # the cool way
"""
@doc """
Convenience to get environment bits. Avoid all that repetitive
`Application.get_env(:myapp, :blah, :blah)` noise.
Use it as `H.env(:anyapp, :key, default)`
You can add the default app to your config file:
```
config :resonator, app: :myapp
```
Then you can use it as `H.env(:key)` instead of `H.env(:myapp, :key)`
"""
def env(key, default \\ nil), do: env(Application.get_env(:resonator, :app, :resonator), key, default)
def env(app, key, default), do: Application.get_env(app, key, default)
@doc """
Opposite of Resonator.Helpers.env.
"""
def put_env(key, default \\ nil), do: put_env(Application.get_env(:resonator, :app, :resonator), key, default)
def put_env(app, key, default), do: Application.put_env(app, key, default)
@doc """
Spit to output any passed variable, with location information.
If `sample` option is given, it should be a float between 0.0 and 1.0.
Output will be produced randomly with that probability.
Given `opts` will be fed straight into `inspect`. Any option accepted by it should work.
"""
defmacro spit(obj \\ "", opts \\ []) do
quote do
opts = unquote(opts)
obj = unquote(obj)
opts = Keyword.put(opts, :env, __ENV__)
Resonator.Helpers.maybe_spit(obj, opts, opts[:sample])
obj # chainable
end
end
@doc false
def maybe_spit(obj, opts, nil), do: do_spit(obj, opts)
def maybe_spit(obj, opts, prob) when is_float(prob) do
if :rand.uniform <= prob, do: do_spit(obj, opts)
end
defp do_spit(obj, opts) do
%{file: file, line: line} = opts[:env]
name = Process.info(self())[:registered_name]
chain = [:bright, :red, "\n\n#{file}:#{line}", :normal, "\n #{inspect(self())}", :green, " #{name}"]
msg = inspect(obj, opts)
chain = chain ++ [:red, "\n\n#{msg}"]
chain |> Kernel.++(["\n\n", :reset]) |> IO.ANSI.format(true) |> IO.puts
end
@doc """
Print to stdout a _TODO_ message, with location information.
"""
defmacro todo(msg \\ "") do
quote do
%{file: file, line: line} = __ENV__
[:yellow, "\nTODO: #{file}:#{line} #{unquote(msg)}\n", :reset]
|> IO.ANSI.format(true)
|> IO.puts
:todo
end
end
@doc """
Apply given defaults to given Keyword. Returns merged Keyword.
The inverse of `Keyword.merge`, best suited to apply some defaults in a
chainable way.
Ex:
kw = gather_data
|> transform_data
|> H.defaults(k1: 1234, k2: 5768)
|> here_i_need_defaults
Instead of:
kw1 = gather_data
|> transform_data
kw = [k1: 1234, k2: 5768]
|> Keyword.merge(kw1)
|> here_i_need_defaults
iex> [a: 3] |> Resonator.Helpers.defaults(a: 4, b: 5)
[b: 5, a: 3]
iex> %{a: 3} |> Resonator.Helpers.defaults(%{a: 4, b: 5})
%{a: 3, b: 5}
"""
def defaults(args, defs) when is_map(args) and is_map(defs) do
defs |> Map.merge(args)
end
def defaults(args, defs) when is_list(args) and is_list(defs) do
defs |> Keyword.merge(args)
end
def defaults(args, defs, labelled: true), do: {:ok, defaults(args, defs)}
@doc """
Returns `{:error, reason}` if any given key is not in the given Keyword.
Else returns given Keyword, so it can be chained using pipes.
If `labelled: true` is given, then response is `{:ok, args}`.
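iex> Resonator.Helpers.requires([a: 1], [:a])
[a: 1]
iex> Resonator.Helpers.requires([a: 1], [:b])
{:error, "Required argument 'b' was not present in [:a]"}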
"""
def requires(args, required) when is_map(args) do
keys = args |> Map.keys
case requires(keys, required) do
^keys -> args # chainable
x -> x
end
end
def requires(args, required) when is_list(args) do
keys = case Keyword.keyword?(args) do
true -> args |> Keyword.keys
false -> args
end
case do_requires(keys, required) do
:ok -> args # chainable
x -> x
end
end
def requires(args, required, labelled: true) do
case requires(args, required) do
{:error, _} = e -> e
x -> {:ok, x}
end
end
defp do_requires(keys, [required|rest]) do
case required in keys do
true -> do_requires(keys, rest)
false -> {:error, "Required argument '#{required}' was not present in #{inspect(keys)}"}
end
end
defp do_requires(_, []), do: :ok
@doc """
Exploding version of `requires/2`
"""
def requires!(args, required) do
case requires(args, required) do
{:error, reason} -> raise(ArgumentError, reason)
x -> x
end
end
@doc """
Pipeable version of `Kernel.struct/2`
"""
def to_struct(data, struct), do: struct(struct, data)
end
# File: lib/resonator/helpers.ex
defmodule Vtc.Source do
@moduledoc """
Protocols for source values that can be used to construct a timecode.
"""
use Ratio
@typedoc """
Result type of `Vtc.Source.Seconds.seconds/2`.
"""
@type seconds_result :: {:ok, Ratio.t() | integer} | {:error, Vtc.Timecode.ParseError.t()}
defprotocol Seconds do
@moduledoc """
Protocol which types can implement to be passed as the main value of
`Vtc.Timecode.with_seconds/2`.
# Implementations
Out of the box, this protocol is implemented for the following types:
- `Ratio`
- `Integer`
- `Float`
- `String` & `BitString`
- runtime ("01:00:00.0")
- decimal ("3600.0")
"""
@doc """
Returns the value as a rational seconds value.
# Arguments
- **value**: The source value.
- **rate**: The framerate of the timecode being parsed.
# Returns
A result tuple with a rational representation of the seconds value using `Ratio` on
success.
"""
@spec seconds(t, Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def seconds(value, rate)
end
defimpl Seconds, for: [Ratio, Integer] do
@spec seconds(Ratio.t() | integer, Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def seconds(value, rate), do: Private.Parse.from_seconds_core(value, rate)
end
defimpl Seconds, for: Float do
@spec seconds(float, Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def seconds(value, rate), do: Seconds.seconds(Ratio.new(value, 1), rate)
end
defimpl Seconds, for: [String, BitString] do
@spec seconds(String.t() | bitstring, Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def seconds(value, rate), do: Private.Parse.parse_runtime_string(value, rate)
end
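# Example (hedged sketch; `rate` stands for a `Vtc.Framerate` value built
# elsewhere - its constructor is not shown in this file):
#
#     Seconds.seconds(1, rate)             # {:ok, 1} when a frame length divides one second
#     Seconds.seconds("01:30:00.0", rate)  # runtime-string form, parsed via Private.Parse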
@typedoc """
Result type of `Vtc.Source.Frames.frames/2`.
"""
@type frames_result :: {:ok, integer} | {:error, Vtc.Timecode.ParseError.t()}
defprotocol Frames do
@moduledoc """
Protocol which types can implement to be passed as the main value of
`Vtc.Timecode.with_frames/2`.
# Implementations
Out of the box, this protocol is implemented for the following types:
- `Integer`
- `String` & `BitString`
- timecode ("01:00:00:00")
- integer ("86400")
- Feet+Frames ("5400+00")
"""
@doc """
Returns the value as a frame count.
# Arguments
- **value**: The source value.
- **rate**: The framerate of the timecode being parsed.
# Returns
A result tuple with an integer value representing the frame count on success.
"""
@spec frames(t, Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def frames(value, rate)
end
defimpl Frames, for: Integer do
@spec frames(integer, Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def frames(value, _rate), do: {:ok, value}
end
defimpl Frames, for: [String, BitString] do
@spec frames(String.t() | bitstring, Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def frames(value, rate), do: Private.Parse.parse_frames_string(value, rate)
end
@typedoc """
Result type of `Vtc.Source.PremiereTicks.ticks/2`.
"""
@type ticks_result :: {:ok, integer} | {:error, Vtc.Timecode.ParseError.t()}
defprotocol PremiereTicks do
@moduledoc """
Protocol which types can implement to be passed as the main value of
`Vtc.Timecode.with_premiere_ticks/2`.
# Implementations
Out of the box, this protocol is implemented for the following types:
- `Integer`
"""
@doc """
Returns the number of Adobe Premiere Pro ticks as an integer.
# Arguments
- **value**: The source value.
- **rate**: The framerate of the timecode being parsed.
# Returns
A result tuple with an integer number of Premiere ticks on success.
"""
@spec ticks(t, Vtc.Framerate.t()) :: Vtc.Source.ticks_result()
def ticks(value, rate)
end
defimpl PremiereTicks, for: Integer do
@spec ticks(integer, Vtc.Framerate.t()) :: Vtc.Source.ticks_result()
def ticks(value, _rate), do: {:ok, value}
end
end
defmodule Private.Parse do
@moduledoc false
use Ratio
@spec from_seconds_core(Ratio.t() | integer, Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def from_seconds_core(value, rate) do
# If our seconds are not cleanly divisible by the length of a single frame, we need
# to round to the nearest frame.
seconds =
if not is_integer(value / rate.playback) do
frames = Private.Rat.round_ratio?(rate.playback * value)
seconds = frames / rate.playback
seconds
else
value
end
{:ok, seconds}
end
@spec parse_frames_string(String.t(), Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def parse_frames_string(value, rate) do
case parse_tc_string(value, rate) do
{:ok, tc} -> {:ok, tc}
{:error, %Vtc.Timecode.ParseError{reason: :bad_drop_frames} = err} -> {:error, err}
{:error, _} -> parse_feet_and_frames(value, rate)
end
end
@spec parse_tc_string(String.t(), Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def parse_tc_string(value, rate) do
tc_regex =
~r/^(?P<negative>-)?((?P<section1>[0-9]+)[:;])?((?P<section2>[0-9]+)[:;])?((?P<section3>[0-9]+)[:;])?(?P<frames>[0-9]+)$/
with {:ok, matched} <- apply_regex(tc_regex, value),
sections <- tc_matched_to_sections(matched),
{:ok, frames} <- tc_sections_to_frames(sections, rate) do
{:ok, frames}
else
:no_match -> {:error, %Vtc.Timecode.ParseError{reason: :unrecognized_format}}
{:error, err} -> {:error, err}
end
end
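# Sections fill in from the right, so "10" is frames, "01:10" is SS:FF,
# "01:00:10" is MM:SS:FF, and "01:00:00:10" is HH:MM:SS:FF. Separators may
# be ":" or ";" (drop-frame style), and a leading "-" negates the timecode.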
@spec apply_regex(Regex.t(), String.t()) :: :no_match | {:ok, map}
defp apply_regex(regex, value) do
matched = Regex.named_captures(regex, value)
if matched == nil do
:no_match
else
{:ok, matched}
end
end
@spec tc_matched_to_sections(map) :: Vtc.Timecode.Sections.t()
defp tc_matched_to_sections(matched) do
# It's faster to append to the front of a list, so we will work backwards
section_keys = ["section3", "section2", "section1"]
sections = build_groups(matched, section_keys)
{seconds, sections} = tc_get_next_section(sections)
{minutes, sections} = tc_get_next_section(sections)
{hours, _} = tc_get_next_section(sections)
# If the regex matched, then the frames place has to have matched.
frames = String.to_integer(matched["frames"])
is_negative = matched["negative"] != ""
%Vtc.Timecode.Sections{
negative: is_negative,
hours: hours,
minutes: minutes,
seconds: seconds,
frames: frames
}
end
# Extracts groups that may or may not be in the match into a list of values.
@spec build_groups(map, list(String.t())) :: list(String.t())
defp build_groups(matched, section_keys) do
# Reduce to our present section values.
{_, sections} =
Enum.map_reduce(section_keys, [], fn section_key, sections ->
this_section = matched[section_key]
sections =
if this_section != "" do
[this_section | sections]
else
sections
end
{this_section, sections}
end)
sections
end
@spec tc_get_next_section(list(String.t())) :: {integer, list(String.t())}
defp tc_get_next_section(sections) do
{value, sections} = List.pop_at(sections, -1)
value_int =
if value == nil or value == "" do
0
else
String.to_integer(value)
end
{value_int, sections}
end
@spec tc_sections_to_frames(Vtc.Timecode.Sections.t(), Vtc.Framerate.t()) ::
Vtc.Source.frames_result()
defp tc_sections_to_frames(%Vtc.Timecode.Sections{} = sections, %Vtc.Framerate{} = rate) do
seconds =
sections.minutes * Private.Const.secondsPerMinute() +
sections.hours * Private.Const.secondsPerHour() +
sections.seconds
frames = sections.frames + seconds * Vtc.Framerate.timebase(rate)
with {:ok, adjustment} <- Private.Drop.parse_adjustment(sections, rate) do
frames = frames + adjustment
frames = Private.Rat.round_ratio?(frames)
frames =
if sections.negative do
-frames
else
frames
end
{:ok, frames}
else
{:error, err} -> {:error, err}
end
end
@spec parse_feet_and_frames(String.t(), Vtc.Framerate.t()) :: Vtc.Source.frames_result()
def parse_feet_and_frames(value, rate) do
ff_regex = ~r/(?P<negative>-)?(?P<feet>[0-9]+)\+(?P<frames>[0-9]+)/
with {:ok, matched} <- apply_regex(ff_regex, value) do
feet = matched["feet"] |> String.to_integer()
frames = matched["frames"] |> String.to_integer()
frames = feet * Private.Const.frames_per_foot() + frames
frames =
if matched["negative"] != "" do
-frames
else
frames
end
Vtc.Source.Frames.frames(frames, rate)
else
:no_match -> {:error, %Vtc.Timecode.ParseError{reason: :unrecognized_format}}
end
end
@spec parse_runtime_string(String.t(), Vtc.Framerate.t()) :: Vtc.Source.seconds_result()
def parse_runtime_string(value, rate) do
runtime_regex =
~r/^(?P<negative>-)?((?P<section1>[0-9]+)[:;])?((?P<section2>[0-9]+)[:;])?(?P<seconds>[0-9]+(\.[0-9]+)?)$/
with {:ok, matched} <- apply_regex(runtime_regex, value),
seconds <- runtime_matched_to_second(matched),
{:ok, seconds} <- Vtc.Source.Seconds.seconds(seconds, rate) do
{:ok, seconds}
else
:no_match -> {:error, %Vtc.Timecode.ParseError{reason: :unrecognized_format}}
{:error, err} -> {:error, err}
end
end
@spec runtime_matched_to_second(map) :: Ratio.t() | integer
defp runtime_matched_to_second(matched) do
section_keys = ["section2", "section1"]
sections = build_groups(matched, section_keys)
{minutes, sections} = tc_get_next_section(sections)
{hours, _} = tc_get_next_section(sections)
# We will always have a 'seconds' group.
seconds = Ratio.new(Decimal.new(matched["seconds"]), 1)
is_negative = matched["negative"] != ""
seconds =
hours * Private.Const.secondsPerHour() + minutes * Private.Const.secondsPerMinute() +
seconds
if is_negative do
-seconds
else
seconds
end
end
end
|
lib/sources.ex
| 0.928198
| 0.612136
|
sources.ex
|
starcoder
|
defmodule Nx.Defn.Grad do
@moduledoc false
alias Nx.Defn.{Expr, Tree}
alias Nx.Tensor, as: T
def transform(to_grad, expr) do
expr = validate_expr!(expr)
initial = Expr.tensor(1.0)
{graded, _} =
Tree.composite(to_grad, %{}, fn to_grad, shared ->
id = grad_id!(to_grad)
{graded, _} = to_grad(expr, initial, {%{id => :stop}, shared})
graded =
if graded.shape == to_grad.shape do
graded
else
Nx.broadcast(graded, to_grad)
end
{graded, shared}
end)
graded
end
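# Example (hedged sketch of how this transform is reached through the defn
# grad macro in this vintage of Nx; the public API may differ across versions):
#
#     defn grad_square(x), do: grad(x, Nx.multiply(x, x))
#
#     # grad_square(3.0) would evaluate to 6.0, since d(x*x)/dx = 2x.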
defp grad_id!(%T{data: %Expr{id: id}}) do
id
end
defp grad_id!(other) do
raise ArgumentError,
"the first argument of grad must be a variable or a tuple of defn expressions, " <>
"got: #{inspect(other)}"
end
defp validate_expr!(%T{data: %Expr{}, shape: {}} = expr) do
expr
end
defp validate_expr!(%T{data: %Expr{}, shape: shape}) do
raise ArgumentError,
"can only compute gradients of expressions that return scalars, " <>
"got shape: #{inspect(shape)}"
end
defp validate_expr!(other) do
validate_expr!(Expr.tensor(other))
end
## Recursion
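# `cache` is a {result_cache, grad_cache} pair: `result_cache` maps the id of
# the variable being differentiated to `:stop` (recursion bottoms out there,
# returning the accumulated gradient) and memoizes finished gradients under
# [expr_id | g_id] keys; `grad_cache` memoizes the per-op chain-rule parts
# produced by `cached_grad/3` so shared subexpressions are only derived once.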
defp to_grad(expr, res, cache) do
Tree.composite(expr, cache, fn
%T{data: %Expr{id: id, op: op, args: args}} = ans, {result_cache, grad_cache} = cache ->
key = [id | res.data.id]
case result_cache do
%{^id => :stop} ->
{res, cache}
%{^key => res} ->
{res, cache}
%{} ->
case grad(op, args, ans, res, cache) do
{res, {result_cache, grad_cache}} ->
{res, {Map.put(result_cache, key, res), grad_cache}}
:none ->
parts =
case grad_cache do
%{^id => parts} -> parts
%{} -> cached_grad(op, args, ans)
end
{res, {result_cache, grad_cache}} = grad_parts(parts, res, cache)
{res, {Map.put(result_cache, key, res), Map.put(grad_cache, id, parts)}}
end
end
end)
end
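# grad_parts/3 is the chain rule: for each {input, local_grad} pair produced
# by cached_grad/3 it recurses with g * local_grad, then sums the results
# (maybe_add/maybe_multiply fold away literal zeros and ones along the way).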
defp grad_parts([], _res, cache), do: {Expr.tensor(0.0), cache}
defp grad_parts([{head, subg} | tail], g, cache) do
acc = to_grad(head, maybe_multiply(g, subg), cache)
Enum.reduce(tail, acc, fn {expr, subg}, {acc, cache} ->
{graded, cache} = to_grad(expr, maybe_multiply(g, subg), cache)
{maybe_add(acc, graded), cache}
end)
end
## Control-flow / syntax nodes
defp grad(:metadata, [_, %{stop_grad: true}], _ans, _g, cache) do
{Expr.tensor(1.0), cache}
end
defp grad(:metadata, [expr, %{custom_grad: fun}], _ans, g, cache) do
args = fun.(expr, g)
unless is_list(args) and Enum.all?(args, &match?({_, _}, &1)) do
raise "custom_grad/2 must return a list of tuples, " <>
"where the first element is the expression to continue computing grad " <>
"and the second element is the updated g"
end
Enum.reduce(args, {Expr.tensor(0.0), cache}, fn {expr, g}, {acc, cache} ->
{graded, cache} = to_grad(expr, g, cache)
{maybe_add(acc, graded), cache}
end)
end
defp grad(:cond, [clauses, last], _ans, g, cache) do
{clauses, cache} =
Enum.map_reduce(clauses, cache, fn {head, body}, cache ->
{body, cache} = to_grad(body, g, cache)
{{head, body}, cache}
end)
{last, cache} = to_grad(last, g, cache)
{Expr.cond(clauses, last), cache}
end
defp grad(:elem, [tuple, index, _size], _ans, g, cache) do
{tuple, cache} = to_grad(tuple, g, cache)
{elem(tuple, index), cache}
end
defp grad(:select, [pred, on_true, on_false], _ans, g, cache) do
{d_on_true, cache} = to_grad(on_true, g, cache)
{d_on_false, cache} = to_grad(on_false, g, cache)
result = Nx.select(pred, d_on_true, d_on_false)
{result, cache}
end
defp grad(:outer, [x, y], ans, g, cache) do
x = Nx.reshape(x, {Nx.size(x.shape), 1})
y = Nx.reshape(y, {1, Nx.size(y.shape)})
{x, y} = binary_broadcast(x, y, ans)
{dx, cache} = to_grad(x, Nx.multiply(g, y), cache)
{dy, cache} = to_grad(y, Nx.multiply(g, x), cache)
{maybe_add(dx, dy), cache}
end
defp grad(:broadcast, [x, shape, axes], _ans, g, cache) do
implicit_axes =
for {a, i} <- Enum.with_index(axes),
elem(shape, a) != 1 and elem(x.shape, i) == 1,
do: {a, i}
{implicit_axes, broadcast_axes} = Enum.unzip(implicit_axes)
explicit_axes = Nx.axes(shape) -- axes
g =
case explicit_axes ++ implicit_axes do
[] -> g
sum_axes -> Nx.sum(g, axes: sum_axes)
end
g =
case broadcast_axes do
[] -> g
_ -> Nx.broadcast(g, x.shape, axes: Nx.axes(x.shape) -- broadcast_axes)
end
to_grad(x, g, cache)
end
defp grad(:clip, [operand, min, max], _ans, g, cache) do
# w.r.t min
w_min =
Nx.select(
Nx.bitwise_and(Nx.greater(min, operand), Nx.less(min, max)),
Nx.broadcast(g, operand),
0.0
)
# w.r.t operand
w_operand =
Nx.select(
Nx.bitwise_and(Nx.greater(operand, min), Nx.less(operand, max)),
g,
0.0
)
# w.r.t max
w_max = Nx.select(Nx.less(max, operand), Nx.broadcast(g, operand), 0.0)
{g_operand, cache} = to_grad(operand, Nx.multiply(g, w_operand), cache)
{g_min, cache} = to_grad(min, Nx.multiply(g, w_min), cache)
{g_max, cache} = to_grad(max, Nx.multiply(g, w_max), cache)
{g_operand |> maybe_add(g_min) |> maybe_add(g_max), cache}
end
defp grad(:squeeze, [x, axes], _ans, g, cache) do
g = Nx.broadcast(g, x.shape, axes: Nx.axes(x.shape) -- axes)
to_grad(x, g, cache)
end
defp grad(:reshape, [x, _new_shape], _ans, g, cache) do
to_grad(x, Nx.reshape(g, x), cache)
end
defp grad(:transpose, [x, axes], _ans, g, cache) do
to_grad(x, Nx.transpose(g, axes: argsort(axes)), cache)
end
defp grad(:pad, [x, value, padding_config], _ans, g, cache) do
inverse_padding_config = Enum.map(padding_config, fn {lo, hi, _} -> {-lo, -hi, 0} end)
unpadded = Nx.pad(g, 0.0, inverse_padding_config)
start_indices = List.duplicate(0, Nx.rank(unpadded))
lengths = Tuple.to_list(unpadded.shape)
strides = padding_config |> Enum.map(fn {_, _, interior} -> interior + 1 end)
g_operand = Nx.slice(unpadded, start_indices, lengths, strides: strides)
g_value = Nx.subtract(Nx.sum(g), Nx.sum(g_operand))
{dx, cache} = to_grad(x, g_operand, cache)
{dvalue, cache} = to_grad(value, g_value, cache)
{maybe_add(dx, dvalue), cache}
end
defp grad(:slice, [x, start_indices, _lengths, strides], _ans, g, cache) do
lo_pads = start_indices
hi_pads = hi_pads(0, g.shape, x.shape, start_indices, strides)
interior_pads = Enum.map(strides, &(&1 - 1))
padding_config = Enum.zip([lo_pads, hi_pads, interior_pads])
pad_value = 0.0
to_grad(x, Nx.pad(g, pad_value, padding_config), cache)
end
defp grad(:reverse, [x, axes], _ans, g, cache) do
reversed = Nx.reverse(g, axes: axes)
to_grad(x, reversed, cache)
end
defp grad(:sum, [x, opts], _ans, g, cache) do
grad_reduce(x, opts, g, cache, & &1)
end
defp grad(:product, [x, opts], _ans, g, cache) do
axes = opts[:axes] || Nx.axes(x)
non_axes = Nx.axes(x) -- axes
n = Enum.reduce(axes, 1, fn axis, size -> elem(x.shape, axis) * size end)
non_axes_shape =
non_axes
|> Enum.map(&elem(x.shape, &1))
|> List.to_tuple()
permutation = axes ++ non_axes
new_shape = Tuple.insert_at(non_axes_shape, 0, n)
operand = Nx.reshape(Nx.transpose(x, axes: permutation), new_shape)
x = reduce_prod_tree(operand, 0, n, non_axes_shape)
to_grad(x, g, cache)
end
@reduce_min_max_ops [:reduce_max, :reduce_min]
defp grad(op, [x, opts], ans, g, cache) when op in @reduce_min_max_ops do
grad_reduce(x, opts, g, cache, fn g ->
axes = opts[:axes] || Nx.axes(x)
shape =
for {d, i} <- Enum.with_index(Tuple.to_list(x.shape)) do
if i in axes, do: 1, else: d
end
locs = Nx.equal(x, Nx.reshape(ans, List.to_tuple(shape)))
num = Nx.multiply(g, locs)
den = Nx.sum(locs, axes: axes, keep_axes: true)
Nx.divide(num, den)
end)
end
defp grad(:dot, [x, axes_x, y, axes_y], ans, g, cache) do
g = Nx.broadcast(g, ans)
contract_gx = up_to(Nx.rank(x.shape) - length(axes_x), Nx.rank(g.shape))
contract_gy = up_to(0, Nx.rank(x.shape) - length(axes_x))
contract_x = Nx.axes(x.shape) -- axes_x
contract_y = Nx.axes(y.shape) -- axes_y
transpose_x = Enum.map(argsort(axes_y), &Enum.fetch!(axes_x, &1))
transpose_y = Enum.map(argsort(axes_x), &Enum.fetch!(axes_y, &1))
gx =
g
|> Nx.dot(contract_gx, y, contract_y)
|> Nx.transpose(axes: argsort(contract_x ++ transpose_x))
gy =
g
|> Nx.dot(contract_gy, x, contract_x)
|> Nx.transpose(axes: argsort(contract_y ++ transpose_y))
{dx, cache} = to_grad(x, gx, cache)
{dy, cache} = to_grad(y, gy, cache)
{maybe_add(dx, dy), cache}
end
defp grad(:conv, [x, y, opts], ans, g, cache) do
grad_conv(x, y, opts, ans, g, cache)
end
@window_chooser_op [:window_min, :window_max]
defp grad(op, [x, window_dimensions, opts], _ans, g, cache) when op in @window_chooser_op do
padding = opts[:padding]
strides = opts[:strides]
fun =
if op == :window_min,
do: &Nx.scatter_window_min/5,
else: &Nx.scatter_window_max/5
g = fun.(x, g, window_dimensions, [padding: padding, strides: strides], 0)
to_grad(x, g, cache)
end
defp grad(:window_sum, [x, window_dimensions, opts], _ans, g, cache) do
strides = opts[:strides]
window_dilation = opts[:window_dilations]
base_dilation = List.duplicate(1, Nx.rank(x))
padding = opts[:padding]
padding_config =
conv_lhs_padding(
x.shape,
window_dimensions,
strides,
g.shape,
padding,
base_dilation,
window_dilation
)
padding_config =
padding_config
|> Enum.zip(strides)
|> Enum.map(fn {{lo, hi}, s} -> {lo, hi, s - 1} end)
g = Nx.pad(g, 0.0, padding_config)
g =
Nx.window_sum(
g,
window_dimensions,
strides: base_dilation,
padding: List.duplicate({0, 0}, Nx.rank(x)),
window_dilations: window_dilation
)
to_grad(x, g, cache)
end
defp grad(_op, _args, _ans, _g, _cache) do
:none
end
## Cached gradients
defp cached_grad(:add, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
[{x, Expr.tensor(1.0)}, {y, Expr.tensor(1.0)}]
end
defp cached_grad(:subtract, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
[{x, Expr.tensor(1.0)}, {y, Expr.tensor(-1.0)}]
end
defp cached_grad(:multiply, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
[{x, y}, {y, x}]
end
defp cached_grad(:divide, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
[{x, Nx.divide(1.0, y)}, {y, Nx.negate(Nx.divide(ans, y))}]
end
defp cached_grad(:quotient, _, _) do
raise ArgumentError, """
cannot compute gradient for Nx.quotient/2.
If a floating point computation is acceptable, consider \
using an implementation of floor division. See the \
documentation of `Nx.quotient` for more details.
"""
end
defp cached_grad(:remainder, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
[{x, Expr.tensor(1.0)}, {y, Nx.negate(Nx.floor(Nx.divide(x, y)))}]
end
defp cached_grad(:power, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
exponent = Nx.select(Nx.equal(y, 0.0), 1.0, Nx.subtract(y, 1.0))
base = Nx.select(Nx.equal(x, 0.0), 1.0, x)
gx = Nx.multiply(y, Nx.power(x, exponent))
gy = Nx.multiply(Nx.log(base), ans)
[{x, gx}, {y, gy}]
end
defp cached_grad(:atan2, [x, y], ans) do
{x, y} = binary_broadcast(x, y, ans)
den = Nx.add(Nx.multiply(x, x), Nx.multiply(y, y))
[{x, Nx.divide(y, den)}, {y, Nx.negate(Nx.divide(x, den))}]
end
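# For min/max, ties split the gradient evenly: where both operands equal the
# answer the divisor below is 2.0 (each side gets half), otherwise 1.0.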
defp cached_grad(op, [x, y], ans) when op in [:min, :max] do
{x, y} = binary_broadcast(x, y, ans)
lhs =
Nx.divide(
Nx.select(Nx.equal(x, ans), 1.0, 0.0),
Nx.select(Nx.equal(y, ans), 2.0, 1.0)
)
rhs =
Nx.divide(
Nx.select(Nx.equal(y, ans), 1.0, 0.0),
Nx.select(Nx.equal(x, ans), 2.0, 1.0)
)
[{x, lhs}, {y, rhs}]
end
defp cached_grad(:as_type, [x], _ans) do
[{x, Expr.tensor(1.0)}]
end
defp cached_grad(:bitcast, [x], _ans) do
[{x, Expr.tensor(1.0)}]
end
defp cached_grad(:metadata, [expr, _metadata], _ans) do
[{expr, Expr.tensor(1.0)}]
end
defp cached_grad(:abs, [x], _ans) do
[{x, Nx.select(Nx.greater_equal(x, 0.0), 1.0, -1.0)}]
end
defp cached_grad(:sqrt, [x], ans) do
[{x, Nx.divide(0.5, ans)}]
end
defp cached_grad(:cbrt, [x], ans) do
[{x, Nx.divide(1.0, 3 |> Nx.multiply(ans) |> Nx.multiply(ans))}]
end
defp cached_grad(:exp, [x], ans) do
[{x, ans}]
end
defp cached_grad(:expm1, [x], ans) do
[{x, Nx.add(ans, 1)}]
end
defp cached_grad(:log, [x], _ans) do
[{x, Nx.divide(1.0, x)}]
end
defp cached_grad(:log1p, [x], _ans) do
[{x, Nx.divide(1.0, Nx.add(x, 1))}]
end
defp cached_grad(:logistic, [x], ans) do
g =
x
|> Nx.negate()
|> Nx.exp()
|> Nx.multiply(ans)
|> Nx.multiply(ans)
[{x, g}]
end
defp cached_grad(:negate, [x], _ans) do
[{x, Expr.tensor(-1.0)}]
end
defp cached_grad(:rsqrt, [x], _ans) do
[{x, Nx.multiply(-0.5, Nx.power(x, -1.5))}]
end
defp cached_grad(:sin, [x], _ans) do
[{x, Nx.cos(x)}]
end
defp cached_grad(:asin, [x], _ans) do
[{x, Nx.rsqrt(Nx.subtract(1.0, Nx.multiply(x, x)))}]
end
defp cached_grad(:sinh, [x], _ans) do
[{x, Nx.cosh(x)}]
end
defp cached_grad(:asinh, [x], _ans) do
[{x, Nx.rsqrt(Nx.add(Nx.multiply(x, x), 1.0))}]
end
defp cached_grad(:acosh, [x], _ans) do
[{x, Nx.rsqrt(Nx.subtract(Nx.multiply(x, x), 1.0))}]
end
defp cached_grad(:atanh, [x], _ans) do
[{x, Nx.divide(1.0, Nx.subtract(1.0, Nx.multiply(x, x)))}]
end
defp cached_grad(:cos, [x], _ans) do
[{x, Nx.negate(Nx.sin(x))}]
end
defp cached_grad(:acos, [x], _ans) do
[{x, Nx.negate(Nx.rsqrt(Nx.subtract(1.0, Nx.multiply(x, x))))}]
end
defp cached_grad(:cosh, [x], _ans) do
[{x, Nx.sinh(x)}]
end
defp cached_grad(:tan, [x], _ans) do
cos = Nx.cos(x)
[{x, 1 |> Nx.divide(cos) |> Nx.divide(cos)}]
end
defp cached_grad(:atan, [x], _ans) do
[{x, Nx.divide(1.0, Nx.add(1.0, Nx.multiply(x, x)))}]
end
defp cached_grad(:tanh, [x], ans) do
[{x, Nx.subtract(1.0, Nx.multiply(ans, ans))}]
end
@half_sqrt_pi :math.sqrt(:math.pi()) / 2
@two_rsqrt_pi 2 / :math.sqrt(:math.pi())
defp cached_grad(:erf, [x], _ans) do
g =
x
|> Nx.multiply(x)
|> Nx.negate()
|> Nx.exp()
|> Nx.multiply(@two_rsqrt_pi)
[{x, g}]
end
defp cached_grad(:erfc, [x], _ans) do
g =
x
|> Nx.multiply(x)
|> Nx.negate()
|> Nx.exp()
|> Nx.multiply(-@two_rsqrt_pi)
[{x, g}]
end
defp cached_grad(:erf_inv, [x], ans) do
g = Nx.multiply(@half_sqrt_pi, Nx.exp(Nx.multiply(ans, ans)))
[{x, g}]
end
defp cached_grad(:reduce, _, _) do
raise ArgumentError, """
cannot compute gradient for Nx.reduce/4.
If you are computing the sum, product, or similar, use the \
appropriate Nx functions instead. If you have a custom usage \
of reduce, consider using stop_grad/1 (making it equivalent \
to the identity function) or using custom_grad/2 (giving it \
a proper gradient implementation).
"""
end
defp cached_grad(:window_product, _, _) do
raise ArgumentError, """
cannot compute gradient for Nx.window_product/3.
Consider using stop_grad/1 (making it equivalent \
to the identity function) or using custom_grad/2 (giving it \
a proper gradient implementation).
"""
end
defp cached_grad(:reduce_window, _, _) do
raise ArgumentError, """
cannot compute gradient for Nx.reduce_window/5.
If you are computing the sum, max, or similar of a window, use \
the appropriate Nx functions instead. If you have a custom usage \
of reduce_window, consider using stop_grad/1 (making it equivalent \
to the identity function) or using custom_grad/2 (giving it \
a proper gradient implementation).
"""
end
@error [:map]
defp cached_grad(op, _, _) when op in @error do
raise ArgumentError, """
cannot compute gradient for Nx.#{op}.
Consider using stop_grad/1 to make the gradient equivalent to \
the identity function or use custom_grad/2 to define a proper \
gradient implementation.
"""
end
@constants [:tensor, :parameter, :eye, :iota, :random_uniform, :random_normal] ++
[:all?, :any?, :argmax, :argmin] ++
[:bitwise_and, :bitwise_or, :bitwise_xor, :bitwise_not] ++
[:logical_and, :logical_or, :logical_xor, :logical_not] ++
[:left_shift, :right_shift, :count_leading_zeros, :population_count] ++
[:floor, :round, :ceil, :sign] ++
[:equal, :greater, :greater_equal, :less, :less_equal, :not_equal]
defp cached_grad(op, _, _) when op in @constants do
[]
end
defp cached_grad(op, _, _) do
raise ArgumentError, """
gradient not yet implemented for Nx.#{op}.
Please open up an issue so we can implement the missing gradient.
"""
end
## Windows
defp reduce_prod_tree(_, _, 0, non_axes_shape),
do: Nx.broadcast(Expr.tensor(1.0), non_axes_shape)
defp reduce_prod_tree(x, axis, 1, _), do: Nx.squeeze(x, axes: [axis])
defp reduce_prod_tree(x, axis, axis_value, non_axes_shape) do
n1 = div(axis_value + 1, 2)
n2 = axis_value - n1
x1_start_indices = List.duplicate(0, Nx.rank(x.shape))
x1_limit_indices = x.shape |> put_elem(axis, n1) |> Tuple.to_list()
x2_start_indices = x1_start_indices |> List.update_at(axis, fn _ -> n1 end)
x2_limit_indices =
x1_limit_indices |> List.update_at(axis, fn _ -> elem(x.shape, axis) - n1 end)
x1 = Nx.slice(x, x1_start_indices, x1_limit_indices)
x2 = Nx.slice(x, x2_start_indices, x2_limit_indices)
x2 =
if n2 != n1 do
paddings = List.duplicate({0, 0, 0}, Nx.rank(x.shape))
paddings = List.update_at(paddings, axis, fn _ -> {0, 1, 0} end)
Nx.pad(x2, 1, paddings)
else
x2
end
new_operand = Nx.multiply(x1, x2)
new_axis_value = elem(new_operand.shape, 0)
reduce_prod_tree(new_operand, axis, new_axis_value, non_axes_shape)
end
## Conv
defp grad_conv(x, y, opts, ans, g, cache) do
g = Nx.broadcast(g, ans)
input_permutation = opts[:input_permutation]
kernel_permutation = opts[:kernel_permutation]
output_permutation = opts[:output_permutation]
strides = opts[:strides]
padding = opts[:padding]
lhs_dilation = opts[:input_dilation]
rhs_dilation = opts[:kernel_dilation]
feature_group_size = opts[:feature_group_size]
batch_group_size = opts[:batch_group_size]
[lhs0, lhs1 | lhs_sdim_axes] = input_permutation
[rhs0, rhs1 | rhs_sdim_axes] = kernel_permutation
[_, _ | out_sdim_axes] = output_permutation
t_lhs_permutation = conv_spec_transpose(input_permutation)
t_rhs_permutation = conv_spec_transpose(kernel_permutation)
t_out_permutation = conv_spec_transpose(output_permutation)
lhs_sdims = conv_sdims(x.shape, lhs_sdim_axes)
rhs_sdims = conv_sdims(y.shape, rhs_sdim_axes)
out_sdims = conv_sdims(g.shape, out_sdim_axes)
rhs =
cond do
feature_group_size > 1 ->
y = reshape_axis_out_of(rhs0, feature_group_size, y)
reshape_axis_into(rhs0, rhs1, y)
batch_group_size > 1 ->
y = reshape_axis_out_of(rhs0, batch_group_size, y)
reshape_axis_into(rhs0, rhs1, y)
true ->
y
end
lhs_padding =
conv_lhs_padding(
lhs_sdims,
rhs_sdims,
strides,
out_sdims,
padding,
lhs_dilation,
rhs_dilation
)
rhs_padding =
conv_rhs_padding(
lhs_sdims,
rhs_sdims,
strides,
out_sdims,
padding,
lhs_dilation,
rhs_dilation
)
lhs_feature_group_size =
if batch_group_size > 1, do: batch_group_size, else: feature_group_size
{rhs_feature_group_size, rhs_batch_group_size} =
cond do
batch_group_size > 1 ->
{batch_group_size, 1}
feature_group_size > 1 ->
{1, feature_group_size}
true ->
{1, 1}
end
revd_weights = Nx.reverse(rhs, axes: rhs_sdim_axes)
gx =
Nx.conv(g, revd_weights,
strides: lhs_dilation,
padding: lhs_padding,
input_dilation: strides,
kernel_dilation: rhs_dilation,
input_permutation: output_permutation,
kernel_permutation: t_rhs_permutation,
output_permutation: input_permutation,
feature_group_size: lhs_feature_group_size,
batch_group_size: 1
)
gx =
if batch_group_size > 1 do
gx = reshape_axis_out_of(lhs1, batch_group_size, gx)
reshape_axis_into(lhs1, lhs0, gx)
else
gx
end
gy =
Nx.conv(x, g,
strides: rhs_dilation,
padding: rhs_padding,
input_dilation: lhs_dilation,
kernel_dilation: strides,
input_permutation: t_lhs_permutation,
kernel_permutation: t_out_permutation,
output_permutation: t_rhs_permutation,
feature_group_size: rhs_feature_group_size,
batch_group_size: rhs_batch_group_size
)
{dx, cache} = to_grad(x, gx, cache)
{dy, cache} = to_grad(y, gy, cache)
{maybe_add(dx, dy), cache}
end
defp conv_spec_transpose([dim0, dim1 | rest]), do: [dim1, dim0 | rest]
defp conv_sdims(shape, axes) do
axes
|> Enum.map(&elem(shape, &1))
|> List.to_tuple()
end
defp conv_lhs_padding(
lhs_sdims,
rhs_sdims,
strides,
out_sdims,
padding,
lhs_dilation,
rhs_dilation
) do
lhs_dilated_padding_config = Enum.map(lhs_dilation, &{0, 0, &1 - 1})
rhs_dilated_padding_config = Enum.map(rhs_dilation, &{0, 0, &1 - 1})
out_dilated_padding_config = Enum.map(strides, &{0, 0, &1 - 1})
lhs_dilated_shape = Tuple.to_list(Nx.Shape.pad(lhs_sdims, lhs_dilated_padding_config))
rhs_dilated_shape = Tuple.to_list(Nx.Shape.pad(rhs_sdims, rhs_dilated_padding_config))
out_dilated_shape = Tuple.to_list(Nx.Shape.pad(out_sdims, out_dilated_padding_config))
# TODO: Use Enum.zip_with on Elixir v1.12
pad_before =
rhs_dilated_shape
|> Enum.zip(padding)
|> Enum.map(fn {s, {lo, _}} -> s - lo - 1 end)
pad_after =
[lhs_dilated_shape, rhs_dilated_shape, out_dilated_shape, pad_before]
|> Enum.zip()
|> Enum.map(fn {l, r, o, p} -> l + r - 1 - o - p end)
Enum.zip(pad_before, pad_after)
end
defp conv_rhs_padding(
lhs_sdims,
rhs_sdims,
strides,
out_sdims,
padding,
lhs_dilation,
rhs_dilation
) do
lhs_dilated_padding_config = Enum.map(lhs_dilation, &{0, 0, &1 - 1})
rhs_dilated_padding_config = Enum.map(rhs_dilation, &{0, 0, &1 - 1})
out_dilated_padding_config = Enum.map(strides, &{0, 0, &1 - 1})
lhs_dilated_shape = Tuple.to_list(Nx.Shape.pad(lhs_sdims, lhs_dilated_padding_config))
rhs_dilated_shape = Tuple.to_list(Nx.Shape.pad(rhs_sdims, rhs_dilated_padding_config))
out_dilated_shape = Tuple.to_list(Nx.Shape.pad(out_sdims, out_dilated_padding_config))
# TODO: Use Enum.zip_with on Elixir v1.12
total_in_pad =
[out_dilated_shape, rhs_dilated_shape, lhs_dilated_shape]
|> Enum.zip()
|> Enum.map(fn {o, r, l} -> o + r - l - 1 end)
padding
|> Enum.zip(total_in_pad)
|> Enum.map(fn {{lo, _}, hi} -> {lo, hi - lo} end)
end
defp reshape_axis_into(src, dst, x) do
perm = for i <- 0..(Nx.rank(x.shape) - 1), i != src, do: i
perm = List.insert_at(perm, dst, src)
new_shape = Tuple.delete_at(x.shape, src)
new_val = elem(new_shape, dst) * elem(x.shape, src)
new_shape = put_elem(new_shape, dst, new_val)
Nx.reshape(Nx.transpose(x, axes: perm), new_shape)
end
defp reshape_axis_out_of(src, size1, x) do
size2 = div(elem(x.shape, src), size1)
new_shape = x.shape
new_shape = put_elem(new_shape, src, size1)
new_shape = Tuple.insert_at(new_shape, src + 1, size2)
Nx.reshape(x, new_shape)
end
## Helpers
defp grad_reduce(x, opts, g, cache, fun) do
axes = opts[:axes]
keep_axes = opts[:keep_axes]
g =
if keep_axes || !axes do
Nx.broadcast(g, x)
else
axes = Nx.axes(x.shape) -- axes
Nx.broadcast(g, x, axes: axes)
end
to_grad(x, fun.(g), cache)
end
defp hi_pads(pos, g_shape, x_shape, [start | starts], [stride | strides]) do
g_dim = elem(g_shape, pos)
x_dim = elem(x_shape, pos)
val = x_dim - (start + (1 + stride * (g_dim - 1)))
[val | hi_pads(pos + 1, g_shape, x_shape, starts, strides)]
end
defp hi_pads(_, _, _, [], []), do: []
defp binary_broadcast(x, y, ans) do
{Nx.broadcast(x, ans), Nx.broadcast(y, ans)}
end
defp maybe_add(x, %T{data: %Expr{op: :negate, args: [y]}} = negated_y) do
cond do
zero?(x) -> negated_y
zero?(y) -> x
true -> Nx.subtract(x, y)
end
end
defp maybe_add(x, y) do
cond do
zero?(x) -> y
zero?(y) -> x
true -> Nx.add(x, y)
end
end
defp maybe_multiply(x, %T{data: %Expr{op: :divide, args: [y, z]}} = division) do
cond do
one?(x) -> division
one?(y) -> Nx.divide(x, z)
true -> Nx.multiply(x, division)
end
end
defp maybe_multiply(x, y) do
cond do
one?(y) -> x
one?(x) -> y
true -> Nx.multiply(x, y)
end
end
@zero Nx.tensor(0.0)
@one Nx.tensor(1.0)
defp zero?(expr), do: match?(%T{data: %Expr{op: :tensor, args: [@zero]}}, expr)
defp one?(expr), do: match?(%T{data: %Expr{op: :tensor, args: [@one]}}, expr)
defp up_to(i, n) when i < n, do: [i | up_to(i + 1, n)]
defp up_to(_, _), do: []
defp argsort(list), do: list |> Enum.with_index() |> Enum.sort() |> Enum.map(&elem(&1, 1))
end
|
lib/nx/defn/grad.ex
| 0.712632
| 0.585397
|
grad.ex
|
starcoder
|
defmodule HighRoller.Parser do
@moduledoc """
Documentation for the Parser module. This module contains all the code for parsing strings and turning them into the results of dice rolls.
"""
@doc """
Parses a roll string into a final result
## Examples
iex> HighRoller.Parser.parse("3d1")
3
"""
def parse(roll_string) do
case parse_with_results(roll_string) do
%{total: total} -> total
:error -> :error
end
end
@doc """
Parses a roll string and returns both the final result and the results of each of the rolls
## Examples
iex> HighRoller.Parser.parse_with_results("3d1+1")
%{total: 4, full_results: [[1, 1, 1], "+", 1]}
"""
def parse_with_results(roll_string) do
try do
roll_string
|> String.replace(" ", "")
|> parse_operators
|> roll_dice_chunks()
|> resolve_integers()
|> create_result_map()
rescue
ArithmeticError -> :error
end
end
defp roll_dice_chunks([]), do: []
defp roll_dice_chunks([roll_string | remaining]) do
if String.match?(roll_string, ~r/[0-9]+d[0-9]+/) do
[parse_single_roll(roll_string) | roll_dice_chunks(remaining)]
else
[roll_string | roll_dice_chunks(remaining)]
end
end
defp parse_single_roll(roll_string) do
[num_of_dice, back_half] = String.split(roll_string, "d", parts: 2)
[sides, options] = parse_options(back_half)
{num_of_dice, _} = Integer.parse(num_of_dice)
{sides, _} = Integer.parse(sides)
HighRoller.roll_with_options(num_of_dice, sides, options)
end
defp parse_options(back_half) when is_bitstring(back_half), do: parse_options(String.split(back_half, ~r/kh|kl|k|dh|dl|d/, include_captures: true))
defp parse_options([sides_string]), do: [sides_string, []]
defp parse_options([sides_string, option_name, option_number]) do
{actual_number, _} = Integer.parse(option_number)
[sides_string, [{String.to_atom(option_name), actual_number}]]
end
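# Example: for "4d6kh3" the back half is "6kh3", which splits into
# ["6", "kh", "3"] and yields ["6", [kh: 3]] - i.e. roll d6s and, in
# standard dice notation, keep the highest 3.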
defp parse_operators(roll_string) do
Regex.split(~r/\+|\-/, roll_string, include_captures: true)
end
defp resolve_integers([]), do: []
defp resolve_integers([chunk | remaining]) when is_bitstring(chunk) do
case Integer.parse(chunk) do
{result, ""} -> [result | resolve_integers(remaining)]
{_, _} -> [chunk | resolve_integers(remaining)]
:error -> [chunk | resolve_integers(remaining)]
end
end
defp resolve_integers([chunk | remaining]), do: [chunk | resolve_integers(remaining)]
defp resolve_roll_groups([]), do: []
defp resolve_roll_groups([chunk | remaining]) when is_list(chunk) do
[Enum.sum(chunk) | resolve_roll_groups(remaining)]
end
defp resolve_roll_groups([chunk | remaining]), do: [chunk | resolve_roll_groups(remaining)]
defp create_result_map(chunks) do
total = chunks
|> resolve_roll_groups()
|> combine()
|> Enum.sum()
%{total: total, full_results: chunks}
end
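# combine/1 folds "+" and "-" strictly left to right (no precedence), e.g.
# [4, "+", 2, "-", 1] -> [6, "-", 1] -> [5].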
defp combine([first_number, "+", second_number | remaining]), do: combine([first_number + second_number | remaining])
defp combine([first_number, "-", second_number | remaining]), do: combine([first_number - second_number | remaining])
defp combine(chunks), do: chunks
end
|
lib/high_roller/parser.ex
| 0.663342
| 0.548915
|
parser.ex
|
starcoder
|
defmodule Distributed.Replicator.Node do
@moduledoc """
The functions in the `Distributed.Replicator.Node` module help to replicate an event by processing it on all nodes in the network.
In `Distributed.Replicator.Node`, functions execute processes in parallel.
**Note**: Since this module is only a thin wrapper around the `Node` module, it is not documented in detail here.
Please check the documentation of the `Node` module; you can think of these functions as running on every single node
without specifying nodes, replying with a list of the results of the processes.
"""
use GenServer
@doc false
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__.process_id())
end
@doc false
def init(_opts \\ []) do
{:ok, %{}}
end
@doc false
def process_id() do
Distributed.Replicator.Node
end
@doc """
Monitors the status of nodes.
If flag is true, monitoring is turned on. If flag is false, monitoring is turned off.
For more information, see `Node.monitor/2` and `Node.monitor/3`.
"""
@spec monitor(flag :: boolean, opts :: [any]) :: [true]
def monitor(flag, opts \\ []) do
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.monitor(&1, flag, opts)))
end
@doc """
Tries to set up connections to nodes.
"""
@spec ping(opts :: [any]) :: [:pong | :pang]
def ping(opts \\ []) do
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.ping(&1)))
end
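# Example (hedged sketch): on a network with two connected peers,
#
#     Distributed.Replicator.Node.ping()
#     #=> [:pong, :pong]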
@doc """
Returns the PIDs of new processes started by the application of `fun` on nodes.
See `Node.spawn/2` and `Node.spawn/3`.
"""
@spec spawn(fun :: (() -> any), opts :: [any]) :: [pid | {pid, reference}]
def spawn(fun, opts \\ [])
when is_function(fun)
do
spawn_opts = Keyword.get(opts, :spawn_opts, [])
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.spawn(&1, fun, spawn_opts)))
end
@doc """
Returns the PIDs of new processes started by the application of `module.fun(args)` on nodes.
See `Node.spawn/4` and `Node.spawn/5`.
"""
@spec spawn(module :: module, fun :: atom, args :: [any], opts :: [any]) :: [pid | {pid, reference}]
def spawn(module, fun, args, opts \\ [])
when is_atom(module) and is_atom(fun)
do
spawn_opts = Keyword.get(opts, :spawn_opts, [])
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.spawn(&1, module, fun, args, spawn_opts)))
end
@doc """
Returns the PIDs of new linked processes started by the application of `fun` on nodes.
See `Node.spawn_link/2`.
"""
@spec spawn_link(fun :: (() -> any), opts :: [any]) :: [pid]
def spawn_link(fun, opts \\ [])
when is_function(fun)
do
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.spawn_link(&1, fun)))
end
@doc """
Returns the PIDs of new linked processes started by the application of `module.function(args)` on nodes.
See `Node.spawn_link/4`.
"""
@spec spawn_link(module :: module, fun :: atom, args :: [any], opts :: [any]) :: [pid]
def spawn_link(module, fun, args, opts \\ [])
when is_atom(module) and is_atom(fun)
do
Distributed.Parallel.map(Distributed.Node.list(opts), &(Node.spawn_link(&1, module, fun, args)))
end
end
|
lib/distributed/replicator/node.ex
| 0.719186
| 0.631339
|
node.ex
|
starcoder
|
defmodule ExHal.Link do
@moduledoc """
A Link is a directed reference from one resource to another resource. They
are found in the `_links` and `_embedded` sections of a HAL document
"""
use Expat
alias ExHal.{Document, NsReg}
@typedoc """
A link. Links may be simple or dereferenced (from the embedded section).
"""
@type t :: %__MODULE__{
rel: String.t(),
href: String.t(),
templated: boolean(),
name: String.t(),
target: Document.t()
}
defstruct [:rel, :href, :templated, :name, :target]
@doc """
Build new link struct from _links entry.
"""
def from_links_entry(rel, a_map) do
href = Map.fetch!(a_map, "href")
templated = Map.get(a_map, "templated", false)
name = Map.get(a_map, "name", nil)
%__MODULE__{rel: rel, href: href, templated: templated, name: name}
end
@doc """
Build new link struct from embedded doc.
"""
def from_embedded(rel, embedded_doc) do
{:ok, href} = ExHal.url(embedded_doc, fn _doc -> {:ok, nil} end)
%__MODULE__{rel: rel, href: href, templated: false, target: embedded_doc}
end
@doc """
Returns target url, expanded with `vars` if any are provided.
Returns `{:ok, "fully_qualified_url"}`
`:error` if link target is anonymous
"""
def target_url(a_link, vars \\ %{}) do
case a_link do
%{href: nil} ->
:error
%{templated: true} ->
{:ok, UriTemplate.expand(a_link.href, vars)}
_ ->
{:ok, a_link.href}
end
end
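# Example (hedged sketch with a hypothetical templated link):
#
#     link = %ExHal.Link{rel: "search", href: "http://example.com/{?q}", templated: true}
#     ExHal.Link.target_url(link, %{q: "hal"})
#     #=> {:ok, "http://example.com/?q=hal"}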
@doc """
Returns target url, expanded with `vars` if any are provided.
Returns `"fully_qualified_url"` or raises exception
"""
def target_url!(a_link, vars \\ %{}) do
{:ok, url} = target_url(a_link, vars)
url
end
@doc """
Expands "curie"d link rels using the namespaces found in the `curies` link.
Returns `[%Link{}, ...]` a link struct for each possible variation of the input link
"""
def expand_curie(link, namespaces) do
NsReg.variations(namespaces, link.rel)
|> Enum.map(fn rel -> %{link | rel: rel} end)
end
def embedded?(link) do
!!link.target
end
@doc """
**Deprecated**
See `to_json_map/1`
"""
def to_json_hash(link), do: to_json_map(link)
@doc """
Returns a map that matches the shape of the intended JSON output.
"""
def to_json_map(link) do
if embedded?(link) do
Document.to_json_hash(link.target)
else
%{"href" => link.href}
|> add_templated(link)
|> add_name(link)
end
end
defpat simple_link(%{target: nil})
defpat unnamed_link(%{name: nil})
defpat embedded_link(%{target: %{}})
@doc """
Returns true if the links are equivalent.
Comparison rules (in every case the rels must be equal and the hrefs non-nil):
- simple links are equal if their hrefs and names are equal
- embedded links are equal if their hrefs are equal
- a simple and an embedded link are equal if their hrefs are equal
"""
@spec equal?(__MODULE__.t(), __MODULE__.t()) :: boolean()
def equal?(%{href: nil}, _), do: false
def equal?(_, %{href: nil}), do: false
def equal?(link_a = simple_link(), link_b = simple_link()) do
link_a.rel == link_b.rel
&& link_a.href == link_b.href
&& link_a.name == link_b.name
end
def equal?(link_a = embedded_link(), link_b = embedded_link()) do
# both are embedded, so rels and hrefs are comparable
link_a.rel == link_b.rel
&& link_a.href == link_b.href
end
def equal?(link_a = simple_link(), link_b = embedded_link()), do: equal?(link_b, link_a)
def equal?(link_a = embedded_link(), link_b = simple_link()) do
# one embedded, one simple: rels and hrefs are comparable
link_a.rel == link_b.rel
&& link_a.href == link_b.href
end
# private functions
defp add_templated(json_map, %{templated: true}) do
Map.merge(json_map, %{"templated" => true})
end
defp add_templated(json_map, _), do: json_map
defp add_name(json_map, %{name: name}) when is_binary(name) do
Map.merge(json_map, %{"name" => name})
end
defp add_name(json_map, _), do: json_map
end
|
lib/exhal/link.ex
| 0.872714
| 0.542015
|
link.ex
|
starcoder
|
defmodule Bricks.Connector.Tcp do
@moduledoc """
A Connector for TCP sockets, using `:gen_tcp`
Belongs to application `:bricks`
## Create Options
### All
Ordering: Required first, then alphabetical
Option | Type(s) | Default | Raw `gen_tcp` option
:--------------------- | :---------------- | :------------- | :-----------------------
`:host` | `binary` | `(REQUIRED)` | `(POSITIONAL)`
`:port` | `pos_integer` | `(REQUIRED)` | `(POSITIONAL)`
`:connect_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:bam_window` | `Socket.window` | `10` | `(NONE)`
`:active` | `Socket.active` | `true` | `:active`
`:bind_to_device` | `binary` | `(NONE)` | `:bind_to_device`
`:buffer` | `non_neg_integer` | `(UNKNOWN)` | `:buffer`
`:delay_send?` | `boolean` | `false` | `:delay_send`
`:deliver` | `:port`, `:term` | `(UNKNOWN)` | `:deliver`
`:dont_route?` | `boolean` | `(false?)` | `:dontroute`
`:exit_on_close?` | `boolean` | `true` | `:exit_on_close`
`:header_size` | `non_neg_integer` | `(NONE)` | `:header`
`:high_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:high_msgq_watermark`
`:high_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:high_watermark`
`:ipv4?` | `boolean` | `true` | `:inet`, `:ipv6_v6only`
`:ipv6?` | `boolean` | `false` | `:inet6`, `:ipv6_v6only`
`:keepalive?` | `boolean` | `false` | `:keepalive`
`:line_delimiter` | `char` | `?\\n` | `:line_delimiter`
`:linger`              | `linger`          | `(NONE)`       | `:linger`
`:local_port` | `Socket.port_num` | `0` (random) | `:port`
`:low_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:low_msgq_watermark`
`:low_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:low_watermark`
`:network_interface`   | `Socket.host`     | `(NONE)`       | `:ip`, `:ifaddr`
`:network_namespace` | `binary` | `(NONE)` | `:netns`
`:nodelay?` | `boolean` | `false` | `:nodelay`
`:packet_type` | `packet_type` | `:raw` | `:packet`
`:packet_size` | `pos_integer` | `0` (no limit) | `:packet_size`
`:priority` | `non_neg_integer` | `(NONE)` | `:priority`
`:raw_fd` | `non_neg_integer` | `(NONE)` | `:fd`
`:receive_buffer` | `non_neg_integer` | `(NONE)` | `:recbuf`
`:receive_tclass?` | `boolean` | `false?` | `:recvtclass`
`:receive_tos?` | `boolean` | `false?` | `:recvtos`
`:receive_ttl?` | `boolean` | `false?` | `:recvttl`
`:receive_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:send_timeout` | `timeout` | `5000` | `:send_timeout`
`:reuse_addr?` | `boolean` | `false` | `:reuseaddr`
`:send_timeout_close?` | `boolean` | `true` | `:send_timeout_close`
`:show_econnreset?` | `boolean` | `false` | `:show_econnreset`
`:send_buffer` | `non_neg_integer` | `(NONE)` | `:sndbuf`
`:tos` | `non_neg_integer` | `(NONE)` | `:tos`
`:tclass` | `non_neg_integer` | `(NONE)` | `:tclass`
`:tcp_module` | `atom` | `(SEE DOCS)` | `:tcp_module`
### Destination Selection
Option | Type | Default | Raw `gen_tcp` option
:------ | :------------ | :----------- | :-------------------
`:host` | `binary` | `(REQUIRED)` | `(POSITIONAL)`
`:port` | `pos_integer` | `(REQUIRED)` | `(POSITIONAL)`
### IP Version Selection
Option | Type | Default | Raw `gen_tcp` option
:------- | :-------- | :------ | :-----------------------
`:ipv4?` | `boolean` | `true` | `:inet`, `:ipv6_v6only`
`:ipv6?` | `boolean` | `false` | `:inet6`, `:ipv6_v6only`
These options toggle which IP versions may be used. At least one
must be `true` or you will get an error.
### Timeouts
Option | Type | Default | Raw `gen_tcp` option
:----------------- | :-------- | :------ | :-------------------
`:connect_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:receive_timeout` | `timeout` | `5000` | `(POSITIONAL)`
`:send_timeout` | `timeout` | `5000` | `:send_timeout`
These toggle how long you are prepared to wait for an operation to
complete before a timeout error is returned. They are standard
erlang `timeout` values: non-negative integers or `:infinity`.
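A hedged sketch of these options in use (assuming a `new/1`-style
constructor for this connector; the actual create function and the exact
shape of its argument may differ - check this module's functions):

```elixir
tcp =
  Bricks.Connector.Tcp.new(
    host: "localhost",
    port: 4040,
    connect_timeout: 10_000,
    receive_timeout: 10_000,
    send_timeout: 10_000
  )
```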
### Activity Control
Option | Type | Default | Raw `gen_tcp` option
:------------ | :-------------- | :------ | :-------------------
`:active` | `Socket.active` | `true` | `:active`
`:bam_window` | `Socket.window` | `10` | `(NONE)`
See discussion on socket activity modes in the `Bricks.Socket`
module documentation for more information.
### Erlang Options
Option | Type | Default | Raw `gen_tcp` option
:--------------------- | :---------------- | :----------- | :---------------------
`:buffer` | `non_neg_integer` | `(UNKNOWN)` | `:buffer`
`:delay_send?` | `boolean` | `false` | `:delay_send`
`:exit_on_close?` | `boolean` | `true` | `:exit_on_close`
`:header_size` | `non_neg_integer` | `(NONE)` | `:header`
`:high_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:high_msgq_watermark`
`:high_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:high_watermark`
`:line_delimiter` | `char` | `?\\n` | `:line_delimiter`
`:low_msgq_watermark` | `pos_integer` | `(UNKNOWN)` | `:low_msgq_watermark`
`:low_watermark` | `non_neg_integer` | `(UNKNOWN)` | `:low_watermark`
`:packet_type` | `packet_type` | `:raw` | `:packet`
`:send_timeout_close?` | `boolean` | `true` | `:send_timeout_close`
`:show_econnreset?` | `boolean` | `false` | `:show_econnreset`
`:tcp_module` | `atom` | `(SEE DOCS)` | `:tcp_module`
`:tcp_opts` | `proplist` | `[]` | `(ANY)`
#### `:buffer`
The size of the user-level buffer used by the driver. Not to be
confused with options `:send_buffer` and `:receive_buffer`, which
correspond to the Kernel socket buffers. For TCP it is recommended
to have val(buffer) >= val(recbuf) to avoid performance issues
because of unnecessary copying. However, as the size set for recbuf
usually becomes larger, you are encouraged to use getopts/2 to
analyze the behavior of your operating system.
Note that this is also the maximum amount of data that can be
received from a single `recv` call. If you are using higher than
normal MTU consider setting buffer higher.
#### `:delay_send?`
Normally, when an Erlang process sends to a socket, the driver tries
to send the data immediately. If that fails, the driver uses any
means available to queue up the message to be sent whenever the
operating system says it can handle it. Setting `delay_send: true`
makes all messages queue up. The messages sent to the network are
then larger but fewer. The option affects the scheduling of send
requests versus Erlang processes instead of changing any real
property of the socket. The option is implementation-specific.
#### `:exit_on_close?`
The only reason to set it to false is if you want to continue
sending data to the socket after a close is detected, for example,
if the peer uses `:gen_tcp.shutdown/2` to shut down the write side.
#### `:header_size`
This option is only meaningful if option binary was specified when
the socket was created. If option header is specified, the first
Size number bytes of data received from the socket are elements of a
list, and the remaining data is a binary specified as the tail of
the same list. For example, if set to `2`, the data received matches
`[byte1,byte2|binary]`
#### `:high_msgq_watermark`
The socket message queue is set to a busy state when the amount of
data on the message queue reaches this limit. Notice that this limit
only concerns data that has not yet reached the ERTS internal socket
implementation. Defaults to `8 kB`.
Senders of data to the socket are suspended if either the socket
message queue is busy or the socket itself is busy.
For more information, see options `:low_msgq_watermark`,
`:high_watermark`, and `:low_watermark`.
Notice that distribution sockets disable the use of
`:high_msgq_watermark` and `:low_msgq_watermark`. Instead use the
distribution buffer busy limit, which is a similar feature.
#### `:high_watermark`
The socket is set to a busy state when the amount of data queued
internally by the ERTS socket implementation reaches this
limit. Defaults to `8 kB`.
Senders of data to the socket are suspended if either the socket
message queue is busy or the socket itself is busy.
For more information, see options `:low_watermark`,
`:high_msgq_watermark`, and `:low_msgq_watermark`.
#### `:line_delimiter`
Sets the line delimiting character for line-oriented protocols
(`:line`). Defaults to `?\n`.
#### `:low_msgq_watermark`
If the socket message queue is in a busy state, the socket message
queue is set in a not busy state when the amount of data queued in
the message queue falls below this limit. Notice that this limit
only concerns data that has not yet reached the ERTS internal socket
implementation. Defaults to `4 kB`.
Senders that are suspended because of either a busy message queue or
a busy socket are resumed when the socket message queue and the
socket are not busy.
For more information, see options `:high_msgq_watermark`,
`:high_watermark`, and `:low_watermark`.
Notice that distribution sockets disable the use of
`:high_msgq_watermark` and `:low_msgq_watermark`. Instead they use
the distribution buffer busy limit, which is a similar feature.
#### `:low_watermark`
If the socket is in a busy state, the socket is set in a not busy
state when the amount of data queued internally by the ERTS socket
implementation falls below this limit. Defaults to `4 kB`.
Senders that are suspended because of a busy message queue or a busy
socket are resumed when the socket message queue and the socket are
not busy.
For more information, see options `:high_watermark`,
`:high_msgq_watermark`, and `:low_msgq_watermark`.
#### `:packet_type`
Defines the type of packets to use for a socket. Possible values:
`:raw` | `0`
: No packaging is done.
`1` | `2` | `4`
: Packets consist of a header specifying the number of bytes in the
packet, followed by that number of bytes. The header length can be
one, two, or four bytes, and containing an unsigned integer in
big-endian byte order. Each send operation generates the header,
and the header is stripped off on each receive operation. The
4-byte header is limited to 2 GB.
`:asn1` | `:cdr` | `:sunrm` | `:fcgi` | `:tpkt` | `:line`
: These packet types only have effect on receiving. When sending a
packet, it is the responsibility of the application to supply a
correct header. On receiving, however, one message is sent to the
controlling process for each complete packet received, and,
similarly, each call to `:gen_tcp.recv/2,3` returns one complete
packet. The header is not stripped off.
The meanings of the packet types are as follows:
- `:asn1` - ASN.1 BER
- `:sunrm` - Sun's RPC encoding
- `:cdr` - CORBA (GIOP 1.1)
- `:fcgi` - Fast CGI
- `:tpkt` - TPKT format [RFC1006]
- `:line` - Line mode, a packet is a line terminated with a newline;
lines longer than the receive buffer are truncated
##### `:http` | `:http_bin`
The Hypertext Transfer Protocol. The packets are returned with the
format according to HttpPacket described in
`:erlang.decode_packet/3` in ERTS. A socket in passive mode returns
`{:ok, packet}` from `:gen_tcp.recv` while an active socket sends
messages like `{http, socket_handle, packet}`.
##### `:httph` | `:httph_bin`
These two types are often not needed, as the socket automatically
switches from `:http`/`:http_bin` to `:httph`/`:httph_bin`
internally after the first line is read. However, there can be
occasions when they are useful, such as parsing trailers from
chunked encoding.
#### `:send_timeout_close?`
Used together with `:send_timeout` to specify whether the socket is to
be automatically closed when the send operation returns
`{:error,:timeout}`. The recommended setting is `true`, which
automatically closes the socket.
#### `:show_econnreset?`
When this option is set to `false`, which is default, an RST
received from the TCP peer is treated as a normal close (as though
a FIN was sent). A caller to `:gen_tcp.recv/2` gets `{:error,
:closed}`. In `active` mode, the controlling process receives a
`{:tcp_closed, socket_handle}` message, indicating that the peer has
closed the connection.
Setting this option to `true` allows you to distinguish between a
connection that was closed normally, and one that was aborted
(intentionally or unintentionally) by the TCP peer. A call to
`:gen_tcp.recv/2` returns `{:error, :econnreset}`. In `active` mode,
the controlling process receives a `{:tcp_error, socket_handle,
:econnreset}` message before the usual `{:tcp_closed,
socket_handle}`, as is the case for any other socket error. Calls to
`:gen_tcp.send/2` also return `{:error, :econnreset}` when it is
detected that a TCP peer has sent an RST.
A connected socket returned from `:gen_tcp.accept/1` inherits the
`:show_econnreset?` setting from the listening socket.
#### `:tcp_module`
Overrides which callback module is used. Defaults to `:inet_tcp` for
IPv4 and `:inet6_tcp` for IPv6.
#### `:tcp_opts`
Raw `gen_tcp`/`inet` options proplist. *Appended* to options.
### OS options
Option | Type | Default | Raw `gen_tcp` option
:------------------- | :---------------- | :------------- | :-------------------
`:bind_to_device` | `binary` | `(NONE)` | `:bind_to_device`
`:deliver` | `:port`, `:term` | `(UNKNOWN)` | `:deliver`
`:dont_route?` | `boolean` | `(false?)` | `:dontroute`
`:keepalive?` | `boolean` | `false` | `:keepalive`
`:linger` | `linger` | `(NONE)` | `:linger`
`:local_port` | `Socket.port_num` | `0` (random) | `:port`
`:network_interface` | `Socket.host`     | `(NONE)`       | `:ip`, `:ifaddr`
`:network_namespace` | `binary` | `(NONE)` | `:netns`
`:nodelay?` | `boolean` | `false` | `:nodelay`
`:packet_size` | `pos_integer` | `0` (no limit) | `:packet_size`
`:priority` | `non_neg_integer` | `(NONE)` | `:priority`
`:raw_fd` | `non_neg_integer` | `(NONE)` | `:fd`
`:receive_buffer` | `non_neg_integer` | `(NONE)` | `:recbuf`
`:receive_tclass?` | `boolean` | `false?` | `:recvtclass`
`:receive_tos?` | `boolean` | `false?` | `:recvtos`
`:receive_ttl?` | `boolean` | `false?` | `:recvttl`
`:reuse_addr?` | `boolean` | `false` | `:reuseaddr`
`:send_buffer` | `non_neg_integer` | `(NONE)` | `:sndbuf`
`:tos` | `non_neg_integer` | `(NONE)` | `:tos`
`:tclass` | `non_neg_integer` | `(NONE)` | `:tclass`
#### `:bind_to_device`
Binds a socket to a specific network interface. This option must be
used in a function call that creates a socket, that is,
`:gen_tcp.connect/3,4`, `:gen_tcp.listen/2`, `:gen_udp.open/1,2`, or
`:gen_sctp.open/0,1,2`.
Unlike `getifaddrs/0`, the interface name is encoded as a binary. In the unlikely
case that a system is using non-7-bit-ASCII characters in network
device names, special care has to be taken when encoding this
argument.
This option uses the Linux-specific socket option `SO_BINDTODEVICE`,
such as in Linux kernel 2.0.30 or later, and therefore only exists
when the runtime system is compiled for such an operating system.
Before Linux 3.8, this socket option could be set but could not be
retrieved with getopts/2. Since Linux 3.8, it is readable.
The virtual machine also needs elevated privileges, either running
as superuser or (for Linux) having capability `CAP_NET_RAW`.
The primary use case for this option is to bind sockets into Linux VRF instances.
#### `:deliver`
When `active: true`, data is delivered in one of two forms, depending on
this option: `:port` delivers `{socket_handle, {:data, [h1..hsz | data]}}`,
while `:term` delivers `{:tcp, socket_handle, [h1..hsz | data]}`.
#### `:dont_route?`
Enables/disables routing bypass for outgoing messages.
#### `:keepalive?`
Enables/disables periodic transmission on a connected socket when no
other data is exchanged. If the other end does not respond, the
connection is considered broken and an error message is sent to the
controlling process. Defaults to disabled.
#### `:linger`
Determines the time-out, in seconds, for flushing unsent data in the
`:gen_tcp.close/1` socket call. If the first component of the value
tuple is false, the second is ignored. This means that
`:gen_tcp.close/1` returns immediately, not waiting for data to be
flushed. Otherwise, the second component is the flushing time-out,
in seconds.
#### `:local_port`
Local port number to use for the outgoing socket.
#### `:network_interface`
If the host has many network interfaces, this option specifies which one to use.
#### `:network_namespace`
Sets a network namespace for the socket. Parameter s a filename
defining the namespace, for example, "/var/run/netns/example",
typically created by command `ip netns add example`. This option
must be used in a function call that creates a socket, that is,
`:gen_tcp.connect/3,4`, `:gen_tcp.listen/2`, `:gen_udp.open/1,2`, or
`:gen_sctp.open/0,1,2`.
This option uses the Linux-specific syscall `setns()`, such as in
Linux kernel 3.0 or later, and therefore only exists when the
runtime system is compiled for such an operating system.
The virtual machine also needs elevated privileges, either running
as superuser or (for Linux) having capability `CAP_SYS_ADMIN`
according to the documentation for `setns(2)`. However, during
testing also `CAP_SYS_PTRACE` and `CAP_DAC_READ_SEARCH` have proven
to be necessary.
Example:
```shell
setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp
```
Notice that the filesystem containing the virtual machine executable
(`beam.smp` in the example) must be local, mounted without flag
`nosetuid`, support extended attributes, and the kernel must support
file capabilities. All this runs out of the box on at least Ubuntu
12.04 LTS, except that SCTP sockets appear to not support network
namespaces.
Namespace is a filename and is encoded and decoded as discussed in
module file, with the following exceptions:
- Emulator flag +fnu is ignored.
- `:inet.getopts/2` for this option returns a binary for the
filename if the stored filename cannot be decoded. This is only
to occur if you set the option using a binary that cannot be
decoded with the emulator's filename encoding:
`:file.native_name_encoding/0`.
#### `:nodelay?`
If `true`, option `TCP_NODELAY` is turned on for the socket, which
means that also small amounts of data are sent immediately.
#### `:packet_size`
Sets the maximum allowed length of the packet body. If the packet
header indicates that the length of the packet is longer than the
maximum allowed length, the packet is considered invalid. The same
occurs if the packet header is too large for the socket receive
buffer.
For line-oriented protocols (`line`, `http*`), option `packet_size`
also guarantees that lines up to the indicated length are accepted
and not considered invalid because of internal buffer limitations.
#### `:priority`
Sets the `SO_PRIORITY` socket level option on platforms where this is
implemented. The behavior and allowed range varies between different
systems. The option is ignored on platforms where it is not
implemented. Use with caution.
#### `:raw_fd`
If a socket has somehow been connected without using gen_tcp, use
this option to pass the file descriptor for it. If
`:network_interface` and/or `:port` options are combined with this
option, the fd is bound to the specified interface and port before
connecting. If these options are not specified, it is assumed that
the fd is already bound appropriately.
#### `:receive_buffer`
The minimum size of the receive buffer to use for the socket. You
are encouraged to use `:inet.getopts/2` to retrieve the size set by
your operating system.
#### `:receive_tclass?`
If set to true activates returning the received `TCLASS` value on
platforms that implements the protocol `IPPROTO_IPV6` option
`IPV6_RECVTCLASS` or `IPV6_2292RECVTCLASS` for the socket. The value
is returned as a `{:tclass,tclass}` tuple regardless of if the
platform returns an `IPV6_TCLASS` or an `IPV6_RECVTCLASS` `CMSG`
value.
For packet oriented sockets that supports receiving ancillary data
with the payload data (gen_udp and gen_sctp), the `TCLASS` value is
returned in an extended return tuple contained in an ancillary data
list. For stream oriented sockets (gen_tcp) the only way to get the
`TCLASS` value is if the platform supports the pktoptions option.
#### `:receive_tos?`
If set to true activates returning the received `TOS` value on
platforms that implements the protocol `IPPROTO_IP` option
`IP_RECVTOS` for the socket. The value is returned as a `{:tos,tos}`
tuple regardless of if the platform returns an `IP_TOS` or an
`IP_RECVTOS` `CMSG` value.
For packet oriented sockets that supports receiving ancillary data
with the payload data (`:gen_udp` and `:gen_sctp`), the `TOS` value
is returned in an extended return tuple contained in an ancillary
data list. For stream oriented sockets (`:gen_tcp`) the only way to
get the TOS value is if the platform supports the `pktoptions` option.
#### `:receive_ttl?`
If set to true activates returning the received `TTL` value on
platforms that implements the protocol `IPPROTO_IP` option
`IP_RECVTTL` for the socket. The value is returned as a `{:ttl,ttl}`
tuple regardless of if the platform returns an `IP_TTL` or an
`IP_RECVTTL` `CMSG` value.
For packet oriented sockets that supports receiving ancillary data
with the payload data (`:gen_udp` and `:gen_sctp`), the `TTL` value
is returned in an extended return tuple contained in an ancillary
data list. For stream oriented sockets (`;gen_tcp`) the only way to
get the `TTL` value is if the platform supports the `pktoptions`
option.
#### `:reuse_addr?`
Allows or disallows local reuse of port numbers. By default, reuse
is disallowed.
#### `:send_buffer`
The minimum size of the send buffer to use for the socket. You are
encouraged to use `getopts/2`, to retrieve the size set by your
operating system.
#### `:tos`
Sets `IP_TOS IP` level options on platforms where this is
implemented. The behavior and allowed range varies between different
systems. The option is ignored on platforms where it is not
implemented. Use with caution.
#### `:tclass`
Sets `IPV6_TCLASS IP` level options on platforms where this is
implemented. The behavior and allowed range varies between different
systems. The option is ignored on platforms where it is not
implemented. Use with caution.
"""
@enforce_keys [
:host,
:port,
:tcp_opts,
:receive_timeout,
:connect_timeout,
:bam_window,
:active
]
defstruct @enforce_keys
alias Bricks.{Connector, Options, Socket, Util}
alias Bricks.Connector.Tcp
alias Bricks.Error.{BadCombo, BadOption, Connect}
import Bricks.Guards
@default_connect_timeout 5000
@default_receive_timeout 5000
@default_send_timeout 5000
@default_tcp_opts []
@default_bam_window 10
@default_active false
## Types
@typedoc "Valid linger value. See docs for info"
@type linger :: {boolean(), non_neg_integer()}
@typedoc "Packet type for inbuilt message parsing facilities"
@type packet_type ::
:raw
| 0
| 1
| 2
| 4
| :asn1
| :cdr
| :sunrm
| :fcgi
| :tpkt
| :line
| :http
| :http_bin
| :httph
| :httph_bin
@typedoc "TCP Connector State"
@type t :: %Tcp{
host: Socket.host(),
port: Socket.port_num(),
tcp_opts: [term()],
receive_timeout: timeout(),
connect_timeout: timeout(),
bam_window: Socket.window(),
active: Socket.active()
}
@typedoc "Options for `create/1`"
@type create_opts :: %{
# Required
:host => binary(),
:port => pos_integer(),
# Optional Socket members
optional(:connect_timeout) => timeout(),
optional(:receive_timeout) => timeout(),
optional(:bam_window) => Socket.window(),
optional(:active) => Socket.active(),
# Optional non-Socket member `:gen_tcp`/`:inet` socket options
optional(:bind_to_device) => binary(),
optional(:buffer) => non_neg_integer(),
optional(:delay_send?) => boolean(),
optional(:deliver) => :port | :term,
optional(:dont_route?) => boolean(),
optional(:exit_on_close?) => boolean(),
optional(:header_size) => non_neg_integer(),
optional(:high_msgq_watermark) => pos_integer(),
optional(:high_watermark) => non_neg_integer(),
optional(:ipv4?) => boolean(),
optional(:ipv6?) => boolean(),
optional(:keepalive?) => boolean(),
optional(:line_delimiter) => char(),
optional(:linger) => {boolean(), pos_integer()},
optional(:local_port) => Socket.port_num(),
optional(:low_msgq_watermark) => pos_integer(),
optional(:low_watermark) => non_neg_integer(),
optional(:network_interface) => binary() | :inet.socket_address(),
optional(:network_namespace) => binary(),
optional(:nodelay?) => boolean(),
optional(:packet_type) => :raw | 1 | 2 | 4,
optional(:packet_size) => pos_integer(),
optional(:priority) => non_neg_integer(),
optional(:raw_fd) => non_neg_integer(),
optional(:receive_buffer) => non_neg_integer(),
optional(:receive_tclass?) => boolean(),
optional(:receive_tos?) => boolean(),
optional(:receive_ttl?) => boolean(),
optional(:reuse_addr?) => boolean(),
optional(:send_timeout) => timeout(),
optional(:send_timeout_close?) => boolean(),
optional(:show_econnreset?) => boolean(),
optional(:send_buffer) => non_neg_integer(),
optional(:tcp_module) => atom(),
optional(:tcp_opts) => [term()],
optional(:tos) => non_neg_integer(),
optional(:tclass) => non_neg_integer()
}
@typedoc "The errors that `create/1` may return"
@type option_error :: BadOption.t() | BadCombo.t()
@spec create(create_opts()) :: {:ok, Connector.t()} | {:error, option_error()}
@doc """
Creates a `Bricks.Connector` which uses this module as a callback
and the provided options to open and configure the socket.
See module documentation for more information about the options
"""
def create(opts) do
with {:ok, tcp_opts} <- tcp_options(opts) do
create_connector(opts, tcp_opts)
end
end
## behaviour impl: Connector
@spec connect(t()) :: {:ok, Socket.t()} | {:error, term()}
@doc false
def connect(%Tcp{host: host, port: port, tcp_opts: opts, connect_timeout: timeout} = tcp) do
case :gen_tcp.connect(host, port, opts, timeout) do
{:error, reason} -> {:error, Connect.new(reason)}
{:ok, socket} -> socket(socket, tcp)
end
end
## Internal helpers
@tcp_table_options [
bind_to_device: {:bind_to_device, &is_binary/1, [:binary]},
buffer: {:buffer, &non_neg_int?/1, [:non_neg_int]},
deliver: {:deliver, &deliver?/1, [:port, :term]},
delay_send?: {:delay_send, &is_boolean/1, [:bool]},
dont_route?: {:dontroute, &is_boolean/1, [:bool]},
exit_on_close?: {:exit_on_close, &is_boolean/1, [:bool]},
header_size: {:header, &non_neg_int?/1, [:non_neg_int]},
high_msgq_watermark: {:high_msgq_watermark, &pos_int?/1, [:pos_int]},
high_watermark: {:high_watermark, &non_neg_int?/1, [:non_neg_int]},
keepalive?: {:keepalive, &is_boolean/1, [:bool]},
line_delimiter: {:line_delimiter, &char?/1, [:char]},
linger: {:linger, &linger?/1, [:see_docs]},
local_port: {:port, &port?/1, [:non_neg_int]},
low_msgq_watermark: {:low_msgq_watermark, &pos_int?/1, [:pos_int]},
low_watermark: {:low_watermark, &non_neg_int?/1, [:non_neg_int]},
network_namespace: {:netns, &is_binary/1, [:binary]},
nodelay?: {:nodelay, &is_boolean/1, [:bool]},
packet_type: {:packet, &packet_type?/1, [:raw, 1, 2, 4]},
packet_size: {:packet_size, &pos_int?/1, [:pos_int]},
priority: {:priority, &non_neg_int?/1, [:non_neg_int]},
raw_fd: {:fd, &non_neg_int?/1, [:non_neg_int]},
receive_buffer: {:recbuf, &non_neg_int?/1, [:non_neg_int]},
receive_tclass?: {:recvtclass, &is_boolean/1, [:bool]},
receive_tos?: {:recvtos, &is_boolean/1, [:bool]},
receive_ttl?: {:recvttl, &is_boolean/1, [:bool]},
reuse_addr?: {:reuseaddr, &is_boolean/1, [:bool]},
send_timeout: {:send_timeout, &timeout?/1, [:infinity, :non_neg_int]},
send_timeout_close?: {:send_timeout_close, &is_boolean/1, [:bool]},
show_econnreset?: {:show_econnreset, &is_boolean/1, [:bool]},
send_buffer: {:sndbuf, &non_neg_int?/1, [:non_neg_int]},
tcp_module: {:tcp_module, &is_atom/1, [:atom]},
tos: {:tos, &non_neg_int?/1, [:non_neg_int]},
tclass: {:tclass, &non_neg_int?/1, [:non_neg_int]}
]
@tcp_custom_options [
:ipv4?,
:ipv6?,
:connect_timeout,
:send_timeout,
:receive_timeout,
:tcp_opts,
:active,
:binary?,
:host,
:port,
:bam_window,
:raw,
:network_interface
]
# network_interface: {:ip, &host?/1, },
@tcp_option_keys @tcp_custom_options ++ Keyword.keys(@tcp_table_options)
defp create_connector(opts, tcp_opts) do
with {:ok, conn_timeout} <-
Options.default_timeout(opts, :connect_timeout, @default_connect_timeout),
{:ok, receive_timeout} <-
Options.default_timeout(opts, :receive_timeout, @default_receive_timeout),
{:ok, bam_window} <-
Options.default(opts, :bam_window, @default_bam_window, &window?/1, [:once, :pos_int]),
{:ok, active} <-
Options.default(opts, :active, @default_active, &active?/1, [:bool, :integer, :once]),
{:ok, host} <- Options.required(opts, :host, &host?/1, [:binary, :ipv4, :ipv6]),
{:ok, port} <- Options.required(opts, :port, &port?/1, [:pos_int]) do
tcp = %Tcp{
host: host,
port: port,
tcp_opts: tcp_opts,
receive_timeout: receive_timeout,
connect_timeout: conn_timeout,
bam_window: bam_window,
active: active
}
{:ok, Connector.new(__MODULE__, tcp)}
end
end
defp tcp_options(opts) do
with {:ok, active} <-
Options.default(opts, :active, @default_active, &active?/1, [:bool, :integer, :once]),
{:ok, mode} <- mode(opts),
{:ok, send_timeout} <-
Options.default_timeout(opts, :send_timeout, @default_send_timeout),
{:ok, tcp_opts} <-
Options.default(opts, :tcp_opts, @default_tcp_opts, &is_list/1, [:proplist]),
:ok <- Options.check_extra_keys(opts, @tcp_option_keys),
{:ok, table} <- Options.table_options(opts, @tcp_table_options),
{:ok, ni} <- network_interface_options(opts),
{:ok, ip} <- ip_opts(opts),
{:ok, raw} <- raw_opts(opts) do
synthetic = [active: active, mode: mode, send_timeout: send_timeout]
{:ok, ip ++ ni ++ table ++ raw ++ synthetic ++ tcp_opts}
end
end
defp network_interface_options(opts) do
case Map.fetch(opts, :network_interface) do
{:ok, ni} ->
case host?(ni) do
true -> {:ok, [{:ip, Util.host_address(ni)}]}
_ -> {:error, BadOption.new(:network_interface, ni, [:binary, :ip])}
end
_ ->
{:ok, []}
end
end
@doc false
def mode(opts) do
Map.get(opts, :binary?, true)
|> case do
true ->
{:ok, :binary}
false ->
{:ok, :list}
binary? ->
{:error, BadOption.new(:binary?, binary?, [:bool])}
end
end
defp socket(socket, %Tcp{} = tcp) do
opts = Map.take(tcp, [:host, :port, :active, :receive_timeout, :bam_window])
try do
with {:error, reason} <- Socket.Tcp.create(socket, opts) do
:ok = :gen_tcp.close(socket)
{:error, reason}
end
rescue
e ->
:gen_tcp.close(socket)
{:error, e}
end
end
defp ip_opts(opts) do
ipv4? = Map.get(opts, :ipv4?, true)
ipv6? = Map.get(opts, :ipv6?, false)
case {ipv4?, ipv6?} do
{true, true} ->
{:ok, [:inet6]}
{true, false} ->
{:ok, [:inet]}
{false, true} ->
{:ok, [:inet6, {:ipv6_v6only, true}]}
{x, _} when not is_boolean(x) ->
{:error, BadOption.new(:ipv4?, ipv4?, [:bool])}
{_, x} when not is_boolean(x) ->
{:error, BadOption.new(:ipv6?, ipv6?, [:bool])}
{false, false} ->
{:error, BadCombo.new(%{ipv4?: ipv4?, ipv6?: ipv6?}, "May not both be false")}
end
end
@doc false
def raw_opts(opts) do
case Map.fetch(opts, :raw) do
{:ok, {protocol, optionnum, valuebin}} -> {:ok, [{:raw, protocol, optionnum, valuebin}]}
{:ok, other} -> {:error, BadOption.new(:raw, other, [:see_docs])}
:error -> {:ok, []}
end
end
end
|
bricks/lib/connectors/tcp.ex
| 0.808483
| 0.806243
|
tcp.ex
|
starcoder
|
defmodule Modbux.Rtu.Master do
@moduledoc """
API for a Modbus RTU Master device.
"""
use GenServer, restart: :transient
alias Modbux.Rtu.{Master, Framer}
alias Modbux.Rtu
alias Circuits.UART
require Logger
@timeout 1000
@speed 115_200
defstruct tty: nil,
timeout: nil,
cmd: nil,
active: false,
uart_opts: nil,
uart_pid: nil,
parent_pid: nil
@doc """
Starts a Modbus RTU Master process.
The following options are available:
* `tty` - defines the serial port to spawn the Master.
* `timeout` - defines slave timeout.
* `active` - (`true` or `false`) specifies whether data is received as
messages (mailbox) or by calling `request/2`.
* `gen_opts` - defines extra options for the Genserver OTP configuration.
* `uart_opts` - defines extra options for the UART configuration (defaults:
[speed: 115200, rx_framing_timeout: 1000]).
The messages (when active mode is true) have the following form:
`{:modbus_rtu, {:slave_response, cmd, values}}`
or
`{:modbus_rtu, {:slave_error, payload, reason}}`
The following are some reasons:
* `:ecrc` - corrupted message (invalid crc).
* `:einval` - invalid function.
* `:eaddr` - invalid memory address requested.
## Example
```elixir
Modbux.Rtu.Master.start_link(tty: "tnt0", active: true, uart_opts: [speed: 9600])
```
"""
@spec start_link(keyword) :: :ignore | {:error, any} | {:ok, pid}
def start_link(params) do
gen_opts = Keyword.get(params, :gen_opts, [])
GenServer.start_link(__MODULE__, {params, self()}, gen_opts)
end
@spec stop(atom | pid | {atom, any} | {:via, atom, any}) :: :ok
def stop(pid) do
GenServer.stop(pid)
end
@doc """
Gets the Master state.
"""
def state(pid) do
GenServer.call(pid, :state)
end
@doc """
Configure the Master serial port.
The following options are available:
* `tty` - defines the serial port to spawn the Master.
* `timeout` - defines slave timeout.
* `active` - (`true` or `false`) specifies whether data is received as
messages (mailbox) or by calling `request/2`.
* `gen_opts` - defines extra options for the Genserver OTP configuration.
* `uart_opts` - defines extra options for the UART configuration.
"""
def configure(pid, params) do
GenServer.call(pid, {:configure, {params, self()}})
end
@doc """
Open the Master serial port.
"""
def open(pid) do
GenServer.call(pid, :open)
end
@doc """
Close the Master serial port.
"""
def close(pid) do
GenServer.call(pid, :close)
end
@doc """
Send a request to Modbus RTU Slave.
`cmd` is one of:
- `{:rc, slave, address, count}` read `count` coils.
- `{:ri, slave, address, count}` read `count` inputs.
- `{:rhr, slave, address, count}` read `count` holding registers.
- `{:rir, slave, address, count}` read `count` input registers.
- `{:fc, slave, address, value}` force single coil.
- `{:phr, slave, address, value}` preset single holding register.
- `{:fc, slave, address, values}` force multiple coils.
- `{:phr, slave, address, values}` preset multiple holding registers.
"""
@spec request(atom | pid | {atom, any} | {:via, atom, any}, tuple()) ::
:ok | {:ok, list()} | {:error, String.t()}
def request(pid, cmd) do
GenServer.call(pid, {:request, cmd})
end
@doc """
Read and parse the last request (if the last request timeouts).
"""
@spec read(atom | pid | {atom, any} | {:via, atom, any}) :: any
def read(pid) do
GenServer.call(pid, :read)
end
def terminate(:normal, _state), do: nil
def terminate(reason, state) do
Logger.error("(#{__MODULE__}) Error: #{inspect(reason)}, state: #{inspect(state)}")
end
# Callbacks
def init({params, parent_pid}) do
active = Keyword.get(params, :active, false)
parent_pid = if active, do: parent_pid
timeout = Keyword.get(params, :timeout, @timeout)
tty = Keyword.fetch!(params, :tty)
Logger.debug("(#{__MODULE__}) Starting Modbux Master at \"#{tty}\"")
uart_opts = Keyword.get(params, :uart_opts, speed: @speed, rx_framing_timeout: @timeout)
{:ok, u_pid} = UART.start_link()
UART.open(u_pid, tty, [framing: {Framer, behavior: :master}, active: false] ++ uart_opts)
Logger.debug("(#{__MODULE__}) Reported UART configuration: \"#{inspect(UART.configuration(u_pid))}\"")
state = %Master{
parent_pid: parent_pid,
tty: tty,
active: active,
uart_pid: u_pid,
timeout: timeout,
uart_opts: uart_opts
}
{:ok, state}
end
def handle_call(:state, _from, state), do: {:reply, state, state}
def handle_call(:read, _from, state) do
res = unless is_nil(state.cmd), do: uart_read(state, state.cmd)
{:reply, res, state}
end
def handle_call(:open, _from, %{uart_pid: u_pid, tty: tty, uart_opts: uart_opts} = state) do
UART.open(u_pid, tty, [framing: {Framer, behavior: :master}, active: false] ++ uart_opts)
{:reply, :ok, state}
end
def handle_call(:close, _from, state) do
UART.close(state.uart_pid)
{:reply, :ok, state}
end
def handle_call({:request, cmd}, _from, state) do
uart_frame = Rtu.pack_req(cmd)
Logger.debug("(#{__MODULE__}) Frame: #{inspect(uart_frame <> <<0x00>>)}")
UART.flush(state.uart_pid)
UART.write(state.uart_pid, uart_frame)
res =
if state.active do
Task.start_link(__MODULE__, :async_uart_read, [state, cmd])
:ok
else
uart_read(state, cmd)
end
{:reply, res, %{state | cmd: cmd}}
end
def handle_call({:configure, {params, parent_pid}}, _from, state) do
active = Keyword.get(params, :active, false)
parent_pid = if active, do: parent_pid
timeout = Keyword.get(params, :timeout, state.timeout)
tty = Keyword.get(params, :tty, state.tty)
uart_opts = Keyword.get(params, :uart_opts, state.uart_opts)
Logger.debug("(#{__MODULE__}) Starting Modbux Master at \"#{tty}\"")
UART.close(state.uart_pid)
UART.stop(state.uart_pid)
{:ok, u_pid} = UART.start_link()
UART.open(u_pid, tty, [framing: {Framer, behavior: :master}, active: false] ++ uart_opts)
new_state = %Master{
parent_pid: parent_pid,
tty: tty,
active: active,
uart_pid: u_pid,
timeout: timeout,
uart_opts: uart_opts
}
{:reply, :ok, new_state}
end
# Catch all clause
def handle_info(msg, state) do
Logger.warn("(#{__MODULE__}) Unknown msg: #{inspect(msg)}")
{:noreply, state}
end
def async_uart_read(state, cmd) do
uart_read(state, cmd) |> notify(state, cmd)
end
defp uart_read(state, cmd) do
case UART.read(state.uart_pid, state.timeout) do
{:ok, ""} ->
Logger.warn("(#{__MODULE__}) Timeout")
{:error, :timeout}
{:ok, {:error, reason, msg}} ->
Logger.warn("(#{__MODULE__}) Error in frame: #{inspect(msg)}, reason: #{inspect(reason)}")
{:error, reason}
{:ok, slave_response} ->
Rtu.parse_res(cmd, slave_response) |> pack_res()
{:error, reason} ->
Logger.warn("(#{__MODULE__}) Error: #{inspect(reason)}")
{:error, reason}
end
end
defp notify({:error, reason}, state, cmd),
do: send(state.parent_pid, {:modbus_rtu, {:slave_error, cmd, reason}})
defp notify({:ok, slave_response}, state, cmd),
do: send(state.parent_pid, {:modbus_rtu, {:slave_response, cmd, slave_response}})
defp notify(:ok, state, cmd), do: send(state.parent_pid, {:modbus_rtu, {:slave_response, cmd, :ok}})
defp pack_res(nil), do: :ok
defp pack_res(value) when is_tuple(value), do: value
defp pack_res(value), do: {:ok, value}
end
|
lib/rtu/master.ex
| 0.886635
| 0.667107
|
master.ex
|
starcoder
|
defmodule UsersService.Infra.MapHelper do
@doc """
Convert map string camelCase keys to underscore_keys
"""
def underscore_keys(nil), do: nil
def underscore_keys(map = %{}) do
map
|> Enum.map(fn {k, v} -> {Macro.underscore(k), underscore_keys(v)} end)
|> Enum.map(fn {k, v} -> {String.replace(k, "-", "_"), v} end)
|> Enum.into(%{})
end
# Walk the list and atomize the keys of
# of any map members
def underscore_keys([head | rest]) do
[underscore_keys(head) | underscore_keys(rest)]
end
def underscore_keys(not_a_map) do
not_a_map
end
@doc """
Convert map string keys to :atom keys
"""
def atomize_keys(nil), do: nil
# Structs don't do enumerable and anyway the keys are already
# atoms
def atomize_keys(struct = %{__struct__: _}) do
struct
end
def atomize_keys(map = %{}) do
map
|> Enum.map(fn {k, v} -> {String.to_atom(k), atomize_keys(v)} end)
|> Enum.into(%{})
end
# Walk the list and atomize the keys of
# of any map members
def atomize_keys([head | rest]) do
[atomize_keys(head) | atomize_keys(rest)]
end
def atomize_keys(not_a_map) do
not_a_map
end
@doc """
Convert map atom keys to strings
"""
def stringify_keys(nil), do: nil
def stringify_keys(map = %{}) do
map
|> Enum.map(fn {k, v} -> {Atom.to_string(k), stringify_keys(v)} end)
|> Enum.into(%{})
end
# Walk the list and stringify the keys of
# of any map members
def stringify_keys([head | rest]) do
[stringify_keys(head) | stringify_keys(rest)]
end
def stringify_keys(not_a_map) do
not_a_map
end
@doc """
Deep merge two maps
"""
def deep_merge(left, right) do
Map.merge(left, right, &deep_resolve/3)
end
# Key exists in both maps, and both values are maps as well.
# These can be merged recursively.
defp deep_resolve(_key, left = %{}, right = %{}) do
deep_merge(left, right)
end
# Key exists in both maps, but at least one of the values is
# NOT a map. We fall back to standard merge behavior, preferring
# the value on the right.
defp deep_resolve(_key, _left, right) do
right
end
end
|
users_service/lib/infra/map_helper.ex
| 0.731155
| 0.465448
|
map_helper.ex
|
starcoder
|
defmodule Sourceror.Comments do
@moduledoc """
Utilities to merge an un-merge comments and quoted expressions.
"""
import Sourceror.Identifier, only: [is_pipeline_op: 1, is_binary_op: 1]
@doc """
Merges the comments into the given quoted expression.
The comments are inserted into the metadata of their closest node. Comments in
the same line of before a node are inserted into the `:leading_comments` field
while comments that are right before an `end` keyword are inserted into the
`:trailing_comments` field.
"""
@spec merge_comments(Macro.t(), list(map)) :: Macro.t()
def merge_comments(quoted, comments) do
{quoted, leftovers} =
Macro.traverse(quoted, comments, &do_merge_comments/2, &merge_leftovers/2)
case leftovers do
[] ->
quoted
_ ->
line = Sourceror.get_line(quoted)
{:__block__, [trailing_comments: leftovers, leading_comments: [], line: line], [quoted]}
end
end
defp do_merge_comments({form, _, _} = quoted, comments)
when not is_pipeline_op(form) and not is_binary_op(form) do
{comments, rest} = gather_leading_comments_for_node(quoted, comments)
quoted = put_comments(quoted, :leading_comments, comments)
{quoted, rest}
end
defp do_merge_comments(quoted, comments), do: {quoted, comments}
defp merge_leftovers({_, _, _} = quoted, comments) do
{comments, rest} = gather_trailing_comments_for_node(quoted, comments)
quoted = put_comments(quoted, :trailing_comments, comments)
{quoted, rest}
end
defp merge_leftovers(quoted, comments), do: {quoted, comments}
defp gather_leading_comments_for_node(quoted, comments) do
line = Sourceror.get_line(quoted, 0)
{comments, rest} =
Enum.reduce(comments, {[], []}, fn
comment, {comments, rest} ->
if comment.line <= line do
{[comment | comments], rest}
else
{comments, [comment | rest]}
end
end)
rest = Enum.sort_by(rest, & &1.line)
comments = Enum.sort_by(comments, & &1.line)
{comments, rest}
end
defp gather_trailing_comments_for_node(quoted, comments) do
line = Sourceror.get_end_line(quoted, 0)
has_closing_line? = Sourceror.has_closing_line?(quoted)
{comments, rest} =
Enum.reduce(comments, {[], []}, fn
comment, {comments, rest} ->
cond do
has_closing_line? and comment.line < line ->
{[comment | comments], rest}
not has_closing_line? and comment.line <= line ->
{[comment | comments], rest}
true ->
{comments, [comment | rest]}
end
end)
rest = Enum.sort_by(rest, & &1.line)
comments = Enum.sort_by(comments, & &1.line)
{comments, rest}
end
defp put_comments(quoted, key, comments) do
Macro.update_meta(quoted, &Keyword.put(&1, key, comments))
end
@doc """
Does the opposite of `merge_comments/2`, it extracts the comments from the
quoted expression and returns both as a `{quoted, comments}` tuple.
"""
@spec extract_comments(Macro.t()) :: {Macro.t(), list(map)}
def extract_comments(quoted, opts \\ []) do
collapse_comments = Keyword.get(opts, :collapse_comments, false)
correct_lines = Keyword.get(opts, :correct_lines, false)
quoted =
if correct_lines do
Sourceror.LinesCorrector.correct(quoted)
else
quoted
end
Macro.postwalk(quoted, [], fn
{_, _, _} = quoted, acc ->
do_extract_comments(quoted, acc, collapse_comments)
other, acc ->
{other, acc}
end)
end
defp do_extract_comments({_, meta, _} = quoted, acc, collapse_comments) do
leading_comments = Keyword.get(meta, :leading_comments, [])
leading_comments_count = length(leading_comments)
leading_comments =
if collapse_comments do
for {comment, i} <- Enum.with_index(leading_comments, 0) do
next_eol_correction = max(0, comment.next_eol_count - 1)
line = max(1, meta[:line] - (leading_comments_count - i - next_eol_correction))
%{comment | line: line}
end
else
leading_comments
end
trailing_comments = Keyword.get(meta, :trailing_comments, [])
trailing_comments =
if collapse_comments do
collapse_trailing_comments(quoted, trailing_comments)
else
trailing_comments
end
acc =
Enum.concat([acc, leading_comments, trailing_comments])
|> Enum.sort_by(& &1.line)
quoted =
Macro.update_meta(quoted, fn meta ->
meta
|> Keyword.delete(:leading_comments)
|> Keyword.delete(:trailing_comments)
end)
{quoted, acc}
end
defp collapse_trailing_comments(quoted, trailing_comments) do
meta = Sourceror.get_meta(quoted)
comments =
Enum.map(trailing_comments, fn comment ->
line = meta[:end_of_expression][:line] || meta[:line]
%{comment | line: line - 2, previous_eol_count: 1}
end)
comments =
case comments do
[first | rest] ->
[%{first | previous_eol_count: 0} | rest]
_ ->
comments
end
case List.pop_at(comments, -1) do
{last, rest} when is_map(last) ->
rest ++ [%{last | next_eol_count: 2}]
_ ->
comments
end
end
end
|
lib/sourceror/comments.ex
| 0.747524
| 0.616532
|
comments.ex
|
starcoder
|
import Kernel, except: [apply: 2]
defmodule Ecto.Query.Builder.Select do
@moduledoc false
alias Ecto.Query.Builder
@doc """
Escapes a select.
It allows tuples, lists and variables at the top level. Inside the
tuples and lists query expressions are allowed.
## Examples
iex> escape({1, 2}, [], __ENV__)
{{:{}, [], [:{}, [], [1, 2]]}, {[], %{}}}
iex> escape([1, 2], [], __ENV__)
{[1, 2], {[], %{}}}
iex> escape(quote(do: x), [x: 0], __ENV__)
{{:{}, [], [:&, [], [0]]}, {[], %{}}}
"""
@spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, {list, %{}}}
def escape(atom, _vars, _env)
when is_atom(atom) and not is_boolean(atom) and atom != nil do
Builder.error! """
#{inspect(atom)} is not a valid query expression, :select expects a query expression or a list of fields
"""
end
def escape(other, vars, env) do
if take?(other) do
{{:{}, [], [:&, [], [0]]}, {[], %{0 => {:any, other}}}}
else
escape(other, {[], %{}}, vars, env)
end
end
# Tuple
defp escape({left, right}, params_take, vars, env) do
escape({:{}, [], [left, right]}, params_take, vars, env)
end
# Tuple
defp escape({:{}, _, list}, params_take, vars, env) do
{list, params_take} = Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env))
expr = {:{}, [], [:{}, [], list]}
{expr, params_take}
end
# Struct
defp escape({:%, _, [name, map]}, params_take, vars, env) do
name = Macro.expand(name, env)
{escaped_map, params_take} = escape(map, params_take, vars, env)
{{:{}, [], [:%, [], [name, escaped_map]]}, params_take}
end
# Map
defp escape({:%{}, _, [{:|, _, [data, pairs]}]}, params_take, vars, env) do
{data, params_take} = escape(data, params_take, vars, env)
{pairs, params_take} = escape_pairs(pairs, params_take, vars, env)
{{:{}, [], [:%{}, [], [{:{}, [], [:|, [], [data, pairs]]}]]}, params_take}
end
# Merge
defp escape({:merge, _, [left, {kind, _, _} = right]}, params_take, vars, env)
when kind in [:%{}, :map] do
{left, params_take} = escape(left, params_take, vars, env)
{right, params_take} = escape(right, params_take, vars, env)
{{:{}, [], [:merge, [], [left, right]]}, params_take}
end
defp escape({:merge, _, [_left, right]}, _params_take, _vars, _env) do
Builder.error! "expected the second argument of merge/2 in select to be a map, got: `#{Macro.to_string(right)}`"
end
# Map
defp escape({:%{}, _, pairs}, params_take, vars, env) do
{pairs, params_take} = escape_pairs(pairs, params_take, vars, env)
{{:{}, [], [:%{}, [], pairs]}, params_take}
end
# List
defp escape(list, params_take, vars, env) when is_list(list) do
Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env))
end
# map/struct(var, [:foo, :bar])
defp escape({tag, _, [{var, _, context}, fields]}, {params, take}, vars, env)
when tag in [:map, :struct] and is_atom(var) and is_atom(context) do
taken = escape_fields(fields, tag, env)
expr = Builder.escape_var!(var, vars)
take = add_take(take, Builder.find_var!(var, vars), {tag, taken})
{expr, {params, take}}
end
defp escape(expr, params_take, vars, env) do
Builder.escape(expr, :any, params_take, vars, {env, &escape_expansion/5})
end
defp escape_expansion(expr, _type, params_take, vars, env) do
escape(expr, params_take, vars, env)
end
defp escape_pairs(pairs, params_take, vars, env) do
Enum.map_reduce pairs, params_take, fn({k, v}, acc) ->
{k, acc} = escape_key(k, acc, vars, env)
{v, acc} = escape(v, acc, vars, env)
{{k, v}, acc}
end
end
defp escape_key(k, params_take, _vars, _env) when is_atom(k) do
{k, params_take}
end
defp escape_key(k, params_take, vars, env) do
escape(k, params_take, vars, env)
end
defp escape_fields({:^, _, [interpolated]}, tag, _env) do
quote do
Ecto.Query.Builder.Select.fields!(unquote(tag), unquote(interpolated))
end
end
defp escape_fields(expr, tag, env) do
case Macro.expand(expr, env) do
fields when is_list(fields) ->
fields
_ ->
Builder.error! "`#{tag}/2` in `select` expects either a literal or " <>
"an interpolated list of atom fields"
end
end
@doc """
Called at runtime to verify a field.
"""
def fields!(tag, fields) do
if take?(fields) do
fields
else
raise ArgumentError,
"expected a list of fields in `#{tag}/2` inside `select`, got: `#{inspect fields}`"
end
end
defp take?(fields) do
is_list(fields) and Enum.all?(fields, fn
{k, v} when is_atom(k) -> take?(List.wrap(v))
k when is_atom(k) -> true
_ -> false
end)
end
@doc """
Called at runtime for interpolated/dynamic selects.
"""
def select!(kind, query, fields, file, line) do
take = %{0 => {:any, fields!(:select, fields)}}
expr = %Ecto.Query.SelectExpr{expr: {:&, [], [0]}, take: take, file: file, line: line}
if kind == :select do
apply(query, expr)
else
merge(query, expr)
end
end
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(:select | :merge, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(kind, query, _binding, {:^, _, [var]}, env) do
quote do
Ecto.Query.Builder.Select.select!(unquote(kind), unquote(query), unquote(var),
unquote(env.file), unquote(env.line))
end
end
def build(kind, query, binding, expr, env) do
{query, binding} = Builder.escape_binding(query, binding, env)
{expr, {params, take}} = escape(expr, binding, env)
params = Builder.escape_params(params)
take = {:%{}, [], Map.to_list(take)}
select = quote do: %Ecto.Query.SelectExpr{
expr: unquote(expr),
params: unquote(params),
file: unquote(env.file),
line: unquote(env.line),
take: unquote(take)}
if kind == :select do
Builder.apply_query(query, __MODULE__, [select], env)
else
quote do
query = unquote(query)
Builder.Select.merge(query, unquote(select))
end
end
end
@doc """
The callback applied by `build/5` to build the query.
"""
@spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t
def apply(%Ecto.Query{select: nil} = query, expr) do
%{query | select: expr}
end
def apply(%Ecto.Query{}, _expr) do
Builder.error! "only one select expression is allowed in query"
end
def apply(query, expr) do
apply(Ecto.Queryable.to_query(query), expr)
end
@doc """
The callback applied by `build/5` when merging.
"""
def merge(%Ecto.Query{select: nil} = query, new_select) do
merge(query, new_select, {:&, [], [0]}, [], %{}, new_select)
end
def merge(%Ecto.Query{select: old_select} = query, new_select) do
%{expr: old_expr, params: old_params, take: old_take} = old_select
merge(query, old_select, old_expr, old_params, old_take, new_select)
end
def merge(query, expr) do
merge(Ecto.Queryable.to_query(query), expr)
end
defp merge(query, select, old_expr, old_params, old_take, new_select) do
%{expr: new_expr, params: new_params, take: new_take} = new_select
new_expr = Ecto.Query.Builder.bump_interpolations(new_expr, old_params)
expr =
case {classify_merge(old_expr, old_take), classify_merge(new_expr, new_take)} do
{_, _} when old_expr == new_expr ->
new_expr
{{:source, meta, ix}, {:source, _, ix}} ->
{:&, meta, [ix]}
{{:struct, meta, name, old_fields}, {:map, _, new_fields}} when old_params == [] ->
cond do
new_fields == [] ->
old_expr
Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) ->
{:%, meta, [name, {:%{}, meta, Keyword.merge(old_fields, new_fields)}]}
true ->
{:merge, [], [old_expr, new_expr]}
end
{{:map, meta, old_fields}, {:map, _, new_fields}} when old_params == [] ->
cond do
old_fields == [] ->
new_expr
new_fields == [] ->
old_expr
Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) ->
{:%{}, meta, Keyword.merge(old_fields, new_fields)}
true ->
{:merge, [], [old_expr, new_expr]}
end
{_, {:map, _, _}} ->
{:merge, [], [old_expr, new_expr]}
{_, _} ->
message = """
cannot select_merge #{merge_argument_to_error(new_expr, query)} into \
#{merge_argument_to_error(old_expr, query)}, those select expressions \
are incompatible. You can only select_merge:
* a source (such as post) with another source (of the same type)
* a source (such as post) with a map
* a struct with a map
* a map with a map
Incompatible merge found
"""
raise Ecto.QueryError, query: query, message: message
end
select = %{
select | expr: expr,
params: old_params ++ new_params,
take: merge_take(old_expr, old_take, new_take)
}
%{query | select: select}
end
defp classify_merge({:&, meta, [ix]}, take) when is_integer(ix) do
case take do
%{^ix => {:map, _}} -> {:map, meta, :runtime}
_ -> {:source, meta, ix}
end
end
defp classify_merge({:%, meta, [name, {:%{}, _, fields}]}, _take)
when fields == [] or tuple_size(hd(fields)) == 2 do
{:struct, meta, name, fields}
end
defp classify_merge({:%{}, meta, fields}, _take)
when fields == [] or tuple_size(hd(fields)) == 2 do
{:map, meta, fields}
end
defp classify_merge({:%{}, meta, _}, _take) do
{:map, meta, :runtime}
end
defp classify_merge(_, _take) do
:error
end
defp merge_argument_to_error({:&, _, [0]}, %{from: %{source: {source, alias}}}) do
"source #{inspect(source || alias)}"
end
defp merge_argument_to_error({:&, _, [ix]}, _query) do
"join (at position #{ix})"
end
defp merge_argument_to_error(other, _query) do
Macro.to_string(other)
end
defp add_take(take, key, value) do
Map.update(take, key, value, &merge_take_kind_and_fields(key, &1, value))
end
defp merge_take(old_expr, %{} = old_take, %{} = new_take) do
Enum.reduce(new_take, old_take, fn {binding, new_value}, acc ->
case acc do
%{^binding => old_value} ->
Map.put(acc, binding, merge_take_kind_and_fields(binding, old_value, new_value))
%{} ->
# If the binding is a not filtered source, merge shouldn't restrict it
case old_expr do
{:&, _, [^binding]} -> acc
_ -> Map.put(acc, binding, new_value)
end
end
end)
end
defp merge_take_kind_and_fields(binding, {old_kind, old_fields}, {new_kind, new_fields}) do
{merge_take_kind(binding, old_kind, new_kind), Enum.uniq(old_fields ++ new_fields)}
end
defp merge_take_kind(_, kind, kind), do: kind
defp merge_take_kind(_, :any, kind), do: kind
defp merge_take_kind(_, kind, :any), do: kind
defp merge_take_kind(binding, old, new) do
Builder.error! "cannot select_merge because the binding at position #{binding} " <>
"was previously specified as a `#{old}` and later as `#{new}`"
end
end
|
lib/ecto/query/builder/select.ex
| 0.791055
| 0.444444
|
select.ex
|
starcoder
|
defmodule Crux.Structs do
@moduledoc """
Provides a unified function to create one or a list of structs, invoking their `create/1` function if available.
"""
alias Crux.Structs.Util
require Util
Util.modulesince("0.1.0")
@doc """
Can be implemented by structs to transform the inital data.
"""
@callback create(data :: map()) :: struct()
@optional_callbacks create: 1
@doc ~S"""
Creates a struct or a list of structs invoking their `create/1` function if available.
## Examples
```elixir
# A single member
iex> %{
...> "nick" => "nick",
...> "user" => %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "646a356e237350bf8b8dfde15667dfc4"},
...> "roles" => ["251158405832638465", "373405430589816834"],
...> "mute" => false,
...> "deaf" => false,
...> "joined_at" => "2016-11-02T00:51:21.342000+00:00"
...> }
...> |> Crux.Structs.create(Crux.Structs.Member)
%Crux.Structs.Member{
nick: "nick",
user: 218348062828003328,
roles: MapSet.new([251158405832638465, 373405430589816834]),
mute: false,
deaf: false,
joined_at: "2016-11-02T00:51:21.342000+00:00",
guild_id: nil
}
# A single user
iex> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"}
...> |> Crux.Structs.create(Crux.Structs.User)
%Crux.Structs.User{username: "space", discriminator: "0001", id: 218348062828003328, avatar: "46a356e237350bf8b8dfde15667dfc4"}
# Multiple users
iex> [
...> %{"username" => "space", "discriminator" => "0001", "id" => "218348062828003328", "avatar" => "46a356e237350bf8b8dfde15667dfc4"},
...> %{"username" => "Drahcirius", "discriminator" => "1336", "id" => "130175406673231873", "avatar" => "c896aebec82c90f590b08cfebcdc4e3b"}
...> ]
...> |> Crux.Structs.create(Crux.Structs.User)
[
%Crux.Structs.User{username: "space", discriminator: "0001", id: 218348062828003328, avatar: "46a356e237350bf8b8dfde15667dfc4"},
%Crux.Structs.User{username: "Drahcirius", discriminator: "1336", id: 130175406673231873, avatar: "<KEY>"}
]
# Does not alter already structs
iex> Crux.Structs.create(
...> %Crux.Structs.User{username: "space", discriminator: "0001", id: 218348062828003328, avatar: "<KEY>"},
...> Crux.Structs.User
...> )
%Crux.Structs.User{username: "space", discriminator: "0001", id: 218348062828003328, avatar: "<KEY>"}
# Fallback
iex> Crux.Structs.create(nil, nil)
nil
```
"""
@spec create(data :: map(), target :: module()) :: struct()
@spec create(data :: list(), target :: module()) :: list(struct())
Util.since("0.1.0")
def create(data, target)
def create(nil, _target), do: nil
def create(data, target) when is_list(data) do
Enum.map(data, &create(&1, target))
end
def create(%{__struct__: target} = data, target), do: data
def create(data, target) do
Code.ensure_loaded(target)
if :erlang.function_exported(target, :create, 1) do
target.create(data)
else
data = Util.atomify(data)
struct(target, data)
end
end
end
|
lib/structs.ex
| 0.860589
| 0.680912
|
structs.ex
|
starcoder
|
defmodule AshJsonApi.Resource do
@route_schema [
route: [
type: :string,
required: true,
doc: "The path of the route"
],
action: [
type: :atom,
required: true,
doc: "The action to call when this route is hit"
],
primary?: [
type: :boolean,
default: false,
doc:
"Whether or not this is the route that should be linked to by default when rendering links to this type of route"
]
]
@get %Ash.Dsl.Entity{
name: :get,
args: [:action],
describe: "A GET route to retrieve a single record",
examples: [
"get :read"
],
schema:
@route_schema
|> Ash.OptionsHelpers.set_default!(:route, "/:id"),
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :get,
controller: AshJsonApi.Controllers.Get,
action_type: :read,
type: :get
]
}
@index %Ash.Dsl.Entity{
name: :index,
args: [:action],
describe: "A GET route to retrieve a list of records",
examples: [
"index :read"
],
schema:
@route_schema
|> Ash.OptionsHelpers.set_default!(:route, "/")
|> Keyword.put(:paginate?, type: :boolean, default: true),
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :get,
controller: AshJsonApi.Controllers.Index,
action_type: :read,
type: :index
]
}
@relationship_arguments_doc """
A list of arguments that can be edited in the `data.relationships` input.
This is primarily useful for those who want to keep their relationship changes in compliance with the `JSON:API` spec.
If you are not focused on building a fully compliant JSON:API, it is likely far simpler to simply accept arguments
in the `attributes` key and ignore the `data.relationships` input.
If the argument's type is `{:array, _}`, a list of data will be expected. Otherwise, it will expect a single item.
For example:
```elixir
# On a tweets resource
# With a patch route that references the `authors` argument
json_api do
routes do
patch :update, relationship_arguments: [:authors]
end
end
# And an argument by that name in the action
actions do
update :cupdate do
argument :authors, {:array, :map}, allow_nil?: false
change manage_relationship(:authors, type: :replace) # Use the authors argument to allow changing the related authors on update
end
end
```
You can then send the value for `authors` in the relationships key, e.g
```json
{
data: {
attributes: {
...
},
relationships: {
authors: {
data: [
{type: "author", id: 1}, // the `type` key is removed when the value is placed into the action, so this input would be `%{"id" => 1}` (`type` is required by `JSON:API` specification)
{type: "author", id: 2, meta: {arbitrary: 1, keys: 2}}, <- `meta` is JSON:API spec freeform data, so this input would be `%{"id" => 2, "arbitrary" => 1, "keys" => 2}`
]
}
}
}
}
```
If you do not include `:authors` in the `relationship_arguments` key, you would supply its value in `attributes`, e.g:
```elixir
{
data: {
attributes: {
authors: {
{id: 1},
{id: 2, arbitrary: 1, keys: 2},
}
}
}
}
```
Non-map argument types, e.g `argument :author, :integer` (expecting an author id) work with `manage_relationship`, but not with
JSON:API, because it expects `{"type": _type, "id" => id}` for relationship values. To support non-map arguments in `relationship_arguments`,
instead of `:author`, use `{:id, :author}`. This works for `{:array, _}` type arguments as well, so the value would be a list of ids.
"""
@post %Ash.Dsl.Entity{
name: :post,
args: [:action],
describe: "A POST route to create a record",
examples: [
"post :create"
],
schema:
@route_schema
|> Ash.OptionsHelpers.set_default!(:route, "/")
|> Keyword.put(:relationship_arguments,
type: :any,
doc: @relationship_arguments_doc,
default: []
),
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :post,
controller: AshJsonApi.Controllers.Post,
action_type: :create,
type: :post
]
}
@patch %Ash.Dsl.Entity{
name: :patch,
args: [:action],
describe: "A PATCH route to update a record",
examples: [
"patch :update"
],
schema:
@route_schema
|> Ash.OptionsHelpers.set_default!(:route, "/:id")
|> Keyword.put(:relationship_arguments,
type: :any,
doc: @relationship_arguments_doc,
default: []
),
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :patch,
controller: AshJsonApi.Controllers.Patch,
action_type: :update,
type: :patch
]
}
@delete %Ash.Dsl.Entity{
name: :delete,
args: [:action],
describe: "A DELETE route to destroy a record",
examples: [
"delete :destroy"
],
schema:
@route_schema
|> Ash.OptionsHelpers.set_default!(:route, "/:id"),
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :delete,
controller: AshJsonApi.Controllers.Delete,
action_type: :destroy,
type: :delete
]
}
@related %Ash.Dsl.Entity{
name: :related,
args: [:relationship, :action],
describe: "A GET route to read the related resources of a relationship",
examples: [
"related :comments, :read"
],
schema:
@route_schema
|> Ash.OptionsHelpers.make_optional!(:route)
|> Ash.OptionsHelpers.append_doc!(:route, "Defaults to /:id/[relationship_name]")
|> Keyword.put(:relationship,
type: :atom,
required: true
),
transform: {__MODULE__, :set_related_route, []},
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :get,
controller: AshJsonApi.Controllers.GetRelated
]
}
@relationship %Ash.Dsl.Entity{
name: :relationship,
args: [:relationship, :action],
describe: "A READ route to read the relationship, returns resource identifiers.",
examples: [
"relationship :comments, :read"
],
schema:
@route_schema
|> Ash.OptionsHelpers.make_optional!(:route)
|> Ash.OptionsHelpers.append_doc!(
:route,
" Defaults to /:id/relationships/[relationship_name]"
)
|> Keyword.put(:relationship,
type: :atom,
required: true
),
transform: {__MODULE__, :set_relationship_route, []},
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :get,
controller: AshJsonApi.Controllers.GetRelationship
]
}
@post_to_relationship %Ash.Dsl.Entity{
name: :post_to_relationship,
args: [:relationship],
describe: "A POST route to create related entities using resource identifiers",
examples: [
"post_to_relationship :comments"
],
schema:
@route_schema
|> Ash.OptionsHelpers.make_optional!(:route)
|> Ash.OptionsHelpers.append_doc!(
:route,
" Defaults to /:id/relationships/[relationship_name]"
)
|> Keyword.put(:relationship,
type: :atom,
required: true
)
|> Keyword.delete(:action),
transform: {__MODULE__, :set_relationship_route, []},
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :post,
type: :post_to_relationship,
controller: AshJsonApi.Controllers.PostToRelationship
]
}
@patch_relationship %Ash.Dsl.Entity{
name: :patch_relationship,
args: [:relationship],
describe: "A PATCH route to update a relationship using resource identifiers",
examples: [
"patch_relationship :comments"
],
schema:
@route_schema
|> Ash.OptionsHelpers.make_optional!(:route)
|> Ash.OptionsHelpers.append_doc!(
:route,
" Defaults to /:id/relationships/[relationship_name]"
)
|> Keyword.put(:relationship,
type: :atom,
required: true
)
|> Keyword.delete(:action),
transform: {__MODULE__, :set_relationship_route, []},
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :patch,
type: :patch_relationship,
controller: AshJsonApi.Controllers.PatchRelationship
]
}
@delete_from_relationship %Ash.Dsl.Entity{
name: :delete_from_relationship,
args: [:relationship],
describe: "A DELETE route to remove related entities using resource identifiers",
examples: [
"delete_from_relationship :comments"
],
schema:
@route_schema
|> Ash.OptionsHelpers.make_optional!(:route)
|> Ash.OptionsHelpers.append_doc!(
:route,
" Defaults to /:id/relationships/[relationship_name]"
)
|> Keyword.put(:relationship,
type: :atom,
required: true
)
|> Keyword.delete(:action),
transform: {__MODULE__, :set_relationship_route, []},
target: AshJsonApi.Resource.Route,
auto_set_fields: [
method: :delete,
type: :delete_from_relationship,
controller: AshJsonApi.Controllers.DeleteFromRelationship
]
}
@routes %Ash.Dsl.Section{
name: :routes,
describe: "Configure the routes that will be exposed via the JSON:API",
schema: [
base: [
type: :string,
required: true,
doc: "The base route for the resource, e.g `\"/users\"`"
]
],
examples: [
"""
routes do
base_route "/posts"
get :read
get :me, route: "/me"
index :read
post :confirm_name, route: "/confirm_name"
patch :update
related :comments, :read
relationship :comments, :read
post_to_relationship :comments
patch_relationship :comments
delete_from_relationship :comments
end
"""
],
entities: [
@get,
@index,
@post,
@patch,
@delete,
@related,
@relationship,
@post_to_relationship,
@patch_relationship,
@delete_from_relationship
]
}
@primary_key %Ash.Dsl.Section{
name: :primary_key,
describe: "Encode the id of the JSON API response from selected attributes of a resource",
examples: [
"""
primary_key do
keys [:first_name, :last_name]
delimiter "~"
end
"""
],
schema: [
keys: [
type: {:custom, Ash.OptionsHelpers, :list_of_atoms, []},
doc: "the list of attributes to encode JSON API primary key",
required: true
],
delimiter: [
type: :string,
default: "-",
required: false,
doc: "The delimiter to concatenate the primary key values. Default to be '-'"
]
]
}
@json_api %Ash.Dsl.Section{
name: :json_api,
sections: [@routes, @primary_key],
describe: "Configure the resource's behavior in the JSON:API",
examples: [
"""
json_api do
type "post"
includes [
friends: [
:comments
],
comments: []
]
routes do
base_route "/posts"
get :read
get :me, route: "/me"
index :read
post :confirm_name, route: "/confirm_name"
patch :update
related :comments, :read
relationship :comments, :read
post_to_relationship :comments
patch_relationship :comments
delete_from_relationship :comments
end
end
"""
],
schema: [
type: [
type: :string,
doc: "The resource identifier type of this resource in JSON:API",
required: true
],
includes: [
type: :any,
default: [],
doc: "A keyword list of all paths that are includable from this resource"
]
]
}
@transformers [
AshJsonApi.Resource.Transformers.PrependRoutePrefix,
AshJsonApi.Resource.Transformers.ValidateNoOverlappingRoutes,
AshJsonApi.Resource.Transformers.RequirePrimaryKey
]
@sections [@json_api]
@moduledoc """
The entrypoint for adding JSON:API behavior to a resource"
# Table of Contents
#{Ash.Dsl.Extension.doc_index(@sections)}
#{Ash.Dsl.Extension.doc(@sections)}
"""
require Ash.Dsl.Extension
use Ash.Dsl.Extension, sections: @sections, transformers: @transformers
def type(resource) do
Extension.get_opt(resource, [:json_api], :type, nil, false)
end
def includes(resource) do
Extension.get_opt(resource, [:json_api], :includes, [], false)
end
def base_route(resource) do
Extension.get_opt(resource, [:json_api, :routes], :base, nil, false)
end
def encode_primary_key(%resource{} = record) do
case primary_key_fields(resource) do
[] ->
# Expect resource to have only 1 primary key if :primary_key section is not used
[key] = Ash.Resource.Info.primary_key(resource)
Map.get(record, key)
keys ->
delimiter = primary_key_delimiter(resource)
[_ | concatenated_keys] =
keys
|> Enum.reverse()
|> Enum.reduce([], fn key, acc -> [delimiter, to_string(Map.get(record, key)), acc] end)
IO.iodata_to_binary(concatenated_keys)
end
end
def primary_key_fields(resource) do
Extension.get_opt(resource, [:json_api, :primary_key], :keys, [], false)
end
def primary_key_delimiter(resource) do
Extension.get_opt(resource, [:json_api, :primary_key], :delimiter, [], false)
end
def routes(resource) do
Extension.get_entities(resource, [:json_api, :routes])
end
def route(resource, criteria \\ %{}) do
resource
|> routes()
|> Enum.find(fn route ->
Map.take(route, Map.keys(criteria)) == criteria
end)
end
@doc false
def set_related_route(%{route: nil, relationship: relationship} = route) do
{:ok, %{route | route: ":id/#{relationship}"}}
end
def set_related_route(route), do: {:ok, route}
@doc false
def set_relationship_route(%{route: nil, relationship: relationship} = route) do
{:ok, %{route | route: ":id/relationships/#{relationship}"}}
end
def set_relationship_route(route), do: {:ok, route}
@doc false
def validate_fields(fields) when is_list(fields) do
if Enum.all?(fields, &is_atom/1) do
{:ok, fields}
else
{:error, "Invalid fields"}
end
end
end
|
lib/ash_json_api/resource/resource.ex
| 0.808748
| 0.747869
|
resource.ex
|
starcoder
|
defmodule StepFlow.Workflows.Status do
use Ecto.Schema
import Ecto.Changeset
import Ecto.Query, warn: false
import EctoEnum
alias StepFlow.Jobs
alias StepFlow.Progressions.Progression
alias StepFlow.Repo
alias StepFlow.Workflows
alias StepFlow.Workflows.Workflow
require Logger
@moduledoc false
defenum(StateEnum, ["pending", "skipped", "processing", "retrying", "error", "completed"])
def state_enum_label(value) do
case value do
value when value in [0, :pending] -> "pending"
value when value in [1, :skipped] -> "skipped"
value when value in [2, :processing] -> "processing"
value when value in [3, :retrying] -> "retrying"
value when value in [4, :error] -> "error"
value when value in [5, :completed] -> "completed"
_ -> "unknown"
end
end
schema "step_flow_workflow_status" do
field(:state, StepFlow.Workflows.Status.StateEnum)
belongs_to(:status, Jobs.Status, foreign_key: :job_status_id, defaults: nil)
belongs_to(:workflow, Workflow, foreign_key: :workflow_id)
timestamps()
end
@doc false
def changeset(%Workflows.Status{} = status, attrs) do
status
|> cast(attrs, [:workflow_id, :state, :job_status_id])
|> foreign_key_constraint(:workflow_id)
|> validate_required([:state, :workflow_id])
end
@doc """
Define the workflow status given events. It also tracks completed, retrying
and error job status of a workflow.
Returns `{:ok, workflow_status}` if the event is correct, nil otherwise
## Examples
iex> define_workflow_status(1, :completed_workflow)
{:ok, %Workflows.Status{state: :completed, workflow_id: 1, job_id: nil, id: 1}}
iex> define_workflow_status(1, :incorrect_event)
nil
"""
def define_workflow_status(workflow_id, event, payload \\ %{})
def define_workflow_status(workflow_id, :created_workflow, _payload) do
set_workflow_status(workflow_id, :pending)
end
def define_workflow_status(workflow_id, :job_progression, %Progression{progression: 0}) do
last_status = get_last_workflow_status(workflow_id)
if last_status.state == :pending do
set_workflow_status(workflow_id, :processing)
else
Logger.warn(
"Can't set workflow #{workflow_id} to :processing because current state is #{
last_status.state
}."
)
{:ok, last_status}
end
end
def define_workflow_status(workflow_id, :job_completed, %Jobs.Status{
id: job_status_id,
job_id: job_id
}) do
jobs_status_not_completed =
get_last_jobs_status(workflow_id)
|> Enum.filter(fn s -> s.state in [:error, :retrying] and s.job_id != job_id end)
|> length()
if jobs_status_not_completed == 0 do
set_workflow_status(workflow_id, :pending, job_status_id)
else
last_status = get_last_workflow_status(workflow_id)
set_workflow_status(workflow_id, last_status.state, job_status_id)
end
end
def define_workflow_status(workflow_id, :job_retrying, %Jobs.Status{
id: job_status_id,
job_id: job_id
}) do
jobs_status_in_error =
get_last_jobs_status(workflow_id)
|> Enum.filter(fn s -> s.state == :error and s.job_id != job_id end)
|> length()
if jobs_status_in_error == 0 do
set_workflow_status(workflow_id, :processing, job_status_id)
else
set_workflow_status(workflow_id, :error, job_status_id)
end
end
def define_workflow_status(workflow_id, :completed_workflow, _payload) do
last_status = get_last_workflow_status(workflow_id)
if last_status != nil do
Logger.info("Complete wokflow #{workflow_id} from state #{last_status.state}.")
end
set_workflow_status(workflow_id, :completed)
end
def define_workflow_status(workflow_id, event, %Jobs.Status{id: job_status_id})
when event in [:job_error, :queue_not_found] do
set_workflow_status(workflow_id, :error, job_status_id)
end
def define_workflow_status(_workflow_id, _event, _payload), do: nil
def set_workflow_status(workflow_id, status, job_status_id \\ nil) do
%Workflows.Status{}
|> Workflows.Status.changeset(%{
workflow_id: workflow_id,
state: status,
job_status_id: job_status_id
})
|> Repo.insert()
end
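  # Note: each call inserts a new row into "step_flow_workflow_status" rather
  # than updating in place, so the full status history of a workflow is kept.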
@doc """
Returns the last updated status of a workflow per job_id.
"""
def get_last_jobs_status(workflow_id) when is_number(workflow_id) do
query =
from(
job_status in Jobs.Status,
inner_join:
workflow_status in subquery(
from(
workflow_status in Workflows.Status,
where: workflow_status.workflow_id == ^workflow_id
)
),
on: workflow_status.job_status_id == job_status.id,
order_by: [
desc: field(workflow_status, :inserted_at),
desc: field(job_status, :id),
asc: field(job_status, :job_id)
],
distinct: [asc: field(job_status, :job_id)]
)
Repo.all(query)
end
@doc """
Returns the last updated status of a workflow.
"""
def get_last_workflow_status(workflow_id) when is_number(workflow_id) do
query =
from(
workflow_status in Workflows.Status,
where: workflow_status.workflow_id == ^workflow_id,
order_by: [desc: :updated_at, desc: :id],
limit: 1
)
Repo.one(query)
end
def get_last_workflow_status(_workflow_id), do: nil
@doc """
"""
def list_workflows_status(start_date, end_date, identifiers, user_rights) do
query =
if Enum.empty?(identifiers) do
from(
workflow in Workflow,
join: rights in assoc(workflow, :rights),
where: rights.action == "view",
where: fragment("?::varchar[] && ?::varchar[]", rights.groups, ^user_rights)
)
else
from(
workflow in Workflow,
where: workflow.identifier in ^identifiers,
join: rights in assoc(workflow, :rights),
where: rights.action == "view",
where: fragment("?::varchar[] && ?::varchar[]", rights.groups, ^user_rights)
)
end
query =
from(
workflows_status in Workflows.Status,
inner_join: workflow in subquery(query),
on: workflows_status.workflow_id == workflow.id,
where:
fragment("?::timestamp", workflows_status.inserted_at) >= ^start_date and
fragment("?::timestamp", workflows_status.inserted_at) <= ^end_date and
workflows_status.state in [:completed, :error, :processing]
)
Repo.all(query)
end
end
| lib/step_flow/workflows/status.ex |
defmodule FixtureBuilder.Executer do
@moduledoc false
alias FixtureBuilder.Op
alias FixtureBuilder.Utils
def execute(fixtures, module), do: execute_next(fixtures, module)
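  # Sketch of the op semantics dispatched below:
  #   :put    - replace the value at op.path with the fixture result
  #   :append - append the result to the (possibly new) list at op.path
  #   :merge  - Map.merge/2 the result map into the map at op.path
  #   :run    - invoke op.extra.callback.(fixtures, fixtures.args) and store the result
  # In every case, op.children are re-rooted under op.path and queued next.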
defp execute_next(%FixtureBuilder{ops: []} = fixtures, _), do: fixtures
defp execute_next(%FixtureBuilder{ops: [%Op{name: :put} = op | tail]} = fixtures, module) do
fixtures = Map.put(fixtures, :parent, Utils.get(fixtures.data, Utils.get_parent_path(op)))
case apply_fixture(op, fixtures, module) do
%FixtureBuilder{} = nested_fixtures ->
nested_fixtures
result ->
data = Utils.update(fixtures.data, op.path, fn _ -> result end)
nested_ops = Enum.map(op.children, &Map.put(&1, :path, op.path ++ &1.path))
%{fixtures | data: data, ops: nested_ops ++ tail}
end
|> execute_next(module)
end
defp execute_next(%FixtureBuilder{ops: [%Op{name: :append} = op | tail]} = fixtures, module) do
fixtures = Map.put(fixtures, :parent, Utils.get(fixtures.data, Utils.get_parent_path(op)))
case apply_fixture(op, fixtures, module) do
%FixtureBuilder{} = nested_fixtures ->
nested_fixtures
result ->
data =
Utils.update(fixtures.data, op.path, fn
value when is_list(value) ->
value ++ [result]
_ ->
[result]
end)
nested_ops = Enum.map(op.children, &Map.put(&1, :path, op.path ++ &1.path))
%{fixtures | data: data, ops: nested_ops ++ tail}
end
|> execute_next(module)
end
defp execute_next(%FixtureBuilder{ops: [%Op{name: :merge} = op | tail]} = fixtures, module) do
fixtures = Map.put(fixtures, :parent, Utils.get(fixtures.data, Utils.get_parent_path(op)))
case apply_fixture(op, fixtures, module) do
%FixtureBuilder{} = nested_fixtures ->
nested_fixtures
result ->
data =
Utils.update(fixtures.data, op.path, fn
value when is_map(value) ->
Map.merge(value, result)
value ->
raise RuntimeError,
"Expected a map to merge, got: #{inspect(value)}. " <>
"Make sure the parent is a map and/or you provided the correct " <>
"initial data when using fixture composition."
end)
nested_ops = Enum.map(op.children, &Map.put(&1, :path, op.path ++ &1.path))
%{fixtures | data: data, ops: nested_ops ++ tail}
end
|> execute_next(module)
end
defp execute_next(%FixtureBuilder{ops: [%Op{name: :run} = op | tail]} = fixtures, module) do
fixtures = Map.put(fixtures, :parent, Utils.get(fixtures.data, Utils.get_parent_path(op)))
result = apply(op.extra.callback, [fixtures, fixtures.args])
data = Utils.update(fixtures.data, op.path, fn _ -> result end)
nested_ops = Enum.map(op.children, &Map.put(&1, :path, op.path ++ &1.path))
%{fixtures | data: data, ops: nested_ops ++ tail}
|> execute_next(module)
end
defp apply_fixture(op, fixtures, module) do
fixture_module = Utils.find_fixture_module!(op.fixture, module)
args =
if is_function(op.args, 1) do
op.args.(fixtures.data)
else
op.args
end
case apply(fixture_module, :build, [op.fixture, args, fixtures]) do
%FixtureBuilder{} = nested_fixtures ->
nested_data = Utils.update(fixtures.data, op.path, fn _ -> nested_fixtures.data end)
nested_ops = Enum.map(nested_fixtures.ops, &Map.put(&1, :path, op.path ++ &1.path))
tail = Enum.map(op.children, &Map.put(&1, :path, op.path ++ &1.path))
nested_fixtures
|> Map.put(:data, nested_data)
|> Map.put(:ops, nested_ops)
|> execute_next(module)
|> Map.put(:ops, tail ++ List.delete_at(fixtures.ops, 0))
value ->
value
end
end
end
| lib/fixture_builder/executer.ex |
defmodule Garuda.RoomManager.RoomDb do
@moduledoc false
# Stores the info of all the game-rooms and functions to manage those data.
# Orwell uses data from RoomDb, for rendering the live interactive dashboard
alias Garuda.MatchMaker.Matcher
alias Garuda.RoomManager.Records
use GenServer
@room_db_name :room_db
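  # NOTE: the :room_db ETS table (including its "channels" row) is assumed to be
  # created elsewhere; the handle_call/3 clauses below look it up without fallback.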
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@spec save_init_room_state(pid(), map()) :: any()
@doc """
Saves the specific game-room info with its pid as key
* `room_pid` - pid of the particular game-room
* `info` - A map of game-room info
"""
def save_init_room_state(room_pid, info) do
GenServer.call(__MODULE__, {"save_room", {room_pid, info}})
end
@doc """
  Adds a new player's details to the room (expects the room to already exist).
* `room_pid` - pid of the game-room
* `opts` - A keyword-list of player-info
"""
@spec on_room_join(pid(), Keyword.t()) :: String.t()
def on_room_join(room_pid, opts) do
GenServer.call(__MODULE__, {"room_join", room_pid, opts})
end
@doc """
Updates a game-room's info.
  Typically used to update live info such as the number of
  players in a game-room.
"""
@spec update_room_state(pid(), map()) :: :ok
def update_room_state(room_pid, update_info) do
GenServer.cast(__MODULE__, {:update_room, room_pid, update_info})
end
@doc """
Returns the game-channel name associated with the game-room's pid.
* pid - game-room pid.
This function is mostly used by game-rooms to get the game-channel name, so
  that they can send messages to their corresponding game-channels.
"""
@spec get_channel_name(pid()) :: String.t()
def get_channel_name(pid) do
GenServer.call(__MODULE__, {"get_channel_name", pid})
end
@spec delete_room(pid()) :: any()
@doc """
Deletes a game-room info with a pid.
"""
def delete_room(room_pid) do
GenServer.call(__MODULE__, {"delete_room", room_pid})
end
@doc """
Saves the game-channel info with its pid as key.
  As of now this is mainly used for getting the actual number of connections on the server.
"""
@spec on_channel_connection(pid(), map()) :: any()
def on_channel_connection(channel_pid, info) do
GenServer.call(__MODULE__, {"channel_join", {channel_pid, info}})
end
@spec on_channel_terminate(pid()) :: any()
@doc """
  Deletes a game-channel's info given its channel pid
"""
def on_channel_terminate(channel_pid) do
GenServer.call(__MODULE__, {"channel_leave", channel_pid})
end
@spec get_stats :: map()
@doc """
Returns overall game server info, required for Monitoring
"""
def get_stats do
GenServer.call(__MODULE__, "get_stats")
end
@spec get_room_state(String.t()) :: map()
@doc """
Returns the state of a given `room_id`
  * `room_id` - Unique combination of room_name + ":" + match_id, ex ("tictactoe:ACFBEBW")
"""
def get_room_state(room_id) do
GenServer.call(__MODULE__, {"get_room_state", room_id})
end
@doc """
Removes the player from RoomDb.
* room_pid - pid of the game-room
* player_id - unique_id of player
"""
@spec on_player_leave(pid(), String.t()) :: any()
def on_player_leave(room_pid, player_id) do
GenServer.call(__MODULE__, {"room_left", room_pid, player_id})
end
@doc """
Updates player_id key in the room with reconnection_timer ref
"""
@spec update_timer_ref(pid(), String.t(), any()) :: any
def update_timer_ref(room_pid, player_id, ref) do
GenServer.call(__MODULE__, {"update_timer_ref", room_pid, player_id, ref})
end
@doc """
Returns player_id key in the room with reconnection_timer ref
"""
@spec get_timer_ref(pid(), String.t()) :: any
def get_timer_ref(room_pid, player_id) do
GenServer.call(__MODULE__, {"get_timer_ref", room_pid, player_id})
end
@doc """
Returns rejoin status of player in the room
"""
@spec has_rejoined(pid(), String.t()) :: any
def has_rejoined(room_pid, player_id) do
GenServer.call(__MODULE__, {"has_rejoined", room_pid, player_id})
end
@impl true
def init(_opts) do
{:ok, %{}}
end
@impl true
def handle_call({"delete_room", room_pid}, _from, state) do
case :ets.lookup(@room_db_name, room_pid) do
[{_room_name, details} | _t] ->
room_name = details["room_name"]
match_id = details["match_id"]
room_id = "#{room_name}:#{match_id}"
Matcher.delete_room(room_id)
:ets.delete(@room_db_name, room_pid)
{:reply, "ok", state}
_ ->
{:reply, "ok", state}
end
end
@impl true
def handle_call({"save_room", {room_pid, info}}, _from, state) do
:ets.insert(@room_db_name, {room_pid, info})
{:reply, "ok", state}
end
@impl true
def handle_call({"channel_leave", channel_pid}, _from, state) do
[{_key, details} | _t] = :ets.lookup(@room_db_name, "channels")
{popped_val, details} = pop_in(details[channel_pid])
:ets.insert(@room_db_name, {"channels", details})
{:reply, popped_val, state}
end
@impl true
def handle_call({"channel_join", {channel_pid, _info}}, _from, state) do
[{_key, details} | _t] = :ets.lookup(@room_db_name, "channels")
details = put_in(details[channel_pid], %{})
:ets.insert(@room_db_name, {"channels", details})
{:reply, "ok", state}
end
@impl true
def handle_call("get_stats", _from, state) do
[{_key, details} | _t] = :ets.lookup(@room_db_name, "channels")
# fun = :ets.fun2ms(fn {key, val} when is_pid(key) -> val end)
# fetching data of those rows, which have pid as keys.
room_data = :ets.select(@room_db_name, [{{:"$1", :"$2"}, [is_pid: :"$1"], [:"$2"]}])
stats = %{
"channel_count" => Map.keys(details) |> Enum.count(),
"room_count" => Enum.count(room_data),
"rooms" => room_data
}
{:reply, stats, state}
end
@impl true
def handle_call({"get_channel_name", room_pid}, _from, state) do
[{_room_name, details} | _t] = :ets.lookup(@room_db_name, room_pid)
room_name = details["room_name"]
match_id = details["match_id"]
{:reply, "room_" <> room_name <> ":" <> match_id, state}
end
@impl true
def handle_call({"get_room_state", room_id}, _from, state) do
room_state =
case Records.is_process_registered(room_id) do
true -> :sys.get_state(Records.via_tuple(room_id))
false -> %{}
end
{:reply, room_state, state}
end
@impl true
def handle_call({"room_join", room_pid, opts}, _from, state) do
player_id = Keyword.get(opts, :player_id)
status = add_to_room(:ets.lookup(@room_db_name, room_pid), player_id)
{:reply, status, state}
end
@impl true
def handle_call({"room_left", room_pid, player_id}, _from, state) do
[{_room_name, details} | _t] = :ets.lookup(@room_db_name, room_pid)
{popped_val, details} = pop_in(details["players"][player_id])
:ets.insert(@room_db_name, {room_pid, details})
room_name = details["room_name"]
match_id = details["match_id"]
room_id = room_name <> ":" <> match_id
Matcher.remove_player(room_id, player_id)
manage_room_deletion(room_pid, details)
{:reply, popped_val, state}
end
@impl true
def handle_call({"update_timer_ref", room_pid, player_id, ref}, _from, state) do
[{_room_name, details} | _t] = :ets.lookup(@room_db_name, room_pid)
details =
case ref do
ref when is_reference(ref) ->
put_in(details["players"][player_id]["recon_ref"], ref)
_ ->
put_in(details["players"][player_id], %{
"recon_ref" => ref,
"rejoin" => false
})
end
:ets.insert(@room_db_name, {room_pid, details})
{:reply, "updated", state}
end
@impl true
def handle_call({"get_timer_ref", room_pid, player_id}, _from, state) do
[{_room_name, details} | _t] = :ets.lookup(@room_db_name, room_pid)
ref = details["players"][player_id]["recon_ref"]
{:reply, ref, state}
end
@impl true
def handle_call({"has_rejoined", room_pid, player_id}, _from, state) do
[{_room_name, details} | _t] = :ets.lookup(@room_db_name, room_pid)
status = details["players"][player_id]["rejoin"]
{:reply, status, state}
end
## helper
defp add_to_room([], _player_id), do: "error"
defp add_to_room([{room_name, details} | _t], player_id) do
case details["players"][player_id] do
nil ->
details =
put_in(details["players"][player_id], %{
"recon_ref" => true,
"rejoin" => false
})
:ets.insert(@room_db_name, {room_name, details})
"ok"
_ ->
details = put_in(details["players"][player_id]["rejoin"], true)
:ets.insert(@room_db_name, {room_name, details})
"already_exists"
end
end
defp manage_room_deletion(room_pid, details) do
player_count =
details["players"]
|> Enum.count()
case player_count do
0 ->
:ets.delete(@room_db_name, room_pid)
_ ->
details
end
end
end
| lib/room_manager/room_db.ex |
defmodule Nostrum.Struct.Guild.Role do
@moduledoc ~S"""
Struct representing a Discord role.
## Mentioning Roles in Messages
A `Nostrum.Struct.Guild.Role` can be mentioned in message content using the `String.Chars`
protocol or `mention/1`.
```Elixir
role = %Nostrum.Struct.Guild.Role{id: 431886897539973131}
Nostrum.Api.create_message!(184046599834435585, "#{role}")
%Nostrum.Struct.Message{}
role = %Nostrum.Struct.Guild.Role{id: 431884023535632398}
Nostrum.Api.create_message!(280085880452939778, "#{Nostrum.Struct.Guild.Role.mention(role)}")
%Nostrum.Struct.Message{}
```
"""
alias Nostrum.Struct.Snowflake
alias Nostrum.Util
defstruct [
:id,
:name,
:color,
:hoist,
:position,
:permissions,
:managed,
:mentionable
]
defimpl String.Chars do
def to_string(role), do: @for.mention(role)
end
@typedoc "The id of the role"
@type id :: Snowflake.t()
@typedoc "The name of the role"
@type name :: String.t()
@typedoc "The hexadecimal color code"
@type color :: integer
@typedoc "Whether the role is pinned in the user listing"
@type hoist :: boolean
@typedoc "The position of the role"
@type position :: integer
@typedoc "The permission bit set"
@type permissions :: integer
@typedoc "Whether the role is managed by an integration"
@type managed :: boolean
@typedoc "Whether the role is mentionable"
@type mentionable :: boolean
@type t :: %__MODULE__{
id: id,
name: name,
color: color,
hoist: hoist,
position: position,
permissions: permissions,
managed: managed,
mentionable: mentionable
}
@doc ~S"""
Formats an `Nostrum.Struct.Role` into a mention.
## Examples
```Elixir
iex> role = %Nostrum.Struct.Guild.Role{id: 431886639627763722}
...> Nostrum.Struct.Guild.Role.mention(role)
"<@&431886639627763722>"
```
"""
@spec mention(t) :: String.t()
def mention(%__MODULE__{id: id}), do: "<@&#{id}>"
@doc false
def p_encode do
%__MODULE__{}
end
@doc false
def to_struct(map) do
new =
map
|> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end)
|> Map.update(:id, nil, &Util.cast(&1, Snowflake))
struct(__MODULE__, new)
end
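  # Illustrative (assuming Util.cast/2 coerces a string snowflake to an integer):
  #
  #   to_struct(%{"id" => "431886639627763722", "name" => "admin"})
  #   #=> %Nostrum.Struct.Guild.Role{id: 431886639627763722, name: "admin"}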
end
| lib/nostrum/struct/guild/role.ex |
defmodule Weber.Utils do
@moduledoc """
Weber utils functions.
"""
import Enum
@doc """
Convert :calendar.local_time to string
"""
def get_time() do
{{year, month, day}, {hours, minutes, seconds}} = :calendar.local_time()
Integer.to_string(year) <> "." <> Integer.to_string(month) <> "." <> Integer.to_string(day) <> " " <>
Integer.to_string(hours) <> ":" <> Integer.to_string(minutes) <> ":" <> Integer.to_string(seconds) <> " "
end
@doc """
Recursively get all files from directory.
"""
def get_all_files(dir) do
find_files = fn(f, acc) -> [f | acc] end
:filelib.fold_files(dir, ".*", true, find_files, [])
end
@doc """
Find full path by file name
"""
def find_file_path(abs_filenames, filename) do
filter(abs_filenames, fn(f) ->
case f do
{bname, _mod, _file} ->
bname == filename
_ ->
Path.absname(f) == filename
end
end) |> head
end
def find_static_file_path(abs_filenames, filename) do
filter(abs_filenames, &( Path.basename(&1) == List.to_string(filename) ) )
end
@doc """
Return first element from list
"""
def head([]), do: []
def head([h | _]), do: h
@doc """
Collect all Helpers imports.
"""
def add_helpers_imports(view_content) do
"<% import Weber.Helper.Html %>" <> "<% import Weber.Helper.Partial %>" <>
"<% import Weber.Helper.ResourceHelper %>" <> "<% import Weber.I18n %>" <> view_content
end
  def views(path) do
    # Path.extname/1 returns a binary, so the extension must be compared as the
    # binary ".html" (the charlist '.html' can never match).
    Enum.filter(get_all_files(:erlang.binary_to_list(path) ++ '/lib/views/'), &(Path.extname(&1) == ".html"))
  end
@doc """
Build module name from view path.
Path: /home/user/testProj/lib/views/main.html
Module: Elixir.Views.Main
"""
def build_module_name(path) do
spliten_path = path |> to_string |> String.split("/")
drop_path = :lists.dropwhile(fn(segment) -> segment !== <<"views">> end, spliten_path)
    Enum.map(drop_path, &Weber.Utils.capitalize(Path.basename(&1, ".html"))) |> Module.concat()
end
@doc """
Capitalize only first character in string
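  ## Example

      iex> Weber.Utils.capitalize("weber")
      "Weber"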
"""
def capitalize("") do
""
end
def capitalize(str) do
(str |> String.at(0) |> String.capitalize) <> String.slice(str, 1..-1)
end
def to_bin(val) do
to_string(val)
end
end
| lib/weber/utils/utils.ex |
defmodule Game.Command.Move do
@moduledoc """
The movement commands: north, east, south, west
"""
use Game.Command
use Game.Zone
alias Data.Exit
alias Game.Command.AFK
alias Game.Door
alias Game.Player
alias Game.Quest
alias Game.Session.GMCP
alias Metrics.CharacterInstrumenter
@must_be_alive true
commands(
[
"move",
{"north", ["n"]},
{"south", ["s"]},
{"east", ["e"]},
{"west", ["w"]},
{"up", ["u"]},
{"down", ["d"]},
{"north west", ["nw"]},
{"north east", ["ne"]},
{"south west", ["sw"]},
{"south east", ["se"]},
"in",
"out",
"open",
"close"
],
parse: false
)
@impl Game.Command
def help(:topic), do: "Move"
def help(:short), do: "Move in a direction"
def help(:full) do
"""
Move around rooms. You can move where you see an exit when looking.
Example:
[ ] > {command}move west{/command}
[ ] > {command}west{/command}
[ ] > {command}w{/command}
Sometimes doors will be present between rooms. You will automatically open doors
if they are closed and you move in their direction. You can open and close them
manually as well.
Example:
[ ] > {command}open west{/command}
[ ] > {command}close west{/command}
"""
end
@impl true
def parse(command, _context), do: parse(command)
@impl Game.Command
@doc """
Parse the command into arguments
"""
@spec parse(command :: String.t()) :: {atom}
  def parse(command)
def parse("move " <> direction), do: parse(direction)
def parse("north"), do: {:move, "north"}
def parse("n"), do: {:move, "north"}
def parse("east"), do: {:move, "east"}
def parse("e"), do: {:move, "east"}
def parse("south"), do: {:move, "south"}
def parse("s"), do: {:move, "south"}
def parse("west"), do: {:move, "west"}
def parse("w"), do: {:move, "west"}
def parse("up"), do: {:move, "up"}
def parse("u"), do: {:move, "up"}
def parse("down"), do: {:move, "down"}
def parse("d"), do: {:move, "down"}
def parse("in"), do: {:move, "in"}
def parse("out"), do: {:move, "out"}
def parse("north west"), do: {:move, "north west"}
def parse("nw"), do: {:move, "north west"}
def parse("north east"), do: {:move, "north east"}
def parse("ne"), do: {:move, "north east"}
def parse("south west"), do: {:move, "south west"}
def parse("sw"), do: {:move, "south west"}
def parse("south east"), do: {:move, "south east"}
def parse("se"), do: {:move, "south east"}
def parse("open " <> direction) do
case parse(direction) do
{:move, direction} ->
{:open, direction}
_ ->
{:error, :bad_parse, "open #{direction}"}
end
end
def parse("close " <> direction) do
case parse(direction) do
{:move, direction} ->
{:close, direction}
_ ->
{:error, :bad_parse, "close #{direction}"}
end
end
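  # Illustrative parses:
  #
  #   parse("move west") #=> {:move, "west"}
  #   parse("nw")        #=> {:move, "north west"}
  #   parse("open w")    #=> {:open, "west"}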
@impl Game.Command
@doc """
Move in the direction provided
"""
def run(command, state)
def run({:move, direction}, state = %{save: %{room_id: room_id}}) do
{:ok, room} = @environment.look(room_id)
case room |> Exit.exit_to(direction) do
room_exit = %{finish_id: id} ->
maybe_move_to(state, id, room_exit, direction)
_ ->
state.socket
|> @socket.echo(
gettext("Could not move %{direction}, no exit found.", direction: direction)
)
{:error, :no_exit}
end
end
def run({:open, direction}, state = %{save: %{room_id: room_id}}) do
{:ok, room} = @environment.look(room_id)
case room |> Exit.exit_to(direction) do
%{door_id: door_id, has_door: true} ->
state |> maybe_open_door(door_id) |> update_mini_map(room_id)
%{id: _exit_id} ->
state.socket
|> @socket.echo(gettext("There is no door %{direction}.", direction: direction))
_ ->
state.socket
|> @socket.echo(gettext("There is no exit %{direction}.", direction: direction))
end
:ok
end
def run({:close, direction}, state = %{save: %{room_id: room_id}}) do
{:ok, room} = @environment.look(room_id)
case room |> Exit.exit_to(direction) do
%{door_id: door_id, has_door: true} ->
state |> maybe_close_door(door_id) |> update_mini_map(room_id)
%{id: _exit_id} ->
state.socket
|> @socket.echo(gettext("There is no door %{direction}.", direction: direction))
_ ->
state.socket
|> @socket.echo(gettext("There is no exit %{direction}.", direction: direction))
end
:ok
end
@doc """
Maybe move a player
They require at least 1 movement point to proceed
"""
def maybe_move_to(state, room_id, room_exit, direction)
def maybe_move_to(state = %{socket: socket}, room_id, room_exit = %{has_door: true}, direction) do
case Door.get(room_exit.door_id) do
"open" ->
maybe_move_to(state, room_id, %{}, direction)
"closed" ->
Door.set(room_exit.door_id, "open")
socket |> @socket.echo(gettext("You opened the door."))
maybe_move_to(state, room_id, room_exit, direction)
end
end
def maybe_move_to(state, room_id, _, direction) do
with {:ok, state} <- check_cooldowns(state) do
state |> move_to(room_id, {:leave, direction}, {:enter, Exit.opposite(direction)})
else
{:error, :cooldowns_active} ->
state.socket |> @socket.echo(gettext("You cannot move while a skill is cooling down."))
end
end
defp check_cooldowns(state) do
case Enum.empty?(Map.keys(state.skills)) do
true ->
{:ok, state}
false ->
{:error, :cooldowns_active}
end
end
@doc """
Move the player to a new room
"""
def move_to(state, room_id, leave_reason, enter_reason) do
state = move_to_instrumented(state, room_id, leave_reason, enter_reason)
Game.Command.run(%Game.Command{module: Game.Command.Look, args: {}, system: true}, state)
{:update, state}
end
defp move_to_instrumented(state, room_id, leave_reason, enter_reason) do
%{save: save, character: character} = state
CharacterInstrumenter.movement(:player, fn ->
@environment.unlink(save.room_id)
@environment.leave(save.room_id, {:player, character}, leave_reason)
clear_target(state)
save = %{save | room_id: room_id}
state |> maybe_welcome_back()
state =
state
|> Player.update_save(save)
|> Map.put(:target, nil)
|> Map.put(:is_targeting, MapSet.new())
|> Map.put(:is_afk, false)
@environment.enter(room_id, {:player, character}, enter_reason)
@environment.link(room_id)
Quest.track_progress(state.character, {:room, room_id})
state
end)
end
@doc """
Open a door, if the door was closed
"""
def maybe_open_door(state, door_id) do
case Door.get(door_id) do
"closed" ->
Door.set(door_id, "open")
state.socket |> @socket.echo(gettext("You opened the door."))
_ ->
state.socket |> @socket.echo(gettext("The door was already open."))
end
state
end
@doc """
  Close a door, if the door was open
"""
def maybe_close_door(state, door_id) do
case Door.get(door_id) do
"open" ->
Door.set(door_id, "closed")
state.socket |> @socket.echo(gettext("You closed the door."))
_ ->
state.socket |> @socket.echo(gettext("The door was already closed."))
end
state
end
@doc """
  Welcome the player back if they were marked AFK
"""
def maybe_welcome_back(state) do
case state.is_afk do
true ->
state |> AFK.welcome_back()
_ ->
:ok
end
end
@doc """
Push out an update for the mini map after opening/closing doors
"""
def update_mini_map(state, room_id) do
{:ok, room} = @environment.look(room_id)
mini_map = room.zone_id |> @zone.map({room.x, room.y, room.map_layer}, mini: true)
state |> GMCP.map(mini_map)
:ok
end
@doc """
If the state has a target, send a GMCP message that the target was cleared
"""
@spec clear_target(Session.t()) :: :ok
def clear_target(state)
def clear_target(state = %{target: target}) when target != nil do
state |> GMCP.clear_target()
end
def clear_target(_state), do: :ok
end
| lib/game/command/move.ex |
defmodule Terrasol.Author do
@moduledoc """
Handling of Earthstar author strings and resulting
Terrasol.Author.t structures
"""
@enforce_keys [
:string,
:shortname,
:publickey
]
defstruct string: "",
shortname: "",
publickey: "",
privatekey: nil
@typedoc "An Earthstar author"
@type t() :: %__MODULE__{
string: String.t(),
shortname: String.t(),
publickey: binary,
privatekey: nil | binary
}
defimpl String.Chars, for: Terrasol.Author do
def to_string(author), do: "#{author.string}"
end
@doc """
Create a `Terrasol.Author` structure from a `keypair.json`-style file
`:error` on error
"""
def from_keypair_file(filename) do
try do
%{"address" => string, "secret" => privatekey} = filename |> File.read!() |> Jason.decode!()
case Terrasol.bdecode(privatekey) do
:error -> :error
pk -> build(%{string: string, privatekey: pk})
end
rescue
_ -> :error
end
end
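  # The keypair file is expected to hold "address" and "secret" keys, e.g.
  # (hypothetical, truncated values):
  #
  #   {"address": "@suzy.b...", "secret": "b..."}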
@doc """
Write a `keypair.json`-style file from a supplied identity.
  As a secret file, the `privatekey` must be included.
"""
def to_keypair_file(author, filename)
def to_keypair_file(%Terrasol.Author{privatekey: secret, string: address} = author, filename) do
try do
content = %{"address" => address, "secret" => Terrasol.bencode(secret)} |> Jason.encode!()
File.write!(filename, content)
File.chmod(filename, 0o600)
author
rescue
_ -> :error
end
end
def to_keypair_file(_, _), do: :error
@doc """
Fill a `Terrasol.Author` structure from an address string or
(possibly incomplete) map.
  Internal conflict resolution is deterministic, but depends on
  implementation-specific ordering which is not guaranteed and should
  not be relied upon to stay the same between versions.
`:error` on invalid input
"""
def build(input)
def build(%Terrasol.Author{} = input), do: input
def build(input) when is_binary(input) and byte_size(input) == 59, do: parse(input)
def build(input) when is_binary(input) do
try do
input
|> Jason.decode!(keys: :atoms!)
|> build
rescue
_ -> :error
end
end
def build(%{string: string, privatekey: pk}) do
most = parse(string)
case proper_keys(pk) do
{raw, _} -> %__MODULE__{most | privatekey: raw}
:error -> :error
end
end
def build(%{string: string}), do: string |> parse |> build()
# Non-string containing versions
def build(%{shortname: sn, publickey: pk, privatekey: sk} = full) do
case {proper_keys(pk), proper_keys(sk), verifyname(sn)} do
{:error, _, _} ->
build(Map.delete(full, :publickey))
{_, :error, _} ->
build(Map.delete(full, :privatekey))
{_, _, :error} ->
build(Map.delete(full, :shortname))
{{rpk, bpk}, {rsk, _}, short} ->
%__MODULE__{
shortname: short,
publickey: rpk,
privatekey: rsk,
string: "@" <> short <> "." <> bpk
}
end
end
def build(%{shortname: sn, publickey: pk} = full) do
case {proper_keys(pk), verifyname(sn)} do
{:error, _} ->
build(Map.delete(full, :publickey))
{_, :error} ->
build(Map.delete(full, :shortname))
{{rpk, bpk}, short} ->
struct(
__MODULE__,
%{full | publickey: rpk} |> Map.put(:string, "@" <> short <> "." <> bpk)
)
end
end
def build(%{shortname: sn, privatekey: sk} = full) do
case {proper_keys(sk), verifyname(sn)} do
{:error, _} ->
build(Map.delete(full, :privatekey))
{_, :error} ->
build(Map.delete(full, :shortname))
{{rsk, _bsk}, short} ->
{rpk, bpk} = rsk |> Ed25519.derive_public_key() |> proper_keys
struct(
__MODULE__,
%{full | privatekey: rsk}
|> Map.put(:publickey, rpk)
|> Map.put(:string, "@" <> short <> "." <> bpk)
)
end
end
def build(%{shortname: sn} = full) do
case verifyname(sn) do
:error ->
build(%{})
^sn ->
{rsk, rpk} = Ed25519.generate_key_pair()
build(full |> Map.put(:publickey, rpk) |> Map.put(:privatekey, rsk))
end
end
def build(input) when is_map(input),
do: build(input |> Map.put(:shortname, build_random_sn([])))
@snfirst 'abcdefghijklmnopqrstuvwxyz'
@snok @snfirst ++ '1234567890'
defp build_random_sn(list) when length(list) == 4, do: list |> Enum.reverse() |> to_string
defp build_random_sn([]) do
build_random_sn([Enum.random(@snfirst)])
end
defp build_random_sn(list) do
build_random_sn([Enum.random(@snok) | list])
end
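  # A generated shortname is exactly 4 characters: a lowercase letter followed by
  # three lowercase letters or digits, e.g. "suzy" or "b012" (this mirrors the
  # validation in checknamelist/2 below).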
defp proper_keys(key) when is_binary(key) do
case byte_size(key) do
32 -> {key, Terrasol.bencode(key)}
53 -> {Terrasol.bdecode(key), key}
_ -> :error
end
end
@doc """
Parse an author address into a `Terrasol.Author`
`:error` on invalid input
"""
def parse(address)
def parse(%Terrasol.Author{} = author), do: author
def parse(<<"@", name::binary-size(4), ".b", encpub::binary-size(52)>> = string) do
case {verifyname(name), Terrasol.bdecode("b" <> encpub)} do
{_, :error} -> :error
{:error, _} -> :error
{shortname, key} -> %Terrasol.Author{string: string, shortname: shortname, publickey: key}
end
end
def parse(_), do: :error
defp verifyname(string), do: checknamelist(to_charlist(string), [])
defp checknamelist([f | rest], []) when f in 97..122, do: checknamelist(rest, [f])
defp checknamelist(_, []), do: :error
defp checknamelist([h | t], acc) when h in 97..122 or h in 48..57,
do: checknamelist(t, [h | acc])
defp checknamelist([], acc) when length(acc) == 4, do: acc |> Enum.reverse() |> to_string
defp checknamelist(_, _), do: :error
end
| lib/terrasol/author.ex |
defmodule Crdt.VectorClock do
@moduledoc """
A Vector Clock is capable of generating a partial ordering of events in a distributed system and
detecting causality vioations. It is essentially a map of actors to their logical clock value.
"""
@type t :: %{any() => non_neg_integer()}
@doc """
Returns a new, empty Vector Clock.
"""
@spec new :: t()
def new do
%{}
end
@doc """
Returns `true` if `v1` is a direct descendent of `v2`. Note that a vector clock is its own
descendent.
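  ## Examples

      iex> Crdt.VectorClock.descends?(%{a: 2, b: 1}, %{a: 1})
      true
      iex> Crdt.VectorClock.descends?(%{a: 1}, %{a: 2})
      false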
"""
@spec descends?(t(), t()) :: boolean()
def descends?(_v1, v2) when v2 == %{}, do: true
def descends?(v1, v2) do
Enum.all?(v2, fn {id, timestamp2} ->
case v1[id] do
nil -> false
timestamp1 -> timestamp1 >= timestamp2
end
end)
end
@doc """
Returns `true` if `v1` descends `v2` and `v1` does not equal `v2`.
"""
@spec dominates?(t(), t()) :: boolean()
def dominates?(v1, v2), do: descends?(v1, v2) && !descends?(v2, v1)
@doc """
Returns `true` if `v1` and `v2` have diverged.
"""
@spec concurrent?(t(), t()) :: boolean()
def concurrent?(v1, v2), do: !descends?(v1, v2) && !descends?(v2, v1)
@doc """
  Forgets any actors in `v1` whose counts are smaller than or equal to their counts in `v2`.
"""
@spec forget(t(), t()) :: t()
def forget(v1, v2) do
v1
|> Stream.reject(fn {id, timestamp} -> timestamp <= get(v2, id) end)
|> Enum.into(%{})
end
@doc """
Merges `v1` and `v2`.
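  ## Example

      iex> Crdt.VectorClock.merge(%{a: 1, b: 2}, %{b: 1, c: 3})
      %{a: 1, b: 2, c: 3}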
"""
@spec merge(t(), t()) :: t()
def merge(v1, v2), do: Map.merge(v1, v2, fn _id, count1, count2 -> max(count1, count2) end)
@doc """
Returns the greatest-lower-bound of `v1` and `v2`.
"""
@spec greatest_lower_bound(t(), t()) :: t()
def greatest_lower_bound(v1, v2) do
v1
|> Stream.filter(fn {id, _timestamp} -> v2[id] != nil end)
|> Stream.map(fn {id, timestamp} -> {id, min(timestamp, v2[id])} end)
|> Enum.into(%{})
end
@doc """
Returns the count of `id` in `v` if it exists. Else it will return 0.
"""
@spec get(t(), any()) :: non_neg_integer()
def get(v, id) do
case v[id] do
nil -> 0
timestamp -> timestamp
end
end
@doc """
Increments the count of `id` in `v` by `value`.
"""
@spec increment(t(), any(), non_neg_integer()) :: t()
def increment(v, id, value \\ 1) do
Map.put(v, id, get(v, id) + value)
end
@doc """
Apply a `{id, count}` to the clock.
"""
@spec apply_dot(t(), {any(), non_neg_integer()}) :: t()
def apply_dot(v, {id, count}) do
if get(v, id) < count do
Map.put(v, id, count)
else
v
end
end
@doc """
  Returns a vector clock of all the common dots in `v1` and `v2`.
"""
@spec intersection(t(), t()) :: t()
def intersection(v1, v2) do
v1
|> Stream.filter(fn {id, count} -> get(v2, id) == count end)
|> Enum.into(%{})
end
end
| lib/crdt/vector_clock.ex |
defmodule QueryBuilder.Query.Where do
@moduledoc false
require Ecto.Query
import QueryBuilder.Utils
def where(ecto_query, assoc_list, filters, or_filters) do
dynamic_query = build_dynamic_query(ecto_query, assoc_list, filters, or_filters)
Ecto.Query.where(ecto_query, ^dynamic_query)
end
def build_dynamic_query(ecto_query, assoc_list, filters, or_filters) do
filters_list = [filters | Keyword.get_values(or_filters, :or)]
filters_list
|> Enum.filter(&(&1 != []))
|> Enum.map(fn filters ->
apply_filters(ecto_query, assoc_list, List.wrap(filters))
|> Enum.reduce(&Ecto.Query.dynamic(^&1 and ^&2))
end)
|> Enum.reduce(&Ecto.Query.dynamic(^&1 or ^&2))
end
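  # Illustrative shape of the arguments (field names are hypothetical):
  #
  #   build_dynamic_query(query, assoc_list,
  #     [{:name, "Alice"}, {:age, :gt, 18}],
  #     or: [{:role, :in, ["admin", "owner"]}]
  #   )
  #
  # builds (name == "Alice" and age > 18) or role in ["admin", "owner"].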
defp apply_filters(_query, _assoc_list, []), do: []
defp apply_filters(query, assoc_list, [filter | tail]) do
[apply_filter(query, assoc_list, filter) | apply_filters(query, assoc_list, tail)]
end
defp apply_filter(query, assoc_list, {field, value}) do
apply_filter(query, assoc_list, {field, :eq, value, []})
end
defp apply_filter(query, assoc_list, {field, operator, value}) do
apply_filter(query, assoc_list, {field, operator, value, []})
end
defp apply_filter(query, assoc_list, {field1, operator, field2, operator_opts})
when is_atom(field2) and field2 not in [nil, false, true] do
{field1, binding_field1} = find_field_and_binding_from_token(query, assoc_list, field1)
{field2, binding_field2} = find_field_and_binding_from_token(query, assoc_list, field2)
do_where(
binding_field1,
binding_field2,
{field1, operator, field2, operator_opts}
)
end
defp apply_filter(query, assoc_list, {field, operator, value, operator_opts}) do
{field, binding} = find_field_and_binding_from_token(query, assoc_list, field)
do_where(binding, {field, operator, value, operator_opts})
end
defp apply_filter(query, assoc_list, custom_fun) when is_function(custom_fun) do
custom_fun.(&(find_field_and_binding_from_token(query, assoc_list, &1)))
end
defp do_where(binding, {field, :in, values, []}) when is_list(values) do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) in ^values)
end
defp do_where(binding, {field, :not_in, values, []}) when is_list(values) do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) not in ^values)
end
defp do_where(binding, {field, :include, value, []}) do
Ecto.Query.dynamic([{^binding, x}], ^value in field(x, ^field))
end
defp do_where(binding, {field, :exclude, value, []}) do
Ecto.Query.dynamic([{^binding, x}], ^value not in field(x, ^field))
end
defp do_where(binding, {field, operator, nil, []}) when operator in [:eq, :equal_to] do
Ecto.Query.dynamic([{^binding, x}], is_nil(field(x, ^field)))
end
defp do_where(binding, {field, operator, value, []}) when operator in [:eq, :equal_to] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) == ^value)
end
defp do_where(binding, {field, operator, nil, []}) when operator in [:ne, :other_than] do
Ecto.Query.dynamic([{^binding, x}], not is_nil(field(x, ^field)))
end
defp do_where(binding, {field, operator, value, []}) when operator in [:ne, :other_than] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) != ^value)
end
defp do_where(binding, {field, operator, value, []}) when operator in [:gt, :greater_than] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) > ^value)
end
defp do_where(binding, {field, operator, value, []}) when operator in [:ge, :greater_than_or_equal_to] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) >= ^value)
end
defp do_where(binding, {field, operator, value, []}) when operator in [:lt, :less_than] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) < ^value)
end
defp do_where(binding, {field, operator, value, []}) when operator in [:le, :less_than_or_equal_to] do
Ecto.Query.dynamic([{^binding, x}], field(x, ^field) <= ^value)
end
defp do_where(binding, {field, search_operation, value, operator_opts})
when search_operation in [:starts_with, :ends_with, :contains] do
value =
value
|> String.replace("%", "\\%")
|> String.replace("_", "\\_")
value =
case search_operation do
:starts_with -> "#{value}%"
:ends_with -> "%#{value}"
:contains -> "%#{value}%"
end
case Keyword.get(operator_opts, :case, :sensitive) do
:sensitive ->
Ecto.Query.dynamic([{^binding, x}], like(field(x, ^field), ^value))
case_sensitivity when case_sensitivity in [:insensitive, :i] ->
Ecto.Query.dynamic([{^binding, x}], ilike(field(x, ^field), ^value))
end
end
defp do_where(binding, {field, :like, value, []}) do
Ecto.Query.dynamic([{^binding, x}], like(field(x, ^field), ^value))
end
defp do_where(binding, {field, :ilike, value, []}) do
Ecto.Query.dynamic([{^binding, x}], ilike(field(x, ^field), ^value))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:eq, :equal_to] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) == field(y, ^f2))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:ne, :other_than] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) != field(y, ^f2))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:gt, :greater_than] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) > field(y, ^f2))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:ge, :greater_than_or_equal_to] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) >= field(y, ^f2))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:lt, :less_than] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) < field(y, ^f2))
end
defp do_where(b1, b2, {f1, operator, f2, []}) when operator in [:le, :less_than_or_equal_to] do
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], field(x, ^f1) <= field(y, ^f2))
end
defp do_where(b1, b2, {f1, search_operation, f2, operator_opts})
when search_operation in [:starts_with, :ends_with, :contains] do
case Keyword.get(operator_opts, :case, :sensitive) do
:sensitive ->
case search_operation do
:starts_with ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? like concat(?, '%')", field(x, ^f1), field(y, ^f2)))
:ends_with ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? like concat('%', ?)", field(x, ^f1), field(y, ^f2)))
:contains ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? like concat('%', ?, '%')", field(x, ^f1), field(y, ^f2)))
end
case_sensitivity when case_sensitivity in [:insensitive, :i] ->
case search_operation do
:starts_with ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? ilike concat(?, '%')", field(x, ^f1), field(y, ^f2)))
:ends_with ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? ilike concat('%', ?)", field(x, ^f1), field(y, ^f2)))
:contains ->
Ecto.Query.dynamic([{^b1, x}, {^b2, y}], fragment("? ilike concat('%', ?, '%')", field(x, ^f1), field(y, ^f2)))
end
end
end
end
| lib/query/where.ex |
defmodule Slime.Parser.Nodes do
defmodule HTMLNode do
@moduledoc """
An HTML node.
* :name — tag name,
* :attributes — a list of {"name", :v} tuples, where :v is
either a string or an {:eex, "content"} tuple,
* :spaces — tag whitespace, represented as a keyword list of boolean
values for :leading and :trailing,
* :closed — the presence of a trailing "/", which explicitly closes the tag,
* :children — a list of nodes.
"""
defstruct name: "",
attributes: [],
spaces: %{},
closed: false,
children: []
end
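  # Sketch: a template line like `span.note Hello` would roughly parse to
  # %HTMLNode{name: "span", attributes: [{"class", "note"}], children: [...]}.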
defmodule EExNode do
@moduledoc """
An embedded code node.
* :content — embedded code,
* :output — should the return value be inserted in the page,
* :spaces — tag whitespace, represented as a keyword list of boolean
values for :leading and :trailing,
* :children — a list of nodes.
* :safe? - mark output as safe for html-escaping engines
"""
defstruct content: "",
output: false,
spaces: %{},
children: [],
safe?: false
end
defmodule HEExNode do
@moduledoc """
An HTML node that represents a HEEx function component.
* :name — function component (tag) name,
* :attributes — a list of {"name", :v} tuples, where :v is
either a string or an {:eex, "content"} tuple,
* :spaces — tag whitespace, represented as a keyword list of boolean
values for :leading and :trailing,
* :closed — the presence of a trailing "/", which explicitly closes the tag,
* :children — a list of nodes.
"""
defstruct name: "",
attributes: [],
spaces: %{},
closed: false,
children: []
end
defmodule VerbatimTextNode do
@moduledoc """
A verbatim text node.
* :content — a list of strings and %EExNode{} structs that
is concatenated during rendering. No newlines or spaces
are inserted between individual items.
"""
defstruct content: []
end
defmodule HTMLCommentNode do
@moduledoc """
An HTML comment node.
Similar to `Slime.Parser.Nodes.VerbatimTextNode`.
"""
defstruct content: []
end
defmodule InlineHTMLNode do
@moduledoc """
An inline HTML node.
    Similar to `Slime.Parser.Nodes.VerbatimTextNode`, with the exception
    of the :children field, which represents a list of nodes indented deeper
than the HTML content.
"""
defstruct content: [],
children: []
end
defmodule DoctypeNode do
@moduledoc """
A doctype node.
:name is a Slim shorthand (e.g. "xml" or "html").
"""
defstruct name: ""
end
end
| lib/slime/parser/nodes.ex |
defmodule Enchufeweb do
@moduledoc """
`Enchufeweb` is a websocket client library written in Elixir and based on
the Erlang library [websocket_client](https://hex.pm/packages/websocket_client).
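  A minimal client sketch (module name, URL and messages here are hypothetical):

      defmodule EchoClient do
        use Enchufeweb

        def handle_message(msg, state), do: {:ok, [msg | state]}
        def handle_connection(_req, state), do: {:reply, "hello", state}
        def handle_disconnection(_req, state), do: {:close, "bye", state}
      end

      {:ok, pid} = EchoClient.start_link(url: "ws://localhost:4000/ws", ws_opts: %{conn_mode: :once})
      EchoClient.ws_send(pid, "ping")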
"""
@type frame :: :close | :ping | :pong | binary
@type conn_mode :: :disconnected | :once | :reconnect
@type websocket_req :: map
@type state :: any
@doc """
Callback which will be called when a message is received.
The argument has to be a binary message or one of these atoms: :close, :ping or :pong
Output:
* {:ok, state} : nothing occurs
* {:reply, reply, state} : It will send `reply` to the server.
* {:close, reason, state} : It will close the connection due to `reason`.
"""
@callback handle_message(frame, state) :: {:ok, state}
| {:reply, frame, state}
| {:close, binary, state}
@doc """
Callback which will be called when the connection has been made.
Input:
* Websocket request information
* Current state
Output:
* {:ok, state}
* {:ok, keepalive, state} : `keepalive` will be the interval in ms for sending pings to the server.
* {:reply, reply, state} : It will directly send `reply` to the server.
* {:close, reason, state} : It will close the connection due to `reason`.
"""
@callback handle_connection(websocket_req, state) :: {:ok, state}
| {:ok , number, state}
| {:reply, frame, state}
| {:close, binary, state}
@doc """
Callback which will be called when the connection is closed
Input:
* Websocket request information
* Current state
Output:
* {:ok, state} : The process continues although the connection is closed.
* {:reconnect, state} : It tries to reconnect.
* {:reconnect, delay, state} : It tries to reconnect after `delay` ms.
* {:close, reason, state} : It terminates the process.
"""
@callback handle_disconnection(websocket_req, state) :: {:ok, state}
| {:reconnect, state}
| {:reconnect, integer, state}
| {:close, binary, state}
defmacro __using__(_) do
quote do
@behaviour Enchufeweb
require Logger
@msg_after_conn_time 10
def start_link(args) do
{:ok, url} = Keyword.fetch(args, :url)
:websocket_client.start_link(url, __MODULE__, args, args)
end
def ws_send(ws, message) do
frame = make_frame(message)
:websocket_client.cast(ws, frame)
end
def init(args) do
conn_mode =
with {:ok, ws_opts} <- Keyword.fetch(args, :ws_opts),
{:ok, conn_mode} <- Map.fetch(ws_opts, :conn_mode),
do: conn_mode
mode = if conn_mode == :disconnected, do: :ok, else: conn_mode
:crypto.start()
:ssl.start()
{mode, args}
end
def onconnect(msg, state) do
case handle_connection(msg, state) do
          {:reply, reply, new_state} ->
            Process.send_after(self(), make_frame(reply), @msg_after_conn_time)
            {:ok, new_state}
response ->
response
end
end
def ondisconnect(reason, state), do: handle_disconnection(reason, state)
def websocket_info(msg, _conn_state, state), do: {:reply, msg, state}
def websocket_terminate(_msg, _conn_state, _state), do: :ok
def websocket_handle({type, msg}, _conn_state, state) do
data =
cond do
type == :ping -> :ping
type == :pong -> :pong
type == :close -> :close
msg == "" -> type
true -> msg
end
case handle_message(data, state) do
          {:reply, reply, new_state} ->
            {:reply, make_frame(reply), new_state}
          {:close, reason, _new_state} ->
            {:close, reason, state}
_ ->
{:ok, state}
end
end
defp make_frame(data) do
cond do
is_atom(data) ->
data
is_binary(data) ->
if String.valid?(data), do: {:text, data}, else: {:binary, data}
end
end
end
end
@doc """
It will start (linked) the websocket client.
The argument will be a keyword list:
* url: String. For instance `ws://host:port/endpoint`
* ws_opts: It has to be a map which has to contain the connection mode (`%{conn_mode: conn_mode}`)
* :disconnected : It will begin with the client in a disconnected mode
* :once : It only tries one connection
* :reconnect : It will try to reconnect until it get it
"""
@spec start_link([url: binary, ws_opts: map]) :: {:ok, pid} | {:error, term}
def start_link([url: url, ws_opts: ws_opts]) do
:websocket_client.start_link(url, __MODULE__, ws_opts)
end
@doc """
It will send the given message using the given websocket(pid)
"""
@spec ws_send(pid, frame) :: :ok
def ws_send(ws, message), do: :websocket_client.cast(ws, message)
end
| lib/enchufeweb.ex |
defmodule Vow.Ref do
@moduledoc """
This vow is a reference to a 0-arity function that returns a vow.
This allows for the named definition of commonly used vows, and for
the definition of recursive vows.
"""
@behaviour Access
import Vow.FunctionWrapper, only: [wrap: 1]
alias Vow.ResolveError
defstruct [:mod, :fun]
@type t :: %__MODULE__{
mod: module | nil,
fun: atom
}
@doc false
@spec new(module | nil, atom) :: t
def new(module, function) do
%__MODULE__{
mod: module,
fun: function
}
end
@impl Access
def fetch(%__MODULE__{} = vow, key) do
case resolve(vow) do
{:ok, vow} -> Access.fetch(vow, key)
{:error, _} -> :error
end
end
@impl Access
def get_and_update(%__MODULE__{} = vow, key, fun) do
case resolve(vow) do
{:ok, vow} -> Access.get_and_update(vow, key, fun)
{:error, _} -> {nil, vow}
end
end
@impl Access
def pop(%__MODULE__{} = vow, key) do
case resolve(vow) do
{:ok, vow} -> Access.pop(vow, key)
{:error, _} -> {nil, vow}
end
end
@doc false
@spec resolve(t) :: {:ok, Vow.t()} | {:error, ResolveError.t()}
def resolve(%__MODULE__{mod: mod, fun: fun} = ref)
when is_atom(mod) and is_atom(fun) do
if function_exported?(mod, fun, 0) do
{:ok, apply(mod, fun, [])}
else
{:error, ResolveError.new(ref, wrap(&function_exported?(&1.mod, &1.fun, 0)))}
end
rescue
reason -> {:error, ResolveError.new(ref, nil, "#{inspect(reason)}")}
catch
:exit, reason ->
{:error, ResolveError.new(ref, nil, "Vow reference exited: #{inspect(reason)}")}
caught ->
{:error, ResolveError.new(ref, nil, "Vow reference threw: #{inspect(caught)}")}
end
def resolve(ref) do
{:error, ResolveError.new(ref, wrap(&(is_atom(&1.mod) and is_atom(&1.fun))))}
end
@doc """
Creates a new `Vow.Ref.t` using the `module` and function name (i.e. `atom`).
This should reference a 0-arity function that returns a vow in order to
resolved properly during a call to `Vow.conform/2`.
If `module` is not specified, then it defaults to the caller's module.
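  A sketch for illustration (assumes a plain 1-arity predicate is a valid vow):

      defmodule MyVows do
        import Vow.Ref

        def name_vow, do: &is_binary/1
        def person_vow, do: %{name: sref(:name_vow)}
      end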
"""
@spec sref(module | nil, atom) :: Macro.t()
defmacro sref(module \\ nil, function) do
module = module || __CALLER__.module
quote do
Vow.Ref.new(
unquote(module),
unquote(function)
)
end
end
defimpl Vow.RegexOperator do
@moduledoc false
alias Vow.ConformError.Problem
@impl Vow.RegexOperator
def conform(ref, path, via, route, val) do
case @for.resolve(ref) do
{:error, error} ->
{:error, [Problem.from_resolve_error(error, path, via, route, val)]}
{:ok, vow} ->
if Vow.regex?(vow) do
@protocol.conform(vow, path, [ref | via], route, val)
else
case Vow.Conformable.conform(vow, path, via, route, val) do
{:ok, conformed} -> {:ok, conformed, []}
{:error, problems} -> {:error, problems}
end
end
end
end
@impl Vow.RegexOperator
def unform(vow, val) do
Vow.Conformable.Vow.Ref.unform(vow, val)
end
end
defimpl Vow.Conformable do
@moduledoc false
alias Vow.ConformError.Problem
@impl Vow.Conformable
def conform(ref, path, via, route, val) do
case @for.resolve(ref) do
{:ok, vow} ->
@protocol.conform(vow, path, [ref | via], route, val)
{:error, error} ->
{:error, [Problem.from_resolve_error(error, path, via, route, val)]}
end
end
@impl Vow.Conformable
def unform(vow, val) do
case @for.resolve(vow) do
{:ok, vow} ->
@protocol.unform(vow, val)
{:error, _} ->
{:error, %Vow.UnformError{vow: vow, val: val}}
end
end
@impl Vow.Conformable
def regex?(vow) do
case @for.resolve(vow) do
{:ok, vow} -> @protocol.regex?(vow)
{:error, _} -> false
end
end
end
defimpl Inspect do
@moduledoc false
@impl Inspect
def inspect(%@for{mod: nil, fun: fun}, _opts) do
"#SRef<#{fun}>"
end
def inspect(%@for{mod: mod, fun: fun}, _opts) do
"#SRef<#{mod}.#{fun}>"
end
end
if Code.ensure_loaded?(StreamData) do
defimpl Vow.Generatable do
@moduledoc false
import StreamDataUtils, only: [lazy: 1]
alias Vow.Utils
@impl Vow.Generatable
def gen(vow, opts) do
ignore_warn? = Keyword.get(opts, :ignore_warn?, false)
_ = Utils.no_override_warn(vow, ignore_warn?)
{:ok, lazy(delayed_gen(vow, opts))}
end
@spec delayed_gen(Vow.t(), keyword) :: @protocol.result
defp delayed_gen(vow, opts) do
case @for.resolve(vow) do
{:ok, vow} ->
@protocol.gen(vow, opts)
{:error, reason} ->
{:error, reason}
end
end
end
end
end
| lib/vow/ref.ex |
defmodule Meddle.Pipe do
@moduledoc false
defstruct queue: :queue.new(),
stack: [],
direction: :enter
@type t :: %__MODULE__{
queue: :queue.queue(),
stack: list,
direction: direction
}
@type direction :: :enter | :leave
@spec new(list, direction) :: t
def new(items \\ [], direction \\ :enter) do
%__MODULE__{
queue: :queue.from_list(items),
direction: direction
}
end
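  # Illustrative:
  #
  #   Meddle.Pipe.new([:a, :b]) |> Meddle.Pipe.peek() #=> {:ok, :a}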
@spec get_direction(t) :: direction
def get_direction(pipe), do: pipe.direction
@spec put_direction(t, direction) :: t
def put_direction(pipe, direction) do
%{pipe | direction: direction}
end
@spec terminate(t) :: t
  def terminate(%{stack: [%__MODULE__{} = ip | s]} = pipe) do
    # Clear the nested pipe's queue as well as this pipe's own; re-invoking
    # terminate/1 on the rebuilt pipe would match this clause forever.
    %{pipe | queue: :queue.new(), stack: [terminate(ip) | s]}
  end
def terminate(pipe) do
%{pipe | queue: :queue.new()}
end
@spec halt(t) :: t
def halt(pipe) do
%{pipe | queue: :queue.new(), stack: []}
end
@spec enqueue(t, list) :: t
def enqueue(pipe, items) do
Map.update(
pipe,
:queue,
:queue.from_list(items),
&enqueue_impl(&1, items)
)
end
@spec peek(t) :: {:ok, any} | :error
def peek(pipe)
def peek(%{stack: [%__MODULE__{} = ip | _]}) do
peek(ip)
end
def peek(%{direction: :enter, queue: q}) do
case :queue.peek(q) do
{:value, x} -> {:ok, x}
_ -> :error
end
end
def peek(%{stack: [x | _], direction: :leave}) do
{:ok, x}
end
def peek(%{stack: [], direction: :enter}) do
:error
end
@spec next(t) :: {:ok, t} | :error
def next(%{queue: q, stack: [%__MODULE__{} = ip | s], direction: dir} = pipe) do
case {next(ip), dir} do
{{:ok, ip}, _} -> {:ok, %{pipe | stack: [ip | s]}}
{_, :leave} -> next(%{pipe | queue: :queue.cons(ip, q), stack: s})
{_, :enter} -> next_impl(pipe)
end
end
def next(pipe) do
next_impl(pipe)
end
@spec previous(t) :: {:ok, t} | :error
def previous(_pipe) do
:error
end
@spec pop(t) :: {any, t} | nil
def pop(pipe) do
with {:ok, pipe} <- next(pipe),
{:ok, x} <- peek(pipe) do
{x, pipe}
else
_ -> nil
end
end
@spec next_impl(t) :: {:ok, t} | :error
defp next_impl(pipe) do
case next_impl_non_recur(pipe) do
{%__MODULE__{}, pipe} ->
next(pipe)
{_, %{stack: [x | xs]} = pipe} when is_list(x) ->
next(%{pipe | stack: [new(x) | xs]})
{_, pipe} ->
{:ok, pipe}
_ ->
:error
end
end
@spec next_impl_non_recur(t) :: {any, t} | nil
defp next_impl_non_recur(%{queue: q, stack: s, direction: :enter} = pipe) do
case :queue.out(q) do
{{:value, x}, q} -> {x, %{pipe | queue: q, stack: [x | s]}}
_ -> nil
end
end
defp next_impl_non_recur(%{queue: q, stack: [x | xs], direction: :leave} = pipe) do
{x, %{pipe | queue: :queue.cons(x, q), stack: xs}}
end
defp next_impl_non_recur(%{stack: [], direction: :leave}), do: nil
@spec enqueue_impl(:queue.queue() | nil, list) :: :queue.queue()
defp enqueue_impl(nil, items) do
enqueue_impl(:queue.new(), items)
end
defp enqueue_impl(queue, items) do
Enum.reduce(items, queue, fn i, q ->
:queue.in(i, q)
end)
end
defimpl Meddle.Interceptor do
def invoke(_pipe, context) do
pipe = Meddle.get_container(context)
case {@for.pop(pipe), pipe.direction} do
{{x, pipe}, _} ->
context
|> put_pipe(pipe)
|> (&@protocol.invoke(x, &1)).()
|> (&invoke(Meddle.get_container(&1), &1)).()
{nil, :enter} ->
pipe = %{pipe | direction: :leave}
invoke(pipe, put_pipe(context, pipe))
{nil, :leave} ->
context
end
end
def coerce(pipe) do
pipe
|> Map.update!(:stack, &coerce_impl(&1))
|> Map.update!(:queue, fn queue ->
queue
|> :queue.to_list()
|> coerce_impl()
|> :queue.from_list()
end)
end
@spec coerce_impl([...]) :: [@protocol.t]
defp coerce_impl([]), do: []
defp coerce_impl([h | t]), do: [@protocol.coerce(h) | coerce_impl(t)]
@spec put_pipe(Meddle.context(), @for.t) :: Meddle.context()
defp put_pipe(context, pipe) do
Meddle.update_container(context, fn _ -> pipe end)
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(pipe, opts) do
coll = [
:queue.to_list(pipe.queue),
if(pipe.direction == :enter, do: "->", else: "<-"),
:lists.reverse(pipe.stack)
]
container_doc("#Pipe|", coll, "|", opts, &inspect_coll/2, break: :flex, separator: "")
end
defp inspect_coll(items, opts) when is_list(items) do
container_doc("[", items, "]", opts, &@protocol.inspect/2, break: :flex, separator: ",")
end
defp inspect_coll(item, _opts) when is_binary(item) do
item
end
defp inspect_coll(item, opts) do
@protocol.inspect(item, opts)
end
end
end
| lib/meddle/pipe.ex |
defmodule StarkInfra.Webhook do
alias __MODULE__, as: Webhook
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.Check
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
alias StarkInfra.Error
@moduledoc """
Groups Webhook related functions
"""
@doc """
A Webhook is used to subscribe to notification events on a user-selected endpoint.
Currently available services for subscription are contract, credit-note, signer, issuing-card, issuing-invoice, issuing-purchase, pix-request.in, pix-request.out, pix-reversal.in, pix-reversal.out, pix-claim, pix-key, pix-chargeback, pix-infraction.
## Parameters (required):
- `:url` [string]: Url that will be notified when an event occurs.
- `:subscriptions` [list of strings]: list of any non-empty combination of the available services. ex: ["contract", "credit-note", "signer", "issuing-card", "issuing-invoice", "issuing-purchase", "pix-request.in", "pix-request.out", "pix-reversal.in", "pix-reversal.out", "pix-claim", "pix-key", "pix-chargeback", "pix-infraction"]
## Attributes:
- `:id` [string, default nil]: unique id returned when the webhook is created. ex: "5656565656565656"
"""
@enforce_keys [
:url,
:subscriptions
]
defstruct [
:id,
:url,
:subscriptions
]
@type t() :: %__MODULE__{}
@doc """
Send a single Webhook subscription for creation in the Stark Infra API
## Parameters (required):
- `:url` [string]: url to which notification events will be sent. ex: "https://webhook.site/60e9c18e-4b5c-4369-bda1-ab5fcd8e1b29"
- `:subscriptions` [list of strings]: list of any non-empty combination of the available services. ex: ["contract", "credit-note", "signer", "issuing-card", "issuing-invoice", "issuing-purchase", "pix-request.in", "pix-request.out", "pix-reversal.in", "pix-reversal.out", "pix-claim", "pix-key", "pix-chargeback", "pix-infraction"]
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- Webhook struct with updated attributes
"""
@spec create(
user: Project.t() | Organization.t() | nil,
url: binary,
subscriptions: [binary]
) ::
{:ok, Webhook.t()} |
{:error, [Error.t()]}
def create(options \\ []) do
%{user: user, url: url, subscriptions: subscriptions} =
Enum.into(
options |> Check.enforced_keys([:url, :subscriptions]),
%{user: nil}
)
Rest.post_single(
resource(),
%Webhook{url: url, subscriptions: subscriptions},
%{user: user}
)
end
@doc """
Same as create(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec create!(user: Project.t() | Organization.t() | nil, url: binary, subscriptions: [binary]) :: any
def create!(options \\ []) do
%{user: user, url: url, subscriptions: subscriptions} =
Enum.into(
options |> Check.enforced_keys([:url, :subscriptions]),
%{user: nil, url: nil, subscriptions: nil}
)
Rest.post_single!(
resource(),
%Webhook{url: url, subscriptions: subscriptions},
%{user: user}
)
end
@doc """
Receive a single Webhook subscription struct previously created in the Stark Infra API by passing its id
## Parameters (required):
- `id` [string]: struct unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- Webhook struct with updated attributes
"""
@spec get(
binary,
user: Project.t() | Organization.t() | nil
) ::
{:ok, Webhook.t()} |
{:error, [%Error{}]}
def get(id, options \\ []) do
Rest.get_id(resource(), id, options)
end
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(binary, user: Project.t() | Organization.t() | nil) :: Webhook.t()
def get!(id, options \\ []) do
Rest.get_id!(resource(), id, options)
end
@doc """
Receive a stream of Webhook subscription structs previously created in the Stark Infra API
## Options:
- `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- stream of Webhook structs with updated attributes
"""
@spec query(
limit: integer,
user: Project.t() | Organization.t()
) ::
(
{:cont, {:ok, [Webhook.t()]}} |
{:error, [Error.t()]} |
{:halt, any} |
{:suspend, any},
any -> any
)
def query(options \\ []) do
Rest.get_list(resource(), options)
end
@doc """
Same as query(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec query!(
limit: integer,
user: Project.t() | Organization.t()
) ::
(
{:cont, [Webhook.t()]} |
{:halt, any} |
{:suspend, any},
any -> any
)
def query!(options \\ []) do
Rest.get_list!(resource(), options)
end
@doc """
Receive a list of up to 100 Webhook objects previously created in the Stark Infra API and the cursor to the next page.
Use this function instead of query if you want to manually page your requests.
## Options:
- `:cursor` [string, default nil]: cursor returned on the previous page function call
- `:limit` [integer, default 100]: maximum number of structs to be retrieved. Max = 100. ex: 35
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- list of Webhook structs with updated attributes
- cursor to retrieve the next page of Webhook objects
"""
@spec page(
cursor: binary,
limit: integer,
user: Project.t() | Organization.t()
) ::
{:ok, {binary, [Webhook.t()]}} |
{:error, [%Error{}]}
def page(options \\ []) do
Rest.get_page(resource(), options)
end
@doc """
Same as page(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec page!(
cursor: binary,
limit: integer,
user: Project.t() | Organization.t()
) :: [Webhook.t()]
def page!(options \\ []) do
Rest.get_page!(resource(), options)
end
@doc """
Delete a Webhook subscription entity previously created in the Stark Infra API
## Parameters (required):
- `id` [string]: Webhook unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- deleted Webhook struct
"""
@spec delete(
binary,
user: Project.t() | Organization.t() | nil
) ::
{:ok, Webhook.t()} |
{:error, [%Error{}]}
def delete(id, options \\ []) do
Rest.delete_id(resource(), id, options)
end
@doc """
Same as delete(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec delete!(
binary,
user: Project.t() | Organization.t() | nil
) :: Webhook.t()
def delete!(id, options \\ []) do
Rest.delete_id!(resource(), id, options)
end
@doc false
def resource() do
{
"Webhook",
&resource_maker/1
}
end
@doc false
def resource_maker(json) do
%Webhook{
id: json[:id],
url: json[:url],
subscriptions: json[:subscriptions]
}
end
end
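# A minimal usage sketch for the module above (the URL is the hypothetical
# example from the docs; a default project/organization is assumed to be
# configured, so no :user option is passed):
#
#   {:ok, webhook} =
#     StarkInfra.Webhook.create(
#       url: "https://webhook.site/60e9c18e-4b5c-4369-bda1-ab5fcd8e1b29",
#       subscriptions: ["contract", "credit-note"]
#     )
#
#   {:ok, _found} = StarkInfra.Webhook.get(webhook.id)
#   {:ok, _deleted} = StarkInfra.Webhook.delete(webhook.id)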
# file: lib/webhook/webhook.ex
defmodule Web3x.Abi do
@doc "Decodes event based on given data and provided signature"
@spec decode_event(binary(), binary()) :: any()
def decode_event(data, signature) do
formatted_data =
data
|> String.slice(2..-1)
|> Base.decode16!(case: :lower)
fs = ABI.FunctionSelector.decode(signature)
ABI.TypeDecoder.decode(formatted_data, fs)
end
@doc "Loads the abi at the file path and reformats it to a map"
@spec load_abi(binary()) :: list() | {:error, atom()}
def load_abi(file_path) do
with {:ok, cwd} <- File.cwd(),
{:ok, abi} <- File.read(Path.join([cwd, file_path])) do
reformat_abi(Jason.decode!(abi))
end
end
@doc "Loads an abi from map and reformats input / oututs into tuples for ABI to read"
def load_abi_map(map) do
reformat_abi(map)
end
@doc "Loads the hardhat abi at the file path and reformats it to a map"
@spec load_hardhat_abi(binary()) :: list() | {:error, atom()}
def load_hardhat_abi(file_path) do
with {:ok, cwd} <- File.cwd(),
{:ok, abi} <- File.read(Path.join([cwd, file_path])) do
abi_map = Jason.decode!(abi)
reformat_abi(abi_map["abi"])
end
end
@doc "Loads the bin at from the .json from the file path"
@spec load_bin(binary()) :: binary()
def load_bin(file_path) do
with {:ok, cwd} <- File.cwd(),
{:ok, bin} <- File.read(Path.join([cwd, file_path])) do
bin
end
end
@doc "Loads the hardhat_bin ar the file path"
@spec load_hardhat_bin(binary()) :: binary()
def load_hardhat_bin(file_path) do
with {:ok, cwd} <- File.cwd(),
{:ok, bin} <- File.read(Path.join([cwd, file_path])) do
bin_map = Jason.decode!(bin)
"0x" <> bytecode = bin_map["bytecode"]
bytecode
end
end
@doc "Decodes data based on given type signature"
@spec decode_data(binary(), binary()) :: any()
def decode_data(types_signature, data) do
{:ok, trim_data} = String.slice(data, 2..String.length(data)) |> Base.decode16(case: :lower)
ABI.decode(types_signature, trim_data) |> List.first()
end
@doc "Decodes output based on specified functions return signature"
@spec decode_output(map(), binary(), binary()) :: list()
def decode_output(abi, name, output) do
{:ok, trim_output} = String.slice(output, 2..-1) |> Base.decode16(case: :lower)
output_types = Enum.map(abi[name]["outputs"], fn x -> x["type"] end)
types_signature = format_output_type_signatures(output_types)
output_signature = "#{name}(#{types_signature})"
outputs =
ABI.decode(output_signature, trim_output)
|> List.first()
|> maybe_tuple_to_list()
outputs
end
@doc "Returns the type signature of a given function"
@spec types_signature(map(), binary()) :: binary()
def types_signature(abi, name) do
input_types = Enum.map(abi[name]["inputs"], fn x -> x["type"] end)
types_signature = Enum.join(["(", Enum.join(input_types, ","), ")"])
types_signature
end
@doc "Returns the 4 character method id based on the hash of the method signature"
@spec method_signature(map(), binary()) :: binary()
def method_signature(abi, name) do
if abi[name] do
input_signature = ExKeccak.hash_256("#{name}#{types_signature(abi, name)}")
# Take first four bytes
<<init::binary-size(4), _rest::binary>> = input_signature
init
else
raise "#{name} method not found in the given abi"
end
end
@doc "Encodes data into Ethereum hex string based on types signature"
@spec encode_data(binary(), list()) :: binary()
def encode_data(types_signature, data) do
ABI.TypeEncoder.encode_raw(
[List.to_tuple(data)],
ABI.FunctionSelector.decode_raw(types_signature)
)
end
@doc "Encodes list of options and returns them as a map"
@spec encode_options(map(), list()) :: map()
def encode_options(options, keys) do
keys
|> Enum.filter(fn option ->
Map.has_key?(options, option)
end)
|> Enum.map(fn option ->
{option, encode_option(options[option])}
end)
|> Enum.into(%{})
end
@doc "Encodes options into Ethereum JSON RPC hex string"
@spec encode_option(integer()) :: binary()
def encode_option(0), do: "0x0"
def encode_option(nil), do: nil
def encode_option(value) do
"0x" <>
(value
|> :binary.encode_unsigned()
|> Base.encode16(case: :lower)
|> String.trim_leading("0"))
end
@doc "Encodes data and appends it to the encoded method id"
@spec encode_method_call(map(), binary(), list()) :: binary()
def encode_method_call(abi, name, input) do
encoded_method_call =
method_signature(abi, name) <> encode_data(types_signature(abi, name), input)
encoded_method_call |> Base.encode16(case: :lower)
end
@doc "Encodes input from a method call based on function signature"
@spec encode_input(map(), binary(), list()) :: binary()
def encode_input(abi, name, input) do
if abi[name]["inputs"] do
input_types = Enum.map(abi[name]["inputs"], fn x -> x["type"] end)
types_signature = Enum.join(["(", Enum.join(input_types, ","), ")"])
input_signature = ExKeccak.hash_256("#{name}#{types_signature}")
# Take first four bytes
<<init::binary-size(4), _rest::binary>> = input_signature
encoded_input =
init <>
ABI.TypeEncoder.encode_raw(
[List.to_tuple(input)],
ABI.FunctionSelector.decode_raw(types_signature)
)
encoded_input |> Base.encode16(case: :lower)
else
raise "#{name} method not found with the given abi"
end
end
defp format_output_type_signatures(output_types) do
if length(output_types) == 1 and List.first(output_types) == "string" do
List.first(output_types)
else
Enum.join(["(", Enum.join(output_types, ","), ")"])
end
end
defp maybe_tuple_to_list(maybe_tuple) when is_binary(maybe_tuple) do
[maybe_tuple]
end
defp maybe_tuple_to_list(tuple) when is_tuple(tuple) do
Tuple.to_list(tuple)
end
defp reformat_abi(abi) do
abi
|> Enum.map(&map_abi/1)
|> Map.new()
end
defp map_abi(x) do
case {x["name"], x["type"]} do
{nil, "constructor"} -> {:constructor, x}
{nil, "fallback"} -> {:fallback, x}
{name, _} -> {name, x}
end
end
end
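# A hedged usage sketch for the module above; the ABI path, the "balanceOf"
# function, and `address_bytes` are hypothetical and assume a compiled
# ERC-20-style contract ABI on disk:
#
#   abi = Web3x.Abi.load_abi("priv/contracts/MyToken.abi")
#   data = Web3x.Abi.encode_method_call(abi, "balanceOf", [address_bytes])
#   # `data` is a lowercase hex payload (no "0x" prefix) suitable for eth_call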
# file: lib/web3x/abi.ex
defmodule ExImageInfo do
alias ExImageInfo.Types.{PNG, GIF, JPEG, BMP, TIFF, WEBP, PSD, JP2, PNM, ICO}
@moduledoc """
ExImageInfo is an Elixir library to parse images (binaries) and get the dimensions (size), detected mime-type and overall validity for a set of image formats. This is the main module: it parses a binary and reports whether it seems to be an image (validity), its mime-type (and detected variant), and its dimensions, based on a specific image format.
It has convention functions to guess the type of an image
by trying the formats supported by the library.
## Main features
- Check the validity of binary by providing a specific image format*.
- Guess the validity of an image*.
- Get the mime-type and variant type by providing a specific format.
- Guess the mime-type and variant type of an image.
- Get the dimensions of an image by providing a specific format.
- Guess the dimensions of an image.
*Note: both cases as a general overview (partially checked).
## Formats
Supported formats (image type to be parsed as):
- `:bmp`
- `:gif`
- `:ico` (new in `v0.2.0`)
- `:jpeg`
- `:jpg` (alias of `jpeg` in `v0.2.3`)
- `:jp2` (new in `v0.2.0`)
- `:png`
- `:pnm` (new in `v0.2.0`)
- `:psd`
- `:tiff`
- `:webp` (VP8X animated in `v0.2.4`)
## Mime-types and Variants
The image variant type is an invented string to identify the
type of format recognized by this library (more specific than the
mime-type).
Each mime-type can be linked to at least one variant type:
| mime-type | variant type | description |
| ------------------------- | ------------ | ------------------ |
| `image/bmp` | `BMP` | |
| `image/gif` | `GIF87a` | 87a gif spec |
| `image/gif` | `GIF89a` | 89a gif spec |
| `image/x-icon` | `ICO` | |
| `image/jpeg` | `baseJPEG` | baseline JPEG |
| `image/jpeg` | `progJPEG` | progressive JPEG |
| `image/jp2` | `JP2` | JPEG2000 |
| `image/png` | `PNG` | |
| `image/x-portable-anymap` | `PNMpbm` | Portable BitMap |
| `image/x-portable-anymap` | `PNMpgm` | Portable GrayMap |
| `image/x-portable-anymap` | `PNMppm` | Portable PixMap |
| `image/psd` | `PSD` | |
| `image/tiff` | `TIFFII` | II variant |
| `image/tiff` | `TIFFMM` | MM variant |
| `image/webp` | `webpVP8` | lossy |
| `image/webp` | `webpVP8L` | lossless |
| `image/webp` | `webpVP8X` | animated |
The variant type is created just to provide a bit more of information
for every image format (if applicable).
*Note*: `:ico` returns the dimensions of the largest image contained (not the first found).
The guessing functions try to detect the format of the binary by testing every available type based on its global usage (popularity, [usage of image file formats](https://w3techs.com/technologies/overview/image_format/all), but still keeping the `:png` as the first one):
- `:png`, `:jpeg`, `:gif`, `:bmp`, `:ico`, `:tiff`, `:webp`, `:psd`, `:jp2`, `:pnm`
"""
# Guessing function ordered by global usage
# https://w3techs.com/technologies/overview/image_format/all
# but still keeping :png as the first
@types [:png, :jpeg, :gif, :bmp, :ico, :tiff, :webp, :psd, :jp2, :pnm]
## Public API
@doc """
Detects if the given binary seems to be in the given image format.
Valid [formats](#module-formats) to be used.
Returns `true` if seems to be, `false` otherwise.
## Examples
`89 50 4E 47 0D 0A 1A 0A` are the first 8 bytes in the `PNG` signature (`PNG\\r\\n0x1A\\n`).
iex> ExImageInfo.seems? <<0x89504E470D0A1A0A::size(64)>>, :png
true
iex> ExImageInfo.seems? <<0x89504E470D0A1A0A::size(64)>>, :webp
false
`ExImageInfo.seems?/2` and `ExImageInfo.seems?/1` do not necessarily need a real image (as shown in the previous example) because they just check the signature of every file format.
Usually it is used as:
ExImageInfo.seems? File.read!("path/to/image.gif"), :gif
# true
maybe_png_binary |> ExImageInfo.seems? :png
# false
"""
@spec seems?(binary, format :: atom) :: boolean | nil
def seems?(binary, format)
def seems?(binary, :png), do: PNG.seems?(binary)
def seems?(binary, :gif), do: GIF.seems?(binary)
def seems?(binary, :jpeg), do: JPEG.seems?(binary)
def seems?(binary, :jpg), do: JPEG.seems?(binary)
def seems?(binary, :bmp), do: BMP.seems?(binary)
def seems?(binary, :tiff), do: TIFF.seems?(binary)
def seems?(binary, :webp), do: WEBP.seems?(binary)
def seems?(binary, :psd), do: PSD.seems?(binary)
def seems?(binary, :jp2), do: JP2.seems?(binary)
def seems?(binary, :pnm), do: PNM.seems?(binary)
def seems?(binary, :ico), do: ICO.seems?(binary)
def seems?(_, _), do: nil
@doc """
Detects the image format that seems to be the given binary (*guessed* version of `ExImageInfo.seems?/2`).
Returns the valid [format](#module-formats) (atom) if it matches, `nil` otherwise.
## Examples
`38 42 50 53` are the first 4 bytes in the `PSD` signature (`8BPS`).
iex> ExImageInfo.seems? <<0x38425053::size(32)>>
:psd
iex> ExImageInfo.seems? <<0x384250::size(24)>>
nil
`ExImageInfo.seems?/2` and `ExImageInfo.seems?/1` do not necessarily need a real image (as shown in the previous example) because they just check the signature of every file format.
Usually it is used as:
ExImageInfo.seems? File.read!("path/to/image.unknown")
# :tiff
webp_full_binary |> ExImageInfo.seems?
# :webp
"""
@spec seems?(binary) :: atom | nil
def seems?(binary), do: try_seems?(binary, @types)
@doc """
Gets the mime-type and variant type for the given image format and binary.
Possible [Mime-types and Variants](#module-mime-types-and-variants) to be returned.
Valid [formats](#module-formats) to be used.
Returns a 2-item tuple with the mime-type and the variant type when the binary matches, `nil` otherwise.
## Examples
`89 50 4E 47 0D 0A 1A 0A` are the first 8 bytes in the `PNG` signature (`PNG\\r\\n0x1A\\n`).
iex> ExImageInfo.type <<0x89504E470D0A1A0A::size(64)>>, :png
nil
iex> ExImageInfo.type <<"RIFF", 0::size(32), "WEBPVP8L", 0::size(32), 0x2F7AC07100358683B68D::size(80)>>, :webp
{"image/webp", "webpVP8L"}
The signature part of a PNG alone is not enough to get the type (the check also covers the IHDR field, just before the width and height).
Usually it is used as:
ExImageInfo.type File.read!("path/to/image.gif"), :gif
# {"image/gif", "GIF87a"}
maybe_png_binary |> ExImageInfo.type :png
# nil
"""
@spec type(binary, format :: atom) :: {mimetype :: String.t, variant :: String.t} | nil
def type(binary, format)
def type(binary, :png), do: PNG.type(binary)
def type(binary, :gif), do: GIF.type(binary)
def type(binary, :jpeg), do: JPEG.type(binary)
def type(binary, :jpg), do: JPEG.type(binary)
def type(binary, :bmp), do: BMP.type(binary)
def type(binary, :tiff), do: TIFF.type(binary)
def type(binary, :webp), do: WEBP.type(binary)
def type(binary, :psd), do: PSD.type(binary)
def type(binary, :jp2), do: JP2.type(binary)
def type(binary, :pnm), do: PNM.type(binary)
def type(binary, :ico), do: ICO.type(binary)
def type(_, _), do: nil
@doc """
Gets the mime-type and variant type for the given image binary (*guessed* version of `ExImageInfo.type/2`).
Possible [Mime-types and Variants](#module-mime-types-and-variants) to be returned.
Returns a 2-item tuple with the mime-type and the variant type when the binary matches, `nil` otherwise.
## Examples
iex> ExImageInfo.type <<0x38425053::size(32)>>
{"image/psd", "PSD"}
iex> ExImageInfo.type <<0x384250::size(24)>>
nil
Usually it is used as:
ExImageInfo.type File.read!("path/to/image.unknown")
# {"image/tiff", "TIFFMM"}
webp_full_binary |> ExImageInfo.type
# {"image/webp", "webpVP8"}
"""
@spec type(binary) :: {mimetype :: String.t, variant :: String.t} | nil
def type(binary), do: try_type(binary, @types)
@doc """
Gets the mime-type, variant-type and dimensions (width, height) for the given image format and binary.
Possible [Mime-types and Variants](#module-mime-types-and-variants) to be returned.
Valid [formats](#module-formats) to be used.
Returns a 4-item tuple with the mime-type, width, height and the variant type when the binary matches, `nil` otherwise.
## Examples
`89 50 4E 47 0D 0A 1A 0A` are the first 8 bytes in the `PNG` signature (`PNG\\r\\n0x1A\\n`).
iex> ExImageInfo.info <<0x89504E470D0A1A0A::size(64)>>, :png
nil
iex> ExImageInfo.info <<"RIFF", 0::size(32), "WEBPVP8L", 0::size(32), 0x2F7AC07100358683B68D::size(80)>>, :webp
{"image/webp", 123, 456, "webpVP8L"}
The signature part of a PNG alone is not enough to get the type (the check also covers the IHDR field, just before the width and height).
Usually it is used as:
ExImageInfo.info File.read!("path/to/image.gif"), :gif
# {"image/gif", 1920, 1080, "GIF87a"}
maybe_png_binary |> ExImageInfo.info :png
# nil
"""
@spec info(binary, format :: atom) ::
{mimetype :: String.t, width :: integer, height :: integer, variant :: String.t} | nil
def info(binary, format)
def info(binary, :png), do: PNG.info(binary)
def info(binary, :gif), do: GIF.info(binary)
def info(binary, :jpeg), do: JPEG.info(binary)
def info(binary, :jpg), do: JPEG.info(binary)
def info(binary, :bmp), do: BMP.info(binary)
def info(binary, :tiff), do: TIFF.info(binary)
def info(binary, :webp), do: WEBP.info(binary)
def info(binary, :psd), do: PSD.info(binary)
def info(binary, :jp2), do: JP2.info(binary)
def info(binary, :pnm), do: PNM.info(binary)
def info(binary, :ico), do: ICO.info(binary)
def info(_, _), do: nil
@doc """
Gets the mime-type, variant-type and dimensions (width, height) for the given image binary (*guessed* version of `ExImageInfo.info/2`).
Possible [Mime-types and Variants](#module-mime-types-and-variants) to be returned.
Returns a 4-item tuple with the mime-type, width, height and the variant type when the binary matches, `nil` otherwise.
## Examples
iex> ExImageInfo.info <<0x38425053::size(32)>>
nil
iex> ExImageInfo.info <<0x38425053::size(32), 0::size(80), 10::size(32), 12::size(32)>>
{"image/psd", 12, 10, "PSD"}
Usually it is used as:
ExImageInfo.info File.read!("path/to/image.unknown")
# {"image/tiff", 128, 256, "TIFFMM"}
webp_full_binary |> ExImageInfo.info
# {"image/webp", 20, 100, "webpVP8"}
"""
@spec info(binary) :: {mimetype :: String.t, width :: integer, height :: integer, variant :: String.t} | nil
def info(binary), do: try_info(binary, @types)
## Private
defp try_seems?(_binary, []), do: nil
defp try_seems?(binary, [type | types]) do
if seems?(binary, type), do: type, else: try_seems?(binary, types)
end
defp try_type(_binary, []), do: nil
defp try_type(binary, [type | types]) do
case type(binary, type) do
type_t when is_tuple(type_t) -> type_t
_ -> try_type(binary, types)
end
end
defp try_info(_binary, []), do: nil
defp try_info(binary, [type | types]) do
case info(binary, type) do
info_t when is_tuple(info_t) -> info_t
_ -> try_info(binary, types)
end
end
end
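# Sketch of the typical call pattern, assuming an image file on disk (file
# name and resulting values are hypothetical); for the formats above, the
# type and dimension data sits near the front of the binary:
#
#   binary = File.read!("path/to/photo.png")
#   ExImageInfo.seems?(binary)   # => :png
#   ExImageInfo.info(binary)     # => {"image/png", 1920, 1080, "PNG"}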
# file: lib/ex_image_info.ex
defmodule Adventofcode.Day14DiskDefragmentation do
alias Adventofcode.Day10KnotHash
def squares_count(input) do
input
|> squares()
|> Enum.map(&used_square_count/1)
|> Enum.sum()
end
def regions_count(input) do
input
|> regions()
|> length()
end
def regions(input) do
input
|> squares()
|> free_squares_to_coordinates()
|> group_coordinates()
end
defp squares(input) do
0..127
|> Enum.map(&"#{input}-#{&1}")
|> Enum.map(&Day10KnotHash.knot_hash/1)
|> Enum.map(&convert_hash_to_bits/1)
end
defp free_squares_to_coordinates(squares) do
Enum.flat_map(Enum.with_index(squares), fn {line, y} ->
line
|> String.graphemes()
|> Enum.with_index()
|> Enum.filter(fn {char, _} -> char == "1" end)
|> Enum.map(fn {_, x} -> {x, y} end)
end)
end
defp group_coordinates(coordinates, groups \\ [])
defp group_coordinates([], groups), do: groups
defp group_coordinates([{x, y} | coordinates], groups) do
{group, coordinates} = do_group_coordinates(MapSet.new([{x, y}]), coordinates)
group_coordinates(coordinates, [group | groups])
end
defp do_group_coordinates(group, coordinates) do
case take_neighbours(group, coordinates) do
{^group, coordinates} -> {group, coordinates}
{group, coordinates} -> do_group_coordinates(group, coordinates)
end
end
def take_neighbours(group, coordinates) do
neighbours =
group
|> Enum.flat_map(&neighbour_coordinates/1)
|> Enum.filter(&(&1 in coordinates))
|> Enum.reject(&(&1 in group))
group = MapSet.union(MapSet.new(group), MapSet.new(neighbours))
coordinates = Enum.reject(coordinates, &(&1 in group))
{group, coordinates}
end
def neighbour_coordinates({x, y}) do
[{x - 1, y}, {x + 1, y}, {x, y + 1}, {x, y - 1}]
|> Enum.filter(fn {x, y} -> x >= 0 and x <= 127 and y >= 0 and y <= 127 end)
end
defp convert_hash_to_bits(hex_hash) do
hex_hash
|> String.graphemes()
|> Enum.map(&convert_hex_digit_to_bits/1)
|> Enum.join()
end
defp convert_hex_digit_to_bits(hex_digit) do
hex_digit
|> String.to_integer(16)
|> Integer.to_string(2)
|> String.pad_leading(4, "0")
end
defp used_square_count(line) do
line
|> String.graphemes()
|> Enum.filter(&(&1 == "1"))
|> Enum.count()
end
def pretty_print(regions) do
coordinates =
regions
|> Enum.with_index()
|> Enum.reduce(%{}, fn {coordinates, index}, acc ->
Enum.reduce(coordinates, acc, &Map.put(&2, &1, index))
end)
Enum.map_join(0..127, "\n", fn y ->
Enum.map_join(0..127, "", fn x ->
case coordinates[{x, y}] do
nil -> " "
group -> Integer.to_string(group, 36)
end
end)
end)
end
end
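# Worked example of the hash-to-bits conversion above: each hex digit becomes
# exactly four bits, e.g. "a" -> 10 -> "1010" and "0" -> "0000", so a
# 32-digit knot hash expands to a 128-bit row:
#
#   iex> "a0" |> String.graphemes() |> Enum.map(fn d ->
#   ...>   d |> String.to_integer(16) |> Integer.to_string(2) |> String.pad_leading(4, "0")
#   ...> end) |> Enum.join()
#   "10100000"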
# file: lib/day_14_disk_defragmentation.ex
defmodule Flower.Bloom do
use Bitwise
alias Flower.Native.BitArray, as: BitArray
@moduledoc """
Flower.Bloom implements a Bloom Filter.
For this Bloom Filter sha256 or sha512 is used as hash function.
"""
@ser_vsn 1
@byte_sizes [
:"8 Byte",
:"16 Byte",
:"32 Byte",
:"64 Byte",
:"128 Byte",
:"256 Byte",
:"512 Byte",
:"1 KB",
:"2 KB",
:"4 KB",
:"8 KB",
:"16 KB",
:"32 KB",
:"64 KB",
:"128 KB",
:"256 KB",
:"512 KB",
:"1 MB",
:"2 MB",
:"4 MB",
:"8 MB",
:"16 MB",
:"32 MB",
:"64 MB",
:"128 MB",
:"256 MB",
:"512 MB"
]
@type bloomfilter ::
{:bloom, bitarray :: reference(), bitaddrmask :: integer(), number_of_hashes :: 1..8}
@type size_atom ::
:"8 Byte"
| :"16 Byte"
| :"32 Byte"
| :"64 Byte"
| :"128 Byte"
| :"256 Byte"
| :"512 Byte"
| :"1 KB"
| :"2 KB"
| :"4 KB"
| :"8 KB"
| :"16 KB"
| :"32 KB"
| :"64 KB"
| :"128 KB"
| :"256 KB"
| :"512 KB"
| :"1 MB"
| :"2 MB"
| :"4 MB"
| :"8 MB"
| :"16 MB"
| :"32 MB"
| :"64 MB"
| :"128 MB"
| :"256 MB"
| :"512 MB"
@doc """
Create a new Bloom Filter whose size is given either as a `size_atom()` or as a `bitaddrlen`, yielding 2^bitaddrlen bits.
| Bitaddrlen | Size | Bitaddrlen | Size | Bitaddrlen | Size |
|-------:|---------:|-------:|---------:|-------:|---------:|
| __ __ | | __13__ | 1 KB | __23__ | 1 MB |
| __ __ | | __14__ | 2 KB | __24__ | 2 MB |
| __ __ | | __15__ | 4 KB | __25__ | 4 MB |
| __6 __ | 8 Byte | __16__ | 8 KB | __26__ | 8 MB |
| __7 __ | 16 Byte | __17__ | 16 KB | __27__ | 16 MB |
| __8 __ | 32 Byte | __18__ | 32 KB | __28__ | 32 MB |
| __9 __ | 64 Byte | __19__ | 64 KB | __29__ | 64 MB |
| __10__ | 128 Byte | __20__ | 128 KB | __30__ | 128 MB |
| __11__ | 256 Byte | __21__ | 256 KB | __31__ | 256 MB |
| __12__ | 512 Byte | __22__ | 512 KB | __32__ | 512 MB |
"""
@spec new(bitaddrlen :: 6..32, expected_elements :: pos_integer()) :: bloomfilter()
def new(bitaddrlen, expected_elements) when bitaddrlen in 6..32 do
number_of_hashes =
1..16 |> Enum.min_by(&calc_fp_prob(expected_elements, 1 <<< bitaddrlen, &1))
if number_of_hashes == 1 do
IO.warn("Your Bloom filter is too small for the expected number of elements!")
end
{:bloom, BitArray.new(1 <<< bitaddrlen), (1 <<< bitaddrlen) - 1, number_of_hashes}
end
@spec new(size :: size_atom(), expected_elements :: pos_integer()) :: bloomfilter()
def new(bytes, expected_elements) when bytes in @byte_sizes do
bitaddrlen = 6 + Enum.find_index(@byte_sizes, fn x -> x == bytes end)
new(bitaddrlen, expected_elements)
end
@doc """
Create a new Bloom Filter with maximum byte size 'bytes'. The size gets
rounded down to the next `size_atom()`.
"""
@spec new_by_byte_size(bytes :: size_atom(), expected_elements :: pos_integer()) ::
bloomfilter()
def new_by_byte_size(bytes, expected_elements) when bytes in @byte_sizes do
new(bytes, expected_elements)
end
@spec new_by_byte_size(bytes :: pos_integer(), expected_elements :: pos_integer()) ::
bloomfilter()
def new_by_byte_size(bytes, expected_elements) do
bitaddrlen = trunc(:math.log2(bytes * 8))
new(bitaddrlen, expected_elements)
end
defp calc_fp_prob(elem, size, number_of_hashes) do
fraction_of_0 = :math.exp(-number_of_hashes * elem / size)
fraction_of_1 = 1 - fraction_of_0
false_positives = :math.pow(fraction_of_1, number_of_hashes)
false_positives
end
@doc """
Calculates the false positive probability for a given bloom filter.
Return value is between `0.0` and `1.0`.
This operation is slow for large Bloom Filters and should be avoided in that case.
"""
@spec false_positive_probability(bloomfilter()) :: float()
def false_positive_probability({:bloom, bitarray, _mask, number_of_hashes}) do
fraction_of_1 = BitArray.count_ones(bitarray) / BitArray.bit_length(bitarray)
false_positives = :math.pow(fraction_of_1, number_of_hashes)
false_positives
end
@doc """
Estimates how many unique elements have been added.
This operation is slow for large Bloom Filters and should be avoided in that case.
"""
@spec estimate_count(bloomfilter()) :: non_neg_integer()
def estimate_count({:bloom, bitarray, _mask, number_of_hashes}) do
bits = BitArray.bit_length(bitarray)
ones = BitArray.count_ones(bitarray)
fraction_of_1 = ones / bits
fraction_of_0 = 1 - fraction_of_1
elements = -1 * :math.log(fraction_of_0) * bits / number_of_hashes
round(elements)
end
@doc """
Inserts an Erlang Term into the Bloom Filter.
"""
@spec insert(bloomfilter(), any()) :: :ok
def insert({:bloom, bitarray, mask, number_of_hashes}, bin) when is_binary(bin) do
bin
|> bin_to_offset_list(number_of_hashes)
|> Enum.map(&Bitwise.&&&(&1, mask))
|> write(bitarray)
end
def insert(bloom, term) do
insert(bloom, :erlang.term_to_binary(term))
end
@doc """
Checks if an element was inserted in the given Bloom Filter.
Returns `false` if it can be guaranteed that the element was not
inserted. Else `true`.
You can get the probability of a false positive
using `Flower.Bloom.false_positive_probability`.
|Was actually inserted?| has? | has_not? |
|:--------------------:|:----------------------:|:----------------------------:|
| yes | yes | no |
| no | most of the times: no | most of the times: yes |
"""
@spec has?(bloomfilter(), any()) :: boolean()
def has?({:bloom, bitarray, mask, number_of_hashes}, bin) when is_binary(bin) do
bin
|> bin_to_offset_list(number_of_hashes)
|> Enum.map(&Bitwise.&&&(&1, mask))
|> read(bitarray)
end
def has?(bloom, term) do
has?(bloom, :erlang.term_to_binary(term))
end
@doc """
Checks if an element is not in a given Bloom Filter.
Returns `true` if it can be guaranteed that the element was not
inserted. Else `false`.
This is equal to `!Bloom.has?(filter, term)`
"""
@spec has_not?(bloomfilter(), any()) :: boolean()
def has_not?(bloom, term), do: !has?(bloom, term)
@doc false
@deprecated "This is unstable, can change soon"
def serialize({:bloom, bitarray, _mask, number_of_hashes}) do
blen = BitArray.bit_length(bitarray)
bitaddrlen = :math.log2(blen) |> trunc()
<<@ser_vsn, 42, bitaddrlen::8, number_of_hashes::8, BitArray.to_bin(bitarray)::binary>>
end
@doc """
Creates a `Stream` of binaries. This should be used for serializing.
Example:
```
filter = Bloom.new(...)
file = File.stream!("myfile.bloomfilter", [:delayed_write, :binary], 8096)
Bloom.stream(filter)
|> Stream.into(file)
|> Stream.run
```
"""
@spec stream(bloomfilter()) :: Enumerable.t()
def stream({:bloom, bitarray, _mask, number_of_hashes}) do
blen = BitArray.bit_length(bitarray)
bitaddrlen = :math.log2(blen) |> trunc()
[<<@ser_vsn, 42, bitaddrlen::8, number_of_hashes::8>>]
|> Stream.concat(BitArray.stream(bitarray))
end
@doc false
@deprecated "This is unstable, can change soon"
def deserialize(<<@ser_vsn, 42, bitaddrlen::8, number_of_hashes::8, bitarray::binary>>) do
{:bloom, BitArray.from_bin(bitarray), (1 <<< bitaddrlen) - 1, number_of_hashes}
end
@doc """
Creates a Bloom Filter from a `Stream` of binaries. This should be used for deserializing.
Example:
```
file = File.stream!("myfile.bloomfilter", [:read_ahead, :binary], 8096)
Bloom.from_stream(file)
```
"""
@spec from_stream(Enumerable.t()) :: {:ok, bloomfilter()} | {:error, any()}
def from_stream(stream), do: from_stream(stream, <<>>)
defp from_stream(stream, <<@ser_vsn, 42, bitaddrlen::8, number_of_hashes::8, tail::binary>>) do
ref = BitArray.new(1 <<< bitaddrlen)
[tail]
|> Stream.concat(stream)
|> Stream.into(BitArray.stream(ref))
|> Stream.run()
{:ok, {:bloom, ref, (1 <<< bitaddrlen) - 1, number_of_hashes}}
end
defp from_stream(stream, acc) when byte_size(acc) < 100 do
[head] = stream |> Enum.take(1)
next_acc = acc <> head
next_stream = stream |> Stream.drop(1)
from_stream(next_stream, next_acc)
end
defp from_stream(_, _) do
{:error, :invalid_header}
end
defp write([p | tail], bitarray) do
BitArray.put(bitarray, p, true)
write(tail, bitarray)
end
defp write([], _) do
:ok
end
defp read([p | tail], bitarray) do
BitArray.get(bitarray, p) && read(tail, bitarray)
end
defp read([], _) do
true
end
defp bin_to_offset_list(bin, number_of_hashes) when number_of_hashes <= 8 do
:crypto.hash(:sha256, bin)
|> hash_to_offset_list(number_of_hashes)
end
defp bin_to_offset_list(bin, number_of_hashes) when number_of_hashes > 8 do
:crypto.hash(:sha512, bin)
|> hash_to_offset_list(number_of_hashes)
end
defp hash_to_offset_list(_, 0), do: []
defp hash_to_offset_list(<<num::32, rest::binary>>, n),
do: [num | hash_to_offset_list(rest, n - 1)]
@doc false
@deprecated "Use `has?` instead"
def has_maybe?(a, b), do: has?(a, b)
end
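# A minimal usage sketch, assuming the Flower.Native.BitArray NIF is compiled
# and loaded:
#
#   filter = Flower.Bloom.new(:"64 KB", 10_000)
#   :ok = Flower.Bloom.insert(filter, {:user, 42})
#   Flower.Bloom.has?(filter, {:user, 42})   # => true
#   Flower.Bloom.has?(filter, {:user, 43})   # => false (unless a false positive)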
# file: lib/flower/bloom.ex
defmodule PassiveSupport.String do
@moduledoc """
Helper functions for working with strings and UTF-8 binary data.
"""
@doc ~S"""
Converts the provided pattern to a regular expression, if necessary,
and then invokes `Regex.run` on the expression and the string.
Useful for invoking regular expressions on strings in the middle of
transformation pipelines.
## Examples
iex> match("footwear, fun, and fondue", "((f[ou])[no]).+")
["footwear, fun, and fondue", "foo", "fo"]
iex> match("fööd!", "öö")
["öö"]
iex> match("footwear, fun, and fondue", ~r/((f[ou])[no]).+/U)
["foot", "foo", "fo"]
"""
@spec match(String.t(), Regex.t() | String.t(), keyword) :: [String.t()] | nil
def match(string, pattern, opts \\ [])
def match(string, %Regex{} = pattern, opts), do:
Regex.run(pattern, string, opts)
def match(string, "" <> pattern, opts), do:
Regex.compile!(pattern, "u")
|> Regex.run(string, opts)
@doc ~S"""
Converts the provided pattern to a regular expression, if necessary,
and then invokes `Regex.scan` on the expression and the string.
Useful for invoking regular expressions on strings in the middle of
transformation pipelines.
## Examples
iex> scan("footwear, fun, and fondue", "((f[ou])[no]).+")
[["footwear, fun, and fondue", "foo", "fo"]]
iex> scan("fööd!", "öö")
[["öö"]]
iex> scan("footwear, fun, and fondue", ~r/((f[ou])[no]).+/U)
[["foot", "foo", "fo"], ["fun,", "fun", "fu"], ["fond", "fon", "fo"]]
"""
@spec scan(String.t(), Regex.t() | String.t(), keyword) :: [[String.t()]]
def scan(string, pattern, opts \\ [])
def scan(string, %Regex{} = pattern, opts), do:
Regex.scan(pattern, string, opts)
def scan(string, "" <> pattern, opts), do:
Regex.compile!(pattern, "u")
|> Regex.scan(string, opts)
defguardp valid_length(length) when is_integer(length) and length > 0
@doc ~S"""
Splits a string by a given length or lengths.
When one length is given, splits the string into a list of substrings
of that length.
When a list of lengths is given, returns a list of lists
of substrings of the given lengths.
If the string does not fit within the given length(s),
the final substring will be the length of the remainder
of the string.
To retrieve only the first `length` or `lengths` of the string,
pass `first_split: true`. Note that in the case of a single `length`,
this is equivalent to calling `String.slice(string, 0, length)` or
`binary_part(string, 0, length)`. This is useful when, while supplying
multiple lengths, only the first `lengths` of the given string are important
to the program, or when the sum of `lengths` is equal to the length
of the original string.
## Examples
iex> length_split("hello world!", 3)
["hel", "lo ", "wor", "ld!"]
iex> length_split("hello world!", 5)
["hello", " worl", "d!"]
iex> length_split("hello world!", 5, first_split: true)
"hello"
iex> length_split("Life, the universe, and everything... is pattern-matchable", [10, 9, 7])
[
["Life, the ", "universe,", " and ev"],
["erything..", ". is patt", "ern-mat"],
["chable"]
]
iex> length_split("Life, the universe, and everything... is pattern-matchable", [10, 9, 7], first_split: true)
["Life, the ", "universe,", " and ev"]
"""
@spec length_split(String.t(), integer | [integer], first_split: boolean) ::
String.t() | list(String.t()) | list(list(String.t()))
def length_split(string, lengths, opts \\ [first_split: false])
def length_split(""<>string, length, first_split: true) when valid_length(length), do:
String.slice(string, 0, length)
def length_split(""<>string, length, first_split: false) when valid_length(length), do:
string |> String.graphemes |> Stream.chunk_every(length) |> Enum.map(&Enum.join/1)
def length_split("" <> string, [], _opts), do: string
def length_split("" <> string, lengths, first_split: true) when is_list(lengths), do:
do_length_split(String.graphemes(string), lengths)
def length_split("" <> string, lengths, first_split: false) when is_list(lengths), do:
do_length_split(String.graphemes(string), lengths, lengths)
defp do_length_split([], _lengths), do: []
defp do_length_split(_graphemes, []), do: []
defp do_length_split(graphemes, [current_length | lengths]) do
{substr, graphemes} = Enum.split(graphemes, current_length)
[IO.iodata_to_binary(substr) | do_length_split(graphemes, lengths)]
end
defp do_length_split([], _lengths, _lengths_copy), do: []
defp do_length_split(graphemes, lengths, _lengths_copy) do
{substrings, rest} = Enum.reduce_while(lengths, {[], graphemes}, (fn
(_length, {parts, []}) ->
{:halt, {parts, []}}
(length, {parts, graphemes}) ->
{substr, rest} = Enum.split(graphemes, length)
{:cont, {[IO.iodata_to_binary(substr) | parts], rest}}
end))
[Enum.reverse(substrings) | do_length_split(rest, lengths, lengths)]
end
@doc ~S"""
Safely casts the string to an atom, returning `{:ok, atom}` if successful
and `:error` if not.
## Examples
iex> safe_existing_atom("ok")
{:ok, :ok}
iex> safe_existing_atom("not_particularly_ok")
:error
"""
@spec safe_existing_atom(String.t) :: {:ok, atom} | :error
def safe_existing_atom("" <> string) do
{:ok, String.to_existing_atom(string)}
rescue
ArgumentError -> :error
end
@doc """
Returns a copy of `string` with a newline removed from the end.
If there is no newline at the end of `string`, then it is returned unchanged
## Examples
iex> chomp("hello world!\\n")
"hello world!"
iex> chomp("hello\\nworld!")
"hello\\nworld!"
iex> chomp("multiline!\\n\\n")
"multiline!\\n"
iex> chomp("single line!")
"single line!"
"""
@spec chomp(String.t) :: String.t
def chomp(string), do: String.replace(string, ~r/\n$/, "", global: false)
end
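# Note on chomp/1 above: it removes at most one trailing newline, unlike
# String.trim_trailing/2, which strips every trailing occurrence:
#
#   iex> String.trim_trailing("multiline!\n\n", "\n")
#   "multiline!"
#   iex> PassiveSupport.String.chomp("multiline!\n\n")
#   "multiline!\n"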
# file: lib/passive_support/base/string.ex
defmodule Raygun do
@moduledoc """
Send errors to Raygun. Errors can be captured in three different ways.
1. Any errors that are logged
2. Any exceptions that occur in a Plug
3. Programmatically
All the functions will return `:ok`, `{:error, reason}`, or `:ignored`.
"""
@api_endpoint "https://api.raygun.io/entries"
@doc """
Reports a string message. This function is used by the Raygun.Logger but it
can also be used to report any string message.
"""
def report_message(msg, opts \\ []) do
if Raygun.Util.environment?() && Raygun.Util.msg_valid?(msg) do
msg |> Raygun.Format.message_payload(opts) |> send_report()
else
:ignored
end
end
@doc """
Reports an exception and its corresponding stacktrace to Raygun.
"""
def report_stacktrace(stacktrace, exception, opts \\ []) do
if Raygun.Util.environment?() do
stacktrace |> Raygun.Format.stacktrace_payload(exception, opts) |> send_report()
else
:ignored
end
end
@doc """
Reports an exception and its corresponding stacktrace to Raygun. Additionally
this captures some additional information about the environment in which
the exception occurred by retrieving some state from the Plug Conn.
"""
def report_plug(conn, stacktrace, exception, opts \\ []) do
if Raygun.Util.environment?() do
conn |> Raygun.Format.conn_payload(stacktrace, exception, opts) |> send_report()
else
:ignored
end
end
defp send_report(error) do
case HTTPoison.post(@api_endpoint, Jason.encode!(error), headers(), httpoison_opts()) do
{:ok, %HTTPoison.Response{status_code: 202}} -> :ok
{:ok, %HTTPoison.Response{status_code: 400}} -> {:error, :bad_message}
{:ok, %HTTPoison.Response{status_code: 403}} -> {:error, :invalid_api_key}
# Any other response status would otherwise raise a CaseClauseError,
# so it is normalized to an unexpected error as well.
{:ok, %HTTPoison.Response{}} -> {:error, :unexpected}
{:error, _} -> {:error, :unexpected}
end
end
defp headers do
[
{"Content-Type", "application/json; charset=utf-8"},
{"Accept", "application/json"},
{"User-Agent", "Elixir Client"},
{"X-ApiKey", Raygun.Util.get_env(:raygun, :api_key)}
]
end
defp httpoison_opts do
Application.get_env(:raygun, :httpoison_opts, [])
end
end
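# A hedged sketch of the "programmatically" path from the moduledoc; the
# raising code is hypothetical:
#
#   try do
#     raise "boom"
#   rescue
#     exception ->
#       Raygun.report_stacktrace(__STACKTRACE__, exception)
#   end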
# file: lib/raygun.ex
defmodule Servy.PledgeServer do
@name :pledge_server
use GenServer
defmodule State do
defstruct cache_size: 3, pledges: []
end
# Client Interface
def start do
IO.puts "Starting the pledge server..."
GenServer.start(__MODULE__, %State{}, name: @name)
end
def create_pledge(name, amount) do
GenServer.call @name, {:create_pledge, name, amount}
end
def recent_pledges do
GenServer.call @name, :recent_pledges
end
def total_pledged do
GenServer.call @name, :total_pledged
end
def clear do
GenServer.cast @name, :clear
end
def set_cache_size(size) do
GenServer.cast @name, {:set_cache_size, size}
end
# Server Callbacks
def init(state) do
pledges = fetch_recent_pledges_from_service()
new_state = %{state | pledges: pledges}
{:ok, new_state}
end
def handle_cast(:clear, state) do
{:noreply, %{ state | pledges: []}}
end
def handle_cast({:set_cache_size, size}, state) do
new_state = %{ state | cache_size: size}
{:noreply, new_state}
end
def handle_call(:total_pledged, _from, state) do
total = Enum.map(state.pledges, &elem(&1, 1)) |> Enum.sum
{:reply, total, state}
end
def handle_call(:recent_pledges, _from, state) do
{:reply, state.pledges, state}
end
def handle_call({:create_pledge, name, amount}, _from, state) do
{:ok, id} = send_pledge_to_service(name, amount)
most_recent_pledges = Enum.take(state.pledges, state.cache_size - 1)
cached_pledges = [ {name, amount} | most_recent_pledges ]
new_state = %{state | pledges: cached_pledges}
{:reply, id, new_state}
end
def handle_info(message, state) do
IO.puts "Can't touch this! #{inspect message}"
{:noreply, state}
end
defp send_pledge_to_service(_name, _amount) do
# CODE GOES HERE TO SEND PLEDGE TO EXTERNAL SERVICE
{:ok, "pledge-#{:rand.uniform(1000)}"}
end
defp fetch_recent_pledges_from_service do
# CODE GOES HERE TO FETCH RECENT PLEDGES FROM EXTERNAL SERVICE
# Example return value:
[ {"wilma", 15}, {"fred", 25} ]
end
end
# alias Servy.PledgeServer
# {:ok, pid} = PledgeServer.start()
# send pid, {:stop, "hammertime"}
# PledgeServer.set_cache_size(4)
# IO.inspect PledgeServer.create_pledge("larry", 10)
# # PledgeServer.clear()
# # IO.inspect PledgeServer.create_pledge("moe", 20)
# # IO.inspect PledgeServer.create_pledge("curly", 30)
# # IO.inspect PledgeServer.create_pledge("daisy", 40)
# # IO.inspect PledgeServer.create_pledge("grace", 50)
# IO.inspect PledgeServer.recent_pledges()
# IO.inspect PledgeServer.total_pledged()
# file: video-code/28-linking-processes/servy/lib/servy/pledge_server.ex
defmodule AOC.Y2021.Day15 do
@behaviour AOC.Solution
@neighbor_offsets [{-1, 0}, {1, 0}, {0, -1}, {0, 1}]
def input_path() do
"./lib/2021/input/day15.txt"
end
def parse_input(input) do
# Expand grid to 5x5 repeating tiles (we'll calc the risk increases dynamically)
rows =
input
|> String.split("\n", trim: true)
|> Enum.map(fn row ->
String.duplicate(row, 5)
end)
|> List.duplicate(5)
|> List.flatten()
row_length = rows |> List.first() |> String.length()
col_length = length(rows)
# Build the directed graph, where the weight of an edge is the risk level of the incident vertex.
graph =
for {row, y} <- Enum.with_index(rows),
{risk, x} <- Enum.with_index(String.split(row, "", trim: true)),
# We have to provide a custom vertex id function, because the default would
# cause overlap for a large # of vertices.
reduce: Graph.new(vertex_identifier: & &1) do
graph ->
# Calculate risk depending on which 5x5 tile this point is.
tile_risk_increase = div(x, div(row_length, 5)) + div(y, div(col_length, 5))
risk = risk |> String.to_integer() |> Kernel.+(tile_risk_increase) |> wrap_to_one()
Enum.reduce(@neighbor_offsets, graph, fn {x_offset, y_offset}, g ->
to = {x, y}
from = {x + x_offset, y + y_offset}
Graph.add_edge(g, from, to, weight: risk)
end)
end
{graph, {row_length - 1, col_length - 1}}
end
defp wrap_to_one(risk), do: 1 + rem(risk - 1, 9)
def star_1({graph, {end_x, end_y}}) do
graph
|> Graph.dijkstra({0, 0}, {div(end_x, 5), div(end_y, 5)})
|> get_path_weight(graph)
end
defp get_path_weight([_only_one], _g, total_weight), do: total_weight
defp get_path_weight([v1, v2 | path], %Graph{} = graph, total_weight) do
%Graph.Edge{weight: weight} = Graph.edge(graph, v1, v2)
get_path_weight([v2 | path], graph, total_weight + weight)
end
defp get_path_weight(path, %Graph{} = graph), do: get_path_weight(path, graph, 0)
def star_2({graph, end_point}) do
graph
|> Graph.dijkstra({0, 0}, end_point)
|> get_path_weight(graph)
end
end
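# Worked example of wrap_to_one/1 above: risks stay in 1..9, so a base cell
# risk of 8 in a tile with increase 4 becomes 1 + rem(12 - 1, 9) = 3.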
# file: lib/2021/day15.ex
defmodule RDF.XSD.AnyURI do
@moduledoc """
`RDF.XSD.Datatype` for XSD anyURIs.
See: <http://www.w3.org/TR/xmlschema11-2/#anyURI>
"""
@type valid_value :: URI.t()
use RDF.XSD.Datatype.Primitive,
name: "anyURI",
id: RDF.Utils.Bootstrapping.xsd_iri("anyURI")
alias RDF.{IRI, XSD}
import RDF.Guards
def_applicable_facet XSD.Facets.MinLength
def_applicable_facet XSD.Facets.MaxLength
def_applicable_facet XSD.Facets.Length
def_applicable_facet XSD.Facets.Pattern
@doc false
def min_length_conform?(min_length, _value, lexical) do
String.length(lexical) >= min_length
end
@doc false
def max_length_conform?(max_length, _value, lexical) do
String.length(lexical) <= max_length
end
@doc false
def length_conform?(length, _value, lexical) do
String.length(lexical) == length
end
@doc false
def pattern_conform?(pattern, _value, lexical) do
XSD.Facets.Pattern.conform?(pattern, lexical)
end
@impl XSD.Datatype
@spec lexical_mapping(String.t(), Keyword.t()) :: valid_value
def lexical_mapping(lexical, _), do: URI.parse(lexical)
@impl XSD.Datatype
@spec elixir_mapping(any, Keyword.t()) :: value
def elixir_mapping(%URI{} = uri, _), do: uri
def elixir_mapping(%IRI{} = iri, _), do: IRI.parse(iri)
def elixir_mapping(value, _) when maybe_ns_term(value) do
case RDF.Namespace.resolve_term(value) do
{:ok, iri} -> IRI.parse(iri)
_ -> @invalid_value
end
end
def elixir_mapping(_, _), do: @invalid_value
@impl RDF.Literal.Datatype
def do_cast(%IRI{} = iri), do: new(iri.value)
def do_cast(value), do: super(value)
@impl RDF.Literal.Datatype
def do_equal_value_different_datatypes?(left, right)
def do_equal_value_different_datatypes?(%IRI{} = iri, any_uri),
do: do_equal_value_different_datatypes?(any_uri, iri)
def do_equal_value_different_datatypes?(any_uri, %IRI{value: iri}),
do: lexical(any_uri) == iri
def do_equal_value_different_datatypes?(left, right) when maybe_ns_term(left),
do: do_equal_value_different_datatypes?(right, left)
def do_equal_value_different_datatypes?(left, right) when maybe_ns_term(right) do
case RDF.Namespace.resolve_term(right) do
{:ok, iri} -> do_equal_value_different_datatypes?(left, iri)
_ -> nil
end
end
def do_equal_value_different_datatypes?(literal1, literal2),
do: super(literal1, literal2)
end
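# A minimal usage sketch, assuming the literal constructor that
# `use RDF.XSD.Datatype.Primitive` generates for this datatype:
#
#   literal = RDF.XSD.AnyURI.new("http://example.com/")
#   # lexical_mapping/2 above makes the literal's value a %URI{} struct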
# file: lib/rdf/xsd/datatypes/any_uri.ex
defmodule Riffed.Struct do
@moduledoc ~S"""
Parses your thrift files and builds some Elixir-y structs and conversion functions for you.
Assuming you have the following Thrift structs defined in src/request_types.erl:
struct User {
1: i32 id,
2: string firstName,
3: string lastName;
}
struct Request {
1: User user,
2: list<string> cookies,
3: map<string, string> params;
}
You import them thusly:
defmodule Request do
use Riffed.Struct, request_types: [:Request, :User]
end
Note that the `use` statement takes a keyword list whose names are thrift modules and whose values are
the structs that you would like to import.
Your request module now has `User` and `Request` submodules, and the top level module has conversion
functions added to it so you can do the following:
iex> Request.to_elixir({:User, 32, "Steve", "Cohen"})
%Request.User{id: 32, firstName: "Steve", lastName: "Cohen"}
iex> user = Request.User.new(firstName: "Richard", lastName: "Feynman", id: 3221)
%Request.User{id: 3221, firstName: "Richard", lastName: "Feynman"}
iex> Request.to_erlang(user)
{:User, 3221, "Richard", "Feynman"}
## Controlling destination modules
If you have a complex thrift hierarchy, or a group of shared thrift structs,
importing into a single module can be ugly. In that case, you can control the
destination module of one (or all) of your imported structs by specifying the
`dest_modules` key. For example:
defmodule ImportExample do
use Riffed.Struct, dest_modules: [common_types: Common,
user_types: User,
account_types: Account],
common_types: [:RequestContext],
user_types: [:User, :Location, :Reputation],
account_types: [:Profile, :BillingInfo]
end
After Riffed runs, the ImportExample module will have three submodules,
`Common`, `User`, and `Account`. `Common` will contain the `RequestContext`,
`User` will contain `User`, `Location`, and `Reputation` and `Account` will
contain `Profile` and `BillingInfo`.
Any servers or clients that wish to use these should set their structs module
to `ImportExample`.
### Note:
Keys not set will have the initial value of `:undefined`.
"""
defmodule StructData do
@moduledoc false
defstruct struct_modules: [], tuple_converters: [], struct_converters: []
def append(data=%StructData{}, struct_module, tuple_stanza, struct_function) do
%StructData{struct_modules: [struct_module | data.struct_modules],
tuple_converters: [tuple_stanza | data.tuple_converters],
struct_converters: [struct_function | data.struct_converters]}
end
end
defmacro __using__(opts) do
Module.register_attribute(__CALLER__.module, :callbacks, accumulate: true)
{module_mapping, opts} = Keyword.pop(opts, :dest_modules, Keyword.new)
quote do
use Riffed.Callbacks
use Riffed.Enumeration
require Riffed.Struct
import Riffed.Struct
@thrift_options unquote(opts)
@dest_modules unquote(module_mapping)
@before_compile Riffed.Struct
end
end
defp build_struct_args(struct_meta) do
Enum.map(struct_meta, &build_struct_defaults/1)
end
defp build_struct_defaults({_, _, _, name, :undefined}) do
{name, :undefined}
end
defp build_struct_defaults({_, _, :string, name, default}) do
{name, List.to_string(default)}
end
defp build_struct_defaults({field_idx, required, {:list, type}, name, default}) do
default_list = default
|> Enum.map(&build_struct_defaults({field_idx, required, type, name, &1}))
|> Enum.map(fn {_, val} -> val end)
{name, default_list}
end
defp build_struct_defaults({field_idx, required, {:set, type}, name, default}) do
default_set = default
|> :sets.to_list
|> Enum.map(&build_struct_defaults({field_idx, required, type, name, &1}))
|> Enum.into(HashSet.new, fn {_, val} -> val end)
{name, Macro.escape(default_set)}
end
defp build_struct_defaults({field_idx, required, {:map, key_type, val_type}, name, default}) do
default_map = default
|> :dict.to_list
|> Enum.into(Map.new,
fn {k, v} ->
{_, key} = build_struct_defaults({field_idx, required, key_type, name, k})
{_, val} = build_struct_defaults({field_idx, required, val_type, name, v})
{key, val}
end)
{name, Macro.escape(default_map)}
end
# Note: default values are not supported when the value is a struct
defp build_struct_defaults({_, _, {:struct, _}, name, _}) do
{name, :undefined}
end
defp build_struct_defaults({_, _, _, name, default}) do
{name, default}
end
defp downcase_first(s) when is_bitstring(s) do
<<first, rest :: binary>> = s
String.downcase(List.to_string([first])) <> rest
end
defp downcase_first(a) when is_atom(a) do
a
|> Atom.to_string
|> downcase_first
|> String.to_atom
end
defp build_struct_and_conversion_function(struct_data=%StructData{}, root_module, container_module, struct_module_name, thrift_module) do
{:struct, meta} = :erlang.apply(thrift_module, :struct_info_ext, [struct_module_name])
struct_args = build_struct_args(meta)
fq_module_name = Module.concat([container_module, struct_module_name])
record_name = downcase_first(struct_module_name)
record_file = "src/#{thrift_module}.hrl"
tuple_to_elixir = build_tuple_to_elixir(thrift_module, root_module, fq_module_name, meta, struct_module_name)
struct_to_erlang = build_struct_to_erlang(root_module, fq_module_name, meta, struct_module_name, record_name)
struct_module = quote do
defmodule unquote(fq_module_name) do
require Record
Record.defrecord(unquote(record_name),
Record.extract(unquote(struct_module_name),
from: unquote(record_file)))
defstruct unquote(struct_args)
def new(opts \\ unquote(struct_args)) do
Enum.reduce(opts, %unquote(fq_module_name){}, fn({k, v}, s) -> Map.put(s, k, v) end)
end
end
end
StructData.append(struct_data, struct_module, tuple_to_elixir, struct_to_erlang)
end
@doc false
def to_riffed_type_spec({:set, item_type}) do
{:set, to_riffed_type_spec(item_type)}
end
def to_riffed_type_spec({:list, item_type}) do
{:list, to_riffed_type_spec(item_type)}
end
def to_riffed_type_spec({:map, key_type, val_type}) do
{:map, {to_riffed_type_spec(key_type), to_riffed_type_spec(val_type)}}
end
def to_riffed_type_spec(other) do
other
end
defp get_overridden_type_spec(container_module, struct_module, thrift_type_spec, field_name) do
overrides = Riffed.Enumeration.get_overrides(container_module).structs
|> Map.get(struct_module)
if overrides do
Keyword.get(overrides, field_name, thrift_type_spec) |> to_riffed_type_spec
else
to_riffed_type_spec(thrift_type_spec)
end
end
defp build_tuple_to_elixir(thrift_module, container_module, module_name, meta, thrift_name) do
# Builds a conversion function that take a tuple and converts it into an Elixir struct
pos_args = [thrift_name] ++ Enum.map(meta,
fn({_, _, _, name, _}) ->
Macro.var(name, module_name)
end)
pos_args = {:{}, [], pos_args}
keyword_args = meta
|> Enum.map(
fn({_, _, _type, name, _}) ->
# the meta format is {index, :undefined, type, name, :undefined}
var = Macro.var(name, module_name)
quote do
{unquote(name), unquote(var)}
end
end)
enum_conversions = meta
|> Enum.map(
fn({_idx, _, type, name, _}) ->
var = Macro.var(name, module_name)
match_type = get_overridden_type_spec(container_module, module_name, type, name)
quote do
unquote(var) = unquote(container_module).to_elixir(
unquote(var),
unquote(match_type))
end
end)
quote do
def to_elixir(unquote(pos_args), {:struct, {unquote(thrift_module), unquote(thrift_name)}}) do
unquote_splicing(enum_conversions)
unquote(module_name).new(unquote(keyword_args)) |> after_to_elixir
end
end
end
defp build_struct_to_erlang(dest_module, struct_module, meta, record_name, record_fn_name) do
# Builds a conversion function that turns an Elixir struct into an erlang record
# The output is a quoted expression.
kwargs = Enum.map(
meta,
fn({_, _, type, name, _}) ->
# The meta format is {index, :undefined, type, name, :undefined}
field_variable = Macro.var(name, struct_module)
type_spec = get_overridden_type_spec(dest_module, struct_module, type, name)
quote do
{unquote(name),
unquote(dest_module).to_erlang(
s.unquote(field_variable)(), unquote(type_spec))
}
end
end)
quote do
def to_erlang(s = %unquote(struct_module){}, _type_spec) do
require unquote(struct_module)
unquote(struct_module).unquote(record_fn_name)(unquote(kwargs))
|> put_elem(0, unquote(record_name))
|> after_to_erlang
end
end
end
defmacro __before_compile__(env) do
options = Module.get_attribute(env.module, :thrift_options)
build_cast_to_erlang = Module.get_attribute(env.module, :build_cast_to_erlang)
module_mapping = Module.get_attribute(env.module, :dest_modules, Keyword.new)
struct_data = options
|> Enum.reduce(
%StructData{},
fn({thrift_module, struct_names}, data) ->
curr_module = env.module
dest_module = case Keyword.get(module_mapping, thrift_module, env.module) do
^curr_module ->
curr_module
override_module ->
Module.concat([env.module, override_module])
end
Enum.reduce(struct_names, data,
fn(struct_name, data) ->
build_struct_and_conversion_function(data, env.module, dest_module, struct_name, thrift_module)
end)
end)
callbacks = Riffed.Callbacks.build(env.module)
enums = Riffed.Enumeration.build(env.module)
erlang_casts = if build_cast_to_erlang do
Riffed.Enumeration.build_cast_return_value_to_erlang(env.module)
else
[]
end
quote do
unquote_splicing(struct_data.struct_modules)
unquote_splicing(struct_data.tuple_converters)
unquote_splicing(enums.modules)
unquote_splicing(enums.conversion_fns)
unquote_splicing(struct_data.struct_converters)
unquote_splicing(erlang_casts)
unquote(Riffed.Callbacks.default_to_elixir)
unquote(Riffed.Callbacks.default_to_erlang)
unquote(callbacks)
end
end
end
|
lib/riffed/struct.ex
| 0.67854
| 0.463201
|
struct.ex
|
starcoder
|
defmodule Imagineer.Image.PNG.Filter.Basic.Sub do
import Imagineer.Image.PNG.Helpers, only: [null_binary: 1]
@moduledoc """
The Sub filter transmits the difference between each byte and the value of the
corresponding byte of the prior pixel.
"""
@doc """
Takes in the uncompressed binary for a sub-filtered row of pixels plus the
  number of bytes per pixel and returns a binary of the row as
unfiltered pixel data.
For more information, see the PNG Filter [documentation for the Sub filter type
](http://www.w3.org/TR/PNG-Filters.html#Filter-type-1-Sub).
## Example
iex> filtered = <<127, 138, 255, 20, 21, 107>>
iex> Imagineer.Image.PNG.Filter.Basic.Sub.unfilter(filtered, 3)
<<127, 138, 255, 147, 159, 106>>
iex> filtered = <<1, 77, 16, 234, 234, 154>>
iex> Imagineer.Image.PNG.Filter.Basic.Sub.unfilter(filtered, 3)
<<1, 77, 16, 235, 55, 170>>
iex> filtered = <<1, 77, 16, 234, 234, 154>>
iex> Imagineer.Image.PNG.Filter.Basic.Sub.unfilter(filtered, 2)
<<1, 77, 17, 55, 251, 209>>
"""
def unfilter(row, bytes_per_pixel) do
    # the pixel data before the first pixel is assumed to be all zeroes (null bytes)
ghost_prior_pixel = null_binary(bytes_per_pixel)
unfilter(row, ghost_prior_pixel, bytes_per_pixel, [])
|> Enum.join()
end
# In the base case, we'll have a reversed list of lists, each of which
  # contains the unfiltered bytes for a pixel. Flatten them
defp unfilter(<<>>, _prior_pixel, _bytes_per_pixel, unfiltered_pixels) do
List.flatten(Enum.reverse(unfiltered_pixels))
end
defp unfilter(row, prior_pixel, bytes_per_pixel, unfiltered_pixels) do
<<pixel_bytes::bytes-size(bytes_per_pixel), rest::binary>> = row
unfiltered_pixel_bytes = unfilter_pixel_bytes(prior_pixel, pixel_bytes, [])
unfiltered_pixel = Enum.join(unfiltered_pixel_bytes)
unfilter(rest, unfiltered_pixel, bytes_per_pixel, [unfiltered_pixel_bytes | unfiltered_pixels])
end
# In the base case, we'll have a reversed list of a bunch of unfiltered bytes
defp unfilter_pixel_bytes(<<>>, <<>>, unfiltered_bytes) do
Enum.reverse(unfiltered_bytes)
end
# Adds the corresponding byte values of the current pixel and the previous one
defp unfilter_pixel_bytes(
<<prior_byte::integer-size(8), rest_prior::binary>>,
<<pixel_byte::integer-size(8), rest_pixel::binary>>,
unfiltered_bytes
) do
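    # Constructing a binary truncates the sum to its low 8 bits, which is
    # exactly the mod-256 wrap the PNG Sub filter requires.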
unfiltered_byte = <<prior_byte + pixel_byte>>
unfilter_pixel_bytes(rest_prior, rest_pixel, [unfiltered_byte | unfiltered_bytes])
end
end
|
lib/imagineer/image/png/filter/basic/sub.ex
| 0.825308
| 0.521898
|
sub.ex
|
starcoder
|
defmodule Ecto.Query.Builder.Filter do
@moduledoc false
alias Ecto.Query.Builder
@doc """
Escapes a where or having clause.
It allows query expressions that evaluate to a boolean
or a keyword list of field names and values. In a keyword
list multiple key value pairs will be joined with "and".
"""
@spec escape(:where | :having, Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, %{}}
def escape(_kind, [], _vars, _env) do
{true, %{}}
end
def escape(kind, expr, vars, env) when is_list(expr) do
{parts, params} =
Enum.map_reduce(expr, %{}, fn
{field, nil}, _acc ->
Builder.error! "nil given for #{inspect field}, comparison with nil is forbidden as it always evaluates to false. " <>
"Pass a full query expression and use is_nil/1 instead."
{field, value}, acc when is_atom(field) ->
{value, params} = Builder.escape(value, {0, field}, acc, vars, env)
{{:{}, [], [:==, [], [to_escaped_field(field), value]]}, params}
_, _acc ->
Builder.error! "expected a keyword list at compile time in #{kind}, " <>
"got: `#{Macro.to_string expr}`. If you would like to " <>
"pass a list dynamically, please interpolate the whole list with ^"
end)
expr = Enum.reduce parts, &{:{}, [], [:and, [], [&2, &1]]}
{expr, params}
end
def escape(_kind, expr, vars, env) do
Builder.escape(expr, :boolean, %{}, vars, env)
end
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(:where | :having, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(kind, query, _binding, {:^, _, [var]}, env) do
expr =
quote do
{expr, params} = Ecto.Query.Builder.Filter.runtime!(unquote(kind), unquote(var))
%Ecto.Query.QueryExpr{expr: expr, params: params,
file: unquote(env.file), line: unquote(env.line)}
end
Builder.apply_query(query, __MODULE__, [kind, expr], env)
end
def build(kind, query, binding, expr, env) do
binding = Builder.escape_binding(binding)
{expr, params} = escape(kind, expr, binding, env)
params = Builder.escape_params(params)
expr = quote do: %Ecto.Query.QueryExpr{
expr: unquote(expr),
params: unquote(params),
file: unquote(env.file),
line: unquote(env.line)}
Builder.apply_query(query, __MODULE__, [kind, expr], env)
end
@doc """
  The callback applied by `build/5` to build the query.
"""
@spec apply(Ecto.Queryable.t, :where | :having, term) :: Ecto.Query.t
def apply(query, _, %{expr: true}) do
query
end
def apply(query, :where, expr) do
query = Ecto.Queryable.to_query(query)
%{query | wheres: query.wheres ++ [expr]}
end
def apply(query, :having, expr) do
query = Ecto.Queryable.to_query(query)
%{query | havings: query.havings ++ [expr]}
end
@doc """
Invoked at runtime for interpolated lists.
"""
def runtime!(_kind, []) do
{true, []}
end
def runtime!(kind, kw) when is_list(kw) do
{parts, params} = runtime!(kw, 0, [], [], kind, kw)
{Enum.reduce(parts, &{:and, [], [&2, &1]}), params}
end
def runtime!(kind, other) do
raise ArgumentError, "expected a keyword list in `#{kind}`, got: `#{inspect other}`"
end
defp runtime!([{field, nil}|_], _counter, _exprs, _params, _kind, _original) when is_atom(field) do
raise ArgumentError, "nil given for #{inspect field}, comparison with nil is forbidden as it always evaluates to false. " <>
"Pass a full query expression and use is_nil/1 instead."
end
defp runtime!([{field, value}|t], counter, exprs, params, kind, original) when is_atom(field) do
runtime!(t, counter + 1,
[{:==, [], [to_field(field), {:^, [], [counter]}]}|exprs],
[{value, {0, field}}|params],
kind, original)
end
defp runtime!([], _counter, exprs, params, _kind, _original) do
{Enum.reverse(exprs), Enum.reverse(params)}
end
defp runtime!(_, _counter, _exprs, _params, kind, original) do
raise ArgumentError, "expected a keyword list in `#{kind}`, got: `#{inspect original}`"
end
defp to_escaped_field(field), do: Macro.escape to_field(field)
defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []}
end
|
lib/ecto/query/builder/filter.ex
| 0.827828
| 0.496277
|
filter.ex
|
starcoder
|
defmodule ExXml do
@moduledoc """
ExXml allows you to use XML in your library to construct code.
  This can be helpful if you need to deal with a more descriptive style of
  programming, like the ones that are already using SGML or XML languages
  such as HTML and ODF. It also allows you to compose components out of Pascal
case XML elements. You can also have a list of elements which are wrapped
with a fragment.
  Out of the box, the ExXml sigil `~x()` constructs the `Element` and `Fragment`
structs that you can convert to quoted version of Elixir to create code out
of this XML like data. It's as close to JSX as can be done in Elixir.
## Examples
Simple syntax with some nesting and a self closing element
~x(
<foo something=#{"b"}>
<bar2 something="a"/>
<a>
2
</a>
</foo>
)
Now with a fragment
~x(
<>
<foo>
<bar2 something="a"/>
<a>
2
</a>
</foo>
</>
)
This allows you to use a module if you want
~x(
<Foo>
<bar2 something="a"/>
<a>2</a>
</Foo>
)
## How do you implement your library with ExXml
  You have to include `use ExXml` in your module, then implement either a
  `process_ex_xml` function, if you want to expose the sigil x syntax, or a
  `@process_ex_xml` attribute, if you want to use the syntax only within your
  library (these are the names `do_sigil_x/4` looks up below).
  You have to return a quoted form of your data; see the Elixir
  `Macro` documentation for that. The sigil syntax is a macro, and you are
  probably using this library to construct something other than
  just the output of this library.
"""
import NimbleParsec
alias __MODULE__.{Element, Fragment}
defmacro __using__(_opts) do
quote do
import ExXml
defmacro sigil_x(params, options) do
caller = __CALLER__
module = __MODULE__
do_sigil_x(params, options, caller, module)
end
end
end
whitespace =
ascii_string([?\s, ?\n, ?\t], max: 100)
|> label("whitespace")
tag =
ascii_char([?a..?z])
|> reduce({Kernel, :to_string, []})
|> concat(optional(ascii_string([?a..?z, ?_, ?0..?9, not: ?=], min: 1)))
|> ignore(whitespace)
|> reduce({Enum, :join, [""]})
|> label("tag")
|> tag(:tag)
module =
ascii_char([?A..?Z])
|> reduce({Kernel, :to_string, []})
|> concat(optional(ascii_string([?a..?z, ?0..?9, ?A..?Z, ?., not: ?=], min: 1)))
|> ignore(whitespace)
|> reduce({Enum, :join, [""]})
|> label("module")
|> tag(:module)
element_name =
choice([tag, module])
|> label("element_name")
|> tag(:element_name)
text =
ignore(whitespace)
|> utf8_string([not: ?<, not: ?\n], min: 1)
|> reduce({:trim, []})
|> post_traverse({:sub_context_in_text, []})
|> label("text")
sub =
string("$")
|> concat(ascii_string([?0..?9], min: 1))
|> post_traverse({:sub_context, []})
|> label("sub")
quote_string =
ascii_char([?"])
|> label("quote_string")
quoted_attribute_text =
ignore(whitespace)
|> ignore(quote_string)
|> repeat(
lookahead_not(ascii_char([?"]))
|> choice([
~s(\") |> string() |> replace(?'),
utf8_char([])
])
)
|> ignore(quote_string)
|> reduce({List, :to_string, []})
|> label("quoted_attribute_text")
attribute =
ignore(whitespace)
|> concat(tag)
|> ignore(string("="))
|> choice([sub, quoted_attribute_text])
|> label("attribute")
|> tag(:attribute)
opening_tag =
ignore(whitespace)
|> ignore(string("<"))
|> concat(element_name)
|> repeat(
lookahead_not(choice([ascii_char([?>]), string("/>")]))
|> choice([attribute, ascii_char([?>]), string("/>")])
)
|> ignore(optional(string(">")))
|> ignore(whitespace)
|> label("opening_tag")
|> tag(:element)
fragment_tag =
ignore(whitespace)
|> ignore(string("<"))
|> repeat(
lookahead_not(choice([ascii_char([?>]), string("/>")]))
|> choice([attribute, ascii_char([?>]), string("/>")])
)
|> ignore(string(">"))
|> ignore(whitespace)
|> label("fragment_tag")
|> tag(:fragment)
closing_tag =
ignore(whitespace)
|> ignore(string("</"))
|> concat(element_name)
|> ignore(string(">"))
|> ignore(whitespace)
|> label("closing_tag")
|> tag(:closing_tag)
closing_fragment =
ignore(whitespace)
|> ignore(string("</>"))
|> ignore(whitespace)
|> label("closing_fragment")
|> tag(:closing_fragment)
self_closing =
ignore(whitespace)
|> ignore(string("/>"))
|> ignore(whitespace)
|> label("self_closing")
defparsec(
:parse_xml,
parsec(:xml)
)
defcombinatorp(
:xml,
choice([fragment_tag, opening_tag])
|> repeat(
lookahead_not(choice([string("</"), string("/>")]))
|> choice([parsec(:xml), sub, text])
)
|> choice([closing_fragment, closing_tag, self_closing])
|> reduce({:fix_element, []})
)
@spec parse_ex_xml([...]) :: {:ok, [%Element{} | %Fragment{}]} | {:error, String.t()}
def parse_ex_xml(ex_xml) do
{bin, context} = list_to_context(ex_xml)
with {:ok, results, _, _, _, _} <- parse_xml(String.trim(bin), context: context) do
{:ok, results}
end
end
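  # Illustrative example (shapes assumed from the sigil pipeline):
  #   list_to_context(["<a>", {:ok, 42}, "</a>"])
  #   #=> {"<a>$1</a>", %{"$1" => {:ok, 42}}}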
@spec list_to_context([...]) :: {binary, map}
def list_to_context(list) when is_list(list) do
{_, context, acc_list} =
list
|> Enum.reduce({1, [], []}, fn
bin, {index, context, acc_list} when is_binary(bin) ->
{index, context, [bin | acc_list]}
other, {index, context, acc_list} ->
ref = "$#{index}"
{index + 1, [{ref, other} | context], [ref | acc_list]}
end)
{acc_list |> Enum.reverse() |> Enum.join(), Enum.into(context, %{})}
end
def do_sigil_x({:<<>>, _meta, pieces}, 'raw', _, _) do
pieces
|> Enum.map(&clean_litteral/1)
end
def do_sigil_x({:<<>>, _meta, pieces}, 'parse', _, _) do
pieces
|> Enum.map(&clean_litteral/1)
|> parse_ex_xml()
nil
end
def do_sigil_x({:<<>>, _meta, pieces}, 'debug', caller, module) do
{:ok, ex_xml} =
pieces
|> Enum.map(&clean_litteral/1)
|> parse_ex_xml()
ast =
if Kernel.function_exported?(module, :process_ex_xml, 2) do
module.process_ex_xml(ex_xml, caller)
else
case Module.get_attribute(caller.module, :process_ex_xml) do
nil ->
{:ok, escape_ex_xml(ex_xml)}
process_ex_xml ->
process_ex_xml.(ex_xml, caller)
end
end
ast |> Macro.to_string() |> Code.format_string!() |> IO.puts()
ast
end
def do_sigil_x({:<<>>, _meta, pieces}, '', caller, module) do
with {:ok, ex_xml} <-
pieces
|> Enum.map(&clean_litteral/1)
|> parse_ex_xml() do
if Kernel.function_exported?(module, :process_ex_xml, 2) do
module.process_ex_xml(ex_xml, caller)
else
case Module.get_attribute(caller.module, :process_ex_xml) do
nil ->
{:ok, escape_ex_xml(ex_xml)}
process_ex_xml ->
process_ex_xml.(ex_xml, caller)
end
end
else
{:error, message, _rest, _context, _line, _column} ->
{:error, message}
end
end
@spec escape_ex_xml([...]) :: Macro.t()
def escape_ex_xml(list) when is_list(list) do
do_escape_ex_xml(list)
end
defp do_escape_ex_xml(list) when is_list(list) do
Enum.map(list, &do_escape_ex_xml/1)
end
defp do_escape_ex_xml(%{__struct__: module} = struct) do
keyword_list =
struct
|> Map.from_struct()
|> Enum.map(&do_escape_ex_xml/1)
{:%, [], [{:__aliases__, [alias: false], [module]}, {:%{}, [], keyword_list}]}
end
defp do_escape_ex_xml(%{} = map) do
keyword_list =
map
|> Enum.map(&do_escape_ex_xml/1)
{:%{}, [], keyword_list}
end
defp do_escape_ex_xml({key, value}) do
{key, do_escape_ex_xml(value)}
end
defp do_escape_ex_xml(other) do
other
end
@spec fix_element_based_on_type(atom, [...], [...]) ::
{:ok, %Element{} | %Fragment{}} | {:error, String.t()}
defp fix_element_based_on_type(:fragment, content, nested) do
meta = Enum.reduce(content, %{}, &get_meta_content/2)
{closing_fragment, new_nested} = List.pop_at(nested, -1)
if {:closing_fragment, []} !== closing_fragment do
{:error, "Fragment isn't closed"}
else
{:ok, struct(Fragment, Map.put(meta, :children, List.flatten(new_nested)))}
end
end
defp fix_element_based_on_type(:element, content, nested) do
meta = Enum.reduce(content, %{}, &get_meta_content/2)
tag = List.first(content)
{closing_tag, new_nested} = List.pop_at(nested, -1)
if not (is_nil(closing_tag) or {:closing_tag, List.wrap(tag)} === closing_tag) do
with {:closing_tag, cl_tag} <- closing_tag do
{:error,
"Closing tag doesn't match opening tag open_tag: #{inspect(tag)} closing_tag: #{
inspect(cl_tag)
}"}
else
_ ->
{:error,
"Closing tag doesn't match opening tag open_tag: #{inspect(tag)} closing_tag: #{
inspect(closing_tag)
}"}
end
else
{:ok, struct(Element, Map.put(meta, :children, List.flatten(new_nested)))}
end
end
@spec fix_element([{atom, [...]}, ...]) :: %Element{} | %Fragment{} | {:error, String.t()}
defp fix_element([{type, content} | nested]) do
with {:ok, result} <- fix_element_based_on_type(type, content, nested) do
result
end
end
@spec trim([binary]) :: binary
defp trim([string]) when is_binary(string) do
string
|> String.trim()
end
@spec get_meta_content({atom, [...]}, map) :: map
def get_meta_content({:attribute, [{:tag, [key]}, value]}, acc) do
Map.update(acc, :attributes, %{key => value}, &Map.put(&1, key, value))
end
def get_meta_content({:element_name, [{type, [name]}]}, acc) do
acc
|> Map.put(:name, name)
|> Map.put(:type, type)
end
@spec clean_litteral(Macro.t() | binary) :: Macro.t() | binary
defp clean_litteral(
{:"::", _, [{{:., _, [Kernel, :to_string]}, _, [litteral]}, {:binary, _, nil}]}
) do
{:ok, litteral}
end
defp clean_litteral(other) do
other
end
@spec sub_context(binary, [binary], map, {integer, integer}, integer) :: {[...], map}
defp sub_context(_rest, args, context, _line, _offset) do
ref = args |> Enum.reverse() |> Enum.join()
{:ok, value} = context |> Map.get(ref)
{[value], context}
end
@spec sub_context_in_text(binary, [binary], map, {integer, integer}, integer) :: {[...], map}
defp sub_context_in_text(_rest, [text], context, _line, _offset) do
new_text =
Regex.split(~r/\$\d+/, text, include_captures: true)
|> Enum.map(fn text_fragment ->
case Map.get(context, text_fragment) do
{:ok, value} -> value
_ -> text_fragment
end
end)
|> Enum.reject(&match?("", &1))
|> :lists.reverse()
{new_text, context}
end
end
|
lib/ex_xml.ex
| 0.840881
| 0.465145
|
ex_xml.ex
|
starcoder
|
defmodule Rummage.Ecto.CustomHook.SimpleSearch do
@moduledoc """
`Rummage.Ecto.CustomHook.SimpleSearch` is an example of a Custom Hook that
comes with `Rummage.Ecto`.
This module provides a operations that can add searching functionality to
a pipeline of `Ecto` queries. This module works by taking fields, and
`search_type` and `search_term`.
This module doesn't support associations and hence is a simple alternative
to Rummage's default search hook.
NOTE: This module doesn't return a list of entries, but a `Ecto.Query.t`.
This module `uses` `Rummage.Ecto.Hook`.
_____________________________________________________________________________
# ABOUT:
## Arguments:
This Hook expects a `queryable` (an `Ecto.Queryable`) and
`search_params` (a `Map`). The map should be in the format:
`%{field_name: %{search_term: true, search_type: :eq}}`
Details:
* `field_name`: The field name to search by.
* `search_term`: Term to compare the `field_name` against.
* `search_type`: Determines the kind of search to perform. If `:eq`, it
expects the `field_name`'s value to be equal to `search_term`,
    If `:lt`, it expects it to be less than `search_term`.
To see all the `search_type`s, check
`Rummage.Ecto.Services.BuildSearchQuery`
* `search_expr`: This is optional. Defaults to `:where`. This is the way the
search expression is appended to the existing query.
To see all the `search_expr`s, check
`Rummage.Ecto.Services.BuildSearchQuery`
For example, if we want to search products with `available` = `true`, we would
do the following:
```elixir
Rummage.Ecto.CustomHook.SimpleSearch.run(Product, %{available:
%{search_type: :eq,
search_term: true}})
```
This can be used for a search with multiple fields as well. Say, we want to
search for products that are `available`, but have a price less than `10.0`.
```elixir
Rummage.Ecto.CustomHook.SimpleSearch.run(Product,
  %{available: %{search_type: :eq,
                 search_term: true},
    price: %{search_type: :lt,
             search_term: 10.0}})
```
  ## Associations:
  This module doesn't support associations.
____________________________________________________________________________
# ASSUMPTIONS/NOTES:
* This Hook assumes that the searched field is a part of the schema passed
as the `queryable`.
* This Hook has the default `search_type` of `:eq`.
* This Hook has the default `search_expr` of `:where`.
____________________________________________________________________________
# USAGE:
For a regular search:
This returns a `queryable` which upon running will give a list of `Parent`(s)
  searched by `field_1`
```elixir
alias Rummage.Ecto.CustomHook.SimpleSearch
searched_queryable = SimpleSearch.run(Parent,
  %{field_1: %{search_type: :like, search_term: "field_!"}})
```
For a case-insensitive search:
This returns a `queryable` which upon running will give a list of `Parent`(s)
  searched case-insensitively by `field_1`.
Keep in mind that `case_insensitive` can only be called for `text` fields
```elixir
alias Rummage.Ecto.CustomHook.SimpleSearch
searched_queryable = SimpleSearch.run(Parent,
  %{field_1: %{search_type: "ilike", search_term: "field_!"}})
```
There are many other `search_types`. Check out
`Rummage.Ecto.Services.BuildSearchQuery` docs to explore more `search_types`.
This module can be used by overriding the default module. This can be done
in the following ways:
In the `Rummage.Ecto` call:
```elixir
Rummage.Ecto.rummage(queryable, rummage,
search: Rummage.Ecto.CustomHook.SimpleSearch)
or
MySchema.rummage(rummage, search: Rummage.Ecto.CustomHook.SimpleSearch)
```
OR
Globally for all models in `config.exs`:
```elixir
config :my_app,
Rummage.Ecto,
search: Rummage.Ecto.CustomHook.SimpleSearch
```
OR
When `using` Rummage.Ecto with an `Ecto.Schema`:
```elixir
defmodule MySchema do
use Rummage.Ecto, repo: SomeRepo,
search: Rummage.Ecto.CustomHook.SimpleSearch
  end
  ```
"""
use Rummage.Ecto.Hook
import Ecto.Query
@expected_keys ~w(search_type search_term)a
@err_msg "Error in params, No values given for keys: "
alias Rummage.Ecto.Services.BuildSearchQuery
@doc ~S"""
This is the callback implementation of Rummage.Ecto.Hook.run/2.
Builds a search `Ecto.Query.t` on top of a given `Ecto.Query.t` variable
with given `params`.
Besides an `Ecto.Query.t` an `Ecto.Schema` module can also be passed as it
implements `Ecto.Queryable`
Params is a `Map`, keys of which are field names which will be searched for and
value corresponding to that key is a list of params for that key, which
should include the keys: `#{Enum.join(@expected_keys, ", ")}`.
  This function expects a `search_type` and, optionally, a `search_expr`.
The `search_term` is what the `field`
will be matched to based on the `search_type` and `search_expr`.
If no `search_expr` is given, it defaults to `where`.
For all `search_exprs`, refer to `Rummage.Ecto.Services.BuildSearchQuery`.
For all `search_types`, refer to `Rummage.Ecto.Services.BuildSearchQuery`.
  If an expected key isn't given, a `RuntimeError` is raised.
  NOTE: This hook isn't responsible for doing type validations. That's the
responsibility of the user sending `search_term` and `search_type`.
## Examples
When search_params are empty, it simply returns the same `queryable`:
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{})
Parent
When a non-empty map is passed as a field `params`, but with a missing key:
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> SimpleSearch.run(Parent, %{field: %{search_type: :eq}})
** (RuntimeError) Error in params, No values given for keys: search_term
When a valid map of params is passed with an `Ecto.Schema` module:
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{field1: %{
...> search_type: :like,
...> search_term: "field1",
...> search_expr: :where}}
iex> SimpleSearch.run(Rummage.Ecto.Product, search_params)
#Ecto.Query<from p0 in subquery(from p0 in Rummage.Ecto.Product), where: like(p0.field1, ^"%field1%")>
When a valid map of params is passed with an `Ecto.Query.t`:
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{field1: %{
...> search_type: :like,
...> search_term: "field1",
...> search_expr: :where}}
iex> query = from p0 in "products"
iex> SimpleSearch.run(query, search_params)
#Ecto.Query<from p0 in subquery(from p0 in "products"), where: like(p0.field1, ^"%field1%")>
  When a valid map of params is passed with an `Ecto.Query.t` and `:or_where`:
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{field1: %{
...> search_type: :like,
...> search_term: "field1",
...> search_expr: :or_where}}
iex> query = from p0 in "products"
iex> SimpleSearch.run(query, search_params)
#Ecto.Query<from p0 in subquery(from p0 in "products"), or_where: like(p0.field1, ^"%field1%")>
When a valid map of params is passed with an `Ecto.Query.t`, searching on
a boolean param
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{available: %{
...> search_type: :eq,
...> search_term: true,
...> search_expr: :where}}
iex> query = from p0 in "products"
iex> SimpleSearch.run(query, search_params)
#Ecto.Query<from p0 in subquery(from p0 in "products"), where: p0.available == ^true>
When a valid map of params is passed with an `Ecto.Query.t`, searching on
a float param
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{price: %{
...> search_type: :gteq,
...> search_term: 10.0,
...> search_expr: :where}}
iex> query = from p0 in "products"
iex> SimpleSearch.run(query, search_params)
#Ecto.Query<from p0 in subquery(from p0 in "products"), where: p0.price >= ^10.0>
When a valid map of params is passed with an `Ecto.Query.t`, searching on
a boolean param, but with a wrong `search_type`.
  NOTE: This doesn't validate the `search_term` against the `search_type`
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> import Ecto.Query
iex> search_params = %{available: %{
...> search_type: :ilike,
...> search_term: true,
...> search_expr: :where}}
iex> query = from p0 in "products"
iex> SimpleSearch.run(query, search_params)
** (ArgumentError) argument error
"""
@spec run(Ecto.Query.t(), map()) :: Ecto.Query.t()
def run(q, s), do: handle_search(q, s)
# Helper function which handles addition of search query on top of
# the sent queryable variable, for all search fields.
defp handle_search(queryable, search_params) do
search_params
|> Map.to_list()
|> Enum.reduce(queryable, &search_queryable(&1, &2))
end
# Helper function which handles addition of search query on top of
# the sent queryable variable, for ONE search fields.
# This delegates the query building to `BuildSearchQuery` module
defp search_queryable(param, queryable) do
field = elem(param, 0)
field_params = elem(param, 1)
:ok = validate_params(field_params)
search_type = Map.get(field_params, :search_type)
search_term = Map.get(field_params, :search_term)
search_expr = Map.get(field_params, :search_expr, :where)
BuildSearchQuery.run(
from(e in subquery(queryable)),
field,
{search_expr, search_type},
search_term
)
end
# Helper function that validates the list of params based on
# @expected_keys list
defp validate_params(params) do
key_validations = Enum.map(@expected_keys, &Map.fetch(params, &1))
case Enum.filter(key_validations, &(&1 == :error)) do
[] -> :ok
_ -> raise @err_msg <> missing_keys(key_validations)
end
end
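  # e.g. validate_params(%{search_type: :eq, search_term: true}) #=> :ok
  # while validate_params(%{search_type: :eq}) raises, since :search_term is missing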
# Helper function used to build error message using missing keys
defp missing_keys(key_validations) do
key_validations
|> Enum.with_index()
|> Enum.filter(fn {v, _i} -> v == :error end)
|> Enum.map(fn {_v, i} -> Enum.at(@expected_keys, i) end)
|> Enum.map(&to_string/1)
|> Enum.join(", ")
end
@doc """
Callback implementation for Rummage.Ecto.Hook.format_params/3.
  This function ensures that params for each field have the keys `search_type` and
  `search_expr`, which are essential for running this hook module.
## Examples
iex> alias Rummage.Ecto.CustomHook.SimpleSearch
iex> SimpleSearch.format_params(Parent, %{field: %{}}, [])
%{field: %{search_expr: :where, search_type: :eq}}
"""
@spec format_params(Ecto.Query.t(), map(), keyword()) :: map()
def format_params(_queryable, search_params, _opts) do
search_params
|> Map.to_list()
|> Enum.map(&put_keys/1)
|> Enum.into(%{})
end
defp put_keys({field, field_params}) do
field_params =
field_params
|> Map.put_new(:search_type, :eq)
|> Map.put_new(:search_expr, :where)
{field, field_params}
end
end
|
lib/rummage_ecto/custom_hooks/simple_search.ex
| 0.812198
| 0.936227
|
simple_search.ex
|
starcoder
|
defmodule Lichex.Summary do
defmodule Category do
defstruct count: 0, wins: 0
def new(attrs \\ []) do
struct(__MODULE__, attrs)
end
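    # e.g. Category.new() |> Category.inc(true) #=> %Category{count: 1, wins: 1}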
def inc(%{count: count, wins: wins}, true) do
new(count: count + 1, wins: wins + 1)
end
def inc(%{count: count} = cat, false) do
%{cat | count: count + 1}
end
end
alias Lichex.Summary.Category
defstruct win: 0,
loss: 0,
draw: 0,
black: Category.new(),
white: Category.new(),
rated: Category.new(),
casual: Category.new(),
blitz: Category.new(),
rapid: Category.new()
def new(username, games) when is_list(games) do
Enum.reduce(games, %__MODULE__{}, &reducer(&1, &2, username))
end
defp reducer(game, acc, username) do
winner? = win?(game, username)
acc
|> reduce_result(game, username)
|> reduce_color(game, username, winner?)
|> reduce_speed(game, winner?)
|> reduce_rated(game, winner?)
end
defp reduce_result(acc, game, username) do
case result(game, username) do
:win -> %{acc | win: acc.win + 1}
:loss -> %{acc | loss: acc.loss + 1}
:draw -> %{acc | draw: acc.draw + 1}
end
end
defp reduce_color(
acc,
%{"players" => players},
username,
winner?
) do
case user_color(players, username) do
"white" -> %{acc | white: Category.inc(acc.white, winner?)}
"black" -> %{acc | black: Category.inc(acc.black, winner?)}
end
end
defp reduce_speed(acc, game, winner?) do
case Map.get(game, "speed") do
"blitz" -> %{acc | blitz: Category.inc(acc.blitz, winner?)}
"rapid" -> %{acc | rapid: Category.inc(acc.rapid, winner?)}
_ -> acc
end
end
defp reduce_rated(acc, game, winner?) do
case Map.get(game, "rated") do
true -> %{acc | rated: Category.inc(acc.rated, winner?)}
false -> %{acc | casual: Category.inc(acc.casual, winner?)}
end
end
defp result(%{"status" => status}, _) when status in ["draw", "stalemate"], do: :draw
defp result(game, username) do
case win?(game, username) do
true -> :win
false -> :loss
end
end
defp win?(%{"status" => "draw"}, _), do: false
defp win?(%{"status" => "stalemate"}, _), do: false
defp win?(%{"winner" => winner, "players" => players}, username) do
user_color(players, username) == winner
end
defp user_color(players, username) do
case get_in(players, ["black", "user", "id"]) do
^username -> "black"
_ -> "white"
end
end
defimpl String.Chars do
def to_string(t) do
"""
+===========================+
| Recap |
+===========================+
#{pad_out(t.win, "WON")}
#{pad_out(t.loss, "LOST")}
#{pad_out(t.draw, "DRAWN")}
+---------------------------+
#{pad_out(t.black, "BLACK")}
#{pad_out(t.white, "WHITE")}
+---------------------------+
#{pad_out(t.rated, "RATED")}
#{pad_out(t.casual, "CASUAL")}
+---------------------------+
#{pad_out(t.blitz, "BLITZ")}
#{pad_out(t.rapid, "RAPID")}
+===========================+
"""
end
defp pad_out(count, label) when is_integer(count) do
output = String.pad_trailing("| #{count} games #{label}", 28)
"#{output}|"
end
defp pad_out(%{count: count, wins: wins}, label) do
output = String.pad_trailing("| #{count} #{label} games (#{wins} wins)", 28)
"#{output}|"
end
end
end
|
lib/lichess/summary.ex
| 0.708112
| 0.459076
|
summary.ex
|
starcoder
|
defmodule PoxTool.CLI do
@moduledoc """
PoxTool is a utility for working with poxels.
usage:
poxtool [command] [args]
Commands:
* `create` - Create a poxel.
* `help` - Prints this message and exits.
### create
poxtool create [options] FILE
Options:
* `--voxel`, `-v FILE` - Create from voxel input.
* `--depth`, `-d PRECISION MODE` - Set the depth precision and mode. Defaults to finding the first usable size and mode.
* `--depth-bits`, `-db PRECISION` - Set the depth precision. Defaults to finding the first usable size.
* `--depth-mode`, `-dm MODES` - Set the depth mode (comma separated list of numbers). Defaults to finding the first usable mode.
* `--size`, `-s WIDTH HEIGHT DEPTH` - Set the size of the poxel data.
  * `--shared-palette`, `-sp BOOL` - Set whether the palette should be shared or not. Defaults to finding the best choice.
  * `--shared-depth`, `-sd BOOL` - Set whether the depth maps should be shared or not. Defaults to finding the best choice.
  * `--shared-colour`, `-sc BOOL` - Set whether the colour maps should be shared or not. Defaults to finding the best choice.
  * `--palette`, `-p BOOL` - Set whether the colour map should use a palette or not. Defaults to finding the best choice.
"""
def main(args \\ [])
def main(["help"|_]), do: help()
def main(["create"|args]), do: create(args)
def main(_), do: help()
def create(args, opts \\ [])
def create([cmd, file|args], opts) when cmd in ["-v", "--voxel"], do: create(args, [{ :source, { :voxel, file } }|opts])
def create([cmd, model|args], opts) when cmd in ["-m", "--model"], do: create(args, [{ :model, to_integer(model) }|opts])
def create([cmd, precision, mode|args], opts) when cmd in ["-d", "--depth"], do: create(args, [{ :depth_bits, to_integer(precision) }, { :depth_mode, to_integer(mode) }|opts])
def create([cmd, precision|args], opts) when cmd in ["-db", "--depth-bits"], do: create(args, [{ :depth_bits, to_integer(precision) }|opts])
def create([cmd, mode|args], opts) when cmd in ["-dm", "--depth-mode"], do: create(args, [{ :depth_mode, to_integer_list(mode) }|opts])
def create([cmd, width, height, depth|args], opts) when cmd in ["-s", "--size"], do: create(args, [{ :size, { to_integer(width), to_integer(height), to_integer(depth) } }|opts])
def create([cmd, shared|args], opts) when cmd in ["-sp", "--shared-palette"], do: create(args, [{ :shared_palette, to_boolean(shared) }|opts])
def create([cmd, shared|args], opts) when cmd in ["-sd", "--shared-depth"], do: create(args, [{ :shared_depth, to_boolean(shared) }|opts])
def create([cmd, shared|args], opts) when cmd in ["-sc", "--shared-colour"], do: create(args, [{ :shared_colour, to_boolean(shared) }|opts])
def create([cmd, palette|args], opts) when cmd in ["-p", "--palette"], do: create(args, [{ :palette, to_boolean(palette) }|opts])
def create([file], opts) do
case opts[:source] do
{ :voxel, file } ->
file
|> File.read!
|> Vox.new
|> Vox.transform(:left, :bottom, :front)
|> PoxTool.Voxel.to_poxel(opts)
nil ->
{ w, h, d } = opts[:size] || { 64, 64, 64 }
face_front = Enum.map(1..h, fn _ -> Enum.map(1..w, fn _ -> [{ { 1, nil }, { 0.0, 0.0, 0.0, 1.0 }, :diffuse }] end) end)
face_left = Enum.map(1..h, fn _ -> Enum.map(1..d, fn _ -> [{ { 1, nil }, { 0.0, 0.0, 0.0, 1.0 }, :diffuse }] end) end)
face_bottom = Enum.map(1..d, fn _ -> Enum.map(1..w, fn _ -> [{ { 1, nil }, { 0.0, 0.0, 0.0, 1.0 }, :diffuse }] end) end)
%PoxTool.Poxel{
front: face_front,
back: face_front,
left: face_left,
right: face_left,
bottom: face_bottom,
top: face_bottom
}
end
|> PoxTool.create(file, opts)
end
def create(_, _), do: help()
defp help(), do: get_docs() |> SimpleMarkdown.convert(render: &SimpleMarkdownExtensionCLI.Formatter.format/1) |> IO.puts
defp get_docs() do
if Version.match?(System.version, "> 1.7.0") do
{ :docs_v1, _, :elixir, "text/markdown", %{ "en" => doc }, _, _ } = Code.fetch_docs(__MODULE__)
doc
else
{ _, doc } = Code.get_docs(__MODULE__, :moduledoc)
doc
end
end
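  # Lenient argument parsers (illustrative):
  #   to_boolean("y") #=> true, to_boolean("no") #=> false
  #   to_integer_list("1,2,3") #=> [1, 2, 3]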
defp to_boolean(value) when value in ["true", "TRUE", "1", "yes", "YES", "y", "Y"], do: true
defp to_boolean(_), do: false
defp to_integer(value) do
{ value, _ } = Integer.parse(value)
value
end
defp to_integer_list(value), do: value |> String.split(",") |> Enum.map(&to_integer/1)
end
|
lib/pox_tool/cli.ex
| 0.781164
| 0.568326
|
cli.ex
|
starcoder
|
defmodule Handle do
@moduledoc """
Handle module for list entries such as `items` and `emails`.
  The most used function is the `calculate/2` function.
"""
@doc """
Function calculates the total value of a list of items,
divides the total amount according to the amount and past emails.
## Function parameters
- items: list of maps contains information about the list item
- emails: list of people who will be divided the total amount
## Additional Information
- if the parameters passed are empty lists, the function returns an empty list
## Example empty lists
iex> Handle.calculate([], [])
[]
## Example non-empty lists
iex> Handle.calculate([%{name: "arroz", amount: 4, price: 598, type: "kg"}, %{name: "feijão", amount: 4, price: 799, type: "kg"}, %{name: "<NAME>", amount: 4, price: 4289, type: "kg"}], ["<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>"])
[
%{email: "<EMAIL>", value: "R$ 56.86"},
%{email: "<EMAIL>", value: "R$ 56.86"},
%{email: "<EMAIL>", value: "R$ 56.86"},
%{email: "<EMAIL>", value: "R$ 56.86"}
]
"""
  @spec calculate(list(), list()) :: [map()]
  def calculate([], []), do: []
  def calculate(items, emails) do
amount = items |> Enum.map(fn item -> item.amount * item.price end) |> Enum.sum()
people = emails |> Enum.count()
split(amount, people, emails)
end
@doc """
Function calculates apportionment of how much each person must pay.
And it displays in a `map` with the email of each person and the amount of must be paid.
## Function parameters
  - value: total value of the list of items; the result of summing the quantity
  times the unit price of each item.
- quantity: number of people / emails from the email list
- emails: the email list
## Additional Information
- function returns a `list` of` map`:
```Elixir
[%{email: <EMAIL>, value: "R$ 50.00"}]
```
## Example
iex> Handle.split(100, 3, ["<EMAIL>", "<EMAIL>", "<EMAIL>"])
[
%{email: "<EMAIL>", value: "R$ 0.33"},
%{email: "<EMAIL>", value: "R$ 0.33"},
%{email: "<EMAIL>", value: "R$ 0.34"}
]
"""
@spec split(number, number, list()) :: [map()]
def split(value, quantity, emails) do
cloven = value / quantity
case rem(value, quantity) do
0 ->
value = cloven
Enum.map(emails, fn email -> %{email: email, value: value} end) |> print()
_ ->
value = process(cloven) |> String.to_integer()
list = Enum.map(emails, fn email -> %{email: email, value: value} end)
key = Enum.count(list) - 1
list
|> List.update_at(key, fn pessoas ->
%{email: pessoas.email, value: pessoas.value + 1}
end)
|> print()
end
end
@doc """
  The function receives a repeating (infinite) decimal or a fractional decimal, truncates it to two decimal places,
  and returns a string in integer format.
## Function parameters
  - cloven: value obtained by dividing the sum of items by the number of people / emails
## Additional Information
  - this function only receives numbers with infinite decimals or fractions with more than 3 decimal places
## Example
iex> Handle.process(33.33333333333)
"033"
iex> Handle.process(2986.333333333336)
"2986"
"""
@spec process(float) :: String.t()
def process(cloven) when is_float(cloven) do
cloven = cloven / 100
cloven
|> Float.floor(2)
|> Float.to_string()
|> String.split(".")
|> Enum.join()
end
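  # e.g. print([%{email: "a@b.c", value: 33}]) #=> [%{email: "a@b.c", value: "R$ 0.33"}]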
defp print(list) do
Enum.map(list, fn list -> %{email: list.email, value: "R$ #{list.value / 100}"} end)
end
end
|
lib/handle.ex
| 0.868227
| 0.922552
|
handle.ex
|
starcoder
|
defmodule Uplink.Monitor do
@moduledoc """
A behaviour module for implementing a library or application monitor.
Uplink Monitors provide a template for the most common requirements to instrument a library
or application in a consistent manner. These provide a simple abstraction for building
standard observability patterns for whatever is being monitored while removing the need
for users to repeatedly define and configure things like `TelemetryMetrics` definitions.
There are three required callbacks which make up a monitor. The provided macro provides
default implementations for each, allowing you to only implement what is needed for a
particular monitor.
Each callback receives the same argument defined in `t:Uplink.monitor/0`.
## Usage
defmodule MyApp.CustomMonitor do
      use Uplink.Monitor
# override any callbacks you need
@impl true
def init(_) do
# some setup
:ok
end
@impl true
def metric_definitions(_) do
[Telemetry.Metrics.counter("some.event")]
end
@impl true
def poller_specs(_) do
[
{:timer.seconds(5), [
{MyApp.Telemetry, :emit_stats, []}
]}
]
        end
      end
See `Uplink.Monitors.VM` for an example implementation.
"""
@doc """
Invoked when Uplink is starting.
It is useful for initializing any custom monitoring for the library or application
being monitored. Examples include creating `telemetry` handlers to log slow `Ecto` queries
which exceed a threshold or starting an OpenTelemetry bridge library.
If the function returns `:error`, Uplink will exit.
This callback is required.
"""
@callback init(args :: any()) :: :ok | :error
@doc """
Invoked when Uplink is starting.
It is useful for providing a standard set of `t:Telemetry.Metrics.t/0` definitions
for the library or application being monitored.
This callback is required.
"""
@callback metric_definitions(args :: any()) :: Uplink.metric_definitions()
@doc """
Invoked when Uplink is starting.
It is useful for providing a standard set of `telemetry_poller`s for the library or
application being monitored. Examples include emitting cache sizes, memory usage, or
process counts.
This callback is required.
"""
@callback poller_specs(args :: any()) :: Uplink.poller_specs()
defmacro __using__(_options) do
quote location: :keep do
@behaviour Uplink.Monitor
@doc false
def init(_args), do: :ok
@doc false
def metric_definitions(_args), do: []
@doc false
def poller_specs(_args), do: []
defoverridable init: 1, metric_definitions: 1, poller_specs: 1
end
end
end
|
lib/uplink/monitor.ex
| 0.909754
| 0.506286
|
monitor.ex
|
starcoder
|
defmodule Timex.Ecto.Time do
@moduledoc """
Support for using Timex with :time fields
"""
use Timex
@behaviour Ecto.Type
def type, do: :time
@doc """
We can let Ecto handle blank input
"""
defdelegate blank?(value), to: Ecto.Type
@doc """
Handle casting to Timex.Ecto.Time
"""
def cast(input) when is_binary(input) do
case Timex.parse(input, "{ISOtime}") do
{:ok, %Timex.DateTime{hour: hour,
minute: minute,
second: second,
millisecond: millisecond}} ->
load({hour, minute, second, millisecond * 1_000})
{:error, _} -> :error
end
end
def cast({_, _, _} = timestamp), do: {:ok, timestamp}
# Support embeds_one/embeds_many
def cast(%{"calendar" => _,
"year" => _, "month" => _, "day" => _,
"hour" => h, "minute" => mm, "second" => s, "ms" => ms,
"timezone" => _}) do
load({h, mm, s, ms * 1_000})
end
def cast(%{"calendar" => _,
"year" => _, "month" => _, "day" => _,
"hour" => h, "minute" => mm, "second" => s, "millisecond" => ms,
"timezone" => _}) do
load({h, mm, s, ms * 1_000})
end
def cast(input) do
case Ecto.Time.cast(input) do
      {:ok, time} -> load({time.hour, time.min, time.sec, time.usec})
:error -> :error
end
end
@doc """
Load from the native Ecto representation
"""
def load({hour, minute, second, usecs}) do
millis = Time.from(usecs, :microseconds) |> Time.to_milliseconds
time = %{DateTime.epoch | :hour => hour, :minute => minute, :second => second, :millisecond => millis} |> DateTime.to_timestamp(:epoch)
{:ok, time}
end
def load(_), do: :error
@doc """
Convert to the native Ecto representation
"""
def dump({_mega, _sec, _micro} = timestamp) do
%DateTime{hour: h, minute: m, second: s, millisecond: ms} = DateTime.from_timestamp(timestamp, :epoch)
{:ok, {h, m, s, ms * 1_000}}
end
def dump(_), do: :error
end
|
lib/types/time.ex
| 0.765593
| 0.406479
|
time.ex
|
starcoder
|
defmodule AnyAscii do
@moduledoc """
Unicode to ASCII transliteration
Converts Unicode characters to their best ASCII representation
AnyAscii provides ASCII-only replacement strings for practically all Unicode
characters. Text is converted character-by-character without considering the
context. The mappings for each script are based on popular existing
romanization systems. Symbolic characters are converted based on their meaning
or appearance. All ASCII characters in the input are left unchanged, every
other character is replaced with printable ASCII characters. Unknown
characters and some known characters are replaced with an empty string and
removed.
"""
import Bitwise
@doc """
Transliterates chardata into ASCII.
## Examples
iex> AnyAscii.transliterate("άνθρωποι") |> IO.iodata_to_binary()
"anthropoi"
iex> AnyAscii.transliterate('Борис') |> IO.iodata_to_binary()
"Boris"
iex> AnyAscii.transliterate([?深]) |> IO.iodata_to_binary()
"Shen"
"""
@spec transliterate(IO.chardata()) :: IO.chardata()
def transliterate(chardata)
def transliterate([c | t]) when c in 0..0x7F,
do: [c | transliterate(t)]
def transliterate([c | t]) when c in 0x80..0x10FFFF,
do: [transliterate_char(c) | transliterate(t)]
def transliterate([h | t]),
do: [transliterate(h) | transliterate(t)]
def transliterate([]),
do: []
def transliterate(<<s::binary>>),
do: transliterate(String.to_charlist(s))
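  # Replacements are stored per 256-codepoint Unicode block: the high bits of
  # the codepoint select the block tuple, the low byte indexes into it.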
defp transliterate_char(c) do
block_num = c >>> 8
lo = c &&& 0xFF
block = get_block(block_num)
if lo < tuple_size(block), do: elem(block, lo), else: []
end
defp get_block(block_num) do
key = {__MODULE__, block_num}
case :persistent_term.get(key, nil) do
nil ->
b = read_block(block_num)
:persistent_term.put(key, b)
b
b ->
b
end
end
defp read_block(block_num) do
path = Path.join([Application.app_dir(:any_ascii), "priv", Integer.to_string(block_num)])
if File.exists?(path) do
path
|> File.read!()
|> :zlib.unzip()
|> String.split("\t")
|> Enum.map(&minimize_string/1)
|> List.to_tuple()
else
{}
end
end
defp minimize_string(s) do
case s do
"" -> []
<<c>> -> c
_ -> s
end
end
end
|
impl/elixir/lib/any_ascii.ex
| 0.751375
| 0.406391
|
any_ascii.ex
|
starcoder
|
defmodule Mix.Tasks.Xtra do
use Mix.Task
alias Extractly.Toc.Options
@shortdoc "Transforms templates"
@moduledoc """
## Mix task to Transform EEx templates in the context of the `Extractly` module.
This tool serves two purposes.
  1. A simple CLI to basically `EEx.eval_file/2`
1. Access to the `Extractly` module (available as binding `xtra` too)
1. Access to the name of the rendered template with the `template` binding
The `Extractly` module gives easy access to Elixir metainformation of the application using
the `extractly` package, notably, _module_ and _function_ documentation.
  This is, BTW, the raison d'être of this package: simple creation of a `README.md` file with easy
  access to the project's hex documentation.
  Thus hexdocs and GitHub will always be synchronized.
To see that in action just look at the [`README.md.eex`](README.md.eex) file of this package and compare
with what you are reading here.
Example Template:
Some text
<%= xtra.functiondoc("M.shiny_function/2") %>
<%= xtra.moduledoc("String") %>
<%= xtra.moduledoc("MyModule", include: :all) %>
<%= xtra.toc "SomeFile.md" %>
More text
A special case is the occurrence of `<%= xtra.toc :self, ... %>` which just inserts a
  placeholder which is then replaced by the TOC of the generated output in a second pass
"""
@strict [
help: :boolean,
output: :string,
quiet: :boolean,
verbose: :boolean,
version: :boolean,
]
@aliases [
h: :help,
q: :quiet,
v: :version,
V: :verbose
]
@impl true
def run(args) do
Extractly.Messages.Agent.start_link
OptionParser.parse(args, strict: @strict, aliases: @aliases)
|> _mappify_options()
|> _run()
|> _output()
end
defp _mappify_options({options, args, errors}),
do: {options |> Enum.into(%{}), args, errors}
defp _maybe_insert_toc(line, result) do
if String.contains?(line, Extractly.Toc.placeholder_pfx) do
options = Options.from_string!(line)
Extractly.Toc.render(result, options)
else
[line]
end
end
# run returns the options unless an error occurred
defp _output(opts_or_error)
defp _output(opts) when is_map(opts) do
opts
|> _severity()
|> Extractly.Messages.messages
|> Enum.each(&_output_message/1)
end
defp _output(_), do: nil
defp _output_message({status, message}), do: IO.puts(:stderr, "*#{status}* -- #{message}")
@help_text """
mix xtra
  convert an EEx template file to output with a shiny CLI and __Extra__ context from the `Extractly` module
For more detailed information use
mix xtra.help
"""
defp _process(options, template)
defp _process(%{help: true}, _), do: IO.puts(@help_text)
defp _process(%{version: true}, _), do: IO.puts(Extractly.version)
defp _process(options, template), do:
if File.exists?(template),
do: _process_template(options, template),
else: _puts_err("Template #{template} does not exist", options)
defp _process_template(options, template) do
try do
Mix.Task.run("compile")
rescue
UndefinedFunctionError -> nil
end
options
|> _process_pass1(template)
|> _process_pass2()
|> _write_result(options, template)
end
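  # Pass 1 evaluates the EEx template; pass 2 re-scans the rendered lines and
  # expands any TOC placeholder emitted by `xtra.toc :self` into a real TOC.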
defp _process_pass1(options, template) do
EEx.eval_file(template, [xtra: Extractly.Xtra, template: template, options: options])
end
defp _process_pass2(result) do
lines = String.split(result, "\n")
lines
|> Enum.flat_map(&_maybe_insert_toc(&1, lines))
|> Enum.join("\n")
end
defp _run(parsed)
defp _run({options, [], []}), do: _process(options, "README.md.eex")
defp _run({options, [template | args], []}) do
unless Enum.empty?(args) do
      _puts_err("WARNING: Spurious templates #{inspect args} are ignored", options)
end
_process(options, template)
end
defp _run({options, _, errors}), do:
_puts_err("ERROR: Illegal arguments: #{inspect(errors)}\n\nTry `mix xtra.help` for, well, some help", options)
defp _puts_err(message, options)
defp _puts_err(_, %{quiet: true}), do: nil
defp _puts_err(message, _), do: IO.puts(:stderr, message)
defp _severity(opts)
defp _severity(%{verbose: true}), do: :debug
defp _severity(%{quiet: true}), do: :error
defp _severity(_), do: :info
defp _write_result(output, options, template) do
output_fn = Map.get(options, :output, String.replace(template, ~r{\.eex\z}, ""))
case File.write(output_fn, output) do
:ok -> options
      {:error, posix_reason} = x ->
        _puts_err("Cannot write to #{output_fn}, reason: #{:file.format_error(posix_reason)}", options)
        x
end
end
end
|
lib/tasks/xtra.ex
| 0.646237
| 0.509764
|
xtra.ex
|
starcoder
|
defmodule Formex.View do
use Phoenix.HTML
alias Formex.Form
alias Formex.Field
alias Formex.FormCollection
alias Formex.FormNested
alias Formex.Button
@moduledoc """
Helper functions for templating.
Example of use:
<%= formex_form_for @form, @action, fn f -> %>
<%= if @form.submitted? do %>
<div class="alert alert-danger">
<p>Oops, something went wrong! Please check the errors below.</p>
</div>
<% end %>
<%= formex_rows f %>
<div class="form-group">
<%= submit "Submit", class: "btn btn-primary" %>
</div>
<% end %>
## Changing a form template
You can change the template globally or in the specific form/field.
* config
```
config :formex,
template: Formex.Template.BootstrapHorizontal
template_options: [ # options used by this template
left_column: "col-xs-2",
right_column: "col-xs-10"
]
```
* `formex_form_for/4`:
```
<%= formex_form_for @form, @action, [
class: "form-horizontal",
template: Formex.Template.BootstrapHorizontal
], fn f -> %>
...
<% end %>
```
* `formex_rows/2`:
```
<%= formex_rows f, template: Formex.Template.BootstrapHorizontal %>
```
* `formex_row/3`:
```
<%= formex_row f, :name, template: Formex.Template.BootstrapHorizontal %>
```
"""
defmacro __using__([]) do
quote do
import Formex.View
import Formex.View.Nested
import Formex.View.Collection
end
end
@doc """
Works similar to a
[Phoenix.HTML.Form](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#form_for/4)
In the callback function the first argument is `t:Formex.Form.t/0` instead of a
`t:Phoenix.HTML.Form.t/0`.
This argument contains the `t:Phoenix.HTML.Form.t/0` under a `:phoenix_form` key
## Options
In `options` argument you are passing together options for `Formex.View` and for `Phoenix.HTML`.
### Formex options
* `template` - a form template that implements `Formex.Template`, for example:
`Formex.Template.BootstrapHorizontal`
* `template_options` - additional options, supported by the template
### Phoenix options
Options not mentioned before will be passed to a
[Phoenix.HTML.Form](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#form_for/4)
  function. Options below are already set by Formex and can be overridden.
* `as` - form name, defaults to struct name
* `method` - method, defaults to `:post`
For rest of options, see
[Phoenix.HTML.Form](https://hexdocs.pm/phoenix_html/Phoenix.HTML.Form.html#form_for/4) docs.
"""
@spec formex_form_for(
form :: Form.t(),
action :: String.t(),
options :: Keyword.t(),
fun :: (Formex.t() -> Phoenix.HTML.unsafe())
) :: Phoenix.HTML.safe()
def formex_form_for(form, action, options \\ [], fun) do
phoenix_options =
options
|> Keyword.delete(:template)
|> Keyword.delete(:template_options)
|> Keyword.put_new(:as, form_for_name(form))
|> Keyword.put_new(:method, form.method || :post)
fake_params =
%{}
|> Map.put(to_string(phoenix_options[:as]), form_to_params(form))
fake_conn = %Plug.Conn{params: fake_params, method: "POST"}
Phoenix.HTML.Form.form_for(fake_conn, action, phoenix_options, fn phx_form ->
form
|> Map.put(:phoenix_form, phx_form)
|> Map.put(:template, options[:template])
|> Map.put(:template_options, options[:template_options])
|> fun.()
end)
end
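  # e.g. form_for_name(%{struct_module: MyApp.UserForm}) #=> "user_form"
  # (module name is illustrative)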
defp form_for_name(%{struct_module: module}) do
module
|> Module.split()
|> List.last()
|> Macro.underscore()
end
@spec form_to_params(form :: Form.t()) :: Map.t()
defp form_to_params(form) do
form.items
|> Enum.map(fn item ->
case item do
%Field{} ->
form_to_params_field(form, item)
%FormNested{} ->
form_to_params_nested(form, item)
%FormCollection{} ->
form_to_params_collection(form, item)
_ ->
false
end
end)
|> Enum.filter(& &1)
|> Enum.into(%{})
end
@spec form_to_params_field(form :: Form.t(), item :: Field.t()) :: Map.t()
defp form_to_params_field(form, item) do
val =
if item.custom_value do
value = Map.get(form.new_struct, item.struct_name)
item.custom_value.(value)
else
Map.get(form.new_struct, item.struct_name)
end
new_val =
case item.type do
:multiple_select ->
(val || [])
|> Enum.map(fn subval ->
subval
|> case do
substruct when is_map(substruct) ->
substruct.id
_ ->
subval
end
|> to_string
end)
_ ->
val
end
{to_string(item.name), new_val}
end
@spec form_to_params_nested(form :: Form.t(), item :: FormNested.t()) :: Map.t()
defp form_to_params_nested(_form, item) do
sub_params = form_to_params(item.form)
sub_struct = item.form.new_struct
sub_params =
if Map.has_key?(sub_struct, :id) do
sub_params
|> Map.put("id", sub_struct.id |> to_string)
else
sub_params
end
{to_string(item.name), sub_params}
end
@spec form_to_params_collection(form :: Form.t(), item :: FormCollection.t()) :: Map.t()
defp form_to_params_collection(_form, item) do
new_val =
item.forms
|> Enum.with_index()
|> Enum.map(fn {nested_form, key} ->
sub_struct = nested_form.form.new_struct
subparams =
nested_form.form
|> form_to_params()
|> Map.put("id", sub_struct.id |> to_string)
|> Map.put("formex_id", sub_struct.formex_id)
|> Map.put(
to_string(item.delete_field),
sub_struct
|> Map.get(item.delete_field)
|> to_string
)
{key, subparams}
end)
|> Enum.into(%{})
{to_string(item.name), new_val}
end
@doc """
Generates all `formex_row/2`s at once
## Options
* `template` - a form template that implements `Formex.Template`, for example:
`Formex.Template.BootstrapHorizontal`
* `template_options` - additional options, supported by the template
"""
@spec formex_rows(Form.t(), Keyword.t()) :: Phoenix.HTML.safe()
def formex_rows(form, options \\ []) do
Enum.map(form.items, fn item ->
formex_row(form, item.name, options)
end)
end
@doc """
Generates a row
Example of use:
<%= formex_row f, :title %>
<%= formex_row f, :content %>
<%= formex_row f, :category_id %>
## Options
* `template` - a form template that implements `Formex.Template`, for example:
`Formex.Template.BootstrapHorizontal`
* `template_options` - additional options, supported by the template
"""
@spec formex_row(Form.t(), Atom.t(), Keyword.t()) :: Phoenix.HTML.safe()
def formex_row(form, item_name, options \\ []) do
item = get_item(form, item_name)
template = get_template(form, options)
template_options = get_template_options(form, options)
case item do
%Field{} ->
template.generate_row(form, item, template_options)
%Button{} ->
template.generate_row(form, item, template_options)
%FormNested{} ->
Formex.View.Nested.formex_nested(form, item_name, options)
%FormCollection{} ->
Formex.View.Collection.formex_collection(form, item_name, options)
end
end
@spec formex_input(Form.t(), Atom.t(), Keyword.t()) :: Phoenix.HTML.safe()
def formex_input(form, item_name, options \\ []) do
item = get_item(form, item_name)
template = get_template(form, options)
template.generate_input(form, item)
end
@spec formex_label(Form.t(), Atom.t(), Keyword.t()) :: Phoenix.HTML.safe()
def formex_label(form, item_name, options \\ []) do
item = get_item(form, item_name)
template = get_template(form, options)
    class = options[:class] || ""
template.generate_label(form, item, class)
end
def get_template(form, row_options) do
row_options[:template] || form.template || Application.get_env(:formex, :template) ||
Formex.Template.BootstrapVertical
end
def get_template_options(form, row_options) do
[]
|> Keyword.merge(Application.get_env(:formex, :template_options) || [])
|> Keyword.merge(form.template_options || [])
|> Keyword.merge(row_options[:template_options] || [])
end
defp get_item(form, item_name) do
item = Enum.find(form.items, &(&1.name == item_name))
if !item do
throw("Key :" <> to_string(item_name) <> " not found in form " <> to_string(form.type))
end
item
end
end
|
lib/formex/view.ex
| 0.765067
| 0.501221
|
view.ex
|
starcoder
|
defmodule AWS.GameLift do
@moduledoc """
Amazon GameLift Service
Amazon GameLift is a managed service for developers who need a scalable,
dedicated server solution for their multiplayer games. Use Amazon GameLift
for these tasks: (1) set up computing resources and deploy your game
servers, (2) run game sessions and get players into games, (3)
automatically scale your resources to meet player demand and manage costs,
and (4) track in-depth metrics on game server performance and player usage.
The Amazon GameLift service API includes two important function sets:
<ul> <li> **Manage game sessions and player access** -- Retrieve
information on available game sessions; create new game sessions; send
player requests to join a game session.
</li> <li> **Configure and manage game server resources** -- Manage builds,
fleets, queues, and aliases; set autoscaling policies; retrieve logs and
metrics.
</li> </ul> This reference guide describes the low-level service API for
Amazon GameLift. You can use the API functionality with these tools:
<ul> <li> The Amazon Web Services software development kit ([AWS
SDK](http://aws.amazon.com/tools/#sdk)) is available in [multiple
languages](http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-supported.html#gamelift-supported-clients)
including C++ and C#. Use the SDK to access the API programmatically from
an application, such as a game client.
</li> <li> The [AWS command-line interface](http://aws.amazon.com/cli/)
(CLI) tool is primarily useful for handling administrative actions, such as
setting up and managing Amazon GameLift settings and resources. You can use
the AWS CLI to manage all of your AWS services.
</li> <li> The [AWS Management
Console](https://console.aws.amazon.com/gamelift/home) for Amazon GameLift
provides a web interface to manage your Amazon GameLift settings and
resources. The console includes a dashboard for tracking key resources,
including builds and fleets, and displays usage and performance metrics for
your games as customizable graphs.
</li> <li> Amazon GameLift Local is a tool for testing your game's
integration with Amazon GameLift before deploying it on the service. This
tool supports a subset of key API actions, which can be called from either
the AWS CLI or programmatically. See [Testing an
Integration](http://docs.aws.amazon.com/gamelift/latest/developerguide/integration-testing-local.html).
</li> </ul> **Learn more**
<ul> <li> [ Developer
Guide](http://docs.aws.amazon.com/gamelift/latest/developerguide/) -- Read
about Amazon GameLift features and how to use them.
</li> <li> [Tutorials](https://gamedev.amazon.com/forums/tutorials) -- Get
started fast with walkthroughs and sample projects.
</li> <li> [GameDev Blog](http://aws.amazon.com/blogs/gamedev/) -- Stay up
to date with new features and techniques.
</li> <li> [GameDev
Forums](https://gamedev.amazon.com/forums/spaces/123/gamelift-discussion.html)
-- Connect with the GameDev community.
</li> <li> [Release
notes](http://aws.amazon.com/releasenotes/Amazon-GameLift/) and [document
history](http://docs.aws.amazon.com/gamelift/latest/developerguide/doc-history.html)
-- Stay current with updates to the Amazon GameLift service, SDKs, and
documentation.
</li> </ul> **API SUMMARY**
This list offers a functional overview of the Amazon GameLift service API.
**Managing Games and Players**
Use these actions to start new game sessions, find existing game sessions,
track game session status and other information, and enable player access
to game sessions.
<ul> <li> **Discover existing game sessions**
<ul> <li> `SearchGameSessions` -- Retrieve all available game sessions or
search for game sessions that match a set of criteria.
</li> </ul> </li> <li> **Start new game sessions**
<ul> <li> Start new games with Queues to find the best available hosting
resources across multiple regions, minimize player latency, and balance
game session activity for efficiency and cost effectiveness.
<ul> <li> `StartGameSessionPlacement` -- Request a new game session
placement and add one or more players to it.
</li> <li> `DescribeGameSessionPlacement` -- Get details on a placement
request, including status.
</li> <li> `StopGameSessionPlacement` -- Cancel a placement request.
</li> </ul> </li> <li> `CreateGameSession` -- Start a new game session on a
specific fleet. *Available in Amazon GameLift Local.*
</li> </ul> </li> <li> **Match players to game sessions with FlexMatch
matchmaking**
<ul> <li> `StartMatchmaking` -- Request matchmaking for one player or a
group of players who want to play together.
</li> <li> `StartMatchBackfill` - Request additional player matches to fill
empty slots in an existing game session.
</li> <li> `DescribeMatchmaking` -- Get details on a matchmaking request,
including status.
</li> <li> `AcceptMatch` -- Register that a player accepts a proposed
match, for matches that require player acceptance.
</li> <li> `StopMatchmaking` -- Cancel a matchmaking request.
</li> </ul> </li> <li> **Manage game session data**
<ul> <li> `DescribeGameSessions` -- Retrieve metadata for one or more game
sessions, including length of time active and current player count.
*Available in Amazon GameLift Local.*
</li> <li> `DescribeGameSessionDetails` -- Retrieve metadata and the game
session protection setting for one or more game sessions.
</li> <li> `UpdateGameSession` -- Change game session settings, such as
maximum player count and join policy.
</li> <li> `GetGameSessionLogUrl` -- Get the location of saved logs for a
game session.
</li> </ul> </li> <li> **Manage player sessions**
<ul> <li> `CreatePlayerSession` -- Send a request for a player to join a
game session. *Available in Amazon GameLift Local.*
</li> <li> `CreatePlayerSessions` -- Send a request for multiple players to
join a game session. *Available in Amazon GameLift Local.*
</li> <li> `DescribePlayerSessions` -- Get details on player activity,
including status, playing time, and player data. *Available in Amazon
GameLift Local.*
</li> </ul> </li> </ul> **Setting Up and Managing Game Servers**
When setting up Amazon GameLift resources for your game, you first [create
a game
build](http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
and upload it to Amazon GameLift. You can then use these actions to
configure and manage a fleet of resources to run your game servers, scale
capacity to meet player demand, access performance and utilization metrics,
and more.
<ul> <li> **Manage game builds**
<ul> <li> `CreateBuild` -- Create a new build using files stored in an
Amazon S3 bucket. To create a build and upload files from a local path, use
the AWS CLI command `upload-build`.
</li> <li> `ListBuilds` -- Get a list of all builds uploaded to an Amazon
GameLift region.
</li> <li> `DescribeBuild` -- Retrieve information associated with a build.
</li> <li> `UpdateBuild` -- Change build metadata, including build name and
version.
</li> <li> `DeleteBuild` -- Remove a build from Amazon GameLift.
</li> </ul> </li> <li> **Manage fleets**
<ul> <li> `CreateFleet` -- Configure and activate a new fleet to run a
build's game servers.
</li> <li> `ListFleets` -- Get a list of all fleet IDs in an Amazon GameLift
region (all statuses).
</li> <li> `DeleteFleet` -- Terminate a fleet that is no longer running
game servers or hosting players.
</li> <li> View / update fleet configurations.
<ul> <li> `DescribeFleetAttributes` / `UpdateFleetAttributes` -- View or
change a fleet's metadata and settings for game session protection and
resource creation limits.
</li> <li> `DescribeFleetPortSettings` / `UpdateFleetPortSettings` -- View
or change the inbound permissions (IP address and port setting ranges)
allowed for a fleet.
</li> <li> `DescribeRuntimeConfiguration` / `UpdateRuntimeConfiguration` --
View or change what server processes (and how many) to run on each instance
in a fleet.
</li> </ul> </li> </ul> </li> <li> **Control fleet capacity**
<ul> <li> `DescribeEC2InstanceLimits` -- Retrieve maximum number of
instances allowed for the current AWS account and the current usage level.
</li> <li> `DescribeFleetCapacity` / `UpdateFleetCapacity` -- Retrieve the
capacity settings and the current number of instances in a fleet; adjust
fleet capacity settings to scale up or down.
</li> <li> Autoscale -- Manage autoscaling rules and apply them to a fleet.
<ul> <li> `PutScalingPolicy` -- Create a new autoscaling policy, or update
an existing one.
</li> <li> `DescribeScalingPolicies` -- Retrieve an existing autoscaling
policy.
</li> <li> `DeleteScalingPolicy` -- Delete an autoscaling policy and stop
it from affecting a fleet's capacity.
</li> </ul> </li> </ul> </li> <li> **Manage VPC peering connections for
fleets**
<ul> <li> `CreateVpcPeeringAuthorization` -- Authorize a peering connection
to one of your VPCs.
</li> <li> `DescribeVpcPeeringAuthorizations` -- Retrieve valid peering
connection authorizations.
</li> <li> `DeleteVpcPeeringAuthorization` -- Delete a peering connection
authorization.
</li> <li> `CreateVpcPeeringConnection` -- Establish a peering connection
between the VPC for an Amazon GameLift fleet and one of your VPCs.
</li> <li> `DescribeVpcPeeringConnections` -- Retrieve information on
active or pending VPC peering connections with an Amazon GameLift fleet.
</li> <li> `DeleteVpcPeeringConnection` -- Delete a VPC peering connection
with an Amazon GameLift fleet.
</li> </ul> </li> <li> **Access fleet activity statistics**
<ul> <li> `DescribeFleetUtilization` -- Get current data on the number of
server processes, game sessions, and players currently active on a fleet.
</li> <li> `DescribeFleetEvents` -- Get a fleet's logged events for a
specified time span.
</li> <li> `DescribeGameSessions` -- Retrieve metadata associated with one
or more game sessions, including length of time active and current player
count.
</li> </ul> </li> <li> **Remotely access an instance**
<ul> <li> `DescribeInstances` -- Get information on each instance in a
fleet, including instance ID, IP address, and status.
</li> <li> `GetInstanceAccess` -- Request access credentials needed to
remotely connect to a specified instance in a fleet.
</li> </ul> </li> <li> **Manage fleet aliases**
<ul> <li> `CreateAlias` -- Define a new alias and optionally assign it to a
fleet.
</li> <li> `ListAliases` -- Get all fleet aliases defined in an Amazon
GameLift region.
</li> <li> `DescribeAlias` -- Retrieve information on an existing alias.
</li> <li> `UpdateAlias` -- Change settings for an alias, such as
redirecting it from one fleet to another.
</li> <li> `DeleteAlias` -- Remove an alias from the region.
</li> <li> `ResolveAlias` -- Get the fleet ID that a specified alias points
to.
</li> </ul> </li> <li> **Manage game session queues**
<ul> <li> `CreateGameSessionQueue` -- Create a queue for processing
requests for new game sessions.
</li> <li> `DescribeGameSessionQueues` -- Retrieve game session queues
defined in an Amazon GameLift region.
</li> <li> `UpdateGameSessionQueue` -- Change the configuration of a game
session queue.
</li> <li> `DeleteGameSessionQueue` -- Remove a game session queue from the
region.
</li> </ul> </li> <li> **Manage FlexMatch resources**
<ul> <li> `CreateMatchmakingConfiguration` -- Create a matchmaking
configuration with instructions for building a player group and placing in
a new game session.
</li> <li> `DescribeMatchmakingConfigurations` -- Retrieve matchmaking
configurations defined in an Amazon GameLift region.
</li> <li> `UpdateMatchmakingConfiguration` -- Change settings for a
matchmaking configuration.
</li> <li> `DeleteMatchmakingConfiguration` -- Remove a matchmaking
configuration from the region.
</li> <li> `CreateMatchmakingRuleSet` -- Create a set of rules to use when
searching for player matches.
</li> <li> `DescribeMatchmakingRuleSets` -- Retrieve matchmaking rule sets
defined in an Amazon GameLift region.
</li> <li> `ValidateMatchmakingRuleSet` -- Verify syntax for a set of
matchmaking rules.
</li> </ul> </li> </ul>
"""
@doc """
Registers a player's acceptance or rejection of a proposed FlexMatch match.
A matchmaking configuration may require player acceptance; if so, then
matches built with that configuration cannot be completed unless all
players accept the proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the
proposed match are placed into status `REQUIRES_ACCEPTANCE`. This is a
trigger for your game to get acceptance from all players in the ticket.
Acceptances are only valid for tickets when they are in this status; all
other acceptances result in an error.
To register acceptance, specify the ticket ID, a response, and one or more
players. Once all players have registered acceptance, the matchmaking
tickets advance to status `PLACING`, where a new game session is created
for the match.
If any player rejects the match, or if acceptances are not received before
a specified timeout, the proposed match is dropped. The matchmaking tickets
are then handled in one of two ways: For tickets where all players accepted
the match, the ticket status is returned to `SEARCHING` to find a new
match. For tickets where one or more players failed to accept the match,
the ticket status is set to `FAILED`, and processing is terminated. A new
matchmaking request for these players can be submitted as needed.
Matchmaking-related operations include:
<ul> <li> `StartMatchmaking`
</li> <li> `DescribeMatchmaking`
</li> <li> `StopMatchmaking`
</li> <li> `AcceptMatch`
</li> <li> `StartMatchBackfill`
</li> </ul>
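**Example**
A minimal sketch (the `client` is a configured AWS client; the ticket and
player IDs are placeholders):
    AWS.GameLift.accept_match(client, %{
      "TicketId" => "ticket-1234",
      "PlayerIds" => ["player-1", "player-2"],
      "AcceptanceType" => "ACCEPT"
    })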
"""
def accept_match(client, input, options \\ []) do
request(client, "AcceptMatch", input, options)
end
@doc """
Creates an alias for a fleet. In most situations, you can use an alias ID
in place of a fleet ID. By using a fleet alias instead of a specific fleet
ID, you can switch gameplay and players to a new fleet without changing
your game client or other game components. For example, for games in
production, using an alias allows you to seamlessly redirect your player
base to a new game server update.
Amazon GameLift supports two types of routing strategies for aliases:
simple and terminal. A simple alias points to an active fleet. A terminal
alias is used to display messaging or link to a URL instead of routing
players to an active fleet. For example, you might use a terminal alias
when a game version is no longer supported and you want to direct players
to an upgrade site.
To create a fleet alias, specify an alias name, routing strategy, and
optional description. Each simple alias can point to only one fleet, but a
fleet can have multiple aliases. If successful, a new alias record is
returned, including an alias ID, which you can reference when creating a
game session. You can reassign an alias to another fleet by calling
`UpdateAlias`.
Alias-related operations include:
<ul> <li> `CreateAlias`
</li> <li> `ListAliases`
</li> <li> `DescribeAlias`
</li> <li> `UpdateAlias`
</li> <li> `DeleteAlias`
</li> <li> `ResolveAlias`
</li> </ul>
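**Example**
A minimal sketch of a simple alias (the fleet ID is a placeholder):
    AWS.GameLift.create_alias(client, %{
      "Name" => "my-alias",
      "RoutingStrategy" => %{"Type" => "SIMPLE", "FleetId" => "fleet-1234"}
    })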
"""
def create_alias(client, input, options \\ []) do
request(client, "CreateAlias", input, options)
end
@doc """
Creates a new Amazon GameLift build record for your game server binary
files and points to the location of your game server build files in an
Amazon Simple Storage Service (Amazon S3) location.
Game server binaries must be combined into a `.zip` file for use with
Amazon GameLift. See [Uploading Your
Game](http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html)
for more information.
<important> To create new builds quickly and easily, use the AWS CLI
command
**[upload-build](http://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html)**.
This helper command uploads your build and creates a new build record
in one step, and automatically handles the necessary permissions. See [
Upload Build Files to Amazon
GameLift](http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html)
for more help.
</important> The `CreateBuild` operation should be used only when you need
to manually upload your build files, as in the following scenarios:
<ul> <li> Store a build file in an Amazon S3 bucket under your own AWS
account. To use this option, you must first give Amazon GameLift access to
that Amazon S3 bucket. See [ Create a Build with Files in Amazon
S3](http://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build)
for detailed help. To create a new build record using files in your Amazon
S3 bucket, call `CreateBuild` and specify a build name, operating system,
and the storage location of your game build.
</li> <li> Upload a build file directly to Amazon GameLift's Amazon S3
account. To use this option, you first call `CreateBuild` with a build name
and operating system. This action creates a new build record and returns an
Amazon S3 storage location (bucket and key only) and temporary access
credentials. Use the credentials to manually upload your build file to the
storage location (see the Amazon S3 topic [Uploading
Objects](http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html)).
You can upload files to a location only once.
</li> </ul> If successful, this operation creates a new build record with a
unique build ID and places it in `INITIALIZED` status. You can use
`DescribeBuild` to check the status of your build. A build must be in
`READY` status before it can be used to create fleets.
Build-related operations include:
<ul> <li> `CreateBuild`
</li> <li> `ListBuilds`
</li> <li> `DescribeBuild`
</li> <li> `UpdateBuild`
</li> <li> `DeleteBuild`
</li> </ul>
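**Example**
A sketch of the first option described above, using files in your own Amazon
S3 bucket (the bucket, key, and role ARN are placeholders):
    AWS.GameLift.create_build(client, %{
      "Name" => "my-build",
      "Version" => "1.0.0",
      "OperatingSystem" => "AMAZON_LINUX",
      "StorageLocation" => %{
        "Bucket" => "my-game-builds",
        "Key" => "builds/my-build.zip",
        "RoleArn" => "arn:aws:iam::123456789012:role/gamelift-s3-access"
      }
    })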
"""
def create_build(client, input, options \\ []) do
request(client, "CreateBuild", input, options)
end
@doc """
Creates a new fleet to run your game servers. A fleet is a set of Amazon
Elastic Compute Cloud (Amazon EC2) instances, each of which can run
multiple server processes to host game sessions. You set up a fleet to use
instances with certain hardware specifications (see [Amazon EC2 Instance
Types](http://aws.amazon.com/ec2/instance-types/) for more information),
and deploy your game build to run on each instance.
To create a new fleet, you must specify the following: (1) a fleet name,
(2) the build ID of a successfully uploaded game build, (3) an EC2 instance
type, and (4) a run-time configuration, which describes the server
processes to run on each instance in the fleet. If you don't specify a
fleet type (on-demand or spot), the new fleet uses on-demand instances by
default.
You can also configure the new fleet with the following settings:
<ul> <li> Fleet description
</li> <li> Access permissions for inbound traffic
</li> <li> Fleet-wide game session protection
</li> <li> Resource usage limits
</li> </ul> <ul> <li> VPC peering connection (see [VPC Peering with Amazon
GameLift
Fleets](http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html))
</li> </ul> If you use Amazon CloudWatch for metrics, you can add the new
fleet to a metric group. By adding multiple fleets to a metric group, you
can view aggregated metrics for all the fleets in the group.
If the `CreateFleet` call is successful, Amazon GameLift performs the
following tasks. You can track the process of a fleet by checking the fleet
status or by monitoring fleet creation events:
<ul> <li> Creates a fleet record. Status: `NEW`.
</li> <li> Begins writing events to the fleet event log, which can be
accessed in the Amazon GameLift console.
</li> <li> Sets the fleet's target capacity to 1 (desired instances), which
triggers Amazon GameLift to start one new EC2 instance.
</li> <li> Downloads the game build to the new instance and installs it.
Statuses: `DOWNLOADING`, `VALIDATING`, `BUILDING`.
</li> <li> Starts launching server processes on the instance. If the fleet
is configured to run multiple server processes per instance, Amazon
GameLift staggers each launch by a few seconds. Status: `ACTIVATING`.
</li> <li> Sets the fleet's status to `ACTIVE` as soon as one server
process is ready to host a game session.
</li> </ul> Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
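**Example**
A minimal sketch with the four required values (the build ID and launch path
are placeholders):
    AWS.GameLift.create_fleet(client, %{
      "Name" => "my-fleet",
      "BuildId" => "build-1234",
      "EC2InstanceType" => "c4.large",
      "RuntimeConfiguration" => %{
        "ServerProcesses" => [
          %{"LaunchPath" => "/local/game/my_server", "ConcurrentExecutions" => 1}
        ]
      }
    })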
"""
def create_fleet(client, input, options \\ []) do
request(client, "CreateFleet", input, options)
end
@doc """
Creates a multiplayer game session for players. This action creates a game
session record and assigns an available server process in the specified
fleet to host the game session. A fleet must have an `ACTIVE` status before
a game session can be created in it.
To create a game session, specify either fleet ID or alias ID and indicate
a maximum number of players to allow in the game session. You can also
provide a name and game-specific properties for this game session. If
successful, a `GameSession` object is returned containing the game session
properties and other settings you specified.
**Idempotency tokens.** You can add a token that uniquely identifies game
session requests. This is useful for ensuring that game session requests
are idempotent. Multiple requests with the same idempotency token are
processed only once; subsequent requests return the original result. All
response values are the same with the exception of game session status,
which may change.
**Resource creation limits.** If you are creating a game session on a fleet
with a resource creation limit policy in force, then you must specify a
creator ID. Without this ID, Amazon GameLift has no way to evaluate the
policy for this new game session request.
**Player acceptance policy.** By default, newly created game sessions are
open to new players. You can restrict new player access by using
`UpdateGameSession` to change the game session's player session creation
policy.
**Game session logs.** Logs are retained for all active game sessions for
14 days. To access the logs, call `GetGameSessionLogUrl` to download the
log files.
*Available in Amazon GameLift Local.*
Game-session-related operations include:
<ul> <li> `CreateGameSession`
</li> <li> `DescribeGameSessions`
</li> <li> `DescribeGameSessionDetails`
</li> <li> `SearchGameSessions`
</li> <li> `UpdateGameSession`
</li> <li> `GetGameSessionLogUrl`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
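**Example**
A minimal sketch (the fleet ID is a placeholder):
    AWS.GameLift.create_game_session(client, %{
      "FleetId" => "fleet-1234",
      "MaximumPlayerSessionCount" => 10,
      "Name" => "my-game-session"
    })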
"""
def create_game_session(client, input, options \\ []) do
request(client, "CreateGameSession", input, options)
end
@doc """
Establishes a new queue for processing requests to place new game sessions.
A queue identifies where new game sessions can be hosted -- by specifying a
list of destinations (fleets or aliases) -- and how long requests can wait
in the queue before timing out. You can set up a queue to try to place game
sessions on fleets in multiple regions. To add placement requests to a
queue, call `StartGameSessionPlacement` and reference the queue name.
**Destination order.** When processing a request for a game session, Amazon
GameLift tries each destination in order until it finds one with available
resources to host the new game session. A queue's default order is
determined by how destinations are listed. The default order is overridden
when a game session placement request provides player latency information.
Player latency information enables Amazon GameLift to prioritize
destinations where players report the lowest average latency, placing the
new game session where the majority of players will have the
best possible gameplay experience.
**Player latency policies.** For placement requests containing player
latency information, use player latency policies to protect individual
players from very high latencies. With a latency cap, even when a
destination can deliver a low latency for most players, the game is not
placed where any individual player is reporting latency higher than a
policy's maximum. A queue can have multiple latency policies, which are
enforced consecutively starting with the policy with the lowest latency
cap. Use multiple policies to gradually relax latency controls; for
example, you might set a policy with a low latency cap for the first 60
seconds, a second policy with a higher cap for the next 60 seconds, etc.
To create a new queue, provide a name, timeout value, a list of
destinations and, if desired, a set of latency policies. If successful, a
new queue object is returned.
Queue-related operations include:
<ul> <li> `CreateGameSessionQueue`
</li> <li> `DescribeGameSessionQueues`
</li> <li> `UpdateGameSessionQueue`
</li> <li> `DeleteGameSessionQueue`
</li> </ul>
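**Example**
A minimal sketch (the destination ARN is a placeholder):
    AWS.GameLift.create_game_session_queue(client, %{
      "Name" => "my-queue",
      "TimeoutInSeconds" => 600,
      "Destinations" => [
        %{"DestinationArn" => "arn:aws:gamelift:us-east-1::fleet/fleet-1234"}
      ]
    })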
"""
def create_game_session_queue(client, input, options \\ []) do
request(client, "CreateGameSessionQueue", input, options)
end
@doc """
Defines a new matchmaking configuration for use with FlexMatch. A
matchmaking configuration sets out guidelines for matching players and
getting the matches into games. You can set up multiple matchmaking
configurations to handle the scenarios needed for your game. Each
matchmaking ticket (`StartMatchmaking` or `StartMatchBackfill`) specifies a
configuration for the match and provides player attributes to support the
configuration being used.
To create a matchmaking configuration, at a minimum you must specify the
following: configuration name; a rule set that governs how to evaluate
players and find acceptable matches; a game session queue to use when
placing a new game session for the match; and the maximum time allowed for
a matchmaking attempt.
**Player acceptance** -- In each configuration, you have the option to
require that all players accept participation in a proposed match. To
enable this feature, set *AcceptanceRequired* to true and specify a time
limit for player acceptance. Players have the option to accept or reject a
proposed match, and a match does not move ahead to game session placement
unless all matched players accept.
**Matchmaking status notification** -- There are two ways to track the
progress of matchmaking tickets: (1) polling ticket status with
`DescribeMatchmaking`; or (2) receiving notifications with Amazon Simple
Notification Service (SNS). To use notifications, you first need to set up
an SNS topic to receive the notifications, and provide the topic ARN in the
matchmaking configuration (see [ Setting up Notifications for
Matchmaking](http://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html)).
Since notifications promise only "best effort" delivery, we recommend
calling `DescribeMatchmaking` if no notifications are received within 30
seconds.
Operations related to match configurations and rule sets include:
<ul> <li> `CreateMatchmakingConfiguration`
</li> <li> `DescribeMatchmakingConfigurations`
</li> <li> `UpdateMatchmakingConfiguration`
</li> <li> `DeleteMatchmakingConfiguration`
</li> <li> `CreateMatchmakingRuleSet`
</li> <li> `DescribeMatchmakingRuleSets`
</li> <li> `ValidateMatchmakingRuleSet`
</li> </ul>
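**Example**
A minimal sketch with the required values described above (the queue ARN and
rule set name are placeholders):
    AWS.GameLift.create_matchmaking_configuration(client, %{
      "Name" => "my-matchmaker",
      "RuleSetName" => "my-rule-set",
      "GameSessionQueueArns" => ["arn:aws:gamelift:us-east-1::gamesessionqueue/my-queue"],
      "RequestTimeoutSeconds" => 120,
      "AcceptanceRequired" => false
    })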
"""
def create_matchmaking_configuration(client, input, options \\ []) do
request(client, "CreateMatchmakingConfiguration", input, options)
end
@doc """
Creates a new rule set for FlexMatch matchmaking. A rule set describes the
type of match to create, such as the number and size of teams, and sets the
parameters for acceptable player matches, such as minimum skill level or
character type. Rule sets are used in matchmaking configurations, which
define how matchmaking requests are handled. Each
`MatchmakingConfiguration` uses one rule set; you can set up multiple rule
sets to handle the scenarios that suit your game (such as for different
game modes), and create a separate matchmaking configuration for each rule
set. See additional information on rule set content in the
`MatchmakingRuleSet` structure. For help creating rule sets, including
useful examples, see the topic [ Adding FlexMatch to Your
Game](http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html).
Once created, matchmaking rule sets cannot be changed or deleted, so we
recommend checking the rule set syntax using `ValidateMatchmakingRuleSet`
before creating the rule set.
To create a matchmaking rule set, provide the set of rules and a unique
name. Rule sets must be defined in the same region as the matchmaking
configuration they will be used with. Rule sets cannot be edited or
deleted. If you need to change a rule set, create a new one with the
necessary edits and then update matchmaking configurations to use the new
rule set.
Operations related to match configurations and rule sets include:
<ul> <li> `CreateMatchmakingConfiguration`
</li> <li> `DescribeMatchmakingConfigurations`
</li> <li> `UpdateMatchmakingConfiguration`
</li> <li> `DeleteMatchmakingConfiguration`
</li> <li> `CreateMatchmakingRuleSet`
</li> <li> `DescribeMatchmakingRuleSets`
</li> <li> `ValidateMatchmakingRuleSet`
</li> </ul>
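**Example**
A minimal sketch, where `rule_set_json` is assumed to be a string holding
the JSON rule set document:
    AWS.GameLift.create_matchmaking_rule_set(client, %{
      "Name" => "my-rule-set",
      "RuleSetBody" => rule_set_json
    })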
"""
def create_matchmaking_rule_set(client, input, options \\ []) do
request(client, "CreateMatchmakingRuleSet", input, options)
end
@doc """
Adds a player to a game session and creates a player session record. Before
a player can be added, a game session must have an `ACTIVE` status, have a
creation policy of `ALLOW_ALL`, and have an open player slot. To add a
group of players to a game session, use `CreatePlayerSessions`.
To create a player session, specify a game session ID, player ID, and
optionally a string of player data. If successful, the player is added to
the game session and a new `PlayerSession` object is returned. Player
sessions cannot be updated.
*Available in Amazon GameLift Local.*
Player-session-related operations include:
<ul> <li> `CreatePlayerSession`
</li> <li> `CreatePlayerSessions`
</li> <li> `DescribePlayerSessions`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
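**Example**
A minimal sketch (the game session and player IDs are placeholders):
    AWS.GameLift.create_player_session(client, %{
      "GameSessionId" => "gsess-1234",
      "PlayerId" => "player-1"
    })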
"""
def create_player_session(client, input, options \\ []) do
request(client, "CreatePlayerSession", input, options)
end
@doc """
Adds a group of players to a game session. This action is useful with a
team matching feature. Before players can be added, a game session must
have an `ACTIVE` status, have a creation policy of `ALLOW_ALL`, and have an
open player slot. To add a single player to a game session, use
`CreatePlayerSession`.
To create player sessions, specify a game session ID, a list of player IDs,
and optionally a set of player data strings. If successful, the players are
added to the game session and a set of new `PlayerSession` objects is
returned. Player sessions cannot be updated.
*Available in Amazon GameLift Local.*
Player-session-related operations include:
<ul> <li> `CreatePlayerSession`
</li> <li> `CreatePlayerSessions`
</li> <li> `DescribePlayerSessions`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
"""
def create_player_sessions(client, input, options \\ []) do
request(client, "CreatePlayerSessions", input, options)
end
@doc """
Requests authorization to create or delete a peer connection between the
VPC for your Amazon GameLift fleet and a virtual private cloud (VPC) in
your AWS account. VPC peering enables the game servers on your fleet to
communicate directly with other AWS resources. Once you've received
authorization, call `CreateVpcPeeringConnection` to establish the peering
connection. For more information, see [VPC Peering with Amazon GameLift
Fleets](http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
You can peer with VPCs that are owned by any AWS account you have access
to, including the account that you use to manage your Amazon GameLift
fleets. You cannot peer with VPCs that are in different regions.
To request authorization to create a connection, call this operation from
the AWS account with the VPC that you want to peer to your Amazon GameLift
fleet. For example, to enable your game servers to retrieve data from a
DynamoDB table, use the account that manages that DynamoDB resource.
Identify the following values: (1) The ID of the VPC that you want to peer
with, and (2) the ID of the AWS account that you use to manage Amazon
GameLift. If successful, VPC peering is authorized for the specified VPC.
To request authorization to delete a connection, call this operation from
the AWS account with the VPC that is peered with your Amazon GameLift
fleet. Identify the following values: (1) VPC ID that you want to delete
the peering connection for, and (2) ID of the AWS account that you use to
manage Amazon GameLift.
The authorization remains valid for 24 hours unless it is canceled by a
call to `DeleteVpcPeeringAuthorization`. You must create or delete the
peering connection while the authorization is valid.
VPC peering connection operations include:
<ul> <li> `CreateVpcPeeringAuthorization`
</li> <li> `DescribeVpcPeeringAuthorizations`
</li> <li> `DeleteVpcPeeringAuthorization`
</li> <li> `CreateVpcPeeringConnection`
</li> <li> `DescribeVpcPeeringConnections`
</li> <li> `DeleteVpcPeeringConnection`
</li> </ul>
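**Example**
A minimal sketch (the account and VPC IDs are placeholders):
    AWS.GameLift.create_vpc_peering_authorization(client, %{
      "GameLiftAwsAccountId" => "123456789012",
      "PeerVpcId" => "vpc-1234abcd"
    })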
"""
def create_vpc_peering_authorization(client, input, options \\ []) do
request(client, "CreateVpcPeeringAuthorization", input, options)
end
@doc """
Establishes a VPC peering connection between a virtual private cloud (VPC)
in an AWS account with the VPC for your Amazon GameLift fleet. VPC peering
enables the game servers on your fleet to communicate directly with other
AWS resources. You can peer with VPCs in any AWS account that you have
access to, including the account that you use to manage your Amazon
GameLift fleets. You cannot peer with VPCs that are in different regions.
For more information, see [VPC Peering with Amazon GameLift
Fleets](http://docs.aws.amazon.com/gamelift/latest/developerguide/vpc-peering.html).
Before calling this operation to establish the peering connection, you
first need to call `CreateVpcPeeringAuthorization` and identify the VPC you
want to peer with. Once the authorization for the specified VPC is issued,
you have 24 hours to establish the connection. These two operations handle
all tasks necessary to peer the two VPCs, including acceptance, updating
routing tables, etc.
To establish the connection, call this operation from the AWS account that
is used to manage the Amazon GameLift fleets. Identify the following
values: (1) The ID of the fleet you want to enable a VPC peering
connection for; (2) The AWS account with the VPC that you want to peer
with; and (3) The ID of the VPC you want to peer with. This operation is
asynchronous. If successful, a `VpcPeeringConnection` request is created.
You can use continuous polling to track the request's status using
`DescribeVpcPeeringConnections`, or by monitoring fleet events for success
or failure using `DescribeFleetEvents`.
VPC peering connection operations include:
<ul> <li> `CreateVpcPeeringAuthorization`
</li> <li> `DescribeVpcPeeringAuthorizations`
</li> <li> `DeleteVpcPeeringAuthorization`
</li> <li> `CreateVpcPeeringConnection`
</li> <li> `DescribeVpcPeeringConnections`
</li> <li> `DeleteVpcPeeringConnection`
</li> </ul>
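**Example**
A minimal sketch with the three values listed above (all IDs are
placeholders):
    AWS.GameLift.create_vpc_peering_connection(client, %{
      "FleetId" => "fleet-1234",
      "PeerVpcAwsAccountId" => "123456789012",
      "PeerVpcId" => "vpc-1234abcd"
    })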
"""
def create_vpc_peering_connection(client, input, options \\ []) do
request(client, "CreateVpcPeeringConnection", input, options)
end
@doc """
Deletes an alias. This action removes all record of the alias. Game clients
attempting to access a server process using the deleted alias receive an
error. To delete an alias, specify the alias ID to be deleted.
Alias-related operations include:
<ul> <li> `CreateAlias`
</li> <li> `ListAliases`
</li> <li> `DescribeAlias`
</li> <li> `UpdateAlias`
</li> <li> `DeleteAlias`
</li> <li> `ResolveAlias`
</li> </ul>
"""
def delete_alias(client, input, options \\ []) do
request(client, "DeleteAlias", input, options)
end
@doc """
Deletes a build. This action permanently deletes the build record and any
uploaded build files.
To delete a build, specify its ID. Deleting a build does not affect the
status of any active fleets using the build, but you can no longer create
new fleets with the deleted build.
Build-related operations include:
<ul> <li> `CreateBuild`
</li> <li> `ListBuilds`
</li> <li> `DescribeBuild`
</li> <li> `UpdateBuild`
</li> <li> `DeleteBuild`
</li> </ul>
"""
def delete_build(client, input, options \\ []) do
request(client, "DeleteBuild", input, options)
end
@doc """
Deletes everything related to a fleet. Before deleting a fleet, you must
set the fleet's desired capacity to zero. See `UpdateFleetCapacity`.
This action removes the fleet's resources and the fleet record. Once a
fleet is deleted, you can no longer use that fleet.
Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def delete_fleet(client, input, options \\ []) do
request(client, "DeleteFleet", input, options)
end
@doc """
Deletes a game session queue. This action means that any
`StartGameSessionPlacement` requests that reference this queue will fail.
To delete a queue, specify the queue name.
Queue-related operations include:
<ul> <li> `CreateGameSessionQueue`
</li> <li> `DescribeGameSessionQueues`
</li> <li> `UpdateGameSessionQueue`
</li> <li> `DeleteGameSessionQueue`
</li> </ul>
"""
def delete_game_session_queue(client, input, options \\ []) do
request(client, "DeleteGameSessionQueue", input, options)
end
@doc """
Permanently removes a FlexMatch matchmaking configuration. To delete,
specify the configuration name. A matchmaking configuration cannot be
deleted if it is being used in any active matchmaking tickets.
Operations related to match configurations and rule sets include:
<ul> <li> `CreateMatchmakingConfiguration`
</li> <li> `DescribeMatchmakingConfigurations`
</li> <li> `UpdateMatchmakingConfiguration`
</li> <li> `DeleteMatchmakingConfiguration`
</li> <li> `CreateMatchmakingRuleSet`
</li> <li> `DescribeMatchmakingRuleSets`
</li> <li> `ValidateMatchmakingRuleSet`
</li> </ul>
"""
def delete_matchmaking_configuration(client, input, options \\ []) do
request(client, "DeleteMatchmakingConfiguration", input, options)
end
@doc """
Deletes a fleet scaling policy. This action means that the policy is no
longer in force and removes all record of it. To delete a scaling policy,
specify both the scaling policy name and the fleet ID it is associated
with.
Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def delete_scaling_policy(client, input, options \\ []) do
request(client, "DeleteScalingPolicy", input, options)
end
@doc """
Cancels a pending VPC peering authorization for the specified VPC. If the
authorization has already been used to create a peering connection, call
`DeleteVpcPeeringConnection` to remove the connection.
VPC peering connection operations include:
<ul> <li> `CreateVpcPeeringAuthorization`
</li> <li> `DescribeVpcPeeringAuthorizations`
</li> <li> `DeleteVpcPeeringAuthorization`
</li> <li> `CreateVpcPeeringConnection`
</li> <li> `DescribeVpcPeeringConnections`
</li> <li> `DeleteVpcPeeringConnection`
</li> </ul>
"""
def delete_vpc_peering_authorization(client, input, options \\ []) do
request(client, "DeleteVpcPeeringAuthorization", input, options)
end
@doc """
Removes a VPC peering connection. To delete the connection, you must have a
valid authorization for the VPC peering connection that you want to delete.
You can check for an authorization by calling
`DescribeVpcPeeringAuthorizations` or request a new one using
`CreateVpcPeeringAuthorization`.
Once a valid authorization exists, call this operation from the AWS account
that is used to manage the Amazon GameLift fleets. Identify the connection
to delete by the connection ID and fleet ID. If successful, the connection
is removed.
VPC peering connection operations include:
<ul> <li> `CreateVpcPeeringAuthorization`
</li> <li> `DescribeVpcPeeringAuthorizations`
</li> <li> `DeleteVpcPeeringAuthorization`
</li> <li> `CreateVpcPeeringConnection`
</li> <li> `DescribeVpcPeeringConnections`
</li> <li> `DeleteVpcPeeringConnection`
</li> </ul>
"""
def delete_vpc_peering_connection(client, input, options \\ []) do
request(client, "DeleteVpcPeeringConnection", input, options)
end
@doc """
Retrieves properties for an alias. This operation returns all alias
metadata and settings. To get an alias's target fleet ID only, use
`ResolveAlias`.
To get alias properties, specify the alias ID. If successful, the requested
alias record is returned.
Alias-related operations include:
<ul> <li> `CreateAlias`
</li> <li> `ListAliases`
</li> <li> `DescribeAlias`
</li> <li> `UpdateAlias`
</li> <li> `DeleteAlias`
</li> <li> `ResolveAlias`
</li> </ul>
"""
def describe_alias(client, input, options \\ []) do
request(client, "DescribeAlias", input, options)
end
@doc """
Retrieves properties for a build. To request a build record, specify a
build ID. If successful, an object containing the build properties is
returned.
Build-related operations include:
<ul> <li> `CreateBuild`
</li> <li> `ListBuilds`
</li> <li> `DescribeBuild`
</li> <li> `UpdateBuild`
</li> <li> `DeleteBuild`
</li> </ul>
"""
def describe_build(client, input, options \\ []) do
request(client, "DescribeBuild", input, options)
end
@doc """
Retrieves the following information for the specified EC2 instance type:
<ul> <li> maximum number of instances allowed per AWS account (service
limit)
</li> <li> current usage level for the AWS account
</li> </ul> Service limits vary depending on region. Available regions for
Amazon GameLift can be found in the AWS Management Console for Amazon
GameLift (see the drop-down list in the upper right corner).
Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_e_c2_instance_limits(client, input, options \\ []) do
request(client, "DescribeEC2InstanceLimits", input, options)
end
@doc """
Retrieves fleet properties, including metadata, status, and configuration,
for one or more fleets. You can request attributes for all fleets, or
specify a list of one or more fleet IDs. When requesting multiple fleets,
use the pagination parameters to retrieve results as a set of sequential
pages. If successful, a `FleetAttributes` object is returned for each
requested fleet ID. When specifying a list of fleet IDs, attribute objects
are returned only for fleets that currently exist.
<note> Some API actions may limit the number of fleet IDs allowed in one
request. If a request exceeds this limit, the request fails and the error
message includes the maximum allowed.
</note> Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_fleet_attributes(client, input, options \\ []) do
request(client, "DescribeFleetAttributes", input, options)
end
@doc """
Retrieves the current status of fleet capacity for one or more fleets. This
information includes the number of instances that have been requested for
the fleet and the number currently active. You can request capacity for all
fleets, or specify a list of one or more fleet IDs. When requesting
multiple fleets, use the pagination parameters to retrieve results as a set
of sequential pages. If successful, a `FleetCapacity` object is returned
for each requested fleet ID. When specifying a list of fleet IDs, attribute
objects are returned only for fleets that currently exist.
<note> Some API actions may limit the number of fleet IDs allowed in one
request. If a request exceeds this limit, the request fails and the error
message includes the maximum allowed.
</note> Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_fleet_capacity(client, input, options \\ []) do
request(client, "DescribeFleetCapacity", input, options)
end
@doc """
Retrieves entries from the specified fleet's event log. You can specify a
time range to limit the result set. Use the pagination parameters to
retrieve results as a set of sequential pages. If successful, a collection
of event log entries matching the request are returned.
Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_fleet_events(client, input, options \\ []) do
request(client, "DescribeFleetEvents", input, options)
end
@doc """
Retrieves the inbound connection permissions for a fleet. Connection
permissions include a range of IP addresses and port settings that incoming
traffic can use to access server processes in the fleet. To get a fleet's
inbound connection permissions, specify a fleet ID. If successful, a
collection of `IpPermission` objects is returned for the requested fleet
ID. If the requested fleet has been deleted, the result set is empty.
Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_fleet_port_settings(client, input, options \\ []) do
request(client, "DescribeFleetPortSettings", input, options)
end
@doc """
Retrieves utilization statistics for one or more fleets. You can request
utilization data for all fleets, or specify a list of one or more fleet
IDs. When requesting multiple fleets, use the pagination parameters to
retrieve results as a set of sequential pages. If successful, a
`FleetUtilization` object is returned for each requested fleet ID. When
specifying a list of fleet IDs, utilization objects are returned only for
fleets that currently exist.
<note> Some API actions may limit the number of fleet IDs allowed in one
request. If a request exceeds this limit, the request fails and the error
message includes the maximum allowed.
</note> Fleet-related operations include:
<ul> <li> `CreateFleet`
</li> <li> `ListFleets`
</li> <li> Describe fleets:
<ul> <li> `DescribeFleetAttributes`
</li> <li> `DescribeFleetPortSettings`
</li> <li> `DescribeFleetUtilization`
</li> <li> `DescribeRuntimeConfiguration`
</li> <li> `DescribeFleetEvents`
</li> </ul> </li> <li> Update fleets:
<ul> <li> `UpdateFleetAttributes`
</li> <li> `UpdateFleetCapacity`
</li> <li> `UpdateFleetPortSettings`
</li> <li> `UpdateRuntimeConfiguration`
</li> </ul> </li> <li> Manage fleet capacity:
<ul> <li> `DescribeFleetCapacity`
</li> <li> `UpdateFleetCapacity`
</li> <li> `PutScalingPolicy` (automatic scaling)
</li> <li> `DescribeScalingPolicies` (automatic scaling)
</li> <li> `DeleteScalingPolicy` (automatic scaling)
</li> <li> `DescribeEC2InstanceLimits`
</li> </ul> </li> <li> `DeleteFleet`
</li> </ul>
"""
def describe_fleet_utilization(client, input, options \\ []) do
request(client, "DescribeFleetUtilization", input, options)
end
@doc """
Retrieves properties, including the protection policy in force, for one or
more game sessions. This action can be used in several ways: (1) provide a
`GameSessionId` or `GameSessionArn` to request details for a specific game
session; (2) provide either a `FleetId` or an `AliasId` to request
properties for all game sessions running on a fleet.
To get game session record(s), specify just one of the following: game
session ID, fleet ID, or alias ID. You can filter this request by game
session status. Use the pagination parameters to retrieve results as a set
of sequential pages. If successful, a `GameSessionDetail` object is
returned for each session matching the request.
Game-session-related operations include:
<ul> <li> `CreateGameSession`
</li> <li> `DescribeGameSessions`
</li> <li> `DescribeGameSessionDetails`
</li> <li> `SearchGameSessions`
</li> <li> `UpdateGameSession`
</li> <li> `GetGameSessionLogUrl`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
"""
def describe_game_session_details(client, input, options \\ []) do
request(client, "DescribeGameSessionDetails", input, options)
end
@doc """
Retrieves properties and current status of a game session placement
request. To get game session placement details, specify the placement ID.
If successful, a `GameSessionPlacement` object is returned.
Game-session-related operations include:
<ul> <li> `CreateGameSession`
</li> <li> `DescribeGameSessions`
</li> <li> `DescribeGameSessionDetails`
</li> <li> `SearchGameSessions`
</li> <li> `UpdateGameSession`
</li> <li> `GetGameSessionLogUrl`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
"""
def describe_game_session_placement(client, input, options \\ []) do
request(client, "DescribeGameSessionPlacement", input, options)
end
@doc """
Retrieves the properties for one or more game session queues. When
requesting multiple queues, use the pagination parameters to retrieve
results as a set of sequential pages. If successful, a `GameSessionQueue`
object is returned for each requested queue. When specifying a list of
queues, objects are returned only for queues that currently exist in the
region.
Queue-related operations include:
<ul> <li> `CreateGameSessionQueue`
</li> <li> `DescribeGameSessionQueues`
</li> <li> `UpdateGameSessionQueue`
</li> <li> `DeleteGameSessionQueue`
</li> </ul>
"""
def describe_game_session_queues(client, input, options \\ []) do
request(client, "DescribeGameSessionQueues", input, options)
end
@doc """
Retrieves a set of one or more game sessions. Request a specific game
session or request all game sessions on a fleet. Alternatively, use
`SearchGameSessions` to request a set of active game sessions that are
filtered by certain criteria. To retrieve protection policy settings for
game sessions, use `DescribeGameSessionDetails`.
To get game sessions, specify one of the following: game session ID, fleet
ID, or alias ID. You can filter this request by game session status. Use
the pagination parameters to retrieve results as a set of sequential pages.
If successful, a `GameSession` object is returned for each game session
matching the request.
*Available in Amazon GameLift Local.*
Game-session-related operations include:
<ul> <li> `CreateGameSession`
</li> <li> `DescribeGameSessions`
</li> <li> `DescribeGameSessionDetails`
</li> <li> `SearchGameSessions`
</li> <li> `UpdateGameSession`
</li> <li> `GetGameSessionLogUrl`
</li> <li> Game session placements
<ul> <li> `StartGameSessionPlacement`
</li> <li> `DescribeGameSessionPlacement`
</li> <li> `StopGameSessionPlacement`
</li> </ul> </li> </ul>
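**Example**
A minimal sketch requesting all active game sessions on one fleet (the fleet
ID is a placeholder):
    AWS.GameLift.describe_game_sessions(client, %{
      "FleetId" => "fleet-1234",
      "StatusFilter" => "ACTIVE"
    })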
"""
def describe_game_sessions(client, input, options \\ []) do
request(client, "DescribeGameSessions", input, options)
end
@doc """
Retrieves information about a fleet's instances, including instance IDs.
Use this action to get details on all instances in the fleet or get details
on one specific instance.
To get a specific instance, specify fleet ID and instance ID. To get all
instances in a fleet, specify a fleet ID only. Use the pagination
parameters to retrieve results as a set of sequential pages. If successful,
an `Instance` object is returned for each result.
"""
def describe_instances(client, input, options \\ []) do
request(client, "DescribeInstances", input, options)
end
@doc """
Retrieves one or more matchmaking tickets. Use this operation to retrieve
ticket information, including status and--once a successful match is
made--connection information for the resulting new game session.
You can use this operation to track the progress of matchmaking requests
(through polling) as an alternative to using event notifications. See more
details on tracking matchmaking requests through polling or notifications
in `StartMatchmaking`.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If
the request is successful, a ticket object is returned for each requested
ID that currently exists.
Matchmaking-related operations include:
<ul> <li> `StartMatchmaking`
</li> <li> `DescribeMatchmaking`
</li> <li> `StopMatchmaking`
</li> <li> `AcceptMatch`
</li> <li> `StartMatchBackfill`
</li> </ul>
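
## Example

A minimal sketch; `client` is the AWS client map used throughout this
module and the ticket IDs are illustrative:

    input = %{"TicketIds" => ["ticket-1", "ticket-2"]}
    describe_matchmaking(client, input)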
"""
def describe_matchmaking(client, input, options \\ []) do
request(client, "DescribeMatchmaking", input, options)
end
@doc """
Retrieves the details of FlexMatch matchmaking configurations. With this
operation, you have the following options: (1) retrieve all existing
configurations, (2) provide the names of one or more configurations to
retrieve, or (3) retrieve all configurations that use a specified rule set
name. When requesting multiple items, use the pagination parameters to
retrieve results as a set of sequential pages. If successful, a
configuration is returned for each requested name. When specifying a list
of names, only configurations that currently exist are returned.

Operations related to match configurations and rule sets include:

  * `CreateMatchmakingConfiguration`
  * `DescribeMatchmakingConfigurations`
  * `UpdateMatchmakingConfiguration`
  * `DeleteMatchmakingConfiguration`
  * `CreateMatchmakingRuleSet`
  * `DescribeMatchmakingRuleSets`
  * `ValidateMatchmakingRuleSet`
"""
def describe_matchmaking_configurations(client, input, options \\ []) do
request(client, "DescribeMatchmakingConfigurations", input, options)
end
@doc """
Retrieves the details for FlexMatch matchmaking rule sets. You can request
all existing rule sets for the region, or provide a list of one or more
rule set names. When requesting multiple items, use the pagination
parameters to retrieve results as a set of sequential pages. If successful,
a rule set is returned for each requested name.

Operations related to match configurations and rule sets include:

  * `CreateMatchmakingConfiguration`
  * `DescribeMatchmakingConfigurations`
  * `UpdateMatchmakingConfiguration`
  * `DeleteMatchmakingConfiguration`
  * `CreateMatchmakingRuleSet`
  * `DescribeMatchmakingRuleSets`
  * `ValidateMatchmakingRuleSet`
"""
def describe_matchmaking_rule_sets(client, input, options \\ []) do
request(client, "DescribeMatchmakingRuleSets", input, options)
end
@doc """
Retrieves properties for one or more player sessions. This action can be
used in several ways: (1) provide a `PlayerSessionId` to request properties
for a specific player session; (2) provide a `GameSessionId` to request
properties for all player sessions in the specified game session; (3)
provide a `PlayerId` to request properties for all player sessions of a
specified player.

To get player session records, specify only one of the following: a player
session ID, a game session ID, or a player ID. You can filter this request
by player session status. Use the pagination parameters to retrieve results
as a set of sequential pages. If successful, a `PlayerSession` object is
returned for each session matching the request.

*Available in Amazon GameLift Local.*

Player-session-related operations include:

  * `CreatePlayerSession`
  * `CreatePlayerSessions`
  * `DescribePlayerSessions`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
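
## Example

A minimal sketch requesting all player sessions for one game session; the
game session ID is illustrative:

    input = %{"GameSessionId" => "my-game-session-id"}
    describe_player_sessions(client, input)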
"""
def describe_player_sessions(client, input, options \\ []) do
request(client, "DescribePlayerSessions", input, options)
end
@doc """
Retrieves the current run-time configuration for the specified fleet. The
run-time configuration tells Amazon GameLift how to launch server processes
on instances in the fleet.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def describe_runtime_configuration(client, input, options \\ []) do
request(client, "DescribeRuntimeConfiguration", input, options)
end
@doc """
Retrieves all scaling policies applied to a fleet.
To get a fleet's scaling policies, specify the fleet ID. You can filter
this request by policy status, such as to retrieve only active scaling
policies. Use the pagination parameters to retrieve results as a set of
sequential pages. If successful, a set of `ScalingPolicy` objects is
returned for the fleet.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def describe_scaling_policies(client, input, options \\ []) do
request(client, "DescribeScalingPolicies", input, options)
end
@doc """
Retrieves valid VPC peering authorizations that are pending for the AWS
account. This operation returns all VPC peering authorizations and requests
for peering. This includes those initiated and received by this account.

VPC peering connection operations include:

  * `CreateVpcPeeringAuthorization`
  * `DescribeVpcPeeringAuthorizations`
  * `DeleteVpcPeeringAuthorization`
  * `CreateVpcPeeringConnection`
  * `DescribeVpcPeeringConnections`
  * `DeleteVpcPeeringConnection`
"""
def describe_vpc_peering_authorizations(client, input, options \\ []) do
request(client, "DescribeVpcPeeringAuthorizations", input, options)
end
@doc """
Retrieves information on VPC peering connections. Use this operation to get
peering information for all fleets or for one specific fleet ID.
To retrieve connection information, call this operation from the AWS
account that is used to manage the Amazon GameLift fleets. Specify a fleet
ID or leave the parameter empty to retrieve all connection records. If
successful, the retrieved information includes both active and pending
connections. Active connections identify the IpV4 CIDR block that the VPC
uses to connect.

VPC peering connection operations include:

  * `CreateVpcPeeringAuthorization`
  * `DescribeVpcPeeringAuthorizations`
  * `DeleteVpcPeeringAuthorization`
  * `CreateVpcPeeringConnection`
  * `DescribeVpcPeeringConnections`
  * `DeleteVpcPeeringConnection`
"""
def describe_vpc_peering_connections(client, input, options \\ []) do
request(client, "DescribeVpcPeeringConnections", input, options)
end
@doc """
Retrieves the location of stored game session logs for a specified game
session. When a game session is terminated, Amazon GameLift automatically
stores the logs in Amazon S3 and retains them for 14 days. Use this URL to
download the logs.

> **Note:** See the [AWS Service
> Limits](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift)
> page for maximum log file sizes. Log files that exceed this limit are not
> saved.

Game-session-related operations include:

  * `CreateGameSession`
  * `DescribeGameSessions`
  * `DescribeGameSessionDetails`
  * `SearchGameSessions`
  * `UpdateGameSession`
  * `GetGameSessionLogUrl`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
"""
def get_game_session_log_url(client, input, options \\ []) do
request(client, "GetGameSessionLogUrl", input, options)
end
@doc """
Requests remote access to a fleet instance. Remote access is useful for
debugging, gathering benchmarking data, or watching activity in real time.
Access requires credentials that match the operating system of the
instance. For a Windows instance, Amazon GameLift returns a user name and
password as strings for use with a Windows Remote Desktop client. For a
Linux instance, Amazon GameLift returns a user name and RSA private key,
also as strings, for use with an SSH client. The private key must be saved
in the proper format to a `.pem` file before use. If you're making this
request using the AWS CLI, saving the secret can be handled as part of the
`GetInstanceAccess` request. (See the example later in this topic.) For more
information on remote access, see [Remotely Accessing an
Instance](http://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html).

To request access to a specific instance, specify the IDs of the instance
and the fleet it belongs to. If successful, an `InstanceAccess` object is
returned containing the instance's IP address and a set of credentials.
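
## Example

A minimal sketch; both IDs are illustrative:

    input = %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "InstanceId" => "i-0123456789abcdef0"
    }
    get_instance_access(client, input)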
"""
def get_instance_access(client, input, options \\ []) do
request(client, "GetInstanceAccess", input, options)
end
@doc """
Retrieves all aliases for this AWS account. You can filter the result set
by alias name and/or routing strategy type. Use the pagination parameters
to retrieve results in sequential pages.

> **Note:** Returned aliases are not listed in any particular order.

Alias-related operations include:

  * `CreateAlias`
  * `ListAliases`
  * `DescribeAlias`
  * `UpdateAlias`
  * `DeleteAlias`
  * `ResolveAlias`
"""
def list_aliases(client, input, options \\ []) do
request(client, "ListAliases", input, options)
end
@doc """
Retrieves build records for all builds associated with the AWS account in
use. You can limit results to builds that are in a specific status by using
the `Status` parameter. Use the pagination parameters to retrieve results
in a set of sequential pages.

> **Note:** Build records are not listed in any particular order.

Build-related operations include:

  * `CreateBuild`
  * `ListBuilds`
  * `DescribeBuild`
  * `UpdateBuild`
  * `DeleteBuild`
"""
def list_builds(client, input, options \\ []) do
request(client, "ListBuilds", input, options)
end
@doc """
Retrieves a collection of fleet records for this AWS account. You can
filter the result set by build ID. Use the pagination parameters to
retrieve results in sequential pages.

> **Note:** Fleet records are not listed in any particular order.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def list_fleets(client, input, options \\ []) do
request(client, "ListFleets", input, options)
end
@doc """
Creates or updates a scaling policy for a fleet. An active scaling policy
prompts Amazon GameLift to track a certain metric for a fleet and
automatically change the fleet's capacity in specific circumstances. Each
scaling policy contains one rule statement. Fleets can have multiple
scaling policies in force simultaneously.

A scaling policy rule statement has the following structure:

    If [MetricName] is [ComparisonOperator] [Threshold] for
    [EvaluationPeriods] minutes, then [ScalingAdjustmentType] to/by
    [ScalingAdjustment].

For example, this policy, "If the number of idle instances exceeds 20 for
more than 15 minutes, then reduce the fleet capacity by 10 instances,"
could be implemented as the following rule statement:

    If [IdleInstances] is [GreaterThanOrEqualToThreshold] [20] for [15]
    minutes, then [ChangeInCapacity] by [-10].

To create or update a scaling policy, specify a unique combination of name
and fleet ID, and set the rule values. All parameters for this action are
required. If successful, the policy name is returned. Scaling policies
cannot be suspended or made inactive. To stop enforcing a scaling policy,
call `DeleteScalingPolicy`.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
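
## Example

A minimal sketch of the "scale down when idle" rule above; the field names
follow the GameLift API and the fleet ID is illustrative:

    input = %{
      "Name" => "scale-down-on-idle",
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "MetricName" => "IdleInstances",
      "ComparisonOperator" => "GreaterThanOrEqualToThreshold",
      "Threshold" => 20,
      "EvaluationPeriods" => 15,
      "ScalingAdjustmentType" => "ChangeInCapacity",
      "ScalingAdjustment" => -10
    }
    put_scaling_policy(client, input)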
"""
def put_scaling_policy(client, input, options \\ []) do
request(client, "PutScalingPolicy", input, options)
end
@doc """
Retrieves a fresh set of credentials for use when uploading a new set of
game build files to Amazon GameLift's Amazon S3 storage location. This is
done as part of the build creation process; see `CreateBuild`.

To request new credentials, specify the build ID as returned with an
initial `CreateBuild` request. If successful, a new set of credentials is
returned, along with the S3 storage location associated with the build ID.
"""
def request_upload_credentials(client, input, options \\ []) do
request(client, "RequestUploadCredentials", input, options)
end
@doc """
Retrieves the fleet ID that a specified alias is currently pointing to.

Alias-related operations include:

  * `CreateAlias`
  * `ListAliases`
  * `DescribeAlias`
  * `UpdateAlias`
  * `DeleteAlias`
  * `ResolveAlias`
"""
def resolve_alias(client, input, options \\ []) do
request(client, "ResolveAlias", input, options)
end
@doc """
Retrieves all active game sessions that match a set of search criteria and
sorts them in a specified order. You can search or sort by the following
game session attributes:

  * **gameSessionId** -- Unique identifier for the game session. You can
    use either a `GameSessionId` or `GameSessionArn` value.
  * **gameSessionName** -- Name assigned to a game session. This value is
    set when requesting a new game session with `CreateGameSession` or
    updating with `UpdateGameSession`. Game session names do not need to be
    unique to a game session.
  * **gameSessionProperties** -- Custom data defined in a game session's
    `GameProperty` parameter. `GameProperty` values are stored as key:value
    pairs; the filter expression must indicate the key and a string to
    search the data values for. For example, to search for game sessions
    with custom data containing the key:value pair "gameMode:brawl",
    specify the following: `gameSessionProperties.gameMode = "brawl"`. All
    custom data values are searched as strings.
  * **maximumSessions** -- Maximum number of player sessions allowed for a
    game session. This value is set when requesting a new game session with
    `CreateGameSession` or updating with `UpdateGameSession`.
  * **creationTimeMillis** -- Value indicating when a game session was
    created. It is expressed in Unix time as milliseconds.
  * **playerSessionCount** -- Number of players currently connected to a
    game session. This value changes rapidly as players join the session or
    drop out.
  * **hasAvailablePlayerSessions** -- Boolean value indicating whether a
    game session has reached its maximum number of players. It is highly
    recommended that all search requests include this filter attribute to
    optimize search performance and return only sessions that players can
    join.

> **Note:** Returned values for `playerSessionCount` and
> `hasAvailablePlayerSessions` change quickly as players join sessions and
> others drop out. Results should be considered a snapshot in time. Be sure
> to refresh search results often, and handle sessions that fill up before
> a player can join.

To search or sort, specify either a fleet ID or an alias ID, and provide a
search filter expression, a sort expression, or both. If successful, a
collection of `GameSession` objects matching the request is returned. Use
the pagination parameters to retrieve results as a set of sequential pages.

You can search for game sessions one fleet at a time only. To find game
sessions across multiple fleets, you must search each fleet separately and
combine the results. This search feature finds only game sessions that are
in `ACTIVE` status. To locate games in statuses other than active, use
`DescribeGameSessionDetails`.

Game-session-related operations include:

  * `CreateGameSession`
  * `DescribeGameSessions`
  * `DescribeGameSessionDetails`
  * `SearchGameSessions`
  * `UpdateGameSession`
  * `GetGameSessionLogUrl`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
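
## Example

A sketch combining the filter and sort attributes described above; the
fleet ID is illustrative:

    input = %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "FilterExpression" => "hasAvailablePlayerSessions=true",
      "SortExpression" => "playerSessionCount ASC"
    }
    search_game_sessions(client, input)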
"""
def search_game_sessions(client, input, options \\ []) do
request(client, "SearchGameSessions", input, options)
end
@doc """
Places a request for a new game session in a queue (see
`CreateGameSessionQueue`). When processing a placement request, Amazon
GameLift searches for available resources on the queue's destinations,
scanning each until it finds resources or the placement request times out.
A game session placement request can also request player sessions. When a
new game session is successfully created, Amazon GameLift creates a player
session for each player included in the request.
When placing a game session, by default Amazon GameLift tries each fleet in
the order they are listed in the queue configuration. Ideally, a queue's
destinations are listed in preference order.
Alternatively, when requesting a game session with players, you can also
provide latency data for each player in relevant regions. Latency data
indicates the performance lag a player experiences when connected to a
fleet in the region. Amazon GameLift uses latency data to reorder the list
of destinations to place the game session in a region with minimal lag. If
latency data is provided for multiple players, Amazon GameLift calculates
each region's average lag for all players and reorders to get the best game
play across all players.

To place a new game session request, specify the following:

  * The queue name and a set of game session properties and settings
  * A unique ID (such as a UUID) for the placement. You use this ID to
    track the status of the placement request
  * (Optional) A set of IDs and player data for each player you want to
    join to the new game session
  * Latency data for all players (if you want to optimize game play for
    the players)

If successful, a new game session placement is created.

To track the status of a placement request, call
`DescribeGameSessionPlacement` and check the request's status. If the
status is `FULFILLED`, a new game session has been created and a game
session ARN and region are referenced. If the placement request times out,
you can resubmit the request or retry it with a different queue.

Game-session-related operations include:

  * `CreateGameSession`
  * `DescribeGameSessions`
  * `DescribeGameSessionDetails`
  * `SearchGameSessions`
  * `UpdateGameSession`
  * `GetGameSessionLogUrl`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
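
## Example

A minimal sketch of a placement request; the placement ID and queue name
are illustrative:

    input = %{
      "PlacementId" => "placement-0001",
      "GameSessionQueueName" => "my-session-queue",
      "MaximumPlayerSessionCount" => 10
    }
    start_game_session_placement(client, input)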
"""
def start_game_session_placement(client, input, options \\ []) do
request(client, "StartGameSessionPlacement", input, options)
end
@doc """
Finds new players to fill open slots in an existing game session. This
operation can be used to add players to matched games that start with fewer
than the maximum number of players or to replace players when they drop
out. By backfilling with the same matchmaker used to create the original
match, you ensure that new players meet the match criteria and maintain a
consistent experience throughout the game session. You can backfill a match
anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game
session's ARN, a matchmaking configuration, and a set of data that
describes all current players in the game session. If successful, a match
backfill ticket is created and returned with status set to QUEUED. The
ticket is placed in the matchmaker's ticket pool and processed. Track the
status of the ticket to respond as needed. For more detail on how to set up
backfilling, see [Backfill Existing Games with
FlexMatch](http://docs.aws.amazon.com/gamelift/latest/developerguide/match-backfill.html).

The process of finding backfill matches is essentially identical to the
initial matchmaking process. The matchmaker searches the pool and groups
tickets together to form potential matches, allowing only one backfill
ticket per potential match. Once a match is formed, the matchmaker creates
player sessions for the new players. All tickets in the match are updated
with the game session's connection information, and the `GameSession`
object is updated to include matchmaker data on the new players. For more
detail on how match backfill requests are processed, see [How Amazon
GameLift FlexMatch
Works](http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html).

Matchmaking-related operations include:

  * `StartMatchmaking`
  * `DescribeMatchmaking`
  * `StopMatchmaking`
  * `AcceptMatch`
  * `StartMatchBackfill`
"""
def start_match_backfill(client, input, options \\ []) do
request(client, "StartMatchBackfill", input, options)
end
@doc """
Uses FlexMatch to create a game match for a group of players based on
custom matchmaking rules, and starts a new game for the matched players.
Each matchmaking request specifies the type of match to build (team
configuration, rules for an acceptable match, etc.). The request also
specifies the players to find a match for and where to host the new game
session for optimal performance. A matchmaking request might start with a
single player or a group of players who want to play together. FlexMatch
finds additional players as needed to fill the match. Match type, rules,
and the queue used to place a new game session are defined in a
`MatchmakingConfiguration`. For complete information on setting up and
using FlexMatch, see the topic [Adding FlexMatch to Your
Game](http://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html).

To start matchmaking, provide a unique ticket ID, specify a matchmaking
configuration, and include the players to be matched. You must also include
a set of player attributes relevant for the matchmaking configuration. If
successful, a matchmaking ticket is returned with status set to `QUEUED`.
Track the status of the ticket to respond as needed and acquire game
session connection information for successfully completed matches.

**Tracking ticket status** -- Two options are available for tracking the
status of matchmaking requests:

  * Polling -- Call `DescribeMatchmaking`. This operation returns the full
    ticket object, including current status and (for completed tickets)
    game session connection info. We recommend polling no more than once
    every 10 seconds.
  * Notifications -- Get event notifications for changes in ticket status
    using Amazon Simple Notification Service (SNS). Notifications are easy
    to set up (see `CreateMatchmakingConfiguration`) and typically deliver
    match status changes faster and more efficiently than polling. We
    recommend that you use polling as a backup to notifications (since
    delivery is not guaranteed) and call `DescribeMatchmaking` only when
    notifications are not received within 30 seconds.

**Processing a matchmaking request** -- FlexMatch handles a matchmaking
request as follows:

  1. Your client code submits a `StartMatchmaking` request for one or more
     players and tracks the status of the request ticket.
  2. FlexMatch uses this ticket and others in process to build an
     acceptable match. When a potential match is identified, all tickets in
     the proposed match are advanced to the next status.
  3. If the match requires player acceptance (set in the matchmaking
     configuration), the tickets move into status `REQUIRES_ACCEPTANCE`.
     This status triggers your client code to solicit acceptance from all
     players in every ticket involved in the match, and then call
     `AcceptMatch` for each player. If any player rejects or fails to
     accept the match before a specified timeout, the proposed match is
     dropped (see `AcceptMatch` for more details).
  4. Once a match is proposed and accepted, the matchmaking tickets move
     into status `PLACING`. FlexMatch locates resources for a new game
     session using the game session queue (set in the matchmaking
     configuration) and creates the game session based on the match data.
  5. When the match is successfully placed, the matchmaking tickets move
     into `COMPLETED` status. Connection information (including game
     session endpoint and player session) is added to the matchmaking
     tickets. Matched players can use the connection information to join
     the game.

Matchmaking-related operations include:

  * `StartMatchmaking`
  * `DescribeMatchmaking`
  * `StopMatchmaking`
  * `AcceptMatch`
  * `StartMatchBackfill`
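
## Example

A minimal sketch; the ticket ID, configuration name, and player data are
illustrative:

    input = %{
      "TicketId" => "ticket-0001",
      "ConfigurationName" => "my-matchmaking-configuration",
      "Players" => [%{"PlayerId" => "player-1"}]
    }
    start_matchmaking(client, input)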
"""
def start_matchmaking(client, input, options \\ []) do
request(client, "StartMatchmaking", input, options)
end
@doc """
Cancels a game session placement that is in `PENDING` status. To stop a
placement, provide the placement ID values. If successful, the placement is
moved to `CANCELLED` status.

Game-session-related operations include:

  * `CreateGameSession`
  * `DescribeGameSessions`
  * `DescribeGameSessionDetails`
  * `SearchGameSessions`
  * `UpdateGameSession`
  * `GetGameSessionLogUrl`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
"""
def stop_game_session_placement(client, input, options \\ []) do
request(client, "StopGameSessionPlacement", input, options)
end
@doc """
Cancels a matchmaking ticket that is currently being processed. To stop the
matchmaking operation, specify the ticket ID. If successful, work on the
ticket is stopped, and the ticket status is changed to `CANCELLED`.

Matchmaking-related operations include:

  * `StartMatchmaking`
  * `DescribeMatchmaking`
  * `StopMatchmaking`
  * `AcceptMatch`
  * `StartMatchBackfill`
"""
def stop_matchmaking(client, input, options \\ []) do
request(client, "StopMatchmaking", input, options)
end
@doc """
Updates properties for an alias. To update properties, specify the alias ID
to be updated and provide the information to be changed. To reassign an
alias to another fleet, provide an updated routing strategy. If successful,
the updated alias record is returned.

Alias-related operations include:

  * `CreateAlias`
  * `ListAliases`
  * `DescribeAlias`
  * `UpdateAlias`
  * `DeleteAlias`
  * `ResolveAlias`
"""
def update_alias(client, input, options \\ []) do
request(client, "UpdateAlias", input, options)
end
@doc """
Updates metadata in a build record, including the build name and version.
To update the metadata, specify the build ID to update and provide the new
values. If successful, a build object containing the updated metadata is
returned.

Build-related operations include:

  * `CreateBuild`
  * `ListBuilds`
  * `DescribeBuild`
  * `UpdateBuild`
  * `DeleteBuild`
"""
def update_build(client, input, options \\ []) do
request(client, "UpdateBuild", input, options)
end
@doc """
Updates fleet properties, including name and description, for a fleet. To
update metadata, specify the fleet ID and the property values that you want
to change. If successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def update_fleet_attributes(client, input, options \\ []) do
request(client, "UpdateFleetAttributes", input, options)
end
@doc """
Updates capacity settings for a fleet. Use this action to specify the
number of EC2 instances (hosts) that you want this fleet to contain. Before
calling this action, you may want to call `DescribeEC2InstanceLimits` to
get the maximum capacity based on the fleet's EC2 instance type.
If you're using autoscaling (see `PutScalingPolicy`), you may want to
specify a minimum and/or maximum capacity. If you don't provide these,
autoscaling can set capacity anywhere between zero and the [service
limits](http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift).
To update fleet capacity, specify the fleet ID and the number of instances
you want the fleet to host. If successful, Amazon GameLift starts or
terminates instances so that the fleet's active instance count matches the
desired instance count. You can view a fleet's current capacity information
by calling `DescribeFleetCapacity`. If the desired instance count is higher
than the instance type's limit, the "Limit Exceeded" exception occurs.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
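
## Example

A minimal sketch setting a desired instance count with autoscaling bounds;
the fleet ID is illustrative:

    input = %{
      "FleetId" => "fleet-1111aaaa-22bb-33cc-44dd-5555eeee66ff",
      "DesiredInstances" => 5,
      "MinSize" => 1,
      "MaxSize" => 10
    }
    update_fleet_capacity(client, input)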
"""
def update_fleet_capacity(client, input, options \\ []) do
request(client, "UpdateFleetCapacity", input, options)
end
@doc """
Updates port settings for a fleet. To update settings, specify the fleet ID
to be updated and list the permissions you want to update. List the
permissions you want to add in `InboundPermissionAuthorizations`, and
permissions you want to remove in `InboundPermissionRevocations`.
Permissions to be removed must match existing fleet permissions. If
successful, the fleet ID for the updated fleet is returned.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def update_fleet_port_settings(client, input, options \\ []) do
request(client, "UpdateFleetPortSettings", input, options)
end
@doc """
Updates game session properties. This includes the session name, maximum
player count, the protection policy (which controls whether an active game
session can be terminated during a scale-down event), and the player
session creation policy (which controls whether new players can join the
session). To update a game session, specify the game session ID and the
values you want to change. If successful, an updated `GameSession` object
is returned.

Game-session-related operations include:

  * `CreateGameSession`
  * `DescribeGameSessions`
  * `DescribeGameSessionDetails`
  * `SearchGameSessions`
  * `UpdateGameSession`
  * `GetGameSessionLogUrl`
  * Game session placements
      * `StartGameSessionPlacement`
      * `DescribeGameSessionPlacement`
      * `StopGameSessionPlacement`
"""
def update_game_session(client, input, options \\ []) do
request(client, "UpdateGameSession", input, options)
end
@doc """
Updates settings for a game session queue, which determines how new game
session requests in the queue are processed. To update settings, specify
the queue name to be updated and provide the new settings. When updating
destinations, provide a complete list of destinations.

Queue-related operations include:

  * `CreateGameSessionQueue`
  * `DescribeGameSessionQueues`
  * `UpdateGameSessionQueue`
  * `DeleteGameSessionQueue`
"""
def update_game_session_queue(client, input, options \\ []) do
request(client, "UpdateGameSessionQueue", input, options)
end
@doc """
Updates settings for a FlexMatch matchmaking configuration. To update
settings, specify the configuration name to be updated and provide the new
settings.

Operations related to match configurations and rule sets include:

  * `CreateMatchmakingConfiguration`
  * `DescribeMatchmakingConfigurations`
  * `UpdateMatchmakingConfiguration`
  * `DeleteMatchmakingConfiguration`
  * `CreateMatchmakingRuleSet`
  * `DescribeMatchmakingRuleSets`
  * `ValidateMatchmakingRuleSet`
"""
def update_matchmaking_configuration(client, input, options \\ []) do
request(client, "UpdateMatchmakingConfiguration", input, options)
end
@doc """
Updates the current run-time configuration for the specified fleet, which
tells Amazon GameLift how to launch server processes on instances in the
fleet. You can update a fleet's run-time configuration at any time after
the fleet is created; it does not need to be in an `ACTIVE` status.
To update run-time configuration, specify the fleet ID and provide a
`RuntimeConfiguration` object with the updated collection of server process
configurations.
Each instance in an Amazon GameLift fleet checks regularly for an updated
run-time configuration and changes how it launches server processes to
comply with the latest version. Existing server processes are not affected
by the update; they continue to run until they end, while Amazon GameLift
simply adds new server processes to fit the current run-time configuration.
As a result, the run-time configuration changes are applied gradually as
existing processes shut down and new processes are launched in Amazon
GameLift's normal process recycling activity.

Fleet-related operations include:

  * `CreateFleet`
  * `ListFleets`
  * Describe fleets:
      * `DescribeFleetAttributes`
      * `DescribeFleetPortSettings`
      * `DescribeFleetUtilization`
      * `DescribeRuntimeConfiguration`
      * `DescribeFleetEvents`
  * Update fleets:
      * `UpdateFleetAttributes`
      * `UpdateFleetCapacity`
      * `UpdateFleetPortSettings`
      * `UpdateRuntimeConfiguration`
  * Manage fleet capacity:
      * `DescribeFleetCapacity`
      * `UpdateFleetCapacity`
      * `PutScalingPolicy` (automatic scaling)
      * `DescribeScalingPolicies` (automatic scaling)
      * `DeleteScalingPolicy` (automatic scaling)
      * `DescribeEC2InstanceLimits`
  * `DeleteFleet`
"""
def update_runtime_configuration(client, input, options \\ []) do
request(client, "UpdateRuntimeConfiguration", input, options)
end
@doc """
Validates the syntax of a matchmaking rule or rule set. This operation
checks that the rule set uses syntactically correct JSON and that it
conforms to allowed property expressions. To validate syntax, provide a
rule set string.

Operations related to match configurations and rule sets include:

  * `CreateMatchmakingConfiguration`
  * `DescribeMatchmakingConfigurations`
  * `UpdateMatchmakingConfiguration`
  * `DeleteMatchmakingConfiguration`
  * `CreateMatchmakingRuleSet`
  * `DescribeMatchmakingRuleSets`
  * `ValidateMatchmakingRuleSet`
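
## Example

A minimal sketch; the rule set body is illustrative and abbreviated:

    rule_set_body = ~s({"ruleLanguageVersion": "1.0", "teams": []})
    validate_matchmaking_rule_set(client, %{"RuleSetBody" => rule_set_body})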
"""
def validate_matchmaking_rule_set(client, input, options \\ []) do
request(client, "ValidateMatchmakingRuleSet", input, options)
end
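# Shared request plumbing for every action above: sign the JSON payload
# with AWS Signature V4 and POST it to the GameLift endpoint, decoding the
# JSON response body (when present) with Poison.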
@spec request(map(), binary(), map(), list()) ::
  {:ok, Poison.Parser.t() | nil, HTTPoison.Response.t()} |
  {:error, Poison.Parser.t()} |
  {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "gamelift"}
host = get_host("gamelift", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "GameLift.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# source: lib/aws/gamelift.ex
defmodule Hand do
@hands %{
high_card: 1,
pair: 2,
two_pair: 3,
three_of_a_kind: 4,
straight: 5,
flush: 6,
full_house: 7,
four_of_a_kind: 8,
straight_flush: 9,
royal_flush: 10
}
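# Evaluates a hand (a list of %Card{} structs, as used below) and returns a
# map tagged with the best category found, e.g. %{hand: :pair, rank: 14,
# kickers: [...]}. Categories are checked from best (royal flush) down to
# worst (high card); the first match wins.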
def high(hand) do
hand = Enum.sort_by(hand, &(15 - &1.rank))
ranks = _divide_by_ranks(hand)
suits = _divide_by_suits(hand)
possible_flush = _is_flush(suits)
possible_three_of_a_kind = _is_three_of_a_kind(hand, ranks)
possible_pair = _is_pair(hand, ranks)
possible_straight_flush = _is_straight_flush(suits, possible_flush)
_is_royal_flush(possible_straight_flush)
|| possible_straight_flush
|| _is_four_of_a_kind(hand, ranks)
|| _is_full_house(possible_three_of_a_kind, ranks)
|| possible_flush
|| _is_straight(hand)
|| possible_three_of_a_kind
|| _is_two_pair(hand, ranks)
|| possible_pair
|| _is_high_card(hand)
end
def high_eval_sorter(hand1, hand2) do
high_sorter(high(hand1), high(hand2))
end
def high_sorter(hand1 = %{hand: h1}, hand2 = %{hand: h2}) when h1 == h2 do
_royal_flush_high_sorter(hand1, hand2)
|| _straight_flush_high_sorter(hand1, hand2)
|| _four_of_a_kind_high_sorter(hand1, hand2)
|| _full_house_high_sorter(hand1, hand2)
|| _flush_high_sorter(hand1, hand2)
|| _straight_high_sorter(hand1, hand2)
|| _three_of_a_kind_high_sorter(hand1, hand2)
|| _two_pair_high_sorter(hand1, hand2)
|| _pair_high_sorter(hand1, hand2)
|| _high_card_high_sorter(hand1, hand2)
|| false
end
def high_sorter(hand1, hand2) do
@hands[hand1[:hand]] >= @hands[hand2[:hand]]
end
def equal(hand1, hand2) do
Map.delete(hand1, :suit) == Map.delete(hand2, :suit)
end
defp _is_royal_flush(possible_straight_flush) do
if possible_straight_flush do
%{hand: :straight_flush, high: high, suit: suit} = possible_straight_flush
if high == 14 do
%{hand: :royal_flush, high: high, suit: suit}
end
end
end
defp _royal_flush_high_sorter(%{hand: :royal_flush}, %{hand: :royal_flush}), do: true
defp _royal_flush_high_sorter(_, _), do: nil
defp _is_straight_flush(suits, possible_flush) do
if possible_flush do
%{suit: suit} = possible_flush
possible_straight = _is_straight(suits[suit])
if possible_straight do
%{high: high} = possible_straight
%{hand: :straight_flush, high: high, suit: suit}
end
end
end
defp _straight_flush_high_sorter(
%{hand: :straight_flush, high: high1},
%{hand: :straight_flush, high: high2}
) do
high1 >= high2
end
defp _straight_flush_high_sorter(_, _), do: nil
defp _is_four_of_a_kind(hand, ranks) do
rank = _find_highest_rank_with_count(ranks, 4)
if rank do
cards = ranks[rank]
kicker = Enum.at(Deck.remove(hand, cards), 0).rank
%{hand: :four_of_a_kind, rank: rank, kickers: [kicker]}
end
end
defp _four_of_a_kind_high_sorter(
%{hand: :four_of_a_kind, rank: rank1, kickers: kickers1 },
%{hand: :four_of_a_kind, rank: rank2, kickers: kickers2 }
) when rank1 == rank2 do
kickers1 >= kickers2
end
defp _four_of_a_kind_high_sorter(
%{hand: :four_of_a_kind, rank: rank1},
%{hand: :four_of_a_kind, rank: rank2}
) do
rank1 >= rank2
end
defp _four_of_a_kind_high_sorter(_, _), do: nil
defp _is_full_house(possible_three_of_a_kind, ranks) do
if possible_three_of_a_kind do
%{rank: rank} = possible_three_of_a_kind
remaining_ranks = Map.delete(ranks, rank)
eligible_ranks = Enum.filter([
_find_highest_rank_with_count(remaining_ranks, 3),
_find_highest_rank_with_count(remaining_ranks, 2)
], &(&1))
if length(eligible_ranks) > 0 do
max = Enum.max(eligible_ranks)
%{hand: :full_house, rank: rank, over: max}
end
end
end
defp _full_house_high_sorter(
%{hand: :full_house, rank: rank1, over: over1},
%{hand: :full_house, rank: rank2, over: over2}
) when rank1 == rank2 do
over1 >= over2
end
defp _full_house_high_sorter(
%{hand: :full_house, rank: rank1},
%{hand: :full_house, rank: rank2}
) do
rank1 >= rank2
end
defp _full_house_high_sorter(_, _), do: nil
defp _is_flush(suits) do
{suit, count} = _suit_with_most_cards(suits)
if count >= 5 do
%{hand: :flush, suit: suit, ranks: Enum.take(Enum.map(suits[suit], &(&1.rank)), 5)}
end
end
defp _flush_high_sorter(%{hand: :flush, ranks: ranks1}, %{hand: :flush, ranks: ranks2}) do
ranks1 >= ranks2
end
defp _flush_high_sorter(_, _), do: nil
defp _is_straight(hand) do
  unique_rank_cards = Enum.dedup_by(hand, &(&1.rank))
  possible_ace = Enum.at(unique_rank_cards, 0)

  # Rebind outside the `if`: a rebinding inside `if` does not leak out in
  # Elixir, so the dummy rank-1 entry (which makes ace-low straights
  # detectable) must be the value of the whole expression.
  unique_rank_cards =
    if Card.rank(possible_ace) == "A" do
      unique_rank_cards ++ [%Card{rank: 1, suit: possible_ace.suit}]
    else
      unique_rank_cards
    end

  ranks = Enum.map(unique_rank_cards, &(&1.rank))
  highest_rank = _is_consecutive(ranks, 5, nil, nil)

  if highest_rank do
    %{hand: :straight, high: highest_rank}
  end
end
defp _straight_high_sorter(%{hand: :straight, high: high1}, %{hand: :straight, high: high2}) do
high1 >= high2
end
defp _straight_high_sorter(_, _), do: nil
defp _is_three_of_a_kind(hand, ranks) do
rank = _find_highest_rank_with_count(ranks, 3)
if rank do
cards = ranks[rank]
kickers = Enum.map(Enum.take(Deck.remove(hand, cards), 2), &(&1.rank))
%{hand: :three_of_a_kind, rank: rank, kickers: kickers}
end
end
defp _three_of_a_kind_high_sorter(
%{hand: :three_of_a_kind, rank: rank1, kickers: kickers1},
%{hand: :three_of_a_kind, rank: rank2, kickers: kickers2}
) when rank1 == rank2 do
kickers1 >= kickers2
end
defp _three_of_a_kind_high_sorter(
%{hand: :three_of_a_kind, rank: rank1},
%{hand: :three_of_a_kind, rank: rank2}
) do
rank1 >= rank2
end
defp _three_of_a_kind_high_sorter(_, _), do: nil
defp _is_two_pair(hand, ranks) do
# sort descending explicitly: map key order is not guaranteed
pair_ranks =
  ranks
  |> Map.keys()
  |> Enum.sort(&(&1 >= &2))
  |> Enum.filter(&(length(ranks[&1]) == 2))
if length(pair_ranks) >= 2 do
[high_rank, low_rank] = Enum.take(pair_ranks, 2)
high_rank_cards = ranks[high_rank]
low_rank_cards = ranks[low_rank]
kickers = Enum.map(Enum.take(Deck.remove(hand, high_rank_cards ++ low_rank_cards), 1), &(&1.rank))
%{hand: :two_pair, high_rank: high_rank, low_rank: low_rank,
kickers: kickers}
end
end
defp _two_pair_high_sorter(
%{hand: :two_pair, high_rank: high_rank1, low_rank: low_rank1, kickers: kickers1},
%{hand: :two_pair, high_rank: high_rank2, low_rank: low_rank2, kickers: kickers2}
) when high_rank1 == high_rank2 and low_rank1 == low_rank2 do
kickers1 >= kickers2
end
defp _two_pair_high_sorter(
%{hand: :two_pair, high_rank: high_rank1, low_rank: low_rank1},
%{hand: :two_pair, high_rank: high_rank2, low_rank: low_rank2}
) when high_rank1 == high_rank2 do
low_rank1 >= low_rank2
end
defp _two_pair_high_sorter(
%{hand: :two_pair, high_rank: high_rank1},
%{hand: :two_pair, high_rank: high_rank2}
) do
high_rank1 >= high_rank2
end
defp _two_pair_high_sorter(_, _), do: nil
defp _is_pair(hand, ranks) do
rank = _find_highest_rank_with_count(ranks, 2)
if rank do
cards = ranks[rank]
kickers = Enum.map(Enum.take(Deck.remove(hand, cards), 3), &(&1.rank))
%{hand: :pair, rank: rank, kickers: kickers}
end
end
defp _pair_high_sorter(
%{hand: :pair, rank: rank1, kickers: kickers1},
%{hand: :pair, rank: rank2, kickers: kickers2}
) when rank1 == rank2 do
kickers1 >= kickers2
end
defp _pair_high_sorter(
%{hand: :pair, rank: rank1},
%{hand: :pair, rank: rank2}
) do
rank1 >= rank2
end
defp _pair_high_sorter(_, _), do: nil
defp _is_high_card(hand) do
%{hand: :high_card, kickers: Enum.map(Enum.take(hand, 5), &(&1.rank))}
end
defp _high_card_high_sorter(%{hand: :high_card, kickers: kickers1}, %{hand: :high_card, kickers: kickers2}) do
kickers1 >= kickers2
end
defp _high_card_high_sorter(_, _), do: nil
defp _find_highest_rank_with_count(ranks, count) do
  # sort descending explicitly: map key order is not guaranteed
  ranks
  |> Map.keys()
  |> Enum.sort(&(&1 >= &2))
  |> Enum.find(&(length(ranks[&1]) == count))
end
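# _is_consecutive/4 walks a descending list of unique ranks looking for
# `remaining` consecutive values; it returns the high rank of the run, or
# nil. On a gap the counter resets to 4 because the current rank starts a
# fresh five-card candidate.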
defp _is_consecutive([this | rest], remaining, nil, nil), do: _is_consecutive(rest, remaining - 1, this, this)
defp _is_consecutive(_, 0, _, highest_rank), do: highest_rank
defp _is_consecutive([], _, _, _), do: nil
defp _is_consecutive([this | rest], remaining, prev, highest_rank) do
if prev - this == 1 do
_is_consecutive(rest, remaining - 1, this, highest_rank)
else
_is_consecutive(rest, 4, this, this)
end
end
defp _divide_by_ranks(hand) do
  Enum.reduce(hand, %{}, fn card, ranks ->
    Map.update(ranks, card.rank, [card], &(&1 ++ [card]))
  end)
end
defp _divide_by_suits(hand) do
  Enum.reduce(hand, %{}, fn card, suits ->
    Map.update(suits, card.suit, [card], &(&1 ++ [card]))
  end)
end
defp _suit_with_most_cards(divided_suits) do
suits = Map.keys(divided_suits)
suits_with_counts = Enum.map(suits, &({ &1, length(divided_suits[&1]) }))
Enum.max_by(suits_with_counts, fn({_suit, count}) -> count end)
end
end
# source: poker_equity_evaluator/lib/hand.ex
defmodule Plymio.Funcio.Enum.Map do
@moduledoc ~S"""
Map Functions for an Enumerable.
See `Plymio.Funcio` for overview and documentation terms.
> The concurrent functions currently produce a `List` rather than a
> `Stream`, but this could change in the future. The examples below assume
> a `Stream` was returned.
"""
use Plymio.Funcio.Attribute
@type opts :: Plymio.Funcio.opts()
@type error :: Plymio.Funcio.error()
@type result :: Plymio.Funcio.result()
@type fun1_map :: Plymio.Funcio.fun1_map()
import Plymio.Fontais.Option,
only: [
opts_normalise: 1,
opts_validate: 1,
opts_get: 3
]
import Plymio.Funcio.Enum.Collate,
only: [
collate0_enum: 1
]
import Plymio.Funcio.Map.Utility,
only: [
reduce_or_passthru_map1_funs: 1,
reduce_map1_funs: 1
]
@doc ~S"""
`map_enum/2` takes an *enum* and a *map/1*, reduces the
*map/1* to a single, composite function and applies the composite to each
value in the *enum*, returning `{:ok, values}` where `values` *may* be a
`Stream`.
## Examples
iex> enum = 0 .. 4
...> fun_map = fn v -> v * v end
...> {:ok, values} = enum
...> |> map_enum(fun_map)
...> values |> Enum.to_list
[0, 1, 4, 9, 16]
iex> enum = 0 .. 4
...> fun_map1 = fn v -> v + 1 end
...> fun_map2 = fn v -> v * v end
...> fun_map3 = fn v -> v - 1 end
...> {:ok, values} = enum
...> |> map_enum([fun_map1, fun_map2, fun_map3])
...> values |> Enum.to_list
[0, 3, 8, 15, 24]
iex> enum = 0 .. 99 |> Stream.map(&(&1))
...> fun_map = fn v -> v end
...> {:ok, values} = enum
...> |> map_enum(fun_map)
...> values |> Enum.reduce(0, fn v,s -> s + v end)
4950
iex> enum = :not_an_enum
...> fun_map = fn v -> v * v end
...> {:ok, stream} = enum |> map_enum(fun_map)
...> try do
...> stream |> Enum.to_list
...> rescue
...> error -> error
...> end
...> |> Exception.message
...> |> String.starts_with?("protocol Enumerable not implemented for :not_an_enum")
true
"""
@since "0.1.0"
@spec map_enum(any, fun) :: result
def map_enum(enum, fun)
def map_enum(enum, fun) do
with {:ok, fun_map} <- fun |> reduce_or_passthru_map1_funs do
try do
{:ok, enum |> Stream.map(fun_map)}
rescue
error ->
{:error, error}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`map_concurrent_enum/3` takes an *enum* and a *map/1*, reduces the
*map/1* to a single, composite function and applies the composite to each
value in the *enum* in its own, separate task using
`Task.Supervisor.async_stream_nolink/4`.
It returns `{:ok, values}` where the `values` *may* be a `Stream`.
## Examples
iex> enum = 0 .. 4
...> fun_map = fn v -> v * v end
...> {:ok, values} = enum
...> |> map_concurrent_enum(fun_map)
...> values |> Enum.to_list
[0, 1, 4, 9, 16]
iex> enum = 0 .. 4
...> fun_map1 = fn v -> v + 1 end
...> fun_map2 = fn v -> v * v end
...> fun_map3 = fn v -> v - 1 end
...> {:ok, values} = enum
...> |> map_concurrent_enum([fun_map1, fun_map2, fun_map3])
...> values |> Enum.to_list
[0, 3, 8, 15, 24]
iex> enum = 0 .. 99
...> fun_map = fn v -> v end
...> {:ok, values} = enum |> map_concurrent_enum(fun_map)
...> values |> Enum.reduce(0, fn v, s -> s + v end)
4950
iex> enum = :not_an_enum
...> fun_map = fn v -> v * v end
...> {:error, error} = enum |> map_concurrent_enum(fun_map)
...> error |> Exception.message
...> |> String.starts_with?("protocol Enumerable not implemented for :not_an_enum")
true
"""
@since "0.1.0"
@spec map_concurrent_enum(any, fun, opts) :: result
def map_concurrent_enum(enum, fun, opts \\ [])
def map_concurrent_enum(enum, fun, opts) do
with {:ok, opts} <- opts |> opts_normalise,
{:ok, fun_map} <- fun |> reduce_or_passthru_map1_funs,
{:ok, task_sup} <- opts |> opts_resolve_task_sup_pid,
{:ok, async_stream_opts} <- opts |> opts_resolve_task_sup_async_stream_opts do
try do
task_stream =
task_sup
|> Task.Supervisor.async_stream_nolink(enum, fun_map, async_stream_opts)
with {:ok, _results} = result <- task_stream |> realise_task_stream_results,
{:ok, _} <- task_sup |> stop_task_supervisor do
result
else
{:error, %{__exception__: true}} = result -> result
end
rescue
error ->
{:error, error}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`map_with_index_enum/2` takes an *enum* and an *index map/1*, maps the
*enum* with `Stream.with_index/1` and then `Stream.map/2`, returning
`{:ok, stream}`.
The *index map/1* is called with `{value, index}` for each `value` in the *enum*.
## Examples
iex> {:ok, stream} = [1,2,3] |> map_with_index_enum(fn {v,_i} -> v * v end)
...> stream |> Enum.to_list
[1, 4, 9]
iex> {:ok, stream} = [1,2,3] |> map_with_index_enum(fn {_v,i} -> i * i end)
...> stream |> Enum.to_list
[0, 1, 4]
iex> {:ok, stream} = [a: 1, b: 2, c: 3]
...> |> map_with_index_enum(fn {{_k,v},i} -> v * v * i end)
...> stream |> Enum.to_list
[0, 4, 18]
iex> {:ok, stream} = [a: 1, b: 2, c: 3]
...> |> map_with_index_enum([
...> fn {{_k,v},i} -> {i, v * v} end,
...> fn {i, v} -> i + v end,
...> ])
...> stream |> Enum.to_list
[1, 5, 11]
iex> enum = :not_an_enum
...> fun_map = fn v -> v * v end
...> {:ok, stream} = enum |> map_with_index_enum(fun_map)
...> try do
...> stream |> Enum.to_list
...> rescue
...> error -> error
...> end
...> |> Exception.message
...> |> String.starts_with?("protocol Enumerable not implemented for :not_an_enum")
true
iex> {:error, error} = [1,2,3] |> map_with_index_enum(:not_a_fun)
...> error |> Exception.message
"map/1 function invalid, got: :not_a_fun"
"""
@since "0.1.0"
@spec map_with_index_enum(any, any) :: {:ok, list} | {:error, error}
def map_with_index_enum(derivable_list, mapper)
def map_with_index_enum(state, mapper) do
with {:ok, fun} <- mapper |> reduce_map1_funs do
try do
{:ok, state |> Stream.with_index() |> Stream.map(fun)}
rescue
error ->
{:error, error}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`map_concurrent_with_index_enum/2` takes an *enum* and an *index map/1*,
maps the *enum* with `Stream.with_index/1` and then `map_concurrent_enum/2`,
returning `{:ok, stream}`.
The *index map/1* is called with `{value, index}` for each `value` in the *enum*.
## Examples
iex> {:ok, stream} = [1,2,3] |> map_concurrent_with_index_enum(fn {v,_i} -> v * v end)
...> stream |> Enum.to_list
[1, 4, 9]
iex> {:ok, stream} = [1,2,3] |> map_concurrent_with_index_enum(fn {_v,i} -> i * i end)
...> stream |> Enum.to_list
[0, 1, 4]
iex> {:ok, stream} = [a: 1, b: 2, c: 3]
...> |> map_concurrent_with_index_enum(fn {{_k,v},i} -> v * v * i end)
...> stream |> Enum.to_list
[0, 4, 18]
iex> {:ok, stream} = [a: 1, b: 2, c: 3]
...> |> map_concurrent_with_index_enum([
...> fn {{_k,v},i} -> {i, v * v} end,
...> fn {i, v} -> i + v end,
...> ])
...> stream |> Enum.to_list
[1, 5, 11]
iex> {:error, error} = 42 |> map_concurrent_with_index_enum(&(&1))
...> error |> Exception.message
...> |> String.starts_with?("protocol Enumerable not implemented for 42")
true
iex> {:error, error} = [1,2,3] |> map_concurrent_with_index_enum(:not_a_fun)
...> error |> Exception.message
"map/1 function invalid, got: :not_a_fun"
"""
@since "0.1.0"
@spec map_concurrent_with_index_enum(any, any) :: {:ok, list} | {:error, error}
def map_concurrent_with_index_enum(enum, mapper)
def map_concurrent_with_index_enum(state, mapper) do
state |> Stream.with_index() |> map_concurrent_enum(mapper)
end
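# `Task.Supervisor.async_stream_nolink/4` yields `{:ok, value}` for each
# task that completes and `{:exit, reason}` (matched below as
# `{:exit, {value, stacktrace}}`) for each task that crashes. The helpers
# below normalise those shapes to `{:ok, value}` / `{:error, exception}`
# tuples before collating.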
defp normalise_task_stream_result(value)
defp normalise_task_stream_result({:ok, _} = result) do
result
end
defp normalise_task_stream_result({:error, error}) do
error |> normalise_task_stream_error_result!
end
defp normalise_task_stream_result({:exit, {value, _stacktrace}}) do
value |> normalise_task_stream_error_result!
end
defp normalise_task_stream_results(results) do
{:ok, results |> Stream.map(&normalise_task_stream_result/1)}
end
defp realise_task_stream_results(results) do
with {:ok, stream} <- results |> normalise_task_stream_results do
stream
|> collate0_enum
else
{:error, %{__exception__: true}} = result -> result
end
end
# Prefer `value` when it is already an exception, else fall back to the
# supplied `exception`; raises CondClauseError when neither is one.
defp normalise_task_stream_error_value(value, exception) do
cond do
Exception.exception?(value) ->
{:ok, value}
Exception.exception?(exception) ->
{:ok, exception}
end
end
defp normalise_task_stream_error_result(value, exception) do
value
|> normalise_task_stream_error_value(exception)
|> case do
{:ok, error} ->
{:ok, {:error, error}}
end
end
defp normalise_task_stream_error_result!(value, exception \\ nil) do
value
|> normalise_task_stream_error_result(exception)
|> case do
{:ok, result} ->
result
end
end
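# Resolves the Task.Supervisor pid to run tasks under: reuses a pid already
# present in opts, otherwise starts a fresh supervisor from the configured
# start_link options.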
defp opts_resolve_task_sup_pid(opts)
defp opts_resolve_task_sup_pid(opts) do
with {:ok, opts} <- opts |> opts_validate do
opts
|> Keyword.has_key?(@plymio_funcio_key_task_sup_pid)
|> case do
true ->
opts |> Keyword.fetch(@plymio_funcio_key_task_sup_pid)
_ ->
with {:ok, sup_opts} <- opts |> opts_resolve_task_sup_start_link_opts,
{:ok, _sup_pid} = result <- sup_opts |> Task.Supervisor.start_link() do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
else
{:error, %{__exception__: true}} = result -> result
end
end
defp opts_resolve_task_sup_start_link_opts(opts)
defp opts_resolve_task_sup_start_link_opts(
opts,
defaults \\ @plymio_funcio_defaults_task_sup_start_link_opts
) do
with {:ok, _sup_opts} = result <-
opts |> opts_get(@plymio_funcio_key_task_sup_start_link_opts, defaults) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
defp opts_resolve_task_sup_async_stream_opts(opts)
defp opts_resolve_task_sup_async_stream_opts(
opts,
defaults \\ @plymio_funcio_defaults_task_sup_async_stream_opts
) do
with {:ok, _stream_opts} = result <-
opts |> opts_get(@plymio_funcio_key_task_sup_async_stream_opts, defaults) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
defp stop_task_supervisor(pid)
defp stop_task_supervisor(pid) when is_pid(pid) do
pid
|> Supervisor.stop()
|> case do
:ok -> {:ok, pid}
end
end
end
|
lib/funcio/enum/map/map.ex
| 0.841044
| 0.488527
|
map.ex
|
starcoder
|
defmodule Sue.DB do
@moduledoc """
The following is, I suppose, temporary. While I find Mnesia really cool, I
think going forward we're better off using something else.
"""
use GenServer
require Logger
alias __MODULE__
alias :mnesia, as: Mnesia
@kv_tables [:state]
def start_link(args) do
GenServer.start_link(__MODULE__, args, name: __MODULE__)
end
@impl true
def init(_args) do
nodes = [Node.self() | Node.list()]
Mnesia.create_schema(nodes)
:ok = Mnesia.start()
:ok = Mnesia.wait_for_tables(Mnesia.system_info(:local_tables), 5_000)
create_tables(nodes)
{:ok, []}
end
def set({table, k, v}) when is_atom(table), do: write({table, k, v})
def write(record) when is_tuple(record) do
fn -> Mnesia.write(record) end
|> Mnesia.transaction()
|> elixirize_output()
end
@spec get!(atom, any, any) :: any
def get!(table, key, default \\ nil) do
{:ok, val} = get(table, key, default)
val
end
@spec get(atom, any, any) :: {:error, any} | {:ok, any}
def get(table, key, default \\ nil) do
with {:atomic, res} <-
fn -> Mnesia.read({table, key}) end
|> Mnesia.transaction() do
case res do
[] -> {:ok, default}
[{^table, ^key, val}] -> {:ok, val}
end
else
{:aborted, reason} -> {:error, reason}
end
end
def create_table(name, opts) when is_atom(name) do
Mnesia.create_table(name, opts)
end
def clear_table(name) when is_atom(name) do
Mnesia.clear_table(name)
end
def all_keys(name) when is_atom(name) do
fn -> Mnesia.all_keys(name) end
|> Mnesia.transaction()
end
defp create_tables(nodes) do
# Generic KVs
for kv_table_name <- @kv_tables do
create_table(kv_table_name,
type: :set,
disc_copies: nodes,
attributes: [:key, :val]
)
end
# Individual Modules
[DB.Account, DB.Defn, DB.Graph, DB.Poll]
|> Enum.map(fn module ->
module.db_tables()
|> Enum.map(fn {table_name, opts} ->
res = create_table(table_name, opts |> Keyword.put_new(:disc_copies, nodes))
Logger.info("[Sue.DB] create_table(#{table_name}) -> #{inspect(res)}")
end)
end)
end
@spec match!(tuple) :: [tuple()]
def match!(record) when is_tuple(record) do
{:ok, res} = match(record)
res
end
@spec match(tuple) :: {:error, any} | {:ok, [tuple()]}
def match(record) when is_tuple(record) do
fn -> Mnesia.match_object(record) end
|> Mnesia.transaction()
|> elixirize_output()
end
def elixirize_output(out) do
case out do
{:atomic, res} -> {:ok, res}
{:aborted, reason} -> {:error, reason}
end
end
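# Illustrative usage (key and value are hypothetical; :state is one of the
# generic KV tables created in init/1):
#
# Sue.DB.set({:state, :greeting, "hello"})
# Sue.DB.get!(:state, :greeting) # => "hello"
# Sue.DB.get(:state, :missing, :none) # => {:ok, :none}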
end
|
lib/sue/db/d_b.ex
| 0.786336
| 0.413033
|
d_b.ex
|
starcoder
|
defmodule GenStage.BroadcastDispatcher do
@moduledoc """
A dispatcher that accumulates demand from all consumers
before broadcasting events to all of them.
If a producer uses `GenStage.BroadcastDispatcher`, its subscribers
can specify an optional `:selector` function that receives the event
and returns a boolean in the subscription options.
Assume `producer` and `consumer` are stages exchanging events of type
`%{:key => String.t, any => any}`, then by calling
GenStage.sync_subscribe(consumer,
to: producer,
selector: fn %{key: key} -> String.starts_with?(key, "foo-") end)
`consumer` will receive only the events broadcast from `producer`
for which the selector function returns a truthy value.
The `:selector` option can be specified in sync and async subscriptions,
as well as in the `:subscribe_to` list in the return tuple of
`c:GenStage.init/1`. For example:
def init(:ok) do
{:consumer, :ok, subscribe_to:
[{producer, selector: fn %{key: key} -> String.starts_with?(key, "foo-") end}]}
end
"""
@behaviour GenStage.Dispatcher
@doc false
def init(_opts) do
{:ok, {[], 0}}
end
@doc false
def info(msg, state) do
send(self(), msg)
{:ok, state}
end
@doc false
def subscribe(opts, {pid, ref}, {demands, waiting}) do
selector = validate_selector(opts)
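# The new consumer starts at -waiting: that demand was already forwarded
# upstream, and the resulting events will be broadcast to this consumer too.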
{:ok, 0, {add_demand(-waiting, pid, ref, selector, demands), waiting}}
end
@doc false
def cancel({_, ref}, {demands, waiting}) do
# Since we may have removed the process we were waiting on,
# cancellation may actually generate demand!
demands = delete_demand(ref, demands)
new_min = get_min(demands)
demands = adjust_demand(new_min, demands)
{:ok, new_min, {demands, waiting + new_min}}
end
@doc false
def ask(counter, {pid, ref}, {demands, waiting}) do
{current, selector, demands} = pop_demand(ref, demands)
demands = add_demand(current + counter, pid, ref, selector, demands)
new_min = get_min(demands)
demands = adjust_demand(new_min, demands)
{:ok, new_min, {demands, waiting + new_min}}
end
@doc false
def dispatch(events, _length, {demands, 0}) do
{:ok, events, {demands, 0}}
end
def dispatch(events, length, {demands, waiting}) do
{deliver_now, deliver_later, waiting} = split_events(events, length, waiting)
for {_, pid, ref, selector} <- demands do
selected =
case filter_and_count(deliver_now, selector) do
{selected, 0} ->
selected
{selected, discarded} ->
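# Events the selector rejected still consumed this consumer's demand,
# so re-ask for that many on its behalf.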
send(self(), {:"$gen_producer", {pid, ref}, {:ask, discarded}})
selected
end
Process.send(pid, {:"$gen_consumer", {self(), ref}, selected}, [:noconnect])
:ok
end
{:ok, deliver_later, {demands, waiting}}
end
defp filter_and_count(messages, nil) do
{messages, 0}
end
defp filter_and_count(messages, selector) do
filter_and_count(messages, selector, [], 0)
end
defp filter_and_count([message | messages], selector, acc, count) do
if selector.(message) do
filter_and_count(messages, selector, [message | acc], count)
else
filter_and_count(messages, selector, acc, count + 1)
end
end
defp filter_and_count([], _selector, acc, count) do
{:lists.reverse(acc), count}
end
defp validate_selector(opts) do
case Keyword.get(opts, :selector) do
nil ->
nil
selector when is_function(selector, 1) ->
selector
other ->
raise ArgumentError,
":selector option must be passed a unary function, got: #{inspect(other)}"
end
end
defp get_min([]), do: 0
defp get_min([{acc, _, _, _} | demands]),
do: demands |> Enum.reduce(acc, fn {val, _, _, _}, acc -> min(val, acc) end) |> max(0)
defp split_events(events, length, counter) when length <= counter do
{events, [], counter - length}
end
defp split_events(events, _length, counter) do
{now, later} = Enum.split(events, counter)
{now, later, 0}
end
defp adjust_demand(0, demands) do
demands
end
defp adjust_demand(min, demands) do
Enum.map(demands, fn {counter, pid, key, selector} ->
{counter - min, pid, key, selector}
end)
end
defp add_demand(counter, pid, ref, selector, demands)
when is_integer(counter) and is_pid(pid) and (is_nil(selector) or is_function(selector, 1)) do
[{counter, pid, ref, selector} | demands]
end
defp pop_demand(ref, demands) do
case List.keytake(demands, ref, 2) do
{{current, _pid, ^ref, selector}, rest} -> {current, selector, rest}
nil -> {0, nil, demands}
end
end
defp delete_demand(ref, demands) do
List.keydelete(demands, ref, 2)
end
end
|
deps/gen_stage/lib/gen_stage/dispatchers/broadcast_dispatcher.ex
| 0.846101
| 0.611034
|
broadcast_dispatcher.ex
|
starcoder
|
defmodule Casex do
@moduledoc """
Simple case conversion for web applications.
Easily decodes `camelCase` body payloads to `snake_case` and
encodes `snake_case` response payloads back to `camelCase`.
Useful to expose your API in `camelCase` while keeping the Elixir naming conventions internally.
It leverages [recase](https://github.com/sobolevn/recase) to provide case conversions
without relying on the `Macro` module and
easily integrates with [plug](https://hex.pm/packages/plug)-based applications.
## Phoenix Integration
1. Add `Casex.CamelCaseDecoderPlug` to your api pipeline:
```elixir
# router.ex
pipeline :api do
plug :accepts, ["json"]
plug Casex.CamelCaseDecoderPlug
end
```
Now, all request bodies and params will be converted to snake case.
2. Add `Casex.CamelCaseEncoder` as json format encoder for phoenix:
```elixir
# config.exs
config :phoenix, :format_encoders, json: Casex.CamelCaseEncoder
```
Now all outgoing JSON response bodies will be converted to camel case.
"""
alias Casex.Serializable
@doc """
Converts all keys of a map to snake case.
If the map is a struct with no `Enumerable` implementation, the value is returned without conversion.
## Examples
iex> data = %{
...> "user" => %{
...> "firstName" => "James",
...> "lastName" => "Kirk",
...> "crew" => [
...> %{"name" => "Spock", "serialNumber" => "S 179-276 SP"},
...> %{"name" => "Scotty", "serialNumber" => "SE 19754 T"}
...> ]
...> }
...> }
iex> Casex.to_snake_case(data)
%{
"user" => %{
"first_name" => "James",
"last_name" => "Kirk",
"crew" => [
%{"name" => "Spock", "serial_number" => "S 179-276 SP"},
%{"name" => "Scotty", "serial_number" => "SE 19754 T"}
]
}
}
"""
@spec to_snake_case(data :: term()) :: term()
def to_snake_case(data) when is_map(data) do
data
|> Enum.map(fn {key, value} -> {snake_case(key), to_snake_case(value)} end)
|> Enum.into(%{})
rescue
Protocol.UndefinedError -> data
end
def to_snake_case(data) when is_list(data) do
Enum.map(data, &to_snake_case/1)
end
def to_snake_case(data), do: data
defp snake_case(value) when is_atom(value) do
value
|> to_string()
|> snake_case()
end
defp snake_case(value) when is_binary(value) do
Recase.to_snake(value)
end
@doc """
Converts all keys of a map to camel case.
If the map is a struct with no `Enumerable` implementation, the value is returned without conversion.
## Examples
iex> data = %{
...> user: %{
...> first_name: "James",
...> last_name: "Kirk",
...> crew: [
...> %{name: "Spock", serial_number: "S 179-276 SP"},
...> %{name: "Scotty", serial_number: "SE 19754 T"}
...> ]
...> }
...> }
iex> Casex.to_camel_case(data)
%{
"user" => %{
"firstName" => "James",
"lastName" => "Kirk",
"crew" => [
%{"name" => "Spock", "serialNumber" => "S 179-276 SP"},
%{"name" => "Scotty", "serialNumber" => "SE 19754 T"}
]
}
}
"""
@spec to_camel_case(data :: term()) :: term()
def to_camel_case(data) when is_map(data) do
result = Serializable.serialize(data)
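# Serializable.serialize/1 returns either a plain map, or a {map, overrides}
# tuple whose overrides pin exact output keys that bypass automatic camel-casing.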
case result do
{map, dict} ->
map
|> Enum.map(fn {key, value} ->
{Map.get_lazy(dict, key, fn -> camel_case(key) end), to_camel_case(value)}
end)
|> Enum.into(%{})
map ->
map
|> Enum.map(fn {key, value} -> {camel_case(key), to_camel_case(value)} end)
|> Enum.into(%{})
end
rescue
Protocol.UndefinedError -> data
end
def to_camel_case(data) when is_list(data) do
Enum.map(data, &to_camel_case/1)
end
def to_camel_case(data), do: Serializable.serialize(data)
defp camel_case(value) when is_atom(value) do
value
|> to_string()
|> camel_case()
end
defp camel_case(value) when is_binary(value) do
Recase.to_camel(value)
end
end
|
lib/casex.ex
| 0.857351
| 0.866076
|
casex.ex
|
starcoder
|
defmodule AlgorithmSelector do
@moduledoc """
Determines which weighting and summing strategy to use to validate a
bank account number.
"""
@doc """
Checks that a given bank_id, bank_branch, and base have the expected
values or fall within the expected ranges.
Returns {:ok, <summing strategy>, <weighting>} or {:error, <message>} if
the bank_id, bank_branch, and base are invalid.
## Examples
iex> AlgorithmSelector.which_algo?(1, 902, 68389)
{:ok, :sum, :a}
iex> AlgorithmSelector.which_algo?(1, 3333, 68389)
{:error, "Invalid bank_branch: 3333"}
"""
@spec which_algo?( number, number, number ) :: { :ok, atom, atom } | { :error, String.t }
def which_algo?( bank_id, bank_branch, base )
def which_algo?( 1, bank_branch, base )
when bank_branch in 1..999
when bank_branch in 1100..1199
when bank_branch in 1800..1899 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 1, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 2, bank_branch, base )
when bank_branch in 1..999
when bank_branch in 1200..1299 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 2, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 3, bank_branch, base )
when bank_branch in 1..999
when bank_branch in 1300..1399
when bank_branch in 1500..1599
when bank_branch in 1700..1799
when bank_branch in 1900..1999 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 3, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 6, bank_branch, base )
when bank_branch in 1..999
when bank_branch in 1400..1499 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 6, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 8, bank_branch, _ )
when bank_branch in 6500..6599 do
{ :ok, :sum, :d }
end
def which_algo?( 8, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 9, 0, _ ) do
{ :ok, :sum, :e }
end
def which_algo?( 9, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}"}
end
def which_algo?( 11, bank_branch, base )
when bank_branch in 5000..6499
when bank_branch in 6600..8999 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 11, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 12, bank_branch, base )
when bank_branch in 3000..3299
when bank_branch in 3400..3499
when bank_branch in 3600..3699 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 12, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 13, bank_branch, base )
when bank_branch in 4900..4999 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 13, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 14, bank_branch, base )
when bank_branch in 4700..4799 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 14, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 15, bank_branch, base )
when bank_branch in 3900..3999 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 15, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 16, bank_branch, base )
when bank_branch in 4400..4499 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 16, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 17, bank_branch, base )
when bank_branch in 3300..3399 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 17, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 18, bank_branch, base )
when bank_branch in 3500..3599 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 18, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 19, bank_branch, base )
when bank_branch in 4600..4649 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 19, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 20, bank_branch, base )
when bank_branch in 4100..4199 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 20, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 21, bank_branch, base )
when bank_branch in 4800..4899 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 21, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 22, bank_branch, base )
when bank_branch in 4000..4049 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 22, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 23, bank_branch, base )
when bank_branch in 3700..3799 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 23, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 24, bank_branch, base )
when bank_branch in 4300..4349 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 24, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 25, bank_branch, _ )
when bank_branch in 2500..2599 do
{ :ok, :sum, :f }
end
def which_algo?( 25, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 26, bank_branch, _ )
when bank_branch in 2600..2699 do
{ :ok, :sum_digits, :g }
end
def which_algo?( 26, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 27, bank_branch, base )
when bank_branch in 3800..3849 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 27, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 28, bank_branch, _ )
when bank_branch in 2100..2149 do
{ :ok, :sum_digits, :g }
end
def which_algo?( 28, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 29, bank_branch, _ )
when bank_branch in 2150..2299 do
{ :ok, :sum_digits, :g }
end
def which_algo?( 29, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 30, bank_branch, base )
when bank_branch in 2900..2949 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 30, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 31, bank_branch, _ )
when bank_branch in 2800..2849 do
{ :ok, :sum, :x }
end
def which_algo?( 31, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 33, bank_branch, _ )
when bank_branch in 6700..6799 do
{ :ok, :sum, :f }
end
def which_algo?( 33, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 35, bank_branch, base )
when bank_branch in 2400..2499 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 35, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( 38, bank_branch, base )
when bank_branch in 9000..9499 do
case base do
x when x in 0..899999 -> { :ok, :sum, :a }
x when x in 900000..99999999 -> { :ok, :sum, :b }
_ -> { :error, "Invalid base: #{base}" }
end
end
def which_algo?( 38, bank_branch, _ ) do
{ :error, "Invalid bank_branch: #{bank_branch}" }
end
def which_algo?( bank_id, _, _) do
{ :error, "Invalid bank_id: #{bank_id}" }
end
end
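# Every per-bank clause above repeats the same base-range check. A minimal
# sketch of factoring it out (hypothetical helper, not part of the module):
#
# defp algo_for_base(base) do
# case base do
# x when x in 0..899999 -> { :ok, :sum, :a }
# x when x in 900000..99999999 -> { :ok, :sum, :b }
# _ -> { :error, "Invalid base: #{base}" }
# end
# end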
|
lib/algorithm_selector.ex
| 0.76145
| 0.734976
|
algorithm_selector.ex
|
starcoder
|
defmodule BatchPlease do
@moduledoc ~S"""
BatchPlease is a tool for collecting batches of items, and doing something
with each batch when it reaches a certain size or age.
It is built on top of GenServer, implemented as a behaviour,
and usually invoked through `use BatchPlease`.
Simple/trivial usage example:
defmodule Summer do
use BatchPlease, max_batch_size: 3
def batch_init(_opts) do
{:ok, %{sum: 0}}
end
def batch_add_item(batch, item) do
{:ok, %{batch | sum: batch.sum + item}}
end
def batch_flush(batch) do
IO.puts("This batch added up to #{batch.sum}")
:ok
end
end
{:ok, pid} = GenServer.start_link(Summer, [])
BatchPlease.add_item(pid, 1)
BatchPlease.add_item(pid, 2)
BatchPlease.add_item(pid, 3) # prints "This batch added up to 6"
BatchPlease.add_item(pid, 4)
BatchPlease.add_item(pid, 5)
BatchPlease.add_item(pid, 6) # prints "This batch added up to 15"
It is useful to build specialized batch collectors on top of BatchPlease.
Examples of this approach include `BatchPlease.MemoryBatcher`
(which stores items in memory when batching) and `BatchPlease.FileBatcher`
(which encodes items to string format and accumulates them in an on-disk
file until ready for processing).
## Behaviour
The BatchPlease behaviour consists of several required callbacks:
* `batch_init/1` takes a keyword list of options (from `use BatchPlease`
and `GenServer.start_link/2-3`) and returns `{:error, msg}` or
`{:ok, state}`, where `state` is a map representing the state of
a single batch. This function is called at the start of every batch.
* `batch_add_item/2` adds an item to the batch state, returning
`{:ok, state}` or `{:error, msg}`.
* `batch_flush/1` performs an operation on the batch when it is ready
for processing. It returns `:ok` or `{:error, msg}`; returning an
updated state is not necessary, because if `batch_flush/1` returns
successfully, a new batch is created with `batch_init/1`.
As well as some optional callbacks:
* `batch_pre_flush/1` can be used to perform pre-processing on a batch
before `batch_flush/1` is called.
* `batch_post_flush/1` can be used to perform post-processing on a batch
after `batch_flush/1` returns successfully.
* `batch_terminate/1` performs cleanup on a batch when the batch server
is terminating. Warning: it hooks into `GenServer.terminate/2`, which
is not guaranteed to execute upon server shutdown! See the GenServer
docs for more information:
https://hexdocs.pm/elixir/GenServer.html#c:terminate/2
* `should_flush/1` is used to implement custom logic to determine when to
flush a batch.
## Options
BatchPlease supports several options to customize operation.
They deal with flushing. Each may be set to `nil` to disable
that mode of behavior.
* `max_batch_size: X` can be set to a positive integer to specify that
flushing should occur when the batch hits size `X`.
This check takes place before and after every `batch_add_item/2` call.
* `max_time_since_last_flush: X` can be set to a positive integer to specify
that a batch should flush if at least `X` milliseconds have passed since
the last flush.
This check takes place before and after every `batch_add_item/2` call.
* `max_time_since_first_item: X` can be set to a positive integer to specify
that a batch should flush if at least `X` milliseconds have passed
since the time the first item was added to the current batch.
This check takes place before and after every `batch_add_item/2` call.
* `flush_interval: X` can be set to a positive integer to specify that
flushing should take place every `X` milliseconds. This option uses an
internal timer, and is therefore independent from `batch_add_item/2`.
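For example, a batcher that flushes once it holds 100 items or after 5
seconds, whichever comes first (values are illustrative):
defmodule EventBatcher do
use BatchPlease, max_batch_size: 100, flush_interval: 5_000
# batch_init/1, batch_add_item/2 and batch_flush/1 as in the Summer example
end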
"""
#### `use BatchPlease`
@doc false
defmacro __using__(opts) do
quote do
use GenServer
@impl GenServer
def init(args), do: BatchPlease.Impl.init(args ++ unquote(opts), __MODULE__)
@impl GenServer
def handle_call(msg, from, state), do: BatchPlease.Impl.handle_call(msg, from, state)
@impl GenServer
def handle_cast(msg, state), do: BatchPlease.Impl.handle_cast(msg, state)
@impl GenServer
def handle_info(msg, state), do: BatchPlease.Impl.handle_info(msg, state)
@impl GenServer
def terminate(reason, state), do: BatchPlease.Impl.terminate(reason, state)
@behaviour BatchPlease
end
end
#### Types
@typedoc ~S"""
A GenServer performing as a batch server.
"""
@type batch_server :: pid
@typedoc ~S"""
A map representing the internal state of a batch server. This map
contains a `batch` key, representing the state of the current batch.
"""
@type state :: %{
opts: opts,
module: atom,
batch: batch,
last_item: item | nil,
flush_timer: pid,
config: state_config,
overrides: state_overrides,
counts: state_counts,
times: state_times,
}
@type state_config :: %{
lazy_flush: boolean | nil,
max_batch_size: non_neg_integer | nil,
max_time_since_last_flush: non_neg_integer | nil,
max_time_since_first_item: non_neg_integer | nil,
flush_interval: non_neg_integer | nil,
}
@type state_overrides :: %{
batch_init: ((opts) -> batch_return) | nil,
batch_add_item: ((batch, item) -> batch_return) | nil,
batch_pre_flush: ((batch) -> batch_return) | nil,
batch_flush: ((batch) -> ok_or_error) | nil,
batch_post_flush: ((batch) -> ok_or_error) | nil,
batch_terminate: ((batch) -> ok_or_error) | nil,
should_flush: ((state) -> boolean) | nil,
}
@type state_counts :: %{
batch_items: non_neg_integer,
total_items: non_neg_integer,
flushes: non_neg_integer,
}
@type state_times :: %{
first_item_of_batch: integer | nil,
last_flush: integer | nil,
}
@type state_return :: {:ok, state} | {:error, String.t}
@type reply_return :: {ok_or_error, state}
@typedoc ~S"""
A map representing the state of the current batch.
Contents of this map are implementation-specific.
"""
@type batch :: map
@typedoc ~S"""
Represents the return value of functions which generate a new batch state
(`batch_init/1` and `batch_add_item/2`).
"""
@type batch_return :: {:ok, batch} | {:error, String.t}
@typedoc ~S"""
Return value of functions which may fail, but do not return a new batch
state (`batch_flush/1` and `batch_terminate/1`).
"""
@type ok_or_error :: :ok | {:error, String.t}
@typedoc ~S"""
Any item that can be added to a batch.
"""
@type item :: any
@typedoc ~S"""
Configuration parameters for a batch server.
"""
@type opts :: [option]
@type option ::
{:max_batch_size, non_neg_integer | nil} |
{:max_time_since_last_flush, non_neg_integer | nil} |
{:max_time_since_first_item, non_neg_integer | nil} |
{:flush_interval, non_neg_integer | nil}
#### Behaviour
@doc ~S"""
Creates a new batch state, given the configuration options in `opts`,
which come from the options given in `use BatchPlease` and
`GenServer.start_link/2-3`.
This function is called every time a new batch is created (i.e., at startup
and after every flush).
Returns the new batch state or an error message.
"""
@callback batch_init(opts) :: batch_return
@doc ~S"""
Adds an item to the batch.
Returns the updated batch state or an error message.
"""
@callback batch_add_item(batch, item) :: batch_return
@doc ~S"""
Performs some pre-processing on the batch before it is passed to
`batch_flush/1`. Returns the updated batch state or an error message.
This is an optional callback.
"""
@callback batch_pre_flush(batch) :: batch_return
@doc ~S"""
Processes the batch, whatever that entails.
Returns `:ok` on success, or `{:error, message}` otherwise.
"""
@callback batch_flush(batch) :: ok_or_error
@doc ~S"""
Performs some post-processing on the batch after `batch_flush/1`
has completed successfully. Does not return an updated batch, because
this operation is immediately followed by `batch_init/1` to create a new
batch.
Returns `:ok` on success, or `{:error, message}` otherwise.
This is an optional callback.
"""
@callback batch_post_flush(batch) :: ok_or_error
@doc ~S"""
Cleans up batch state before the batcher process is terminated.
Defaults to no-op. This is not guaranteed to be called at
termination time -- for more information, see:
https://hexdocs.pm/elixir/GenServer.html#c:terminate/2
Returns `:ok` on success, or `{:error, message}` otherwise.
This is an optional callback.
"""
@callback batch_terminate(batch) :: ok_or_error
@doc ~S"""
Given the current module state, returns whether the current batch
should be processed now. Does not prevent the evaluation of other
flushing options (e.g., `max_batch_size`, etc).
This is an optional callback.
"""
@callback should_flush(state) :: boolean
@optional_callbacks [
batch_terminate: 1,
batch_pre_flush: 1,
batch_post_flush: 1,
should_flush: 1,
]
#### Public API
@doc ~S"""
Adds an item to a batch asynchronously.
Set `opts[:error_pid]` to receive a message of the form `{:error, msg}`
in case this operation fails. Alternately, use `sync_add_item/2` for
a synchronous version of this function.
Returns `:ok`.
"""
@spec add_item(batch_server, item, Keyword.t) :: :ok
def add_item(batch_server, item, opts \\ []) do
GenServer.cast(batch_server, {:add_item, item, opts[:error_pid]})
end
@doc ~S"""
Adds an item to a batch synchronously.
Causes a flush if appropriate, also synchronously.
Returns `:ok` or `{:error, msg}`.
"""
@spec sync_add_item(batch_server, item) :: :ok | {:error, String.t}
def sync_add_item(batch_server, item) do
GenServer.call(batch_server, {:add_item, item})
end
@doc ~S"""
Forces the processing and flushing of a batch asynchronously.
Set `opts[:error_pid]` to receive a message of the form `{:error, msg}`
in case this operation fails. Alternately, use `sync_flush/1` for
a synchronous version of this function.
Returns `:ok`.
"""
@spec flush(batch_server, Keyword.t) :: :ok
def flush(batch_server, opts \\ []) do
GenServer.cast(batch_server, {:flush, opts[:error_pid]})
end
@doc ~S"""
Forces the processing and flushing of a batch synchronously.
Returns `:ok` or `{:error, msg}`.
"""
@spec sync_flush(batch_server) :: :ok | {:error, String.t}
def sync_flush(batch_server) do
GenServer.call(batch_server, :flush)
end
@doc false
def get_internal_state(batch_server) do
GenServer.call(batch_server, :get_internal_state)
end
end
|
lib/batch_please.ex
| 0.789031
| 0.560132
|
batch_please.ex
|
starcoder
|
defmodule ExRets.DigestAccessAuthentication.Challenge do
@moduledoc false
@moduledoc since: "0.1.0"
defstruct realm: nil,
domain: [],
nonce: nil,
opaque: nil,
stale: false,
algorithm: :md5,
qop: []
@typedoc "Digest access authentication challenge as described in RFC 2617 section 3.2.1."
@typedoc since: "0.1.0"
@type t :: %__MODULE__{
realm: realm(),
domain: domain(),
nonce: nonce(),
opaque: opaque(),
stale: stale(),
algorithm: algorithm(),
qop: qop_options()
}
@typedoc "A string to be displayed to users so they know which username and password to use."
@typedoc since: "0.1.0"
@type realm :: String.t()
@typedoc "A list of `t:URI.t/0` that define the protection space."
@typedoc since: "0.1.0"
@type domain :: [URI.t()]
@typedoc """
A server-specified data string which should be uniquely generated each time a 401 response is
made.
"""
@typedoc since: "0.1.0"
@type nonce :: String.t()
@typedoc """
A string of data, specified by the server, which should be returned by the client unchanged in
the Authorization header of subsequent requests with URIs in the same protection space.
"""
@typedoc since: "0.1.0"
@type opaque :: String.t() | nil
@typedoc """
Boolean indicating that the previous request from the client was rejected because the nonce
value was stale.
If `true`, the client may wish to simply retry the request with a new encrypted
response, without reprompting the user for a new username and password.
"""
@typedoc since: "0.1.0"
@type stale :: boolean()
@typedoc "Algorithm used to produce the digest and a checksum."
@typedoc since: "0.1.0"
@type algorithm :: :md5 | :md5_sess
@typedoc """
List of `t:qop_value/0` indicating the "quality of protection" values supported by the server.
"""
@typedoc since: "0.1.0"
@type qop_options :: [qop_value()]
@typedoc """
"Quality of protection" value.
Possible values include:
* `:auth` - indicates authentication
* `:auth_int` - indicates authentication with integrity protection
"""
@typedoc since: "0.1.0"
@type qop_value :: :auth | :auth_int
@doc """
Parses a digest access authentication `challenge` string.
Uses `request_uri` to fully qualify any relative paths in the `domain` directive.
## Examples
iex> challenge = \"""
...> realm="<EMAIL>",
...> nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
...> domain="/search"
...> \"""
iex> request_uri = URI.parse("https://example.com/login")
iex> ExRets.DigestAccessAuthentication.Challenge.parse(challenge, request_uri)
{:ok,
%ExRets.DigestAccessAuthentication.Challenge{
realm: "<EMAIL>",
nonce: "dcd98b7102dd2f0e8b11d0f600bfb0c093",
domain: [
%URI{
host: "example.com",
path: "/search",
authority: "example.com",
port: 443,
scheme: "https"
}
]
}
}
iex> ExRets.DigestAccessAuthentication.Challenge.parse("", %URI{})
{:error, ["missing realm", "missing nonce"]}
"""
@doc since: "0.1.0"
@spec parse(challenge :: String.t(), request_uri :: URI.t()) ::
{:ok, t()} | {:error, reasons :: [String.t()]}
def parse(challenge, %URI{} = request_uri) when is_binary(challenge) do
challenge
|> parse_directives()
|> new_challenge_from_directives(request_uri)
end
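# Captures `directive="value"` pairs; directives with unquoted values are ignored.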
@directives_regex ~r/(?<directive>\w+)="(?<value>[^"]*)"/
defp parse_directives(challenge) do
@directives_regex
|> Regex.scan(challenge, capture: :all_names)
|> Enum.into(%{}, fn [k, v] -> {k, String.trim(v)} end)
end
defp new_challenge_from_directives(directives, request_uri) do
directives
|> Enum.reduce({%__MODULE__{}, []}, &reduce_directives/2)
|> handle_relative_domain_uris(request_uri)
|> check_realm()
|> check_nonce()
|> return_challenge_or_errors()
end
defp reduce_directives({"realm", realm}, {challenge, errors}) do
{%__MODULE__{challenge | realm: realm}, errors}
end
defp reduce_directives({"domain", domain}, {challenge, errors}) do
parsed_domain =
domain
|> String.split()
|> Enum.map(&URI.parse/1)
{%__MODULE__{challenge | domain: parsed_domain}, errors}
end
defp reduce_directives({"nonce", nonce}, {challenge, errors}) do
{%__MODULE__{challenge | nonce: nonce}, errors}
end
defp reduce_directives({"opaque", opaque}, {challenge, errors}) do
{%__MODULE__{challenge | opaque: opaque}, errors}
end
defp reduce_directives({"stale", stale}, {challenge, errors}) do
parsed_stale = String.downcase(stale) == "true"
{%__MODULE__{challenge | stale: parsed_stale}, errors}
end
defp reduce_directives({"algorithm", algorithm}, {challenge, errors}) do
case String.downcase(algorithm) do
"md5" -> {%__MODULE__{challenge | algorithm: :md5}, errors}
"md5-sess" -> {%__MODULE__{challenge | algorithm: :md5_sess}, errors}
_ -> {challenge, [~s/unknown algorithm "#{algorithm}"/ | errors]}
end
end
defp reduce_directives({"qop", qop_options}, {challenge, errors}) do
parsed_qop_options =
qop_options
|> String.split(",")
|> Enum.reduce([], fn
"auth", acc -> [:auth | acc]
"auth-int", acc -> [:auth_int | acc]
_, acc -> acc
end)
|> Enum.reverse()
{%__MODULE__{challenge | qop: parsed_qop_options}, errors}
end
defp reduce_directives(_, challenge_and_errors), do: challenge_and_errors
defp handle_relative_domain_uris({%__MODULE__{} = challenge, errors}, request_uri) do
domain =
Enum.map(challenge.domain, fn
%URI{host: nil} = domain_uri -> %URI{request_uri | path: domain_uri.path}
domain_uri -> domain_uri
end)
{%__MODULE__{challenge | domain: domain}, errors}
end
defp check_realm({%__MODULE__{realm: nil} = challenge, errors}) do
{challenge, ["missing realm" | errors]}
end
defp check_realm(challenge_and_errors), do: challenge_and_errors
defp check_nonce({%__MODULE__{nonce: nil} = challenge, errors}) do
{challenge, ["missing nonce" | errors]}
end
defp check_nonce(challenge_and_errors), do: challenge_and_errors
defp return_challenge_or_errors({challenge, errors}) when errors == [] do
{:ok, challenge}
end
defp return_challenge_or_errors({_challenge, errors}) do
{:error, Enum.reverse(errors)}
end
end
|
lib/ex_rets/digest_access_authentication/challenge.ex
| 0.923782
| 0.549399
|
challenge.ex
|
starcoder
|
defmodule AlchemyVM.HostFunction do
@moduledoc """
Exposes a DSL for defining and importing host functions
"""
@doc false
defmacro __using__(_opts) do
quote do
import AlchemyVM.HostFunction
@before_compile AlchemyVM.HostFunction
Module.register_attribute(__MODULE__, :host_funcs, accumulate: true)
end
end
@doc """
Defines a host function that can be passed in to the VM using `create_imports/1`
Will use the name of the module that it's defined in as the name of the
corresponding WebAssembly module that this host function can be imported from.
`fname` can be a string or an atom. A variable called `ctx` is available
within the context of the macro body as a pointer to VM state, to be used
with functions defined in `AlchemyVM.HostFunction.API`.
## Usage
Create an Elixir module that will be used to import host functions into
WebAssembly:
defmodule Math do
use AlchemyVM.HostFunction
defhost add(a, b) do
a + b
end
end
Somewhere in your app:
defmodule MyWaspApp do
def start do
{:ok, pid} = AlchemyVM.start()
imports = AlchemyVM.HostFunction.create_imports(Math)
AlchemyVM.load_file(pid, "path/to/wasm/file.wasm", imports)
end
end
In the above "path/to/wasm/file.wasm", the host function can now be imported:
(import "Math" "add" (func (param i32 i32) (result i32)))
Note that the Elixir module name was used to define the WebAssembly module name
that's being used for the import.
"""
defmacro defhost(head, do: block) do
{fname, args} = Macro.decompose_call(head)
name = to_string(fname)
quote generated: true do
def hostfunc(unquote(name), unquote(args), var!(ctx)), do: unquote(block)
Module.put_attribute(__MODULE__, :host_funcs, unquote(name))
end
end
@doc """
Pass in an Elixir module or list of Elixir modules that implement `defhost`
calls to generate imports for WebAssembly to be passed in when loading a
WebAssembly module into the VM
## Usage
When using a single module to define imports:
AlchemyVM.HostFunction.create_imports(Module1)
Functions will be accessible in the WebAssembly module as:
(import "Module1" "function_name")
When using multiple modules to define imports:
AlchemyVM.HostFunction.create_imports([Module1, Module2, Module3])
Functions will be accessible in the WebAssembly module as:
(import "Module1" "function_name")
(import "Module2" "function_name")
(import "Module3" "function_name")
"""
@spec create_imports(list | atom) :: map
def create_imports(modules) when is_list(modules) do
Enum.reduce(modules, %{}, fn mod, acc ->
"Elixir." <> mod_string = to_string(mod)
Map.put(acc, mod_string, create_import(mod))
end)
end
def create_imports(module), do: create_imports([module])
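# Builds the import map for a single module: every name registered via
# `defhost` maps to a closure dispatching to the generated hostfunc/3 clause.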
defp create_import(module) do
module
|> apply(:hostfuncs, [])
|> Enum.reduce(%{}, fn fname, acc ->
Map.put(acc, fname, fn ctx, args -> apply(module, :hostfunc, [fname, args, ctx]) end)
end)
end
defmacro __before_compile__(_env) do
quote do
def hostfuncs, do: @host_funcs
end
end
end
|
lib/execution/host_function.ex
| 0.821044
| 0.406067
|
host_function.ex
|
starcoder
|
defmodule ConCache.Item do
@moduledoc """
This struct can be used in place of naked values to set per-item TTL values.
"""
defstruct value: nil, ttl: 0
@type t :: %ConCache.Item{value: ConCache.value, ttl: pos_integer}
end
defmodule ConCache do
@moduledoc """
Implements an ETS based key/value storage with following additional features:
- row level synchronized writes (inserts, read/modify/write updates, deletes)
- TTL support
- modification callbacks
Example usage:
ConCache.start_link([], name: :my_cache)
ConCache.put(:my_cache, :foo, 1)
ConCache.get(:my_cache, :foo) # 1
The following rules apply:
- Modifications are by isolated per row. Two processes can't modify the same
row at the same time. Dirty operations are available through `dirty_` equivalents.
- Reads are dirty by default. You can use `isolated/4` to perform isolated custom
operations.
- Operations are always performed in the caller process. Custom lock implementation
is used to ensure synchronism. See `README.md` for more details.
- By default, items don't expire. You can change this with `:ttl` and `:ttl_check`
options.
- Expiry of an item is by default extended only on modifications. This can be changed
while starting the cache.
- In all store operations, you can use `ConCache.Item` struct instead of naked values,
if you need fine-grained control of item's TTL.
See `start_link/2` for more details.
"""
alias ConCache.Owner
alias ConCache.Operations
defstruct [
:owner_pid, :ets, :ttl_manager, :ttl, :acquire_lock_timeout, :callback, :touch_on_read
]
@type t :: pid | atom | {:global, any} | {:via, atom, any}
@type key :: any
@type value :: any
@type store_value :: value | ConCache.Item.t
@type callback_fun :: (({:update, pid, key, value} | {:delete, pid, key}) -> any)
@type ets_option ::
:named_table | :compressed | {:heir, pid} |
{:write_concurrency, boolean} | {:read_concurrency, boolean} |
:ordered_set | :set | {:name, atom}
@type options :: [
{:ttl, non_neg_integer} |
{:acquire_lock_timeout, pos_integer} |
{:callback, callback_fun} |
{:touch_on_read, boolean} |
{:ttl_check, non_neg_integer} |
{:time_size, pos_integer} |
{:ets_options, [ets_option]}
]
@type update_fun :: ((value) -> {:ok, store_value} | {:error, any})
@type store_fun :: (() -> store_value)
@doc """
Starts the server and creates an ETS table.
Options:
- `:set` - An ETS table will be of the `:set` type (default).
- `:ordered_set` - An ETS table will be of the `:ordered_set` type.
- `{:ttl_check, time_ms}` - A check interval for TTL expiry. This value is
by default `nil` and you need to provide a positive integer for TTL to work.
See below for more details on inner workings of TTL.
- `{:ttl, time_ms}` - The default time after which an item expires.
When an item expires, it is removed from the cache. Updating the item
extends its expiry time. By default, items never expire.
- `{:touch_on_read, true | false}` - Controls whether read operation extends
expiry of items. False by default.
- `{:callback, callback_fun}` - If provided, this function is invoked __after__
an item is inserted or updated, or __before__ it is deleted.
- `{:acquire_lock_timeout, timeout_ms}` - The time a client process waits for
the lock. Default is 5000.
In addition, following ETS options are supported:
- `:named_table`
- `:name`
- `:heir`
- `:write_concurrency`
- `:read_concurrency`
## Choosing ttl_check time
When TTL is configured, the owner process works in discrete steps, doing
cleanups every `ttl_check_time` milliseconds. This approach allows the owner
process to do fairly small amount of work in each discrete step.
Assuming there's no huge system overload, an item's max lifetime is thus
`ttl_time + ttl_check_time` [ms], after the last item's update.
Thus, lower value of ttl_check time means more frequent purging which may
reduce your memory consumption, but could also cause performance penalties.
Higher values put less pressure on processing, but item expiry is less precise.
"""
@spec start_link(options, GenServer.options) :: GenServer.on_start
def start_link(options \\ [], gen_server_options \\ []) do
Owner.start_link(options, gen_server_options)
end
@doc """
Starts the server.
See `start_link/2` for more details.
"""
@spec start(options, GenServer.options) :: GenServer.on_start
def start(options \\ [], gen_server_options \\ []) do
Owner.start(options, gen_server_options)
end
@doc """
Returns the ets table managed by the cache.
"""
@spec ets(t) :: :ets.tab
def ets(cache_id), do: Operations.ets(Owner.cache(cache_id))
@doc """
Reads the item from the cache.
A read is always "dirty", meaning it doesn't block while someone is updating
the item under the same key. A read doesn't expire TTL of the item, unless
`touch_on_read` option is set while starting the cache.
"""
@spec get(t, key) :: value
def get(cache_id, key), do: Operations.get(Owner.cache(cache_id), key)
@doc """
Stores the item into the cache.
"""
@spec put(t, key, store_value) :: :ok
def put(cache_id, key, value),
do: Operations.put(Owner.cache(cache_id), key, value)
@doc """
Returns the number of items stored in the cache.
"""
@spec size(t) :: non_neg_integer
def size(cache_id),
do: Operations.size(Owner.cache(cache_id))
@doc """
Dirty equivalent of `put/3`.
"""
@spec dirty_put(t, key, store_value) :: :ok
def dirty_put(cache_id, key, value),
do: Operations.dirty_put(Owner.cache(cache_id), key, value)
@doc """
Inserts the item into the cache unless it exists.
"""
@spec insert_new(t, key, store_value) :: :ok | {:error, :already_exists}
def insert_new(cache_id, key, value),
do: Operations.insert_new(Owner.cache(cache_id), key, value)
@doc """
Dirty equivalent of `insert_new/3`.
"""
@spec dirty_insert_new(t, key, store_value) :: :ok | {:error, :already_exists}
def dirty_insert_new(cache_id, key, value),
do: Operations.insert_new(Owner.cache(cache_id), key, value)
@doc """
Updates the item, or stores new item if it doesn't exist.
The `update_fun` is invoked after the item is locked. Here, you can be certain
that no other process will update this item, unless they are doing dirty updates
or writing directly to the underlying ETS table.
The updater lambda must return one of the following:
- `{:ok, value}` - causes the value to be stored into the table
- `{:error, reason}` - the value is not stored and `{:error, reason}` is returned to the caller
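For example, a sketch of an isolated counter increment (cache name and key
are illustrative; assumes the updater receives `nil` when the key is absent):
ConCache.update(:my_cache, :counter, fn old -> {:ok, (old || 0) + 1} end)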
"""
@spec update(t, key, update_fun) :: :ok | {:error, any}
def update(cache_id, key, update_fun),
do: Operations.update(Owner.cache(cache_id), key, update_fun)
@doc """
Dirty equivalent of `update/3`.
"""
@spec dirty_update(t, key, update_fun) :: :ok | {:error, any}
def dirty_update(cache_id, key, update_fun),
do: Operations.dirty_update(Owner.cache(cache_id), key, update_fun)
@doc """
Updates the item only if it exists. Otherwise works just like `update/3`.
"""
@spec update_existing(t, key, update_fun) :: :ok | {:error, :not_existing} | {:error, any}
def update_existing(cache_id, key, update_fun),
do: Operations.update_existing(Owner.cache(cache_id), key, update_fun)
@doc """
Dirty equivalent of `update_existing/3`.
"""
@spec dirty_update_existing(t, key, update_fun) :: :ok | {:error, :not_existing} | {:error, any}
def dirty_update_existing(cache_id, key, update_fun),
do: Operations.dirty_update_existing(Owner.cache(cache_id), key, update_fun)
@doc """
Deletes the item from the cache.
"""
@spec delete(t, key) :: :ok
def delete(cache_id, key), do: Operations.delete(Owner.cache(cache_id), key)
@doc """
Dirty equivalent of `delete/2`.
"""
@spec dirty_delete(t, key) :: :ok
def dirty_delete(cache_id, key), do: Operations.dirty_delete(Owner.cache(cache_id), key)
@doc """
Retrieves the item from the cache, or inserts the new item.
If the item exists in the cache, it is retrieved. Otherwise, the lambda
function is executed and its result is stored under the given key.
Note: if the item is already in the cache, this function amounts to a simple get
without any locking, so you can expect it to be fairly fast.
"""
@spec get_or_store(t, key, store_fun) :: value
def get_or_store(cache_id, key, store_fun),
do: Operations.get_or_store(Owner.cache(cache_id), key, store_fun)
@doc """
Dirty equivalent of `get_or_store/3`.
"""
@spec dirty_get_or_store(t, key, store_fun) :: value
def dirty_get_or_store(cache_id, key, store_fun),
do: Operations.dirty_get_or_store(Owner.cache(cache_id), key, store_fun)
@doc """
Manually touches the item to prolongate its expiry.
"""
@spec touch(t, key) :: :ok
def touch(cache_id, key), do: Operations.touch(Owner.cache(cache_id), key)
@doc """
Isolated execution over arbitrary lock in the cache.
You can do whatever you want in the function, not necessarily related to the
cache. The return value is the result of the provided lambda.
This allows you to perform flexible isolation. If you use the key
of your item as a `key`, then this operation will be exclusive to
updates. This can be used e.g. to perform isolated reads:
# Process A:
ConCache.isolated(:my_cache, :my_item_key, fn() -> ... end)
# Process B:
ConCache.update(:my_cache, :my_item, fn(old_value) -> ... end)
These two operations are mutually exclusive.
"""
@spec isolated(t, key, nil | pos_integer, (() -> any)) :: any
def isolated(cache_id, key, timeout \\ nil, fun),
do: Operations.isolated(Owner.cache(cache_id), key, timeout, fun)
@doc """
Similar to `isolated/4` except it doesn't wait for the lock to be available.
If the lock can be acquired immediately, it will be acquired and the function
will be invoked. Otherwise, an error is returned immediately.
"""
@spec try_isolated(t, key, nil | pos_integer, (() -> any)) :: {:error, :locked} | {:ok, any}
def try_isolated(cache_id, key, timeout \\ nil, on_success),
do: Operations.try_isolated(Owner.cache(cache_id), key, timeout, on_success)
end
|
lib/con_cache.ex
| 0.895583
| 0.457076
|
con_cache.ex
|
starcoder
|
defmodule Softmax.AllReduce do
@world_size 3
def start(rank, vector, world_size, caller) do
name = String.to_atom("node_#{inspect(rank)}")
:logger.debug("Rank #{inspect(rank)} - Got: #{inspect(vector)}")
Process.register(self(), name)
normalizer =
vector
|> Matrex.to_list()
|> Enum.map(fn x -> {x, 1} end)
|> Enum.reduce(fn x, acc ->
:logger.debug("Rank: #{inspect(rank)} - Reducing...")
merge(x, acc)
end)
Barrier.synchronize(caller)
:logger.debug("Rank #{inspect(rank)} - Normalizer: #{inspect(normalizer)}")
Utils.synchronize(rank, world_size)
:logger.debug("Rank #{inspect(rank)} - Synchronized!")
normalizer = distribute_normalizer(normalizer, rank, world_size)
:logger.debug("Rank #{inspect(rank)} - Full Normalizer: #{inspect(normalizer)}")
result = rescale(vector, normalizer)
send(caller, {:result, rank, result})
Process.unregister(name)
end
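# Merges two online-softmax partial states {running_max, running_denominator}:
# each denominator is rescaled by exp(m - m3) so both refer to the shared
# maximum m3, which keeps the exponentials numerically stable.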
@spec merge({number(), number()}, {number(), number()}) :: {number(), float()}
def merge({m1, d1}, {m2, d2}) do
m3 = max(m1, m2)
d3 = d1 * :math.exp(m1 - m3) + d2 * :math.exp(m2 - m3)
{m3, d3}
end
def rescale(vector, {mv, dv}) do
vector
|> Matrex.subtract(mv)
|> Matrex.apply(:exp)
|> Matrex.divide(dv)
end
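# Naive all-reduce: each rank sends its local normalizer to every other rank,
# then folds the world_size - 1 incoming normalizers into its own.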
def distribute_normalizer(normalizer, rank, world_size) do
ranks = 0..(world_size - 1)
ranks
|> Enum.map(fn
^rank -> :skip
any_rank -> send_normalizer_to_rank(rank, any_rank, normalizer)
end)
reduce_normalizer(normalizer, rank, world_size - 1)
end
def send_normalizer_to_rank(from_rank, to_rank, normalizer) do
rank_name = String.to_atom("node_#{inspect(to_rank)}")
send({rank_name, node()}, {:normalizer, from_rank, normalizer})
end
def reduce_normalizer(normalizer, rank, 0) do
:logger.debug("Rank #{inspect(rank)} - All normalizers reduced!")
normalizer
end
def reduce_normalizer(this_normalizer, rank, count) do
:logger.debug("Rank #{inspect(rank)} - #{inspect(count)} normalizers remaining")
new_normalizer =
receive do
{:normalizer, _, that_normalizer} ->
merge(this_normalizer, that_normalizer)
end
reduce_normalizer(new_normalizer, rank, count - 1)
end
def softmax(%Matrex{} = vector) do
inputs =
vector
|> Utils.split_matrix(@world_size, :cols)
pids =
0..(@world_size - 1)
|> Enum.zip(inputs)
|> Enum.map(fn {rank, input} ->
spawn_link(__MODULE__, :start, [rank, input, @world_size, self()])
end)
:logger.debug("PIDs: #{inspect pids}")
Barrier.synchronize(pids)
wait_for_results(pids)
end
defp wait_for_results(pids) do
acc =
0..(length(pids) - 1)
|> Enum.map(fn elem -> {elem, %{}} end)
|> Map.new()
wait_for_results(pids, acc)
end
defp wait_for_results([], acc) do
acc
end
defp wait_for_results([_ | pids], acc) do
acc =
receive do
{:result, rank, result} ->
%{acc | rank => result}
end
wait_for_results(pids, acc)
end
end
|
lib/allreduce_softmax.ex
| 0.641198
| 0.504455
|
allreduce_softmax.ex
|
starcoder
|
defmodule Discuss.Discussions do
@moduledoc """
The Discussions context.
"""
import Ecto.Query, warn: false
alias Discuss.Repo
alias Discuss.Discussions.Topic2
@doc """
Returns the list of topics.
## Examples
iex> list_topics()
[%Topic2{}, ...]
"""
def list_topics do
Repo.all(Topic2)
end
@doc """
Gets a single topic2.
Raises `Ecto.NoResultsError` if the Topic2 does not exist.
## Examples
iex> get_topic2!(123)
%Topic2{}
iex> get_topic2!(456)
** (Ecto.NoResultsError)
"""
def get_topic2!(id), do: Repo.get!(Topic2, id)
@doc """
Creates a topic2.
## Examples
iex> create_topic2(%{field: value})
{:ok, %Topic2{}}
iex> create_topic2(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_topic2(attrs \\ %{}) do
%Topic2{}
|> Topic2.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a topic2.
## Examples
iex> update_topic2(topic2, %{field: new_value})
{:ok, %Topic2{}}
iex> update_topic2(topic2, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_topic2(%Topic2{} = topic2, attrs) do
topic2
|> Topic2.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a topic2.
## Examples
iex> delete_topic2(topic2)
{:ok, %Topic2{}}
iex> delete_topic2(topic2)
{:error, %Ecto.Changeset{}}
"""
def delete_topic2(%Topic2{} = topic2) do
Repo.delete(topic2)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking topic2 changes.
## Examples
iex> change_topic2(topic2)
%Ecto.Changeset{data: %Topic2{}}
"""
def change_topic2(%Topic2{} = topic2, attrs \\ %{}) do
Topic2.changeset(topic2, attrs)
end
alias Discuss.Discussions.Comment
@doc """
Returns the list of comments.
## Examples
iex> list_comments()
[%Comment{}, ...]
"""
def list_comments do
Repo.all(Comment)
end
@doc """
Gets a single comment.
Raises `Ecto.NoResultsError` if the Comment does not exist.
## Examples
iex> get_comment!(123)
%Comment{}
iex> get_comment!(456)
** (Ecto.NoResultsError)
"""
def get_comment!(id), do: Repo.get!(Comment, id)
@doc """
Creates a comment.
## Examples
iex> create_comment(%{field: value})
{:ok, %Comment{}}
iex> create_comment(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_comment(attrs \\ %{}) do
%Comment{}
|> Comment.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a comment.
## Examples
iex> update_comment(comment, %{field: new_value})
{:ok, %Comment{}}
iex> update_comment(comment, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_comment(%Comment{} = comment, attrs) do
comment
|> Comment.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a comment.
## Examples
iex> delete_comment(comment)
{:ok, %Comment{}}
iex> delete_comment(comment)
{:error, %Ecto.Changeset{}}
"""
def delete_comment(%Comment{} = comment) do
Repo.delete(comment)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking comment changes.
## Examples
iex> change_comment(comment)
%Ecto.Changeset{data: %Comment{}}
"""
def change_comment(%Comment{} = comment, attrs \\ %{}) do
Comment.changeset(comment, attrs)
end
end
|
programming/elixir-course/discuss/lib/discuss/discussions.ex
| 0.771499
| 0.492859
|
discussions.ex
|
starcoder
|
defmodule XUtil.Dsf do
@moduledoc "Utilities for working with X-Plane's DSF scenery tiles"
import Comb
import XUtil.Math
# These values are for Mobile; Desktop uses 3x2 or even 4x3
@lon_degrees_loaded 2
@lat_degrees_loaded 2
@doc """
The DSF for a given lon/lat.
iex(1)> XUtil.Dsf.dsf(0.1234, 0.5678)
{0, 0}
iex(1)> XUtil.Dsf.dsf({179.9, 79.9})
{179, 79}
iex(1)> XUtil.Dsf.dsf({-179.9, -79.9})
{-180, -80}
"""
def dsf({lon, lat}) when is_number(lon) and is_number(lat), do: {floor(lon), floor(lat)}
def dsf({lon, lat, _ele}) when is_number(lon) and is_number(lat), do: {floor(lon), floor(lat)}
def dsf(%{lon: lon, lat: lat}), do: {floor(lon), floor(lat)}
def dsf(lon, lat) when is_number(lon) and is_number(lat), do: {floor(lon), floor(lat)}
@doc """
The {lon_west, lat_south} 3x2 block the aircraft is currently using.
This is necessarily one of connected_blocks() (i.e., it's among the set
of six 3x2 DSF blocks that contain this DSF).
Mirrors the logic in X-Plane's UTL_geoid::check_ref().
"""
def decode_block(%{lon: lon, lat: lat}) do
lon_w = wrap_lon(round(lon - @lon_degrees_loaded * 0.5))
lat_s = wrap_lat(round(lat - @lat_degrees_loaded * 0.5))
{lon_w, lat_s}
end
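# Illustrative (assuming wrap_lon/wrap_lat are the identity for in-range values):
#   decode_block(%{lon: 1.25, lat: 3.4}) #=> {0, 2}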
@doc """
Returns the 3x2 DSF block indices (indexed on the lower left/southwest corner of the DSF block)
that contain the specified DSF.
iex(1)> XUtil.Dsf.connected_blocks(0, 0)
[{-2, -1}, {-2, 0}, {-1, -1}, {-1, 0}, {0, -1}, {0, 0}]
iex(1)> XUtil.Dsf.connected_blocks(12, 12)
[{10, 11}, {10, 12}, {11, 11}, {11, 12}, {12, 11}, {12, 12}]
# DSF lon 180 wraps to -180, and lat 90 wraps to -90 (since we index DSFs on their southwest corner)
iex(1)> XUtil.Dsf.connected_blocks(180, 90)
[{178, 89}, {178, -90}, {179, 89}, {179, -90}, {-180, 89}, {-180, -90}]
# Wraps properly
iex(1)> XUtil.Dsf.connected_blocks(-178, -88) == XUtil.Dsf.connected_blocks(182, 92)
true
"""
def connected_blocks(lon, lat) when is_integer(lon) and is_integer(lat) do
raw = cartesian_product((lon - 2)..lon, (lat - 1)..lat)
Enum.map(raw, fn lon_lat ->
{wrap_lon(Enum.at(lon_lat, 0)), wrap_lat(Enum.at(lon_lat, 1))}
end)
end
def connected_blocks({lon, lat}) when is_integer(lon) and is_integer(lat), do: connected_blocks(lon, lat)
@doc """
The set of DSFs at a particular (integer) offset away from your base DSF.
Handles equatorial/international dateline wrapping.
iex(1)> XUtil.Dsf.dsfs_at_offset({-180, -30}, 1)
#MapSet<[{-180, -31}, {-180, -29}, {-179, -31}, {-179, -30}, {-179, -29}, {179, -31}, {179, -30}, {179, -29}]>
"""
def dsfs_at_offset(base_dsf, offset)
def dsfs_at_offset({lon, lat} = base_dsf, 0) when is_integer(lon) and is_integer(lat) do
MapSet.new([base_dsf])
end
def dsfs_at_offset({lon, lat} = _base_dsf, offset) when is_integer(lon) and is_integer(lat) and is_integer(offset) do
complete_range_raw = cartesian_product((lon - offset)..(lon + offset), (lat - offset)..(lat + offset))
inner_to_remove_raw = cartesian_product((lon - offset + 1)..(lon + offset - 1), (lat - offset + 1)..(lat + offset - 1))
inner_removed = MapSet.difference(MapSet.new(complete_range_raw), MapSet.new(inner_to_remove_raw))
inner_removed |> Enum.map(&wrap_lon_lat/1) |> MapSet.new()
end
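# Illustrative: at offset 0 the result is just the base DSF itself.
#   dsfs_at_offset({10, 10}, 0) #=> MapSet.new([{10, 10}])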
@doc """
The weird string representation X-Plane uses for indexing DSFs.
iex(1)> XUtil.Dsf.to_string(%{lon: -15, lat: -8})
"-08-015"
iex(1)> XUtil.Dsf.to_string({81, 8})
"+08+081"
iex(1)> XUtil.Dsf.to_string({36, -9, 12345})
"-09+036"
iex(1)> XUtil.Dsf.to_string({153, -24})
"-24+153"
iex(1)> XUtil.Dsf.to_string({0, 0})
"+00+000"
"""
def to_string(lon_lat_optional_ele) do
{lon, lat} = dsf(lon_lat_optional_ele)
"#{pad_leading(lat, 3)}#{pad_leading(lon, 4)}"
end
@doc """
Parses X-Plane's weird string representation used to index dsfs into {lon, lat}
iex(1)> XUtil.Dsf.from_string("-08-015")
{-15, -8}
iex(1)> XUtil.Dsf.from_string("+08+081")
{81, 8}
iex(1)> XUtil.Dsf.from_string("-09+036")
{36, -9}
iex(1)> XUtil.Dsf.from_string("+24-153")
{-153, 24}
iex(1)> XUtil.Dsf.from_string("+00+000")
{0, 0}
"""
def from_string(dsf_id) when is_binary(dsf_id) and byte_size(dsf_id) == 7 do
{lat, ""} = dsf_id |> String.slice(0..2) |> Integer.parse()
{lon, ""} = dsf_id |> String.slice(3..7) |> Integer.parse()
{lon, lat}
rescue
_ ->
{}
end
def from_string(_), do: {}
defp pad_leading(lon_or_lat, field_width) do
sign = if lon_or_lat >= 0, do: "+", else: "-"
with_leading_zeros = lon_or_lat |> abs() |> Integer.to_string() |> String.pad_leading(field_width - 1, "0")
sign <> with_leading_zeros
end
end
|
lib/x_util/dsf.ex
| 0.730001
| 0.572723
|
dsf.ex
|
starcoder
|
% Regular expressions for Elixir built on top of the re module
% in the Erlang Standard Library. More information can be found
% on re documentation: http://www.erlang.org/doc/man/re.html
%
% Regular expressions in Elixir can be created using Regexp.new,
% Regexp.compile (check their documentation) or using the special
% form with ~r:
%
% % A simple regular expression that matches foo anywhere in the string
% ~r(foo)
%
% % A regular expression with the case-insensitive and unicode options
% ~r(foo)iu
%
% The re module provides several options; some of them are not available
% in Elixir while others are enabled by default. The ones enabled by default are:
%
% * multiline - the given string is always considered to be multiline, so
%   ^ and $ mark the beginning and end of each line. You need to use \A
%   and \z to match the beginning or end of the whole string
%
% The available options, followed by their shortcut in parentheses, are:
%
% * unicode (u) - used when you want to match against specific unicode characters
% * caseless (i) - add case insensitivity
% * dotall (m) - causes dot to match newlines and also sets newline to anycrlf.
% The new line setting can be overwritten by setting (*CR) or (*LF) or (*CRLF)
% or (*ANY) according to re documentation
% * extended (x) - whitespace characters are ignored except when escaped and
% allow # to delimit comments
% * firstline (f) - forces the unanchored pattern to match before or at the first
% newline, though the matched text may continue over the newline
% * ungreedy (r) - invert the "greediness" of the regexp
%
% The options not available are:
%
% * anchored - not available, use ^ or \A instead
% * dollar_endonly - not available, use \z instead
% * no_auto_capture - not available, use ?: instead
% * newline - not available, use (*CR) or (*LF) or (*CRLF) or (*ANYCRLF)
% or (*ANY) at the beginning of the regexp according to the re documentation
%
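% An illustrative session, based on the functions defined below:
%
% ~r(foo)i.match?("FOO") % => true
% "adc" = ~r(b).replace("abc", "d")
%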
module Regexp
def new(regexp_bin, options := [])
#Regexp::Behavior(regexp_bin, options)
end
% Escape the given string so it can be matched literally by a regular expression.
def escape(string)
Erlang.re.replace(string, @escape_regexp, "\\\\&", [{'return,'binary},'global])
end
% Have the escape regexp pre-compiled and stored.
{ 'ok, compiled } = Erlang.re.compile("\\\\|\\{|\\[|\\(|\\)|\\]|\\}|\\.|\\?|\\*")
@('escape_regexp, compiled)
module Behavior
% Creates a new regular expression. It expects two arguments,
% the regular expression and a set of options. Both should be
% a string or a list of chars and, if not, to_char_list is
% invoked in order to retrieve the list of chars.
%
% ## Examples
%
% Regexp.new("foo", "iu")
%
def __bound__(regexp_bin, options)
parsed_options = options.to_char_list.foldl ['multiline], do (x, acc)
parse_option(x, acc)
end
{ 'ok, compiled } = Erlang.re.compile(regexp_bin, parsed_options)
@('bin: regexp_bin, 'parsed_options: parsed_options, 'compiled: compiled)
end
% Returns a boolean indicating whether the regular expression matches the given string.
def match?(target)
'nomatch != Erlang.re.run(target, @compiled)
end
% Run the regular expression against the given target. It returns a list with
% all matches or nil if no match occurred.
def run(target)
case Erlang.re.run(target, @compiled, [{'capture, 'all, 'binary}])
match 'nomatch
nil
match {'match, results}
results
end
end
% Returns lists with the match indexes in the given string.
def indexes(target, offset := 0)
case Erlang.re.run(target, @compiled, [{'capture, 'all, 'index},{'offset,offset}])
match 'nomatch
nil
match {'match, results}
results
end
end
% Same as run, but scans the target several times, collecting all matches of
% the regular expression. This is similar to the /g option in Perl.
def scan(target, offset := 0)
case Erlang.re.run(target, @compiled, [{'capture, 'all, 'binary},'global,{'offset,offset}])
match 'nomatch
[]
match {'match, results}
results.map do (result)
case result
match [t]
t
match [h|t]
t
end
end
end
end
% Split the given *target* into the number of *parts* specified.
def split(target, parts := 'infinity)
list = Erlang.re.split(target, @compiled, [{'return,'binary},'trim,{'parts, parts}])
[l for l in list, l != ""]
end
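% Example (illustrative): ~r(,).split("a,b,c") % => ["a", "b", "c"]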
% Receives a string and a replacement and returns a string where the first match
% of the regular expression is replaced by the replacement. Inside the replacement,
% you can either give "&" to access the whole match or \N, where
% N is an integer, to access a specific capture group.
%
% ## Examples
%
% "abc" = ~r(d).replace("abc", "d")
% "adc" = ~r(b).replace("abc", "d")
% "a[b]c" = ~r(b).replace("abc", "[&]")
% "a[&]c" = ~r(b).replace("abc", "[\\&]")
% "a[b]c" = ~r[(b)].replace("abc", "[\\1]")
%
def replace(string, replacement)
Erlang.re.replace(string, @compiled, replacement, [{'return,'binary}])
end
% The same as replace, but replaces every part of the string where the regular
% expression matches. Please read `replace` for documentation and examples.
def replace_all(string, replacement)
Erlang.re.replace(string, @compiled, replacement, [{'return,'binary},'global])
end
private
def parse_option($u, acc); ['unicode|acc]; end
def parse_option($i, acc); ['caseless|acc]; end
def parse_option($x, acc); ['extended|acc]; end
def parse_option($f, acc); ['firstline|acc]; end
def parse_option($r, acc); ['ungreedy|acc]; end
def parse_option($m, acc); ['dotall, {'newline, 'anycrlf}|acc]; end
def parse_option(option, _)
error({'badarg, "unknown option \"#{option.chr}\""})
end
end
end
|
lib/regexp.ex
| 0.747984
| 0.548069
|
regexp.ex
|
starcoder
|
defmodule Spf.Context do
@moduledoc """
Functions to create, access and update an SPF evaluation context.
Many functions take and return an evaluation context whose purpose
is to store information gathered during the evaluation. This includes
a dns cache, an ip lookup table that maps prefixes to SPF terms that
named them, a stack for recursive evaluations, as well as some statistics
around DNS mechanisms seen and void DNS responses seen.
"""
@typedoc """
An SPF evaluation result.
"""
@type verdict :: :fail | :neutral | :none | :pass | :permerror | :softfail | :temperror
@typedoc """
An SPF evaluation context.
Field notes:
- `ast` is a list of SPF terms to be evaluated as produced by `Spf.Parser`
- `atype` is set according to the sender's IP address
- `contact` is gleaned from the soa record for `domain` under evaluation
- `depth` is the nested depth during recursion, used to print a tree of log messages
- `dns` is the DNS cache, used to report on DNS information gathered during evaluation
- `duration` is the time (in seconds) it took to evaluate the SPF policy
- `error` set by either `Spf.Parser` or `Spf.Eval` and halts evaluation if set
- `explain` is the token for the `exp=`-modifier, if any (not needed for actual evaluation)
- `explain_string` is the explanation after all expansions (when available and applicable)
- `helo` as set by the `:helo` option given to `Spf.check/2`
- `ip` is the sender IP, as set by the `:ip` option given to `Spf.check/2` (default `127.0.0.1`)
- `ipt` is an `t:Iptrie.t/0` used to record addresses and/or prefixes authorized to send mails
- `local` is the local part of the `sender`
- `log` is the user callback log function as provided by the `:log` option to `Spf.check/2`
- `map` is used to record `nth` => domain and domain => spf-string
- `max_dnsm` is the max of dns-mechanisms allowed (default 10), if it took more => permerror
- `max_dnsv` is the max of void dns-responses allowed (default 2), if it took more => permerror
- `msg` the list of logged messages by the Spf modules
- `nameservers` a list of nameservers to use or nil (uses system default)
- `nth` is the nth SPF record being evaluated
- `num_checks` counts how many checks were performed during evaluation
- `num_dnsm` counts the number of dns-mechanisms seen during evaluation
- `num_dnsq` counts the number of dns queries performed during evaluation
- `num_dnsv` counts the number of void DNS responses seen during evaluation
- `num_error` counts the number of errors seen during evaluation
- `num_spf` counts the number of SPF records evaluated
- `num_warn` counts the number of warnings seen during evaluation
- `owner` shows the SOA zone for the original SPF domain being evaluated
- `reason` shows the reason for the verdict, usually in the form of an SPF term
- `sender` is the sender as given to `Spf.check/2`
- `spf` is the SPF string of the `domain` being evaluated (if any)
- `spf_rest` is the remainder of the SPF string (should always be "")
- `spf_tokens` is the `Spf.Lexer`'s result of lexing the SPF string (last seen)
- `stack` is used to push/pop the evaluation state during recursive calls
- `t0` is the Unix Epoch time the evaluation started
- `traces` is a map used to detect loops in an SPF policy
- `verbosity` controls the level of logged messages to stderr
- `verdict` is the final result of the SPF evaluation by `Spf.check/2`
Other notes:
- `max_dnsm` and `max_dnsv` are only checked *after* evaluating the entire policy
- this allows debugging most of the SPF policy under consideration
- `Spf.Parser` may set a syntax `error`, in which case the SPF record results in a permerror
- the `ast` is produced by the parser by processing *all* `spf_tokens`
- whitespace tokens are used to report on repeated whitespace in an SPF string
- whitespace tokens do not end up in the AST
- `v=spf1`-modifier is checked and if not present, results in an error
- by processing all tokens, any `error` set reflects the last error seen
- `Spf.Eval` may set an evaluation `error`, which *may* result in an overall permerror
- a void DNS response is either a `NXDOMAIN` or `ZERO ANSWERS`
"""
@type t :: %{
:ast => list(),
:atype => :a | :aaaa,
:contact => binary(),
:depth => non_neg_integer(),
:dns => map(),
:dns_timeout => non_neg_integer(),
:domain => binary(),
:duration => non_neg_integer(),
:error => nil | atom(),
:explain => nil | tuple(),
:explain_string => binary(),
:explanation => binary(),
:helo => binary(),
:ip => binary(),
:ipt => Iptrie.t(),
:local => binary(),
:log => nil | function(),
:map => map(),
:max_dnsm => non_neg_integer(),
:max_dnsv => non_neg_integer(),
:msg => list(),
:nameservers => nil | list(),
:nth => non_neg_integer(),
:num_checks => non_neg_integer(),
:num_dnsm => non_neg_integer(),
:num_dnsq => non_neg_integer(),
:num_dnsv => non_neg_integer(),
:num_error => non_neg_integer(),
:num_spf => non_neg_integer(),
:num_warn => non_neg_integer(),
:owner => binary(),
:reason => binary(),
:sender => binary(),
:spf => binary(),
:spf_rest => binary(),
:spf_tokens => list(),
:stack => list(),
:t0 => non_neg_integer(),
:traces => map(),
:verbosity => non_neg_integer(),
:verdict => verdict()
}
@typedoc """
A `t:Spf.Lexer.token/0`.
"""
@type token :: Spf.Lexer.token()
@typedoc """
A `t:Pfx.prefix/0`.
"""
@type prefix :: Pfx.prefix()
@typedoc """
A `{qualifier, nth, term}` tuple, where `nth` is the nth SPF record where `term` was
found.
The context's ip lookup table stores these tuples thus tracking which term in
which SPF record provided a qualifier for a prefix. Since an evaluation may
involve multiple SPF records, each prefix actually stores a list of these
tuples.
Once the sender's ip has a longest prefix match, the qualifier will tell how
the mechanism at hand matches.
"""
@type iptval :: {Spf.Lexer.q(), non_neg_integer, binary}
@context %{
:ast => [],
:atype => :a,
:contact => "",
:depth => 0,
:dns => %{},
:dns_timeout => 2000,
:domain => "",
:duration => 0,
:error => nil,
:explain => nil,
:explain_string => "",
:explanation => "",
:helo => "",
:ip => "",
:ipt => Iptrie.new(),
:local => "",
:log => nil,
:map => %{},
:max_dnsm => 10,
:max_dnsv => 2,
:msg => [],
:nameservers => nil,
:nth => 0,
:num_checks => 0,
:num_dnsm => 0,
:num_dnsq => 0,
:num_dnsv => 0,
:num_error => 0,
:num_spf => 1,
:num_warn => 0,
:owner => "",
:reason => "",
:sender => "",
:spf => "",
:spf_rest => "",
:spf_tokens => [],
:stack => [],
:t0 => 0,
:traces => %{},
:verbosity => 4,
:verdict => :neutral
}
@counters [
:num_dnsq,
:num_dnsm,
:num_dnsv,
:depth,
:num_warn,
:num_spf,
:num_error,
:num_checks
]
# Helpers
@spec ipt_values(list, prefix()) :: list
defp ipt_values(keyvals, k) do
# filter & turn keyvals [{pfx, [{q, nth, "term"}]}] into [{q, nth, "term"}]
# used to retrieve ipt_values for more/less specifics of given prefix `k`
keyvals
|> Enum.filter(fn {p, _vals} -> p != k end)
|> Enum.map(&elem(&1, 1))
|> List.flatten()
|> Enum.reverse()
end
@spec ipt_update({prefix, iptval}, t) :: t
defp ipt_update({k, v}, ctx) do
q = elem(v, 0)
notq = fn {qq, _, _} -> qq != q end
# less specific entries (if any)
less = Iptrie.less(ctx.ipt, k) |> ipt_values(k)
less_n = length(less)
less_t = Enum.map(less, &elem(&1, 2)) |> Enum.uniq() |> Enum.join(", ")
less_q = Enum.map([v | less], &elem(&1, 0)) |> MapSet.new() |> MapSet.size()
less_i = Enum.filter(less, notq) |> Enum.map(&elem(&1, 2)) |> Enum.join(", ")
# more specific entries (if any)
more = Iptrie.more(ctx.ipt, k) |> ipt_values(k)
more_n = length(more)
more_t = Enum.map(more, &elem(&1, 2)) |> Enum.uniq() |> Enum.join(", ")
more_q = Enum.map([v | more], &elem(&1, 0)) |> MapSet.new() |> MapSet.size()
more_i = Enum.filter(more, notq) |> Enum.map(&elem(&1, 2)) |> Enum.join(", ")
# same prefix entries (if any) -> [{q, nth, "term"}]
other = Iptrie.get(ctx.ipt, k, {k, []}) |> elem(1)
other_n = length(other)
other_t = Enum.map(other, &elem(&1, 2)) |> Enum.uniq() |> Enum.reverse() |> Enum.join(", ")
other_q = Enum.map([v | other], &elem(&1, 0)) |> MapSet.new() |> MapSet.size()
other_i = Enum.filter(other, notq) |> Enum.map(&elem(&1, 2))
t = elem(v, 2)
ctx
|> Map.put(:ipt, Iptrie.put(ctx.ipt, k, [v | other]))
|> log(:ipt, :debug, "#{t} - adds #{k} -> #{inspect(v)}")
|> test(:ipt, :warn, other_n > 0, "#{t} - redundant entry, already have: #{other_t}")
|> test(:ipt, :warn, other_q > 1, "#{t} - inconsistent with #{other_i}")
|> test(:ipt, :warn, less_n > 0, "#{t} - unreachable due to less specific #{less_t}")
|> test(:ipt, :warn, less_q > 1, "#{t} - inconsistent with less specific #{less_i}")
|> test(:ipt, :warn, more_n > 0, "#{t} - overlaps with more specific #{more_t}")
|> test(:ipt, :warn, more_q > 1, "#{t} - inconsistent with more specific #{more_i}")
end
@spec opt_ip(t, Keyword.t()) :: t
defp opt_ip(ctx, opts) do
# IPV4-mapped IPv6 addresses are converted to the mapped IPv4 address
# note: check validity of user supplied IP address, default to 127.0.0.1
ip = Keyword.get(opts, :ip, "127.0.0.1")
{ipinvalid, pfx} =
try do
{false, Pfx.new(ip)}
rescue
ArgumentError -> {true, Pfx.new("127.0.0.1")}
end
{xtracted, pfx} =
case Pfx.member?(pfx, "::FFFF:0:0/96") do
true -> {true, Pfx.cut(pfx, -1, -32)}
false -> {false, pfx}
end
# atype = if pfx.maxlen == 32 or Pfx.member?(pfx, "::FFFF:0/96"), do: :a, else: :aaaa
atype = if pfx.maxlen == 32, do: :a, else: :aaaa
ctx
|> Map.put(:atype, atype)
|> Map.put(:ip, "#{pfx}")
|> log(:ctx, :info, "sender ip '#{pfx}'")
|> test(:ctx, :error, ipinvalid, "ip '#{ip}' is invalid, so using '#{pfx}' instead")
|> test(:ctx, :note, xtracted, "'#{pfx}' was extracted from IPv4-mapped IPv6 address '#{ip}'")
|> log(:ctx, :debug, "atype set to '#{atype}'")
end
@spec opt_nameserver(t, Keyword.t()) :: t
defp opt_nameserver(ctx, opts) do
# pickup any user provided nameserver (if any)
nameservers =
Keyword.take(opts, [:nameserver])
|> Enum.map(fn {_, ip} -> Pfx.parse(ip) end)
|> Enum.filter(fn {res, _} -> res == :ok end)
|> Enum.map(fn {_, ip} -> Pfx.marshall(ip, {0, 0, 0, 0}) end)
|> Enum.map(fn ip -> {ip, 53} end)
|> case do
[] -> nil
list -> list
end
ctx
|> Map.put(:nameservers, nameservers)
|> test(:ctx, :debug, nameservers != nil, "nameservers set to #{inspect(nameservers)}")
|> test(:ctx, :debug, nameservers == nil, "nameservers set to default")
end
@spec opt_sender(t, binary, Keyword.t()) :: t
defp opt_sender(ctx, sender, opts) do
helo = Keyword.get(opts, :helo, sender)
{local, domain} = split(sender)
{local, domain} =
if String.length(domain) < 1,
do: split(helo),
else: {local, domain}
ctx
|> Map.put(:sender, sender)
|> Map.put(:local, local)
|> Map.put(:domain, domain)
|> Map.put(:helo, helo)
|> Map.put(:map, %{0 => domain, domain => ""})
|> log(:ctx, :info, "sender is '#{sender}'")
|> log(:ctx, :info, "local is '#{local}'")
|> log(:ctx, :info, "domain is '#{domain}'")
|> log(:ctx, :debug, "helo is '#{helo}'")
|> test(:ctx, :debug, helo == sender, "helo defaults to sender value")
end
@spec prefix(binary, [non_neg_integer]) :: :error | prefix
defp prefix(ip, [len4, len6]) do
pfx = Pfx.new(ip)
case pfx.maxlen do
32 -> Pfx.keep(pfx, len4)
_ -> Pfx.keep(pfx, len6)
end
rescue
_ -> :error
end
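# Illustrative (assuming Pfx.keep/2 keeps the first `len` bits of a prefix):
#   prefix("1.2.3.4", [24, 64]) #=> the address truncated to 1.2.3.0/24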
@spec trace(t, binary) :: t
defp trace(ctx, new_domain) do
# called when current domain includes or redirects to new_domain
cur_domain = String.downcase(ctx.domain)
new_domain = String.downcase(new_domain)
# keep track of where domains lead to:
# - cur_domain -> new_domain
# - if domain -> cur_domain, then it leads to new_domain also
upd_trace = fn cur_domain, new_domain, trace ->
if cur_domain in trace,
do: [new_domain | trace],
else: trace
end
Map.update(ctx.traces, cur_domain, [new_domain], fn v -> [new_domain | v] end)
|> Enum.reduce(%{}, fn {domain, trace}, acc ->
Map.put(acc, domain, upd_trace.(cur_domain, new_domain, trace))
end)
|> then(fn traces -> Map.put(ctx, :traces, traces) end)
end
# API CONTEXT
@doc """
Updates `context.ipt` with one or more {`t:prefix/0`, `t:iptval/0`}-pairs.
When given a list of IPs, they will all be updated with the given
`t:iptval/0`, which records the SPF record and term (including the qualifier)
that attributed the IP or IPs.
The `dual` parameter contains the dual-cidr lengths to apply to the given
ip addresses.
"""
@spec addip(t, list(), list(), iptval) :: t
def addip(context, ips, dual, value) when is_list(ips) do
kvs =
Enum.map(ips, fn ip -> {prefix(ip, dual), value} end)
|> Enum.filter(fn {k, _v} -> k != :error end)
Enum.reduce(kvs, context, &ipt_update/2)
end
@spec addip(t, binary, list(), iptval) :: t
def addip(context, ip, dual, value) when is_binary(ip) do
case prefix(ip, dual) do
:error -> log(context, :ctx, :error, "ignored malformed IP #{ip}")
pfx -> ipt_update({pfx, value}, context)
end
end
@doc """
Updates `context` with given `error`, `reason` and `verdict`.
When `verdict` is nil, `context.verdict` is not updated. This
allows for setting error conditions whose impact is to be evaluated
at a later stage.
"""
@spec error(t, atom, atom, binary, atom) :: t
def error(context, facility, error, reason, verdict) do
Map.put(context, :error, error)
|> Map.put(:reason, reason)
|> Map.put(:verdict, verdict || context.verdict)
|> log(facility, :error, reason)
end
@doc """
Returns a previous SPF string given either its `domain` or its `nth`-tracking number.
Used for reporting on, rather than evaluating, an SPF record.
"""
@spec get_spf(t, integer | binary) :: binary
def get_spf(context, nth) when is_integer(nth) do
with domain when is_binary(domain) <- context.map[nth] do
get_spf(context, domain)
else
_ -> "ERROR SPF[#{nth}] NOT FOUND"
end
end
def get_spf(context, domain) when is_binary(domain) do
case Spf.DNS.from_cache(context, domain, :txt) do
{_ctx, {:error, _}} -> "ERROR SPF NOT FOUND"
{_ctx, {:ok, rrs}} -> Enum.find(rrs, "ERROR SPF NOT FOUND", &Spf.Eval.spf?/1)
end
end
@doc """
Given a current `context` and a `range`, return the SPF term in that range.
Retrieves a slice of the current SPF record being evaluated. Used for logging
events.
"""
@spec spf_term(t, Range.t()) :: binary
def spf_term(context, first..last),
do: "spf[#{context.nth}] " <> binary_part(context.spf, first, 1 + last - first)
@doc """
Updates `context`'s message queue and, if available, calls the user supplied log
function.
The `log/4` is called with:
- `context` the current context/state of the evaluation
- `facility` an atom denoting which part of the program emitted the event
- `severity` an atom describing the severity
- `msg` a binary with event details
"""
@spec log(t, atom, atom, binary) :: t
def log(context, facility, severity, msg) do
if context[:log],
do: context.log.(context, facility, severity, msg)
nth = Map.get(context, :nth, 0)
context =
Map.update(context, :msg, [{nth, facility, severity, msg}], fn msgs ->
[{nth, facility, severity, msg} | msgs]
end)
case severity do
:warn -> tick(context, :num_warn)
:error -> tick(context, :num_error)
_ -> context
end
end
@doc """
Returns true if `new_domain` constitutes a loop for given `context`, false
otherwise.
Loops may occur when two SPF records (eventually) include or redirect to
each other, which is considered a permanent error.
"""
@spec loop?(t, binary) :: boolean
def loop?(context, new_domain) do
new_domain = String.downcase(new_domain)
cur_domain = String.downcase(context.domain)
cur_domain in Map.get(context.traces, new_domain, [])
end
@doc """
Split an email address into a local and a domain part.
The local part is everything to the left of the first `@`; if there is no
local part it defaults to "postmaster". Note that splitting an empty
string yields `{"postmaster", ""}`.
"""
@spec split(binary) :: {binary, binary}
def split(mbox) do
words =
String.replace(mbox, ~r/\.$/, "")
|> String.split("@", parts: 2, trim: true)
case words do
[] -> {"postmaster", ""}
[local, domain] -> {local, domain}
[domain] -> {"postmaster", domain}
end
end
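# Illustrative, derived from the clauses above:
#   split("user@example.com") #=> {"user", "example.com"}
#   split("example.com")      #=> {"postmaster", "example.com"}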
@doc """
Returns a new `t:Spf.Context.t/0` for given `sender`.
Options include:
- `:dns`, filepath or binary with zonedata (defaults to nil)
- `:helo`, sender's helo string to use (defaults to `sender`)
- `:ip`, sender ip to use (defaults to `127.0.0.1`)
- `:log`, user supplied log function (defaults to nil)
- `:verbosity`, log level `0..5` to use (defaults to `4`)
- `:nameserver`, IPv4 or IPv6 address of a nameserver to use instead of the default
The initial `domain` is derived from given `sender`. The default for
`ip` is likely to traverse all SPF mechanisms during evaluation, gathering
as much information as possible. Set `:ip` to a real IPv4 or IPv6 address
to check an SPF policy for that specific address.
The context is used for the entire SPF evaluation, including during any
recursive calls. When evaluating an `include` mechanism, the current state (a
few selected context properties) is pushed onto an internal stack and a new
`domain` is set. After evaluating the `include` mechanism, the state is
popped and the results are processed according to the `include`-mechanism's
qualifier.
When evaluating a `redirect` modifier, the current state is altered for the
new domain specified by the modifier.
Specify more than one recursive nameserver by repeating the `:nameserver`
option in the Keyword list. They will be tried in the order listed. Mainly
useful when the local default recursive nameserver is having problems, or
when an external nameserver is to be used for checking an SPF policy instead
of an internal nameserver. As an example, use in opts `[nameserver:
"fc00:e968:6179::de52:7100", nameserver: "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"]` to use the IPv6
dns.google servers.
"""
@spec new(binary, Keyword.t()) :: t
def new(sender, opts \\ []) do
@context
|> Map.put(:log, Keyword.get(opts, :log, nil))
|> Map.put(:verbosity, Keyword.get(opts, :verbosity, 4))
|> Map.put(:dns_timeout, Keyword.get(opts, :timeout, 2000))
|> opt_sender(sender, opts)
|> opt_ip(opts)
|> opt_nameserver(opts)
|> Map.put(:t0, System.os_time(:second))
|> Spf.DNS.load(Keyword.get(opts, :dns, nil))
|> then(&log(&1, :ctx, :info, "DNS cache preloaded with #{map_size(&1.dns)} entrie(s)"))
|> then(&log(&1, :ctx, :info, "verbosity level #{&1.verbosity}"))
|> then(&log(&1, :ctx, :debug, "DNS timeout set to #{&1.dns_timeout}"))
|> then(&log(&1, :ctx, :debug, "max DNS mechanisms set to #{&1.max_dnsm}"))
|> then(&log(&1, :ctx, :debug, "max void DNS lookups set to #{&1.max_dnsv}"))
|> then(&log(&1, :ctx, :debug, "verdict defaults to '#{&1.verdict}'"))
|> then(&log(&1, :ctx, :info, "created context for '#{&1.domain}'"))
|> then(&log(&1, :spf, :note, "spfcheck(#{&1.domain}, #{&1.ip}, #{&1.sender})"))
end
@doc """
Pop the previous state of given `context` from its stack.
Before evaluating an include mechanism, the current SPF's record state
is pushed onto the stack. This function restores that state from the
stack.
"""
@spec pop(t) :: t
def pop(%{:stack => [state | tail]} = context) do
Map.put(context, :stack, tail)
|> Map.merge(state)
|> log(:ctx, :debug, "popped state, back to #{state.domain}")
end
@doc """
Push the current state of given `context` onto its stack and re-init the context.
The details of the current SPF record are pushed onto a stack and the context
is re-initialized for retrieving, parsing and evaluate a new `include`d
record.
"""
@spec push(t, binary) :: t
def push(context, domain) do
state = %{
depth: context.depth,
domain: context.domain,
nth: context.nth,
ast: context.ast,
spf: context.spf,
explain: context.explain,
verdict: context.verdict,
reason: context.reason
}
nth = context.num_spf
tick(context, :num_spf)
|> tick(:depth)
|> trace(domain)
|> Map.put(:stack, [state | context.stack])
|> Map.put(:map, Map.merge(context.map, %{nth => domain, domain => ""}))
|> Map.put(:domain, domain)
|> Map.put(:nth, nth)
|> Map.put(:ast, [])
|> Map.put(:spf, "")
|> Map.put(:explain, nil)
|> log(:ctx, :debug, "pushed state for #{state.domain}")
end
@doc """
Reinitializes current `context` for given `domain` of a redirect modifier.
When a redirect modifier is encountered it basically replaces the current SPF
record and the context is modified accordingly.
"""
@spec redirect(t, binary) :: t
def redirect(context, domain) do
# do NOT empty the stack: a redirect modifier may be in an included record
tick(context, :num_spf)
|> trace(domain)
|> Map.put(:depth, 0)
|> Map.put(:nth, context.num_spf)
|> Map.put(
:map,
Map.merge(context.map, %{context.num_spf => domain, domain => ""})
)
|> Map.put(:domain, domain)
|> Map.put(:error, nil)
|> Map.put(:ast, [])
|> Map.put(:spf, "")
|> Map.put(:explain, nil)
end
@doc """
If `test` is true, logs the given `msg` with its `facility` and `severity`.
A convenience function to quickly check some condition and, if true, log it
in one go.
"""
@spec test(t, atom, atom, boolean, binary) :: t
def test(context, facility, severity, test, msg)
def test(context, facility, severity, true, msg),
do: log(context, facility, severity, msg)
def test(context, _, _, _, _),
# nil is also false
do: context
@doc """
Adds `delta` to `counter` and returns updated `context`.
Valid counters include:
- `:num_spf`, the number of SPF records seen
- `:num_dnsm` the number of DNS mechanisms seen
- `:num_dnsq` the number of DNS queries performed
- `:num_dnsv` the number of void DNS queries seen
- `:num_checks` the number of checks performed
- `:num_warn` the number of warnings seen
- `:num_error` the number of errors seen (may not be fatal)
- `:depth` the current recursion depth
"""
@spec tick(t, atom, integer) :: t
def tick(context, counter, delta \\ 1) when counter in @counters do
# default to 0 so a missing counter cannot cause nil arithmetic
count = Map.get(context, counter, 0)
Map.put(context, counter, count + delta)
end
end
|
lib/spf/context.ex
| 0.853974
| 0.836588
|
context.ex
|
starcoder
|
defmodule Ws.Models.Bird do
@moduledoc """
A model entry. A struct is currently the preferred data structure vs, say, a record.
"""
defstruct name: "<new>", type: "<type>", age: 0
end
defmodule Ws.Models.Birds do
require Ws.Models.Bird
use GenServer
@moduledoc """
Demonstrates a simple Model consisting of a list of structs. In practice the data
would be stored using ecto, Riak, or some other persistent store. The standard
Ws.Supervisor has been extended to add this process as a child.
"""
@doc """
Called by Ws.Supervisor, which also supervises the Phoenix application process
"""
def start_link do
# create an initial entry
tim = %Ws.Models.Bird{name: "Phyre", type: "phoenix", age: 17}
initial_entries = [tim]
# Delegate to OTP; initial_entries will be passed to Ws.Models.Birds.init/1.
GenServer.start_link(__MODULE__, initial_entries, name: :bird_data_server_pid)
end
@doc """
Called by OTP with the initial list of birds
"""
def init(initial_entries) do
{:ok, initial_entries}
end
# Private convenience functions. Alternatively you could expose these to clients and
# have them accept a PID argument so that handle_call/handle_cast can be invoked. That
# would expose two public functions for each call, so on general principle it is not
# done here.
@doc """
Add a bird. Convenience function.
"""
defp add(entries, new_entry) do
[new_entry | entries]
end
@doc """
Delete a bird. Convenience function.
"""
defp del(entries, ex_entry) do
if Enum.any?(entries, fn(x) -> x == ex_entry end) do
List.delete(entries, ex_entry)
else
entries
end
end
# GenServer API. The following functions are called by OTP in response to a client request.
@doc """
List birds
"""
def handle_call(:list_entries, _from, entries) do
{:reply, entries, entries}
end
@doc """
Add a bird
"""
def handle_cast({:add_entry, new_entry}, entries) do
{:noreply, add(entries, new_entry)}
end
@doc """
Delete a bird
"""
def handle_cast({:del_entry, ex_entry}, entries) do
{:noreply, del(entries, ex_entry)}
end
@doc """
Delete all birds
"""
def handle_cast({:del_all_entries}, _entries) do
{:noreply, []}
end
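# Illustrative client usage (the server registers under :bird_data_server_pid):
#   GenServer.call(:bird_data_server_pid, :list_entries)
#   GenServer.cast(:bird_data_server_pid, {:add_entry, %Ws.Models.Bird{name: "Rok"}})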
end
|
lib/ws/models/birds.ex
| 0.591487
| 0.505188
|
birds.ex
|
starcoder
|
defmodule Bounds.Set do
import Bitwise
import Bounds.Map.Records
alias Bounds.Map.Impl
defstruct [
root: nil,
segments: 0
]
@infinityish 1.0e100
def new, do: %__MODULE__{}
def new(coerceable) do
{bounds, _} = Coerce.coerce(coerceable, %Bounds{})
from_bounds(bounds)
end
def from_covered_points(enum) do
{root, segments} =
as_ival_stream(enum)
|> Enum.uniq()
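# Coalesce runs of consecutive points (this assumes the enum yields points in
# ascending order): an interval that starts right where the previous one ended
# (a + 1 == b) extends that interval instead of starting a new segment.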
|> Enum.reduce([], fn
interval(lower: b, upper: upper), [interval(lower: lower, upper: a) | ivals] when a + 1 == b ->
[interval(lower: lower, upper: upper) | ivals]
ival, ivals ->
[ival | ivals]
end)
|> Enum.reduce({nil, 0}, fn ival, impl_acc ->
Impl.insert(impl_acc, ival)
end)
%__MODULE__{root: root, segments: segments}
end
def from_covered_intervals(enum) do
as_ival_stream(enum)
|> Enum.reduce(new(), fn ival, bset_acc ->
set(bset_acc, ival)
end)
end
def from_bounds(interval() = ival) do
{tnode0, size0} = Impl.insert({nil, 0}, ival)
%__MODULE__{root: tnode0, segments: size0}
end
def from_bounds(boundable) do
{%Bounds{lower: lower, upper: upper}, _} = Coerce.coerce(boundable, %Bounds{})
from_bounds(interval(lower: lower, upper: upper))
end
def from_bounds_map(%{root: tnode}, opts \\ []) do
v_stream = Impl.stream_vertices(tnode)
case Keyword.fetch(opts, :as) do
{:ok, :mask} ->
Enum.reduce(v_stream, from_bounds(interval(lower: 0, upper: @infinityish)), fn ival, bset ->
unset(bset, ival)
end)
_ ->
Enum.reduce(v_stream, new(), fn ival, bset ->
set(bset, ival)
end)
end
end
def covers_intervals(%__MODULE__{root: tnode}) do
Impl.stream_vertices(tnode)
|> Stream.map(fn interval(lower: lower, upper: upper) ->
%Bounds{lower: lower, upper: upper}
end)
|> Enum.into(MapSet.new())
end
def covers_points(%__MODULE__{root: tnode}) do
Impl.stream_vertices(tnode)
|> Stream.flat_map(fn interval(lower: lower, upper: upper) ->
Enum.map(lower..(upper - 1), fn i ->
%Bounds{lower: i, upper: i}
end)
end)
|> Enum.into(MapSet.new())
end
def set(%__MODULE__{root: tnode0, segments: size0}, interval(lower: lower, upper: upper) = ival) do
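# Probe one unit beyond each side of the new interval so segments that merely
# touch it (no gap, no overlap) are also collected and merged below.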
existing_ivals = Impl.overlaps(tnode0, interval(lower: :erlang.max(lower - 1, 0), upper: upper + 1))
new_ival = concat_ivals([ival | existing_ivals])
tnode1_and_size1 = Impl.delete_matches({tnode0, size0}, existing_ivals)
{tnode2, size2} = Impl.insert(tnode1_and_size1, new_ival)
%__MODULE__{root: tnode2, segments: size2}
end
def set(%__MODULE__{} = bset, boundable) do
{%Bounds{lower: lower, upper: upper}, _} = Coerce.coerce(boundable, %Bounds{})
set(bset, interval(lower: lower, upper: upper))
end
def unset(%__MODULE__{root: tnode0, segments: size0}, interval(lower: sub_lower, upper: sub_upper) = sub_ival) do
existing_ivals = Impl.overlaps(tnode0, sub_ival)
tnode1_and_size1 = Impl.delete_matches({tnode0, size0}, existing_ivals)
replacement_ivals =
Stream.flat_map(existing_ivals, fn interval(lower: shape_lower, upper: shape_upper) ->
clip_lower = :erlang.max(sub_lower, shape_lower)
clip_upper = :erlang.min(sub_upper, shape_upper)
[interval(lower: shape_lower, upper: clip_lower), interval(lower: clip_upper, upper: shape_upper)]
end)
|> Stream.filter(fn
interval(lower: common, upper: common) -> false
_ -> true
end)
|> Enum.into(MapSet.new())
{tnode2, size2} = Enum.reduce(replacement_ivals, tnode1_and_size1, fn ival, tnode_and_size ->
Impl.insert(tnode_and_size, ival)
end)
%__MODULE__{root: tnode2, segments: size2}
end
def unset(%__MODULE__{} = bset, boundable) do
{%Bounds{lower: sub_lower, upper: sub_upper}, _} = Coerce.coerce(boundable, %Bounds{})
unset(bset, interval(lower: sub_lower, upper: sub_upper))
end
def union(%__MODULE__{segments: a_size} = a, %__MODULE__{segments: b_size} = b) when a_size < b_size, do:
union(b, a)
def union(%__MODULE__{} = a, %__MODULE__{root: b_tnode}) do
Impl.stream_vertices(b_tnode)
|> Enum.reduce(a, fn ival, bset_acc ->
set(bset_acc, ival)
end)
end
def union(coerceable_a, coerceable_b) do
{%Bounds.Set{} = a, %Bounds.Set{} = b} = Coerce.coerce(coerceable_a, coerceable_b)
union(a, b)
end
def complement(%__MODULE__{root: tnode}) do
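# The complement is built from the gap below the lowest interval, the gaps
# between consecutive intervals, and an (effectively unbounded) gap above the
# highest one; empty gaps are filtered out afterwards.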
interval(lower: min_lower) = Impl.min_ival(tnode)
interval(upper: max_upper) = Impl.max_ival(tnode)
pre = [interval(lower: 0, upper: min_lower)]
post = [interval(lower: max_upper, upper: @infinityish)]
body =
Impl.stream_vertices(tnode)
|> Stream.transform(nil, fn
ival, nil -> {[], ival}
ival_b, ival_a -> {[{ival_a, ival_b}], ival_b}
end)
|> Stream.map(fn {interval(upper: lower), interval(lower: upper)} ->
interval(lower: lower, upper: upper)
end)
Stream.concat([pre, body, post])
|> Stream.filter(fn
interval(lower: common, upper: common) -> false
_ -> true
end)
|> Enum.into(new())
end
def complement(coerceable) do
{%Bounds.Set{} = bset, _} = Coerce.coerce(coerceable, %Bounds.Set{})
complement(bset)
end
def extent(%__MODULE__{root: tnode}) do
interval(lower: min_lower) = Impl.min_ival(tnode)
interval(upper: max_upper) = Impl.max_ival(tnode)
Bounds.new(min_lower, max_upper)
end
def difference(%__MODULE__{} = a, %__MODULE__{root: b_tnode}) do
Impl.stream_vertices(b_tnode)
|> Enum.reduce(a, fn ival, bset_acc ->
unset(bset_acc, ival)
end)
end
def difference(coerceable_a, coerceable_b) do
{%Bounds.Set{} = a, %Bounds.Set{} = b} = Coerce.coerce(coerceable_a, coerceable_b)
difference(a, b)
end
def intersection(%__MODULE__{segments: a_size} = a, %__MODULE__{segments: b_size} = b) when a_size < b_size, do:
intersection(b, a)
def intersection(%__MODULE__{} = a, %__MODULE__{} = b) do
%__MODULE__{root: not_b_tnode} = complement(b)
Impl.stream_vertices(not_b_tnode)
|> Enum.reduce(a, fn ival, bset_acc ->
unset(bset_acc, ival)
end)
end
def intersection(coerceable_a, coerceable_b) do
{%Bounds.Set{} = a, %Bounds.Set{} = b} = Coerce.coerce(coerceable_a, coerceable_b)
intersection(a, b)
end
def disjoint?(%__MODULE__{root: tnode}, interval() = ival), do:
Impl.overlaps(tnode, ival) == []
def disjoint?(%__MODULE__{segments: a_size} = a, %__MODULE__{segments: b_size} = b) when a_size < b_size, do:
disjoint?(b, a)
def disjoint?(%__MODULE__{root: a_tnode}, %__MODULE__{root: b_tnode}) do
Impl.stream_vertices(b_tnode)
|> Enum.all?(fn ival ->
Impl.overlaps(a_tnode, ival) == []
end)
end
def disjoint?(coerceable_a, coerceable_b) do
{%Bounds.Set{} = a, %Bounds.Set{} = b} = Coerce.coerce(coerceable_a, coerceable_b)
disjoint?(a, b)
end
def covers?(%__MODULE__{root: tnode}, interval() = ival), do:
Impl.covered_by(tnode, ival) != []
def covers?(%__MODULE__{root: a_tnode}, %__MODULE__{root: b_tnode}) do
Impl.stream_vertices(b_tnode)
|> Enum.all?(fn ival ->
Impl.covered_by(a_tnode, ival) != []
end)
end
def covers?(coerceable_a, coerceable_b) do
{%Bounds.Set{} = a, %Bounds.Set{} = b} = Coerce.coerce(coerceable_a, coerceable_b)
covers?(a, b)
end
def clip(%__MODULE__{} = mask, interval(priority: priority, value: value) = ival, opts \\ []) do
%__MODULE__{root: tnode} = case Keyword.fetch(opts, :as) do
{:ok, :negative} ->
Bounds.Set.difference(Bounds.Set.from_bounds(ival), mask)
_ ->
Bounds.Set.intersection(Bounds.Set.from_bounds(ival), mask)
end
Impl.stream_vertices(tnode)
|> Stream.map(fn ival ->
interval(ival, priority: priority, value: value)
end)
end
## helpers
defp as_ival_stream(boundable_enum) do
Stream.map(boundable_enum, fn boundable ->
{%Bounds{lower: lower, upper: upper}, _} = Coerce.coerce(boundable, %Bounds{})
interval(lower: lower, upper: upper)
end)
end
defp concat_ivals(ivals) do
agg_lower = Enum.map(ivals, fn interval(lower: lower) -> lower end) |> Enum.min()
agg_upper = Enum.map(ivals, fn interval(upper: upper) -> upper end) |> Enum.max()
interval(lower: agg_lower, upper: agg_upper)
end
end
defimpl Inspect, for: Bounds.Set do
import Bounds.Map.Records
alias Bounds.Map.Impl
import Inspect.Algebra
def inspect(%Bounds.Set{root: tnode}, opts) do
pre = color("(", :tuple, opts)
post = color(")", :tuple, opts)
sep = color(" ∪", :tuple, opts)
bounds_vals =
Impl.stream_vertices(tnode)
|> Enum.map(fn interval(lower: lower, upper: upper) -> %Bounds{lower: lower, upper: upper} end)
Inspect.Algebra.container_doc(pre, bounds_vals, post, opts, &to_doc/2, [separator: sep, break: :flex])
end
end
defimpl Collectable, for: Bounds.Set do
def into(%Bounds.Set{} = bset), do:
{bset, &collector/2}
defp collector(acc, cmd)
defp collector(bset, {:cont, ival_or_boundable}), do:
Bounds.Set.set(bset, ival_or_boundable)
defp collector(bset, :done), do:
bset
defp collector(_acc, :halt), do:
:ok
end
|
lib/bounds/set.ex
| 0.657098
| 0.550064
|
set.ex
|
starcoder
|