defmodule Ecto.ERD.PlantUML do
@moduledoc false
alias Ecto.ERD.{Node, Edge, Graph, Render, Color}
@safe_name_pattern ~r/^[a-z\d_\.:\?]+$/i
def render(%Graph{nodes: nodes, edges: edges}, options) do
columns = Keyword.fetch!(options, :columns)
fontname = Keyword.fetch!(options, :fontname)
clusters = Enum.group_by(nodes, & &1.cluster)
{global_nodes, clusters} = Map.pop(clusters, nil)
ensure_cluster_names_valid!(Map.keys(clusters))
global_nodes = List.wrap(global_nodes)
namespaces =
Enum.map(clusters, fn {cluster_name, nodes} ->
"""
namespace #{cluster_name} #{Color.get(cluster_name)} {
#{Enum.map_join(nodes, "\n", &render_node(&1, columns, " "))}
}
"""
end)
entities = Enum.map_join(global_nodes, "\n", &render_node(&1, columns, ""))
refs =
edges
|> Enum.uniq_by(fn %Edge{
from: {from_source, from_schema, _},
to: {to_source, to_schema, _}
} ->
{from_source, from_schema, to_source, to_schema}
end)
|> Enum.map_join("\n", &render_edge/1)
"""
@startuml
hide circle
hide methods
#{case columns do
[] -> "hide fields"
_ -> ""
end}
skinparam linetype ortho
skinparam defaultFontName #{fontname}
skinparam shadowing false
#{namespaces}
#{entities}
#{refs}
@enduml
"""
end
defp render_node(
%Node{
source: source,
fields: fields,
schema_module: schema_module
},
columns,
padding
) do
case columns do
[] ->
"#{padding}entity #{Render.in_quotes(Node.id(source, schema_module), @safe_name_pattern)}"
columns ->
items =
case Enum.split_with(fields, & &1.primary?) do
{[], fields} -> fields
{pk, fields} -> pk ++ ["--"] ++ fields
end
content =
Enum.map_join(items, "\n#{padding} ", fn
"--" ->
"--"
field ->
columns
|> Enum.map_join(
" : ",
fn
:name -> Render.in_quotes(field.name, @safe_name_pattern)
:type -> format_type(field.type)
end
)
end)
"""
#{padding}entity #{Render.in_quotes(Node.id(source, schema_module), @safe_name_pattern)} {
#{padding} #{content}
#{padding}}
"""
end
end
defp render_edge(%Edge{
from: {from_source, from_schema, _},
to: {to_source, to_schema, _},
assoc_types: assoc_types
}) do
operator =
if {:has, :one} in assoc_types do
"||--o|"
else
"||--|{"
end
[
Render.in_quotes(Node.id(from_source, from_schema), @safe_name_pattern),
operator,
Render.in_quotes(Node.id(to_source, to_schema), @safe_name_pattern)
]
|> Enum.join(" ")
end
defp format_type({:parameterized, Ecto.Enum, %{on_dump: on_dump}}) do
"enum(#{Enum.join(Map.values(on_dump), ",")})"
end
defp format_type(type) do
case Ecto.Type.type(type) do
{parent, _t} -> Atom.to_string(parent)
atom when is_atom(atom) -> Atom.to_string(atom)
end
end
# namespaces cannot be quoted, so this check is necessary
defp ensure_cluster_names_valid!(names) do
Enum.each(names, fn name ->
unless name =~ @safe_name_pattern do
raise "Cluster name #{inspect(name)} contains symbols which are unsupported by PlantUML."
end
end)
end
end
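A quick smoke test of the renderer above (a sketch; the `Ecto.ERD.Graph` shape is inferred from the pattern match in `render/2`, and an empty graph exercises only the skeleton output):

```elixir
# With no nodes or edges and columns: [], render/2 emits just the
# @startuml/@enduml skeleton plus the "hide fields" directive.
graph = %Ecto.ERD.Graph{nodes: [], edges: []}

graph
|> Ecto.ERD.PlantUML.render(columns: [], fontname: "Roboto")
|> IO.puts()
```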
# source: lib/ecto/erd/plantuml.ex
defmodule Exyt.Auth do
defmodule HTTPError do
defexception message: "HTTP Error"
end
@moduledoc """
Fetches access / refresh token(s) from Google's OAuth2 endpoints.
"""
@default_headers [
"Accept": "application/json",
"Content-Type": "application/x-www-form-urlencoded"
]
alias Exyt.{AccessToken, Client}
@doc """
Fetches the access / refresh token
## Parameters
* `client` - The Client struct to fetch access token with
* `code` - The authorization code fetched from OAuth2 callback
Returns a tuple with the access token or an error message
"""
@spec access_token(Client.t, binary) :: {:ok, AccessToken.t} | {:error, binary}
def access_token(%Client{} = client, code) do
url = Client.token_url(client)
options = [
headers: @default_headers,
body: build_access_body(client, code)
]
HTTPotion.post(url, options) |> parse_response(client)
end
@doc """
Fetches the access / refresh token
## Parameters
* `client` - The client struct to fetch access token with
* `code` - The authorization code fetched from OAuth2 callback
Returns a `%Exyt.AccessToken` with a token or raises an `Exyt.Auth.HTTPError` with a message.
"""
@spec access_token!(Client.t, binary) :: AccessToken.t
def access_token!(%Client{} = client, code) do
case access_token(client, code) do
{:ok, token} -> token
{:error, message} -> raise HTTPError, message: message
end
end
@doc """
Refreshes the (expired) access token by using the refresh token.
Getting a new access token only works when the request of `Auth.access_token` includes the
`grant_type=offline` query parameter, which allows refreshing an expired access token.
For more details see [Refreshing an Access Token](https://developers.google.com/youtube/v3/guides/auth/server-side-web-apps#offline).
## Parameters
* `client` - The client struct that contains the access token.
"""
@spec refresh_token(Client.t) :: {:ok, AccessToken.t} | {:error, binary}
def refresh_token(%Client{} = client) do
url = Client.token_url(client)
options = [
headers: @default_headers,
body: build_refresh_body(client)
]
HTTPotion.post(url, options) |> parse_response(client)
end
@doc """
Refreshes the (expired) access token by using the refresh token. See `Auth.refresh_token/1` for details.
Returns a `%Exyt.AccessToken` with a new token or raises an `Exyt.Auth.HTTPError` with a message.
"""
@spec refresh_token!(Client.t) :: AccessToken.t
def refresh_token!(%Client{} = client) do
case refresh_token(client) do
{:ok, token} -> token
{:error, message} -> raise HTTPError, message: message
end
end
defp build_access_body(%Client{} = client, code) do
client
|> Map.take([:client_id, :client_secret, :redirect_uri])
|> Map.put(:code, code)
|> Map.put(:grant_type, "authorization_code")
|> URI.encode_query()
end
defp build_refresh_body(%Client{token: %AccessToken{refresh_token: token}} = client) do
client
|> Map.take([:client_id, :client_secret])
|> Map.put(:refresh_token, token)
|> Map.put(:grant_type, "refresh_token")
|> URI.encode_query()
end
defp parse_response(%HTTPotion.Response{status_code: 200} = response, %Client{} = client) do
# we need to rely on this specific response structure
token =
Poison.Parser.parse!(response.body, keys: :atoms)
|> Map.take([:access_token, :refresh_token, :expires_in])
{:ok, Map.merge(client.token || %AccessToken{}, token)}
end
defp parse_response(%HTTPotion.Response{} = response, %Client{}) do
{:error, "Status: #{response.status_code} - Body: #{response.body}"}
end
defp parse_response(%HTTPotion.ErrorResponse{} = response, %Client{}) do
{:error, response.message}
end
end
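A usage sketch for the functions above (all field values are hypothetical; the `Exyt.Client` struct is assumed to carry the fields that `build_access_body/2` takes):

```elixir
# Hypothetical OAuth2 client; in a real app these values come from your
# Google API console configuration.
client = %Exyt.Client{
  client_id: "my-client-id",
  client_secret: "my-client-secret",
  redirect_uri: "https://example.com/oauth2/callback"
}

# Exchange the authorization code from the OAuth2 callback for a token.
case Exyt.Auth.access_token(client, "code-from-callback") do
  {:ok, %Exyt.AccessToken{} = token} -> token
  {:error, message} -> raise Exyt.Auth.HTTPError, message: message
end
```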
# source: lib/exyt/auth.ex
defmodule Number.Human do
@moduledoc """
Provides functions for converting numbers into more human readable strings.
"""
import Number.Delimit, only: [number_to_delimited: 2]
import Decimal, only: [cmp: 2]
@doc """
Formats and labels a number with the appropriate English word.
## Examples
iex> Number.Human.number_to_human(123)
"123.00"
iex> Number.Human.number_to_human(1234)
"1.23 Thousand"
iex> Number.Human.number_to_human(999001)
"999.00 Thousand"
iex> Number.Human.number_to_human(1234567)
"1.23 Million"
iex> Number.Human.number_to_human(1234567890)
"1.23 Billion"
iex> Number.Human.number_to_human(1234567890123)
"1.23 Trillion"
iex> Number.Human.number_to_human(1234567890123456)
"1.23 Quadrillion"
iex> Number.Human.number_to_human(1234567890123456789)
"1,234.57 Quadrillion"
iex> Number.Human.number_to_human(Decimal.new("5000.0"))
"5.00 Thousand"
"""
def number_to_human(number, options \\ [])
def number_to_human(number, options) when not is_map(number) do
if Number.Conversion.impl_for(number) do
number
|> Number.Conversion.to_decimal
|> number_to_human(options)
else
raise ArgumentError, """
number must be a float or integer, or implement `Number.Conversion` protocol,
was #{inspect number}
"""
end
end
def number_to_human(number, options) do
cond do
cmp(number, ~d(999)) == :gt && cmp(number, ~d(1_000_000)) == :lt ->
delimit(number, ~d(1_000), "Thousand", options)
cmp(number, ~d(1_000_000)) in [:gt, :eq] and cmp(number, ~d(1_000_000_000)) == :lt ->
delimit(number, ~d(1_000_000), "Million", options)
cmp(number, ~d(1_000_000_000)) in [:gt, :eq] and cmp(number, ~d(1_000_000_000_000)) == :lt ->
delimit(number, ~d(1_000_000_000), "Billion", options)
cmp(number, ~d(1_000_000_000_000)) in [:gt, :eq] and cmp(number, ~d(1_000_000_000_000_000)) == :lt ->
delimit(number, ~d(1_000_000_000_000), "Trillion", options)
cmp(number, ~d(1_000_000_000_000_000)) in [:gt, :eq] ->
delimit(number, ~d(1_000_000_000_000_000), "Quadrillion", options)
true ->
number_to_delimited(number, options)
end
end
@doc """
Adds ordinal suffix (st, nd, rd or th) for the number
## Examples
iex> Number.Human.number_to_ordinal(3)
"3rd"
iex> Number.Human.number_to_ordinal(1)
"1st"
iex> Number.Human.number_to_ordinal(46)
"46th"
iex> Number.Human.number_to_ordinal(442)
"442nd"
iex> Number.Human.number_to_ordinal(4001)
"4001st"
"""
def number_to_ordinal(number) when is_integer(number) do
sfx = ~w(th st nd rd th th th th th th)
(Integer.to_string(number)) <> case rem(number, 100) do
11 -> "th"
12 -> "th"
13 -> "th"
_ -> Enum.at(sfx, rem(number, 10))
end
end
defp sigil_d(number, _modifiers) do
number
|> String.replace("_", "")
|> String.to_integer
|> Decimal.new
end
defp delimit(number, divisor, label, options) do
number =
number
|> Decimal.div(divisor)
|> number_to_delimited(options)
number <> " " <> label
end
end
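The cond ladder above buckets a number by magnitude and divides by the matching power of ten. A simplified, float-only restatement of that idea (a sketch, independent of the `Decimal`-based module):

```elixir
defmodule HumanSketch do
  # Magnitude thresholds mirroring Number.Human's cond ladder.
  @labels [
    {1.0e15, "Quadrillion"},
    {1.0e12, "Trillion"},
    {1.0e9, "Billion"},
    {1.0e6, "Million"},
    {1.0e3, "Thousand"}
  ]

  def bucket(n) when is_number(n) do
    case Enum.find(@labels, fn {divisor, _label} -> n >= divisor end) do
      {divisor, label} -> "#{Float.round(n / divisor, 2)} #{label}"
      nil -> to_string(n)
    end
  end
end

HumanSketch.bucket(1_234_567)
# => "1.23 Million"
```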
# source: lib/number/human.ex
defmodule CSV do
@moduledoc ~S"""
RFC 4180 compliant CSV parsing and encoding for Elixir. Allows specifying other separators,
so it could also be named TSV, but it isn't.
"""
@doc """
Decode a stream of comma-separated lines into a table.
## Options
These are the options:
* `:separator` – The separator token to use, defaults to `?,`. Must be a codepoint (syntax: ? + (your separator)).
* `:delimiter` – The delimiter token to use, defaults to `\\r\\n`. Must be a string.
* `:strip_cells` – When set to true, will strip whitespace from cells. Defaults to false.
* `:multiline_escape` – Whether to allow multiline escape sequences. Defaults to true.
* `:multiline_escape_max_lines` – How many lines to maximally aggregate for multiline escapes. Defaults to 1000.
* `:num_pipes` – Will be deprecated in 2.0 - see `:num_workers`.
* `:num_workers` – The number of parallel operations to run when producing the stream.
Defaults to the number of Erlang schedulers times 3.
* `:worker_work_ratio` – The available work per worker, defaults to 5. Higher rates will mean more work sharing, but might also lead to work fragmentation slowing down the queues.
* `:headers` – When set to `true`, will take the first row of the csv and use it as
header values.
When set to a list, will use the given list as header values.
When set to `false` (default), will use no header values.
When set to anything but `false`, the resulting rows in the matrix will
be maps instead of lists.
## Examples
Convert a filestream into a stream of rows:
iex> \"../test/fixtures/docs.csv\"
iex> |> Path.expand(__DIR__)
iex> |> File.stream!
iex> |> CSV.decode
iex> |> Enum.take(2)
[[\"a\",\"b\",\"c\"], [\"d\",\"e\",\"f\"]]
Convert a filestream into a stream of rows in order of the given stream:
iex> \"../test/fixtures/docs.csv\"
iex> |> Path.expand(__DIR__)
iex> |> File.stream!
iex> |> CSV.decode(num_pipes: 1)
iex> |> Enum.take(2)
[[\"a\",\"b\",\"c\"], [\"d\",\"e\",\"f\"]]
Map an existing stream of lines separated by a token to a stream of rows with a header row:
iex> [\"a;b\",\"c;d\", \"e;f\"]
iex> |> Stream.map(&(&1))
iex> |> CSV.Decoder.decode(separator: ?;, headers: true)
iex> |> Enum.take(2)
[%{\"a\" => \"c\", \"b\" => \"d\"}, %{\"a\" => \"e\", \"b\" => \"f\"}]
Map an existing stream of lines separated by a token to a stream of rows with a given header row:
iex> [\"a;b\",\"c;d\", \"e;f\"]
iex> |> Stream.map(&(&1))
iex> |> CSV.Decoder.decode(separator: ?;, headers: [:x, :y])
iex> |> Enum.take(2)
[%{:x => \"a\", :y => \"b\"}, %{:x => \"c\", :y => \"d\"}]
"""
def decode(stream, options \\ []) do
CSV.Decoder.decode(stream, options)
end
@doc """
Encode a table stream into a stream of RFC 4180 compliant CSV lines for writing to a file
or other IO.
## Options
These are the options:
* `:separator` β The separator token to use, defaults to `?,`. Must be a codepoint (syntax: ? + (your separator)).
* `:delimiter` β The delimiter token to use, defaults to `\\r\\n`. Must be a string.
## Examples
Convert a stream of rows with cells into a stream of lines:
iex> [~w(a b), ~w(c d)]
iex> |> CSV.encode
iex> |> Enum.take(2)
[\"a,b\\r\\n\", \"c,d\\r\\n\"]
Convert a stream of rows with cells with escape sequences into a stream of lines:
iex> [[\"a\\nb\", \"\\tc\"], [\"de\", \"\\tf\\\"\"]]
iex> |> CSV.encode(separator: ?\\t, delimiter: \"\\n\")
iex> |> Enum.take(2)
[\"\\\"a\\\\nb\\\"\\t\\\"\\\\tc\\\"\\n\", \"de\\t\\\"\\\\tf\\\"\\\"\\\"\\n\"]
"""
def encode(stream, options \\ []) do
CSV.Encoder.encode(stream, options)
end
end
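A sketch of the facade in use, mirroring the `CSV.Decoder` doctests above (`CSV.decode/2` simply delegates to the decoder with the same options):

```elixir
# Decode an in-memory list of semicolon-separated lines, using the
# first row as headers, so each resulting row is a map.
["name;age", "ann;34", "bob;42"]
|> Stream.map(&(&1))
|> CSV.decode(separator: ?;, headers: true)
|> Enum.to_list()
# => [%{"age" => "34", "name" => "ann"}, %{"age" => "42", "name" => "bob"}]
```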
# source: data/web/deps/csv/lib/csv.ex
defmodule Contex.Dataset do
@moduledoc """
`Dataset` is a simple wrapper around a datasource for plotting charts.
Dataset marshalls a couple of different data structures into a consistent form for consumption
by the chart plotting functions. It allows a list of maps, list of lists or a list of tuples to be
treated the same.
The most sensible way to work with a dataset is to provide column headers - it makes code elsewhere
readable. When the provided data is a list of maps, headers are inferred from the map keys. If you
don't want to provide headers, you can also refer to columns by index.
Dataset provides a few convenience functions for calculating data extents for a column, extracting unique
values from columns, calculating combined extents for multiple columns (handy when plotting bar charts)
and guessing column type (handy when determining whether to use a `Contex.TimeScale` or a `Contex.ContinuousLinearScale`).
Datasets can be created from a list of maps:
iex> data = [
...> %{x: 0.0, y: 0.0, category: "Hippo"},
...> %{x: 0.2, y: 0.3, category: "Rabbit"}
...> ] # Wherever your data comes from (e.g. could be straight from Ecto)
...> dataset = Dataset.new(data)
%Contex.Dataset{
data: [
%{category: "Hippo", x: 0.0, y: 0.0},
%{category: "Rabbit", x: 0.2, y: 0.3}
],
headers: nil,
title: nil
}
iex> Dataset.column_names(dataset)
[:category, :x, :y] # Note ordering of column names from map data is not guaranteed
or from a list of tuples (or lists):
iex> data = [
...> {0.0, 0.0, "Hippo"},
...> {0.5, 0.3, "Turtle"},
...> {0.4, 0.3, "Turtle"},
...> {0.2, 0.3, "Rabbit"}
...> ]
...> dataset = Dataset.new(data, ["x", "y", "category"]) # Attach descriptive headers
iex> Dataset.column_names(dataset)
["x", "y", "category"]
...> Dataset.column_extents(dataset, "x") # Get extents for a named column
{0.0, 0.5}
iex> Dataset.column_index(dataset, "x") # Get index of column by name
0
iex> category_col = Dataset.column_name(dataset, 2) # Get name of column by index
"category"
iex> Enum.map(dataset.data, fn row -> # Enumerate values in a column
...> accessor = Dataset.value_fn(dataset, category_col)
...> accessor.(row)
...> end)
["Hippo", "Turtle", "Turtle", "Rabbit"]
iex> Dataset.unique_values(dataset, "category") # Extract unique values for legends etc.
["Hippo", "Turtle", "Rabbit"]
Dataset gives facilities to map between names and column indexes. Where headers are not supplied (either directly or
via map keys), the column index is treated as the column name internally. Data values are retrieved by column name
using accessor functions, in order to avoid expensive mappings in tight loops.
**Note** There are very few validation checks when a dataset is created (for example, no check
that the number of headers supplied matches the size of each list or tuple in the data). If there are any issues finding a value, nil is returned.
"""
alias __MODULE__
alias Contex.Utils
defstruct [:headers, :data, :title]
@type column_name() :: String.t() | integer() | atom()
@type column_type() :: :datetime | :number | :string | :unknown | nil
@type row() :: list() | tuple() | map()
@type t() :: %__MODULE__{}
@doc """
Creates a new Dataset wrapper around some data.
Data is expected to be a list of tuples of the same size, a list of lists of same size, or a list of maps with the same keys.
Columns in map data are accessed by key. For lists of lists or tuples, if no headers are specified, columns are accessed by index.
"""
@spec new(list(row())) :: Contex.Dataset.t()
def new(data) when is_list(data) do
%Dataset{headers: nil, data: data}
end
@doc """
Creates a new Dataset wrapper around some data with headers.
Data is expected to be a list of tuples of the same size or list of lists of same size. Headers provided with a list of maps
are ignored; column names from map data are inferred from the maps' keys.
"""
@spec new(list(row()), list(String.t())) :: Contex.Dataset.t()
def new(data, headers) when is_list(data) and is_list(headers) do
%Dataset{headers: headers, data: data}
end
@doc """
Optionally sets a title.
Not really used at the moment to be honest, but seemed like a good
idea at the time. Might come in handy when overlaying plots.
"""
@spec title(Contex.Dataset.t(), String.t()) :: Contex.Dataset.t()
def title(%Dataset{} = dataset, title) do
%{dataset | title: title}
end
@doc """
Looks up the index for a given column name. Returns nil if not found.
"""
@spec column_index(Contex.Dataset.t(), column_name()) :: nil | column_name()
def column_index(%Dataset{data: [first_row | _rest]}, column_name) when is_map(first_row) do
if Map.has_key?(first_row, column_name) do
column_name
else
nil
end
end
def column_index(%Dataset{headers: headers}, column_name) when is_list(headers) do
Enum.find_index(headers, fn col -> col == column_name end)
end
def column_index(_, column_name) when is_integer(column_name) do
column_name
end
def column_index(_, _), do: nil
# TODO: Should this be column_ids - they are essentially the internal column names
@doc """
Returns a list of the names of all of the columns in the dataset data (irrespective of
whether the column names are mapped to plot elements).
"""
@spec column_names(Contex.Dataset.t()) :: list(column_name())
def column_names(%Dataset{data: [first_row | _]}) when is_map(first_row) do
Map.keys(first_row)
end
def column_names(%Dataset{data: [first_row | _], headers: headers})
when is_nil(headers) and is_tuple(first_row) do
max = tuple_size(first_row) - 1
0..max |> Enum.into([])
end
def column_names(%Dataset{data: [first_row | _], headers: headers})
when is_nil(headers) and is_list(first_row) do
max = length(first_row) - 1
0..max |> Enum.into([])
end
def column_names(%Dataset{headers: headers}), do: headers
@doc """
Looks up the column name for a given index.
If there are no headers, or the index is outside the range of the headers
the requested index is returned.
"""
@spec column_name(Contex.Dataset.t(), integer() | any) :: column_name()
def column_name(%Dataset{headers: headers} = _dataset, column_index)
when is_list(headers) and
is_integer(column_index) and
column_index < length(headers) do
# Maybe drop the length guard above and have it throw an exception
Enum.at(headers, column_index)
end
def column_name(_, column_index), do: column_index
@doc """
Returns a function that retrieves the value for a given column in a given row, accessed by
the column name.
## Examples
iex> data = [
...> %{x: 0.0, y: 0.0, category: "Hippo"},
...> %{x: 0.2, y: 0.3, category: "Rabbit"}
...> ]
iex> dataset = Dataset.new(data)
iex> category_accessor = Dataset.value_fn(dataset, :category)
iex> category_accessor.(hd(data))
"Hippo"
"""
@spec value_fn(Contex.Dataset.t(), column_name()) :: (row() -> any)
def value_fn(%Dataset{data: [first_row | _]}, column_name)
when is_map(first_row) and is_binary(column_name) do
fn row -> row[column_name] end
end
def value_fn(%Dataset{data: [first_row | _]}, column_name)
when is_map(first_row) and is_atom(column_name) do
fn row -> row[column_name] end
end
def value_fn(%Dataset{data: [first_row | _]} = dataset, column_name) when is_list(first_row) do
column_index = column_index(dataset, column_name)
fn row -> Enum.at(row, column_index, nil) end
end
def value_fn(%Dataset{data: [first_row | _]} = dataset, column_name) when is_tuple(first_row) do
column_index = column_index(dataset, column_name)
if column_index < tuple_size(first_row) do
fn row -> elem(row, column_index) end
else
fn _ -> nil end
end
end
def value_fn(_dataset, _column_name), do: fn _ -> nil end
@doc """
Calculates the min and max value in the specified column
"""
@spec column_extents(Contex.Dataset.t(), column_name()) :: {any, any}
def column_extents(%Dataset{data: data} = dataset, column_name) do
accessor = Dataset.value_fn(dataset, column_name)
Enum.reduce(data, {nil, nil}, fn row, {min, max} ->
val = accessor.(row)
{Utils.safe_min(val, min), Utils.safe_max(val, max)}
end)
end
@doc """
Tries to guess the data type for a column based on contained data.
Looks through the rows and returns the first match it can find.
"""
@spec guess_column_type(Contex.Dataset.t(), column_name()) :: column_type()
def guess_column_type(%Dataset{data: data} = dataset, column_name) do
accessor = Dataset.value_fn(dataset, column_name)
Enum.reduce_while(data, nil, fn row, _result ->
val = accessor.(row)
case evaluate_type(val) do
{:ok, type} -> {:halt, type}
_ -> {:cont, nil}
end
end)
end
defp evaluate_type(%DateTime{}), do: {:ok, :datetime}
defp evaluate_type(%NaiveDateTime{}), do: {:ok, :datetime}
defp evaluate_type(v) when is_number(v), do: {:ok, :number}
defp evaluate_type(v) when is_binary(v), do: {:ok, :string}
defp evaluate_type(_), do: {:unknown}
@doc """
Calculates the data extents for the sum of the columns supplied.
It is the equivalent of evaluating the extents of a calculated row where the calculating
is the sum of the values identified by column_names.
"""
@spec combined_column_extents(Contex.Dataset.t(), list(column_name())) :: {any(), any()}
def combined_column_extents(%Dataset{data: data} = dataset, column_names) do
accessors =
Enum.map(column_names, fn column_name -> Dataset.value_fn(dataset, column_name) end)
Enum.reduce(data, {nil, nil}, fn row, {min, max} ->
val = sum_row_values(row, accessors)
{Utils.safe_min(val, min), Utils.safe_max(val, max)}
end)
end
defp sum_row_values(row, accessors) do
Enum.reduce(accessors, 0, fn accessor, acc ->
val = accessor.(row)
Utils.safe_add(acc, val)
end)
end
@doc """
Extracts a list of unique values in the given column.
Note that the unique values will maintain order of first detection
in the data.
"""
@spec unique_values(Contex.Dataset.t(), String.t() | integer()) :: [any]
def unique_values(%Dataset{data: data} = dataset, column_name) do
accessor = Dataset.value_fn(dataset, column_name)
{result, _found} =
Enum.reduce(data, {[], MapSet.new()}, fn row, {result, found} ->
val = accessor.(row)
case MapSet.member?(found, val) do
true -> {result, found}
_ -> {[val | result], MapSet.put(found, val)}
end
end)
# Maintain order they are found in
Enum.reverse(result)
end
end
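As the moduledoc notes, accessors are built once and reused in tight loops. A minimal sketch with illustrative data:

```elixir
# Tuple rows with explicit headers; value_fn/2 resolves the column to a
# tuple-index lookup a single time, not once per row.
dataset = Contex.Dataset.new([{1, "a"}, {2, "b"}], ["id", "label"])
id_of = Contex.Dataset.value_fn(dataset, "id")

Enum.map(dataset.data, id_of)
# => [1, 2]
```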
# source: lib/chart/dataset.ex
defmodule Sanbase.TemplateEngine do
@moduledoc ~s"""
Produce a string value from a given template and key-value enumerable.
All occurrences in the template that are enclosed in double braces are replaced
with the corresponding values from the KV enumerable.
Example:
iex> Sanbase.TemplateEngine.run("My name is {{name}}", %{name: "San"})
"My name is San"
iex> Sanbase.TemplateEngine.run("{{a}}{{b}}{{a}}{{a}}", %{a: "1", b: 2})
"1211"
iex> Sanbase.TemplateEngine.run("SmallNum: {{small_num}}", %{small_num: 100})
"SmallNum: 100"
iex> Sanbase.TemplateEngine.run("MediumNum: {{medium_num}}", %{medium_num: 100000})
"MediumNum: 100000"
iex> Sanbase.TemplateEngine.run("Human Readable MediumNum: {{medium_num}}", %{medium_num: 100000, human_readable: [:medium_num]})
"Human Readable MediumNum: 100,000.00"
iex> Sanbase.TemplateEngine.run("BigNum: {{big_num}}", %{big_num: 999999999999})
"BigNum: 999999999999"
iex> Sanbase.TemplateEngine.run("Human Readable BigNum: {{big_num}}", %{big_num: 999999999999, human_readable: [:big_num]})
"Human Readable BigNum: 1,000.00 Billion"
"""
@spec run(String.t(), map) :: String.t()
def run(template, kv) do
{human_readable_map, kv} = Map.split(kv, [:human_readable])
human_readable_mapset = Map.get(human_readable_map, :human_readable, []) |> MapSet.new()
Enum.reduce(kv, template, fn {key, value}, acc ->
replace(acc, key, value, human_readable_mapset)
end)
end
defp replace(string, key, value, human_readable_mapset) do
String.replace(string, "{{#{key}}}", fn _ ->
case key in human_readable_mapset do
true -> value |> human_readable |> to_string()
false -> value |> to_string()
end
end)
end
# Numbers below 1000 are not changed
# Numbers between 1000 and 1000000 are delimited: 999,523.00, 123,529.12
# Numbers bigger than 1000000 are made human readable: 1.54 Million, 85.00 Billion
defp human_readable(data) do
case data do
num when is_number(num) and (num >= 1_000_000 or num <= -1_000_000) ->
Number.Human.number_to_human(num)
num when is_number(num) and (num >= 1000 or num <= -1000) ->
Number.Delimit.number_to_delimited(num)
num when is_number(num) and (num > -1 and num < 1) ->
Number.Delimit.number_to_delimited(num, precision: 8)
num when is_float(num) ->
Number.Delimit.number_to_delimited(num, precision: 2)
num when is_integer(num) ->
Integer.to_string(num)
end
end
end
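The substitution itself is just a reduce over the key-value pairs. Stated by hand (an illustrative equivalent of `run/2`, minus the human-readable handling):

```elixir
template = "{{greeting}}, {{name}}!"
kv = %{greeting: "Hello", name: "San"}

Enum.reduce(kv, template, fn {key, value}, acc ->
  String.replace(acc, "{{#{key}}}", to_string(value))
end)
# => "Hello, San!"
```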
# source: lib/sanbase/template_engine/template_engine.ex
defmodule ExWire.Packet.BlockBodies do
@moduledoc """
Eth Wire Packet for getting block bodies from a peer.
```
**BlockBodies** [`+0x06`, [`transactions_0`, `uncles_0`] , ...]
Reply to `GetBlockBodies`. The items in the list (following the message ID) are
some of the blocks, minus the header, in the format described in the main Ethereum
specification, previously asked for in a `GetBlockBodies` message. This may
validly contain no items if no blocks were able to be returned for the
`GetBlockBodies` query.
```
"""
require Logger
alias ExWire.Struct.Block
@behaviour ExWire.Packet
@type t :: %__MODULE__{
blocks: [Block.t()]
}
defstruct [
:blocks
]
@doc """
Given a BlockBodies packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.BlockBodies{
...> blocks: [
...> %ExWire.Struct.Block{transactions_list: [[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], ommers: [<<1::256>>]},
...> %ExWire.Struct.Block{transactions_list: [[<<6>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], ommers: [<<1::256>>]}
...> ]
...> }
...> |> ExWire.Packet.BlockBodies.serialize()
[ [[[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], [<<1::256>>]], [[[<<6>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], [<<1::256>>]] ]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(packet = %__MODULE__{}) do
for block <- packet.blocks, do: Block.serialize(block)
end
@doc """
Given an RLP-encoded BlockBodies packet from Eth Wire Protocol,
decodes into a BlockBodies struct.
## Examples
iex> ExWire.Packet.BlockBodies.deserialize([ [[[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], [<<1::256>>]], [[[<<6>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]], [<<1::256>>]] ])
%ExWire.Packet.BlockBodies{
blocks: [
%ExWire.Struct.Block{
transactions_list: [[<<5>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
transactions: [%Blockchain.Transaction{nonce: 5, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}],
ommers: [<<1::256>>]
},
%ExWire.Struct.Block{
transactions_list: [[<<6>>, <<6>>, <<7>>, <<1::160>>, <<8>>, "hi", <<27>>, <<9>>, <<10>>]],
transactions: [%Blockchain.Transaction{nonce: 6, gas_price: 6, gas_limit: 7, to: <<1::160>>, value: 8, v: 27, r: 9, s: 10, data: "hi"}],
ommers: [<<1::256>>]
}
]
}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
blocks = for block <- rlp, do: Block.deserialize(block)
%__MODULE__{
blocks: blocks
}
end
@doc """
Handles a BlockBodies message. This is when we have received
a given set of blocks back from a peer.
## Examples
iex> %ExWire.Packet.BlockBodies{blocks: []}
...> |> ExWire.Packet.BlockBodies.handle()
:ok
"""
@spec handle(ExWire.Packet.packet()) :: ExWire.Packet.handle_response()
def handle(packet = %__MODULE__{}) do
_ = Logger.info("[Packet] Peer sent #{Enum.count(packet.blocks)} block(s).")
:ok
end
end
# source: apps/ex_wire/lib/ex_wire/packet/block_bodies.ex
defmodule Phoenix.LiveDashboard.TableComponent do
use Phoenix.LiveDashboard.Web, :live_component
@limit [50, 100, 500, 1000, 5000]
@type params() :: %{
limit: pos_integer(),
sort_by: atom(),
sort_dir: :desc | :asc,
search: binary(),
hint: binary() | nil
}
@impl true
def mount(socket) do
{:ok, socket}
end
def normalize_params(params) do
params
|> validate_required([:columns, :id, :row_fetcher, :title])
|> normalize_columns()
|> validate_required_one_sortable_column()
|> Map.put_new(:search, true)
|> Map.put_new(:limit, @limit)
|> Map.put_new(:row_attrs, [])
|> Map.put_new(:hint, nil)
|> Map.update(:default_sort_by, nil, &(&1 && to_string(&1)))
|> Map.put_new_lazy(:rows_name, fn ->
Phoenix.Naming.humanize(params.title) |> String.downcase()
end)
end
defp validate_required(params, list) do
case Enum.find(list, &(not Map.has_key?(params, &1))) do
nil -> :ok
key -> raise ArgumentError, "the #{inspect(key)} parameter is expected in table component"
end
params
end
defp normalize_columns(%{columns: columns} = params) when is_list(columns) do
%{params | columns: Enum.map(columns, &normalize_column/1)}
end
defp normalize_columns(%{columns: columns}) do
raise ArgumentError, ":columns must be a list, got: #{inspect(columns)}"
end
defp normalize_column(column) do
case Access.fetch(column, :field) do
{:ok, nil} ->
msg = ":field parameter must not be nil, got: #{inspect(column)}"
raise ArgumentError, msg
{:ok, field} when is_atom(field) or is_binary(field) ->
column
|> Map.new()
|> Map.put_new_lazy(:header, fn -> Phoenix.Naming.humanize(field) end)
|> Map.put_new(:header_attrs, [])
|> Map.put_new(:format, & &1)
|> Map.put_new(:cell_attrs, [])
|> Map.put_new(:sortable, nil)
{:ok, _} ->
msg = ":field parameter must be an atom or a string, got: "
raise ArgumentError, msg <> inspect(column)
:error ->
msg = "the :field parameter is expected, got: #{inspect(column)}"
raise ArgumentError, msg
end
end
defp validate_required_one_sortable_column(%{columns: columns} = params) do
sortable_columns = sortable_columns(columns)
if sortable_columns == [] do
raise ArgumentError, "must have at least one column with :sortable parameter"
else
params
end
end
@impl true
def update(assigns, socket) do
assigns = normalize_table_params(assigns)
%{
table_params: table_params,
page: page,
row_fetcher: row_fetcher
} = assigns
{rows, total} = row_fetcher.(table_params, page.node)
assigns = Map.merge(assigns, %{rows: rows, total: total})
{:ok, assign(socket, assigns)}
end
defp normalize_table_params(assigns) do
%{columns: columns, page: %{params: all_params}, default_sort_by: sort_by} = assigns
sortable_columns = sortable_columns(columns)
sort_by =
all_params
|> get_in_or_first("sort_by", sort_by, sortable_columns)
|> String.to_atom()
sort_dir =
all_params
|> get_in_or_first("sort_dir", sortable_dirs(columns, sort_by))
|> String.to_atom()
limit =
if assigns.limit do
all_params
|> get_in_or_first("limit", Enum.map(assigns.limit, &to_string/1))
|> String.to_integer()
else
nil
end
search = all_params["search"]
search = if search == "", do: nil, else: search
table_params = %{sort_by: sort_by, sort_dir: sort_dir, limit: limit, search: search}
Map.put(assigns, :table_params, table_params)
end
defp sortable_columns(columns) do
for column <- columns, column.sortable, do: to_string(column.field)
end
defp sortable_dirs(columns, field) do
case Enum.find(columns, &(&1.field == field)) do
%{sortable: :desc} -> ~w(desc asc)
%{sortable: :asc} -> ~w(asc desc)
end
end
defp get_in_or_first(params, key, default \\ nil, valid) do
value = params[key]
if value in valid, do: value, else: default || hd(valid)
end
@impl true
def render(assigns) do
~L"""
<div class="tabular">
<h5 class="card-title"><%= @title %> <%= @hint && hint(do: @hint) %></h5>
<%= if @search do %>
<div class="tabular-search">
<form phx-change="search" phx-submit="search" phx-target="<%= @myself %>" class="form-inline">
<div class="form-row align-items-center">
<div class="col-auto">
<input type="search" name="search" class="form-control form-control-sm" value="<%= @table_params.search %>" placeholder="Search" phx-debounce="300">
</div>
</div>
</form>
</div>
<% end %>
<form phx-change="select_limit" phx-target="<%= @myself %>" class="form-inline">
<div class="form-row align-items-center">
<%= if @limit do %>
<div class="col-auto">Showing at most</div>
<div class="col-auto">
<div class="input-group input-group-sm">
<select name="limit" class="custom-select" id="limit-select">
<%= options_for_select(@limit, @table_params.limit) %>
</select>
</div>
</div>
<div class="col-auto">
<%= @rows_name %> out of <%= @total %>
</div>
<% else %>
<div class="col-auto">
Showing <%= @total %> <%= @rows_name %>
</div>
<% end %>
</div>
</form>
<div class="card tabular-card mb-4 mt-4">
<div class="card-body p-0">
<div class="dash-table-wrapper">
<table class="table table-hover mt-0 dash-table">
<thead>
<tr>
<%= for column <- @columns do %>
<%= tag_with_attrs(:th, column.header_attrs, [column]) %>
<%= if direction = column.sortable do %>
<%= sort_link(@socket, @page, @table_params, column, direction) %>
<% else %>
<%= column.header %>
<% end %>
</th>
<% end %>
</tr>
</thead>
<tbody>
<%= for row <- @rows do %>
<%= tag_with_attrs(:tr, @row_attrs, [row]) %>
<%= for column <- @columns do %>
<%= tag_with_attrs(:td, column.cell_attrs, [row]) %>
<%= column.format.(row[column.field]) %>
</td>
<% end %>
</tr>
<% end %>
</tbody>
</table>
</div>
</div>
</div>
</div>
"""
end
defp tag_with_attrs(name, fun, args), do: tag(name, calc_attrs(fun, args))
defp calc_attrs(falsy, _) when falsy in [nil, false], do: []
defp calc_attrs(list, _) when is_list(list), do: list
defp calc_attrs(fun, args) when is_function(fun), do: apply(fun, args)
defp column_header(column) do
column.header || column.field |> to_string() |> String.capitalize()
end
@impl true
def handle_event("search", %{"search" => search}, socket) do
table_params = %{socket.assigns.table_params | search: search}
to = live_dashboard_path(socket, socket.assigns.page, table_params)
{:noreply, push_patch(socket, to: to)}
end
def handle_event("select_limit", %{"limit" => limit}, socket) do
table_params = %{socket.assigns.table_params | limit: limit}
to = live_dashboard_path(socket, socket.assigns.page, table_params)
{:noreply, push_patch(socket, to: to)}
end
defp sort_link(socket, page, table_params, column, direction) do
field = column.field
case table_params do
%{sort_by: ^field, sort_dir: sort_dir} ->
table_params = %{table_params | sort_dir: opposite_sort_dir(table_params), sort_by: field}
column
|> column_header()
|> sort_link_body(sort_dir)
|> live_patch(to: live_dashboard_path(socket, page, table_params))
%{} ->
table_params = %{table_params | sort_dir: direction, sort_by: field}
column
|> column_header()
|> sort_link_body()
|> live_patch(to: live_dashboard_path(socket, page, table_params))
end
end
defp sort_link_body(link_name), do: link_name
defp sort_link_body(link_name, sort_dir) do
[link_name | sort_link_icon(sort_dir)]
end
defp sort_link_icon(:desc) do
{:safe,
"""
<div class="dash-table-icon">
<span class="icon-sort icon-desc"></span>
</div>
"""}
end
defp sort_link_icon(:asc) do
{:safe,
"""
<div class="dash-table-icon">
<span class="icon-sort icon-asc"></span>
</div>
"""}
end
defp opposite_sort_dir(%{sort_dir: :desc}), do: :asc
defp opposite_sort_dir(_), do: :desc
end
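A sketch of what `normalize_params/1` expects and what it fills in (the column list and row fetcher here are illustrative):

```elixir
params = %{
  id: :my_table,
  title: "Processes",
  columns: [%{field: :name, sortable: :asc}],
  row_fetcher: fn _table_params, _node -> {[], 0} end
}

Phoenix.LiveDashboard.TableComponent.normalize_params(params)
# Fills in the defaults: search: true, limit: [50, 100, 500, 1000, 5000],
# row_attrs: [], hint: nil, default_sort_by: nil, rows_name: "processes".
```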
# source: lib/phoenix/live_dashboard/components/table_component.ex
defmodule Aoc2019.Day3 do
@behaviour DaySolution
def solve_part1(), do: paths() |> part1()
def solve_part2(), do: paths() |> part2()
defp paths(), do: load("inputs/input_day3")
defp load(filepath),
do:
File.read!(filepath)
|> String.split("\n")
|> Enum.map(fn path -> path |> String.split(",") end)
|> Enum.take(2)
defp part1(paths), do: paths |> closest_intersection_by_dist()
defp part2(paths), do: paths |> closest_intersection_by_steps()
# Without loss of generality, we'll take the starting point to be {0, 0}
def closest_intersection_by_dist(paths),
do:
paths
|> intersections()
|> Enum.min_by(fn point -> point |> manhattan_dist({0, 0}) end)
|> manhattan_dist({0, 0})
def closest_intersection_by_steps(paths) do
[points1, points2] = paths |> Enum.map(&points_on_path/1)
paths
|> intersections()
|> Enum.map(fn point ->
{Enum.find_index(points1, fn point1 -> point1 == point end) + 1,
Enum.find_index(points2, fn point2 -> point2 == point end) + 1}
end)
|> Enum.map(fn {steps1, steps2} -> steps1 + steps2 end)
|> Enum.min()
end
defp intersections(paths) do
[points1, points2] =
paths |> Enum.map(&points_on_path/1) |> Enum.map(fn points -> points |> MapSet.new() end)
points1 |> MapSet.intersection(points2)
end
defp points_on_path(path) do
path
|> parse_path()
|> Enum.reduce([{0, 0}], fn {direction, steps}, acc ->
{x, y} = acc |> List.last()
acc ++
(1..steps
|> Enum.map(fn n ->
case direction do
:R -> {x + n, y}
:L -> {x - n, y}
:U -> {x, y + n}
:D -> {x, y - n}
end
end))
end)
|> Enum.drop(1)
end
defp parse_path(path),
do:
path
|> Enum.map(fn elem ->
{String.to_atom(String.at(elem, 0)), String.to_integer(String.slice(elem, 1..-1))}
end)
defp manhattan_dist({x1, y1}, {x2, y2}), do: abs(y2 - y1) + abs(x2 - x1)
end
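For reference, the part 1 metric in isolation (a sketch of the private `manhattan_dist/2` above):

```elixir
manhattan = fn {x1, y1}, {x2, y2} -> abs(y2 - y1) + abs(x2 - x1) end
manhattan.({3, 4}, {0, 0})
# => 7
```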
# source: lib/aoc2019/day3.ex
defmodule DeepMerge do
@moduledoc """
Provides functionality for deeply/recursively merging structures (normally for
`Map` and `Keyword`).
If you want to change the deep merge behavior of a custom struct,
please have a look at the `DeepMerge.Resolver` protocol.
"""
alias DeepMerge.Resolver
@continue_symbol :__deep_merge_continue
@doc """
Deeply merges two maps or keyword lists `original` and `override`.
In more detail, if two conflicting values are maps or keyword lists themselves
then they will also be merged recursively. In that sense this is an extension
of what `Map.merge/2` and `Keyword.merge/2` do, as it doesn't just override map
or keyword values but tries to merge them.
It does not merge structs or structs with maps. If you want your structs to be
merged then please have a look at the `DeepMerge.Resolver` protocol and
consider implementing/deriving it.
Also, while it says `Map` and `Keyword` here, it is really dependent on which
types implement the `DeepMerge.Resolver` protocol, which by default are `Map`
and `Keyword`.
## Examples
iex> DeepMerge.deep_merge(%{a: 1, b: [x: 10, y: 9]}, %{b: [y: 20, z: 30], c: 4})
%{a: 1, b: [x: 10, y: 20, z: 30], c: 4}
iex> DeepMerge.deep_merge(%{a: 1, b: %{x: 10, y: 9}}, %{b: %{y: 20, z: 30}, c: 4})
%{a: 1, b: %{x: 10, y: 20, z: 30}, c: 4}
iex> DeepMerge.deep_merge(%{a: 1, b: %{x: 10, y: 9}}, %{b: %{y: 20, z: 30}, c: 4})
%{a: 1, b: %{x: 10, y: 20, z: 30}, c: 4}
iex> DeepMerge.deep_merge([a: 1, b: [x: 10, y: 9]], [b: [y: 20, z: 30], c: 4])
[a: 1, b: [x: 10, y: 20, z: 30], c: 4]
iex> DeepMerge.deep_merge(%{a: 1, b: 2}, %{b: 3, c: 4})
%{a: 1, b: 3, c: 4}
iex> DeepMerge.deep_merge(%{a: 1, b: %{x: 10, y: 9}}, %{b: 5, c: 4})
%{a: 1, b: 5, c: 4}
iex> DeepMerge.deep_merge([a: [b: [c: 1, d: 2], e: [24]]], [a: [b: [f: 3], e: [42, 100]]])
[a: [b: [c: 1, d: 2, f: 3], e: [42, 100]]]
iex> DeepMerge.deep_merge(%{a: 1, b: 5}, %{b: %{x: 10, y: 9}, c: 4})
%{a: 1, b: %{x: 10, y: 9}, c: 4}
iex> DeepMerge.deep_merge(%{a: [b: %{c: [d: "foo", e: 2]}]}, %{a: [b: %{c: [d: "bar"]}]})
%{a: [b: %{c: [e: 2, d: "bar"]}]}
"""
@spec deep_merge(map() | keyword(), map | keyword()) :: map() | keyword()
def deep_merge(original, override)
when (is_map(original) or is_list(original)) and (is_map(override) or is_list(override)) do
standard_resolve(nil, original, override)
end
@doc """
A variant of `DeepMerge.deep_merge/2` that allows you to modify the merge behavior
through an additional passed-in function.
This is similar to the relationship between `Map.merge/2` and `Map.merge/3`
and the structure of the function is exactly the same, e.g. the passed in
arguments are `key`, `original` and `override`.
The function is called before a merge is performed. If it returns any value,
that value is inserted at that point during the deep merge. If the deep merge
should continue as normal, you need to return the symbol returned by
`DeepMerge.continue_deep_merge/0`.
If the merge conflict occurs at the top level then `key` is `nil`.
The example shows how this can be used to modify `deep_merge` not to merge
keyword lists, in case you don't like that behavior.
## Examples
iex> resolver = fn
...> (_, original, override) when is_list(original) and is_list(override) ->
...> override
...> (_, _original, _override) ->
...> DeepMerge.continue_deep_merge
...> end
iex> DeepMerge.deep_merge(%{a: %{b: 1}, c: [d: 1]},
...> %{a: %{z: 5}, c: [x: 0]}, resolver)
%{a: %{b: 1, z: 5}, c: [x: 0]}
"""
@spec deep_merge(map() | keyword(), map() | keyword(), (any(), any() -> any())) ::
map() | keyword()
def deep_merge(original, override, resolve_function)
when (is_map(original) or is_list(original)) and (is_map(override) or is_list(override)) do
resolver = build_resolver(resolve_function)
resolver.(nil, original, override)
end
@doc """
The symbol to return in the function in `deep_merge/3` when deep merging
should continue as normal.
## Examples
iex> DeepMerge.continue_deep_merge
:__deep_merge_continue
"""
@spec continue_deep_merge() :: :__deep_merge_continue
def continue_deep_merge, do: @continue_symbol
@spec build_resolver((any(), any() -> any())) :: (any(), any(), any() -> any())
defp build_resolver(resolve_function) do
my_resolver = fn key, base, override, fun ->
resolved_value = resolve_function.(key, base, override)
case resolved_value do
@continue_symbol ->
continue_deep_merge(base, override, fun)
_anything ->
resolved_value
end
end
rebuild_resolver(my_resolver)
end
defp rebuild_resolver(resolve_function) do
fn key, base, override ->
resolve_function.(key, base, override, resolve_function)
end
end
defp continue_deep_merge(base, override, fun) do
resolver = rebuild_resolver(fun)
Resolver.resolve(base, override, resolver)
end
defp standard_resolve(_key, original, override) do
Resolver.resolve(original, override, &standard_resolve/3)
end
end
# source: lib/deep_merge.ex
defmodule ExWinlog do
@moduledoc """
`Logger` backend for the [Windows Event Log](https://docs.microsoft.com/windows/win32/wes/windows-event-log)
## Usage
Add it to your list of `Logger` backends in your `config.exs` file like this:
```elixir
config :logger,
backends: [:console, {ExWinlog, "My Event Source Name"}]
```
### Registering Event Sources
The `ExWinlog` backend requires that the event source, `"My Event Source Name"` in the example, is registered with Windows Event Viewer.
Use `ExWinlog.register/1` and `ExWinlog.deregister/1`, while running the application as an administrator, to manage the sources.
This should typically be done once when installing or uninstalling the application.
"""
@behaviour GenEvent
@default_state %{name: nil, path: nil, io_device: nil, inode: nil, format: nil, level: nil, metadata: nil, metadata_filter: nil, rotate: nil}
@doc """
Registers a new event source with Windows Event Viewer. Must be run as an administrator.
"""
def register(event_source_name), do: ExWinlog.Logger.register(event_source_name)
@doc """
De-registers an existing event source with Windows Event Viewer. Must be run as an administrator.
"""
def deregister(event_source_name), do: ExWinlog.Logger.deregister(event_source_name)
@impl true
def init({__MODULE__, event_source_name}) do
state = Map.put(@default_state, :event_source_name, event_source_name)
{:ok, state}
end
@impl true
def handle_event({level, _gl, {Logger, msg, _ts, _md}}, %{level: min_level, event_source_name: event_source_name} = state) do
if (is_nil(min_level) or Logger.compare_levels(level, min_level) != :lt) do
:ok = apply(ExWinlog.Logger, level, [event_source_name, msg])
end
{:ok, state}
end
@impl true
def handle_call(_, state) do
{:ok, :ok, state}
end
@impl true
def handle_info(_, state) do
{:ok, state}
end
@impl true
def code_change(_old_vsn, state, _extra) do
{:ok, state}
end
@impl true
def terminate(_reason, _state) do
:ok
end
end
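A setup-and-use sketch following the moduledoc (the source name is illustrative and must match the one configured in `config.exs`):

```elixir
# One-time, as administrator: register the event source.
ExWinlog.register("My Event Source Name")

# Afterwards, ordinary Logger calls flow through the configured backend
# to the Windows Event Log.
require Logger
Logger.info("service started")
```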
# source: lib/ex_winlog.ex
require Utils
require Program
defmodule D7 do
@moduledoc """
--- Day 7: Amplification Circuit ---
Based on the navigational maps, you're going to need to send more power to your ship's thrusters to reach Santa in time. To do this, you'll need to configure a series of amplifiers already installed on the ship.
There are five amplifiers connected in series; each one receives an input signal and produces an output signal. They are connected such that the first amplifier's output leads to the second amplifier's input, the second amplifier's output leads to the third amplifier's input, and so on. The first amplifier's input value is 0, and the last amplifier's output leads to your ship's thrusters.
    O-------O  O-------O  O-------O  O-------O  O-------O
0 ->| Amp A |->| Amp B |->| Amp C |->| Amp D |->| Amp E |-> (to thrusters)
    O-------O  O-------O  O-------O  O-------O  O-------O
The Elves have sent you some Amplifier Controller Software (your puzzle input), a program that should run on your existing Intcode computer. Each amplifier will need to run a copy of the program.
When a copy of the program starts running on an amplifier, it will first use an input instruction to ask the amplifier for its current phase setting (an integer from 0 to 4). Each phase setting is used exactly once, but the Elves can't remember which amplifier needs which phase setting.
The program will then call another input instruction to get the amplifier's input signal, compute the correct output signal, and supply it back to the amplifier with an output instruction. (If the amplifier has not yet received an input signal, it waits until one arrives.)
Your job is to find the largest output signal that can be sent to the thrusters by trying every possible combination of phase settings on the amplifiers. Make sure that memory is not shared or reused between copies of the program.
Try every combination of phase settings on the amplifiers. What is the highest signal that can be sent to the thrusters?
--- Part Two ---
It's no good - in this configuration, the amplifiers can't generate a large enough output signal to produce the thrust you'll need. The Elves quickly talk you through rewiring the amplifiers into a feedback loop:
      O-------O  O-------O  O-------O  O-------O  O-------O
0 -+->| Amp A |->| Amp B |->| Amp C |->| Amp D |->| Amp E |-.
   |  O-------O  O-------O  O-------O  O-------O  O-------O |
   |                                                        |
   '--------------------------------------------------------+
                                                             |
                                                             v
                                                    (to thrusters)
Most of the amplifiers are connected as they were before; amplifier A's output is connected to amplifier B's input, and so on. However, the output from amplifier E is now connected into amplifier A's input. This creates the feedback loop: the signal will be sent through the amplifiers many times.
In feedback loop mode, the amplifiers need totally different phase settings: integers from 5 to 9, again each used exactly once. These settings will cause the Amplifier Controller Software to repeatedly take input and produce output many times before halting. Provide each amplifier its phase setting at its first input instruction; all further input/output instructions are for signals.
Don't restart the Amplifier Controller Software on any amplifier during this process. Each one should continue receiving and sending signals until it halts.
All signals sent or received in this process will be between pairs of amplifiers except the very first signal and the very last signal. To start the process, a 0 signal is sent to amplifier A's input exactly once.
Eventually, the software on the amplifiers will halt after they have processed the final loop. When this happens, the last output signal from amplifier E is sent to the thrusters. Your job is to find the largest output signal that can be sent to the thrusters using the new phase settings and feedback loop arrangement.
Try every combination of the new phase settings on the amplifier feedback loop. What is the highest signal that can be sent to the thrusters?
"""
@behaviour Day
def permutations([]), do: [[]]
def permutations(list),
do: for(elem <- list, rest <- permutations(list -- [elem]), do: [elem | rest])
def part_2_mapping(programs) do
programs =
Enum.scan(
programs,
fn {:block, program}, {_, %Program{output: [previous_output | _]}} ->
program = %{program | input: [previous_output]}
Program.run_blocking(program)
end
)
{final_status, final_program} = List.last(programs)
programs = List.replace_at(programs, 0, {final_status, final_program})
case final_status do
:block -> part_2_mapping(programs)
:halt -> hd(final_program.output)
end
end
def solve(input) do
input = input |> Utils.to_ints()
program = Program.new(input)
part_1 =
0..4
|> Enum.to_list()
|> permutations
|> Enum.map(fn phase_settings ->
Enum.reduce(
phase_settings,
0,
fn phase, previous_output ->
%Program{output: [result]} = Program.run(%{program | input: [phase, previous_output]})
result
end
)
end)
|> Enum.max()
part_2 =
5..9
|> Enum.to_list()
|> permutations
|> Enum.map(fn phase_settings ->
[a | rest] =
Enum.map(
phase_settings,
fn phase ->
%{program | input: [phase]}
end
)
programs = Enum.map([a | rest], fn program -> Program.run_blocking(program) end)
programs = [{:block, %Program{output: [0]}} | programs]
part_2_mapping(programs)
end)
|> Enum.max()
{
part_1,
part_2
}
end
end
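`permutations/1` above enumerates the candidate phase orderings, for example:

```elixir
D7.permutations([0, 1, 2])
# => [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
```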
# source: lib/days/07.ex
defmodule ListToCsv.Option do
@moduledoc """
`ListToCsv.Option` contains types and utilities for option.
"""
alias ListToCsv.Header
alias ListToCsv.Key
@type t() :: [
headers: list(Header.t()) | nil,
keys: list(Key.many()),
length: list({Key.many(), integer}) | nil
]
@spec expand(t()) :: list({Header.t(), Key.many()} | Key.many())
def expand(option) do
headers = option[:headers]
keys = option[:keys]
length = option[:length]
case {headers, length} do
{nil, nil} ->
keys
{headers, nil} ->
Enum.zip(headers, keys)
{nil, length} ->
Enum.reduce(length, keys, &do_expand/2)
_ ->
Enum.reduce(length, Enum.zip(headers, keys), &do_expand/2)
end
end
@spec do_expand({Key.many(), integer()}, list({Header.t(), Key.many()} | Key.many())) ::
list({Header.t(), Key.many()} | Key.many())
def do_expand({keys, n}, headers) do
matcher = &starts_with?(&1, Key.build_prefix(keys))
case chunks(headers, matcher) do
{prefix, body, []} ->
Enum.concat(prefix, duplicate(body, n))
{prefix, body, suffix} ->
Enum.concat([prefix, duplicate(body, n), do_expand({keys, n}, suffix)])
end
end
@spec duplicate(list({Header.t(), Key.many()} | Key.many()), integer()) ::
list({Header.t(), Key.many()} | Key.many())
def duplicate([{_, _} | _] = list, n) do
{headers, keys} = Enum.unzip(list)
Enum.zip(Header.duplicate(headers, n), Key.duplicate(keys, n))
end
@doc """
Returns `list` duplicated `n` times, replacing the first `:N` in each copy with its 1-based copy index.
## Examples
iex> duplicate([[:name, :N]], 2)
[[:name, 1], [:name, 2]]
iex> duplicate([{"name#", [:name, :N]}], 2)
[{"name1", [:name, 1]}, {"name2", [:name, 2]}]
iex> duplicate([[:name, :N, :item, :N]], 2)
[[:name, 1, :item, :N], [:name, 2, :item, :N]]
iex> duplicate([{"name#.item#", [:name, :N, :item, :N]}], 2)
[{"name1.item#", [:name, 1, :item, :N]}, {"name2.item#", [:name, 2, :item, :N]}]
"""
def duplicate(list, n), do: Key.duplicate(list, n)
@doc """
Returns `true` if `keys` starts with the given `prefix` list; otherwise returns
`false`.
Note that `:N` can match with `integer`.
## Examples
iex> starts_with?(:name, [:item, :N])
false
iex> starts_with?({"name", :name}, [:item, :N])
false
iex> starts_with?([:item, :N, :name], [:item, :N])
true
iex> starts_with?({"item#.name", [:item, :N, :name]}, [:item, :N])
true
iex> starts_with?([:name], [:item, :N])
false
iex> starts_with?({"name", [:name]}, [:item, :N])
false
iex> starts_with?([:item, 1, :name, :N, :first], [:item, :N, :name, :N])
true
iex> starts_with?({"item1.name#.first", [:item, 1, :name, :N, :first]}, [:item, :N, :name, :N])
true
iex> starts_with?([:packages, :N, :name], [:item, :N])
false
iex> starts_with?({"package#.name", [:packages, :N, :name]}, [:item, :N])
false
"""
@spec starts_with?({Header.t(), Key.many()} | Key.many(), list(Key.t())) :: boolean
def starts_with?({header, keys}, prefix) when is_binary(header),
do: Key.starts_with?(keys, prefix)
def starts_with?(keys, prefix), do: Key.starts_with?(keys, prefix)
@doc """
Splits `list` into 3 parts, preserving order:
- 1st not matched with `fun`
- 2nd matched with `fun`
- 3rd not matched with `fun`
## Examples
iex> chunks([1, 2, 3, 3, 2, 1, 3, 2], &(&1 == 3))
{[1, 2], [3, 3], [2, 1, 3, 2]}
iex> chunks([3, 2, 3, 2, 1, 3, 2], &(&1 == 3))
{[], [3], [2, 3, 2, 1, 3, 2]}
iex> chunks([1, 2, 4, 5, 2], &(&1 == 3))
{[1, 2, 4, 5, 2], [], []}
"""
@spec chunks(list(), function()) :: {list(), list(), list()}
def chunks(list, fun) do
{prefix, tail} = Enum.split_while(list, &(!fun.(&1)))
{body, suffix} = Enum.split_while(tail, fun)
{prefix, body, suffix}
end
end
# source: lib/list_to_csv/option.ex
defmodule EctoEnumMigration do
@moduledoc """
Provides a DSL to easily handle Postgres Enum Types in Ecto database migrations.
"""
import Ecto.Migration, only: [execute: 1, execute: 2]
@doc """
Create a Postgres Enum Type.
## Examples
```elixir
defmodule MyApp.Repo.Migrations.CreateTypeMigration do
use Ecto.Migration
import EctoEnumMigration
def change do
create_type(:status, [:registered, :active, :inactive, :archived])
end
end
```
By default the type will be created in the `public` schema.
To change the schema of the type pass the `schema` option.
```elixir
create_type(:status, [:registered, :active, :inactive, :archived], schema: "custom_schema")
```
"""
@spec create_type(name :: atom(), values :: [atom()], opts :: Keyword.t()) :: :ok | no_return()
def create_type(name, values, opts \\ [])
when is_atom(name) and is_list(values) and is_list(opts) do
type_name = type_name(name, opts)
type_values = values |> Enum.map(fn value -> "'#{value}'" end) |> Enum.join(", ")
create_sql = "CREATE TYPE #{type_name} AS ENUM (#{type_values});"
drop_sql = "DROP TYPE #{type_name};"
execute(create_sql, drop_sql)
end
@doc """
Drop a Postgres Enum Type.
This command is not reversible, so make sure to include a `down/0` step in the migration.
## Examples
```elixir
defmodule MyApp.Repo.Migrations.DropTypeMigration do
use Ecto.Migration
import EctoEnumMigration
def up do
drop_type(:status)
end
def down do
create_type(:status, [:registered, :active, :inactive, :archived])
end
end
```
By default the type is assumed to be in the `public` schema.
To change the schema of the type pass the `schema` option.
```elixir
drop_type(:status, schema: "custom_schema")
```
"""
@spec drop_type(name :: atom(), opts :: Keyword.t()) :: :ok | no_return()
def drop_type(name, opts \\ []) when is_atom(name) and is_list(opts) do
[
"DROP TYPE",
if_exists_sql(opts),
type_name(name, opts),
";"
]
|> execute_query()
end
@doc """
Rename a Postgres Type.
## Examples
```elixir
defmodule MyApp.Repo.Migrations.RenameTypeMigration do
use Ecto.Migration
import EctoEnumMigration
def change do
rename_type(:status, :status_renamed)
end
end
```
By default the type is assumed to be in the `public` schema.
To change the schema of the type pass the `schema` option.
```elixir
rename_type(:status, :status_renamed, schema: "custom_schema")
```
"""
@spec rename_type(before_name :: atom(), after_name :: atom(), opts :: Keyword.t()) ::
:ok | no_return()
def rename_type(before_name, after_name, opts \\ [])
when is_atom(before_name) and is_atom(after_name) and is_list(opts) do
before_type_name = type_name(before_name, opts)
after_type_name = type_name(after_name, opts)
up_sql = "ALTER TYPE #{before_type_name} RENAME TO #{after_name};"
down_sql = "ALTER TYPE #{after_type_name} RENAME TO #{before_name};"
execute(up_sql, down_sql)
end
@doc """
Add a value to a existing Postgres type.
This operation is not reversible, existing values cannot be removed from an enum type.
Checkout [Enumerated Types](https://www.postgresql.org/docs/current/datatype-enum.html)
for more information.
Also, it cannot be used inside a transaction block, so we need to set
`@disable_ddl_transaction true` in the migration.
## Examples
```elixir
defmodule MyApp.Repo.Migrations.AddValueToTypeMigration do
use Ecto.Migration
import EctoEnumMigration
@disable_ddl_transaction true
def up do
add_value_to_type(:status, :finished)
end
def down do
end
end
```
By default the value will be added to the type in the `public` schema.
To change the schema of the type pass the `schema` option.
```elixir
add_value_to_type(:status, :finished, schema: "custom_schema")
```
If the new value's place in the enum's ordering is not specified,
then the new item is placed at the end of the list of values.
But we can specify the place in the ordering for the new value with the
`:before` and `:after` options.
```elixir
add_value_to_type(:status, :finished, before: :started)
```
```elixir
add_value_to_type(:status, :finished, after: :started)
```
"""
@spec add_value_to_type(name :: atom(), value :: atom(), opts :: Keyword.t()) ::
:ok | no_return()
def add_value_to_type(name, value, opts \\ []) do
[
"ALTER TYPE",
type_name(name, opts),
"ADD VALUE",
to_value(value),
before_after(opts),
";"
]
|> execute_query()
end
@doc """
Rename a value of a Postgres Type.
***Only compatible with Postgres version 10+***
## Examples
```elixir
defmodule MyApp.Repo.Migrations.RenameTypeMigration do
use Ecto.Migration
import EctoEnumMigration
def change do
rename_value(:status, :finished, :done)
end
end
```
By default the value will be renamed on the type in the `public` schema.
To change the schema of the type pass the `schema` option.
```elixir
rename_value(:status, :finished, :done, schema: "custom_schema")
```
"""
@spec rename_value(
type_name :: atom(),
before_value :: atom(),
after_value :: atom(),
opts :: Keyword.t()
) :: :ok | no_return()
def rename_value(type_name, before_value, after_value, opts \\ [])
when is_atom(type_name) and is_atom(before_value) and is_atom(after_value) and is_list(opts) do
type_name = type_name(type_name, opts)
before_value = to_value(before_value)
after_value = to_value(after_value)
up_sql = "
ALTER TYPE #{type_name} RENAME VALUE #{before_value} TO #{after_value};
"
down_sql = "
ALTER TYPE #{type_name} RENAME VALUE #{after_value} TO #{before_value};
"
execute(up_sql, down_sql)
end
defp before_after(opts) do
before_value = Keyword.get(opts, :before)
after_value = Keyword.get(opts, :after)
cond do
before_value ->
["BEFORE ", to_value(before_value)]
after_value ->
["AFTER ", to_value(after_value)]
true ->
[]
end
end
defp to_value(value) do
[?', to_string(value), ?']
end
defp type_name(name, opts) do
schema = Keyword.get(opts, :schema, "public")
"#{schema}.#{name}"
end
defp if_exists_sql(opts) do
if Keyword.get(opts, :if_exists, false) do
"IF EXISTS"
else
[]
end
end
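# Joins the non-empty SQL fragments with spaces and executes the resulting
# statement.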
defp execute_query(terms) do
terms
|> Enum.reject(&(is_nil(&1) || &1 == []))
|> Enum.intersperse(?\s)
|> IO.iodata_to_binary()
|> execute()
end
end
|
lib/ecto_enum_migration.ex
| 0.865537
| 0.809427
|
ecto_enum_migration.ex
|
starcoder
|
defmodule Posexional.Field.Value do
@moduledoc """
This module represents a single field in a row of a positional file.
"""
alias Posexional.Field
defstruct name: nil,
size: nil,
filler: ?\s,
alignment: :left,
default: nil
@spec new(atom, integer, Keyword.t()) :: %Posexional.Field.Value{}
def new(name, size, opts \\ []) do
opts = Keyword.merge([name: name, size: size, filler: ?\s, alignment: :left, default: nil], opts)
%Posexional.Field.Value{
name: opts[:name],
size: opts[:size],
filler: opts[:filler],
alignment: opts[:alignment],
default: opts[:default]
}
end
@doc """
Outputs a value formatted according to the field definition.
## Examples
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 5), "test")
"test "
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 5), "too long")
** (RuntimeError) The value too long is too long for the test field. The maximum size is 5 while the value is 8
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 10), "test")
"test "
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 10, filler: ?0), "test")
"test000000"
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 10, filler: ?0, alignment: :right), "test")
"000000test"
iex> Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 10), 50)
** (RuntimeError) The value provided for the test field doesn't seem to be a string
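
When the value is `nil` and a `default` was configured, the default is
positionalized like any other value (a sketch, assuming the usual padding
rules apply):

    Posexional.Field.Value.write(Posexional.Field.Value.new(:test, 5, default: "def"), nil)
    # => "def  "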
"""
@spec write(%Field.Value{}, binary) :: binary
def write(%Field.Value{filler: filler, size: size, default: nil}, nil) do
String.duplicate(to_string([filler]), size)
end
def write(field = %Field.Value{default: default}, nil) do
Field.positionalize(default, field)
end
def write(field = %Field.Value{name: name, size: size}, value) when is_binary(value) do
if String.length(value) <= size do
Field.positionalize(value, field)
else
raise "The value #{value} is too long for the #{name} field. " <>
"The maximum size is #{size} while the value is #{String.length(value)}"
end
end
def write(%Field.Value{name: name}, _value) do
raise "The value provided for the #{name} field doesn't seem to be a string"
end
end
defimpl Posexional.Protocol.FieldLength, for: Posexional.Field.Value do
def length(%Posexional.Field.Value{size: size}), do: size
end
defimpl Posexional.Protocol.FieldName, for: Posexional.Field.Value do
def name(%Posexional.Field.Value{name: field_name}), do: field_name
end
defimpl Posexional.Protocol.FieldSize, for: Posexional.Field.Value do
def size(%Posexional.Field.Value{size: size}), do: size
end
defimpl Posexional.Protocol.FieldWrite, for: Posexional.Field.Value do
def write(field, value), do: Posexional.Field.Value.write(field, value)
end
defimpl Posexional.Protocol.FieldRead, for: Posexional.Field.Value do
def read(field, content), do: Posexional.Field.depositionalize(content, field)
end
|
lib/posexional/field/value.ex
| 0.84375
| 0.563408
|
value.ex
|
starcoder
|
defmodule GraphQL.Lang.AST.ParallelVisitor do
@moduledoc ~S"""
A ParallelVisitor runs all child visitors in parallel instead of serially like the CompositeVisitor.
In this context, 'in parallel' really means that each visitor is invoked
for each node in the AST, but the :skip/:continue return value of enter and leave is maintained per-visitor.
This means individual visitors can bail out of AST processing as soon as possible and not waste cycles.
This code is based on the graphql-js *visitInParallel* function.
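
A minimal composition sketch (the child visitor structs are illustrative and
must implement the `Visitor` protocol):

    %GraphQL.Lang.AST.ParallelVisitor{visitors: [type_info_visitor, validation_visitor]}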
"""
alias GraphQL.Lang.AST.{
Visitor,
InitialisingVisitor,
PostprocessingVisitor
}
defstruct visitors: []
defimpl Visitor do
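# Only visitors that are not currently skipping see this node; a visitor
# that returns :skip is parked until the node that started the skip is left.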
def enter(visitor, node, accumulator) do
visitors = Enum.filter(visitor.visitors, fn(child_visitor) ->
!skipping?(accumulator, child_visitor)
end)
accumulator = Enum.reduce(visitors, accumulator, fn(child_visitor, accumulator) ->
case child_visitor |> Visitor.enter(node, accumulator) do
{:continue, next_accumulator} -> next_accumulator
{:skip, next_accumulator} ->
put_in(next_accumulator[:skipping][child_visitor], node)
end
end)
if length(visitors) > 0 do
{:continue, accumulator}
else
{:skip, accumulator}
end
end
def leave(visitor, node, accumulator) do
Enum.reduce visitor.visitors, accumulator, fn(child_visitor, accumulator) ->
cond do
!skipping?(accumulator, child_visitor) ->
child_visitor |> Visitor.leave(node, accumulator)
accumulator[:skipping][child_visitor] == node ->
update_in(accumulator[:skipping], &Map.delete(&1, child_visitor))
true -> accumulator
end
end
end
defp skipping?(accumulator, child_visitor) do
Map.has_key?(accumulator[:skipping], child_visitor)
end
end
defimpl InitialisingVisitor do
def init(visitor, accumulator) do
accumulator = put_in(accumulator[:skipping], %{})
Enum.reduce(visitor.visitors, accumulator, &InitialisingVisitor.init/2)
end
end
defimpl PostprocessingVisitor do
def finish(visitor, accumulator) do
Enum.reduce(visitor.visitors, accumulator, &PostprocessingVisitor.finish/2)
end
end
end
|
lib/graphql/lang/ast/parallel_visitor.ex
| 0.712332
| 0.478712
|
parallel_visitor.ex
|
starcoder
|
defmodule ArtemisLog.IntervalWorker do
@moduledoc """
A `use` able module for creating GenServer instances that perform tasks on a
set interval.
## Callbacks
Define a `call/2` function to be executed at the interval. Receives the
current `state.data` and the worker's config.
Must return a tuple `{:ok, _}` or `{:error, _}`.
## Options
Takes the following options:
:name - Required. Name of the server.
:enabled - Optional. If set to false, starts in paused state.
:interval - Optional. Integer or Atom. Interval between calls.
:log_limit - Optional. Number of log entries to keep.
:delayed_start - Optional. Integer or Atom. Time to wait for initial call.
For example:
use ArtemisLog.IntervalWorker,
interval: 15_000,
log_limit: 20,
name: :repo_reset_on_interval
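
A module using this then implements `call/2`; a minimal sketch (the return
value is illustrative):

    @impl true
    def call(_data, _config) do
      {:ok, :reset}
    end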
"""
@callback call(map(), any()) :: {:ok, any()} | {:error, any()}
@callback handle_info_callback(any(), any()) :: {:ok, any()} | {:error, any()}
@optional_callbacks handle_info_callback: 2
defmacro __using__(options) do
quote do
require Logger
use GenServer
defmodule State do
defstruct [
:config,
:data,
:timer,
log: []
]
end
defmodule Log do
defstruct [
:details,
:duration,
:ended_at,
:module,
:started_at,
:success
]
end
@behaviour ArtemisLog.IntervalWorker
@default_interval 60_000
@default_log_limit_fallback 10
def start_link(config \\ []) do
initial_state = %State{
config: config
}
dynamic_name = Keyword.get(config, :name)
configured_name = get_name()
options = [
name: dynamic_name || configured_name
]
GenServer.start_link(__MODULE__, initial_state, options)
end
def get_name(name \\ nil), do: name || get_option(:name)
def get_config(name \\ nil), do: GenServer.call(get_name(name), :config)
def get_data(name \\ nil), do: GenServer.call(get_name(name), :data)
def get_log(name \\ nil), do: GenServer.call(get_name(name), :log)
def get_options(), do: unquote(options)
def get_option(key, default \\ nil)
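# :delayed_start accepts an integer (milliseconds) or the atoms
# :next_minute / :next_full_minute, resolved against the current clock time.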
def get_option(:delayed_start, default) do
interval = Keyword.get(get_options(), :delayed_start, default)
cond do
interval == :next_full_minute -> ArtemisLog.Helpers.Time.get_milliseconds_to_next_minute() + :timer.minutes(1)
interval == :next_minute -> ArtemisLog.Helpers.Time.get_milliseconds_to_next_minute()
true -> interval
end
end
def get_option(:interval, default) do
fallback = default || @default_interval
interval = Keyword.get(get_options(), :interval, fallback)
cond do
interval == :next_minute -> ArtemisLog.Helpers.Time.get_milliseconds_to_next_minute()
true -> interval
end
end
def get_option(key, default), do: Keyword.get(get_options(), key, default)
def get_result(name \\ nil), do: GenServer.call(get_name(name), :result)
def get_state(name \\ nil), do: GenServer.call(get_name(name), :state)
def pause(name \\ nil), do: GenServer.call(get_name(name), :pause)
def resume(name \\ nil), do: GenServer.call(get_name(name), :resume)
def update(options \\ [], name \\ nil) do
case Keyword.get(options, :async) do
true -> Process.send(get_name(name), :update, [])
_ -> GenServer.call(get_name(name), :update)
end
end
# Callbacks
@impl true
def init(state) do
state = initial_actions(state)
{:ok, state}
end
@impl true
def handle_call(:config, _from, state) do
{:reply, state.config, state}
end
@impl true
def handle_call(:data, _from, state) do
{:reply, state.data, state}
end
@impl true
def handle_call(:log, _from, state) do
{:reply, state.log, state}
end
@impl true
def handle_call(:pause, _from, state) do
if state.timer && state.timer != :paused do
Process.cancel_timer(state.timer)
end
{:reply, true, %State{state | timer: :paused}}
end
@impl true
def handle_call(:result, _from, state) do
result = Artemis.Helpers.deep_get(state, [:data, :result])
{:reply, result, state}
end
@impl true
def handle_call(:resume, _from, state) do
if state.timer && state.timer != :paused do
Process.cancel_timer(state.timer)
end
{:reply, true, %State{state | timer: schedule_update()}}
end
@impl true
def handle_call(:state, _from, state) do
{:reply, state, state}
end
@impl true
@doc "Synchronous"
def handle_call(:update, _from, state) do
state = update_state(state)
{:reply, state, state}
end
@impl true
@doc "Asynchronous"
def handle_info(:update, state) do
state = update_state(state)
{:noreply, state}
end
def handle_info(data, state) do
handle_info_callback(data, state)
end
def handle_info_callback(_, state) do
{:noreply, state}
end
# Callback Helpers
defp initial_actions(state) do
case get_option(:enabled, true) do
true -> schedule_or_execute_initial_call(state)
false -> Map.put(state, :timer, :paused)
end
end
defp schedule_or_execute_initial_call(state) do
# Schedule the initial call asynchronously instead of invoking it
# synchronously, to prevent loading delays on application start
default_interval = 10
interval = get_option(:delayed_start, default_interval)
Map.put(state, :timer, schedule_update(interval))
end
defp update_state(state) do
started_at = Timex.now()
result = call(state.data, state.config)
ended_at = Timex.now()
state
|> Map.put(:data, parse_data(state, result))
|> Map.put(:log, update_log(state, result, started_at, ended_at))
|> Map.put(:timer, schedule_update_unless_paused(state))
end
defp schedule_update(custom_interval \\ nil) do
interval = custom_interval || get_option(:interval, @default_interval)
Process.send_after(self(), :update, interval)
end
defp schedule_update_unless_paused(%{timer: timer}) when timer == :paused, do: nil
defp schedule_update_unless_paused(%{timer: timer}) when is_nil(timer), do: schedule_update()
defp schedule_update_unless_paused(%{timer: timer}) do
Process.cancel_timer(timer)
schedule_update()
end
def parse_data(_state, {:ok, data}), do: data
def parse_data(%{data: current_data}, _), do: current_data
defp update_log(%{log: log}, result, started_at, ended_at) do
entry = %Log{
details: elem(result, 1),
duration: Timex.diff(ended_at, started_at),
ended_at: ended_at,
module: __MODULE__,
started_at: started_at,
success: success?(result)
}
log_limit = get_log_limit()
truncated = Enum.slice(log, 0, log_limit)
print_log(entry)
[entry | truncated]
end
defp print_log(entry) do
module = Artemis.Helpers.module_name(__MODULE__)
start = Timex.format!(entry.started_at, "{h24}:{m}:{s}{ss}")
duration = entry.duration / 1000
message = [
type: "IntervalWorker",
key: module,
start: start,
duration: "#{duration}ms"
]
options = [
log_level: Artemis.Helpers.AppConfig.fetch!(:artemis, :interval_worker, :default_log_level)
]
Artemis.Helpers.log(message, options)
end
defp get_log_limit() do
case get_option(:log_limit) do
nil -> get_default_log_limit()
limit -> limit
end
end
defp get_default_log_limit() do
:artemis
|> Application.fetch_env!(:interval_worker)
|> Keyword.fetch!(:default_log_limit)
|> Artemis.Helpers.to_integer()
rescue
_ -> @default_log_limit_fallback
end
defp success?({:ok, _}), do: true
defp success?(_), do: false
# Allow defined `@callback`s to be overwritten
defoverridable ArtemisLog.IntervalWorker
end
end
end
|
apps/artemis_log/lib/artemis_log/workers/interval_worker.ex
| 0.860369
| 0.463809
|
interval_worker.ex
|
starcoder
|
defmodule Day10 do
@moduledoc """
Advent of Code 2019
Day 10: Monitoring Station
"""
alias Day10.{Part1, Part2}
def get_map() do
Path.join(__DIR__, "inputs/day10.txt")
|> File.open!()
|> IO.stream(:line)
|> Stream.map(&String.trim/1)
|> Stream.map(&String.graphemes/1)
|> Enum.map(&Enum.map(&1, fn e -> e == "#" end))
end
def execute() do
map = get_map()
IO.puts("Part 1: #{Part1.run(map)}")
IO.puts("Part 2: #{Part2.run(map)}")
end
end
defmodule Day10.Part1 do
def run(map) do
asteroids = construct_asteroids_map(map)
dimensions = get_asteroids_dimensions(map)
get_monitoring_station_coords(asteroids, dimensions)
|> (&elem(&1, 1)).()
end
def construct_asteroids_map(map) do
for {row, y} <- Enum.with_index(map),
{is_asteroid, x} <- Enum.with_index(row),
into: %{},
do: {{x, y}, is_asteroid}
end
def get_asteroids_dimensions(map) do
{length(Enum.at(map, 0)), length(map)}
end
def get_monitoring_station_coords(asteroids, dimensions) do
asteroids
|> Stream.map(fn {coords, is_asteroid} ->
num_visible =
if is_asteroid,
do: count_visible_asteroids(coords, asteroids, dimensions),
else: 0
{coords, num_visible}
end)
|> Enum.max_by(&elem(&1, 1))
end
defp count_visible_asteroids(coords, asteroids, {width, height}) do
for x <- 0..(width - 1), y <- 0..(height - 1) do
if asteroids[{x, y}] and
coords != {x, y} and
not exists_blocking_asteroid?(coords, {x, y}, asteroids),
do: 1,
else: 0
end
|> Enum.sum()
end
# Recursive function to check for asteroids that block line of sight.
defp exists_blocking_asteroid?({x, y} = coords, asteroid, asteroids) do
{mx, my} = slope = calculate_slope(coords, asteroid)
exists_blocking_asteroid?({x + mx, y + my}, asteroid, slope, asteroids)
end
defp exists_blocking_asteroid?({x, y} = coords, asteroid, {mx, my} = slope, asteroids) do
cond do
coords == asteroid -> false
asteroids[coords] -> true
true -> exists_blocking_asteroid?({x + mx, y + my}, asteroid, slope, asteroids)
end
end
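# Reduces the direction vector by its gcd so that stepping by {mx, my}
# visits every grid point along the line of sight.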
defp calculate_slope({x1, y1}, {x2, y2}) do
mx = x2 - x1
my = y2 - y1
case Integer.gcd(mx, my) do
1 -> {mx, my}
gcd -> {trunc(mx / gcd), trunc(my / gcd)}
end
end
end
defmodule Day10.Part2 do
alias Day10.Part1
def run(map) do
asteroids = Part1.construct_asteroids_map(map)
dimensions = Part1.get_asteroids_dimensions(map)
ms_coords =
Part1.get_monitoring_station_coords(asteroids, dimensions)
|> (&elem(&1, 0)).()
sort_asteroids_into_list(asteroids, ms_coords)
|> get_200th_asteroid_coords()
|> (fn {x, y} -> 100 * x + y end).()
end
def sort_asteroids_into_list(asteroids, {ms_x, ms_y} = ms_coords) do
asteroids
|> Map.to_list()
|> Stream.reject(&(elem(&1, 0) == ms_coords))
|> Stream.reject(&(elem(&1, 1) == false))
|> Enum.map(fn {coords, _} -> {calculate_slope_weight(coords, ms_coords), coords} end)
|> transform_slope_weights_into_map()
|> Map.to_list()
|> Enum.sort(fn {{weight1, abs_slope1}, _}, {{weight2, abs_slope2}, _} ->
if weight1 == weight2, do: abs_slope1 < abs_slope2, else: weight1 < weight2
end)
|> Enum.map(fn {_, coords} ->
Enum.sort_by(coords, fn {x, y} -> :math.pow(x - ms_x, 2) + :math.pow(y - ms_y, 2) end)
end)
end
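# Simulates the rotating laser: vaporize the nearest asteroid of the current
# angle group, then move to the next group; groups with asteroids remaining
# are re-queued at the end.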
def get_200th_asteroid_coords([[asteroid | asteroids_list] | coords_list], ctr \\ 1) do
cond do
ctr == 200 ->
asteroid
asteroids_list == [] ->
get_200th_asteroid_coords(coords_list, ctr + 1)
true ->
get_200th_asteroid_coords(coords_list ++ [asteroids_list], ctr + 1)
end
end
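# Orders direction vectors clockwise starting from "up" (screen coordinates,
# y grows downward): the first tuple element picks the quadrant (0..3) and
# the ratio orders angles within that quadrant.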
def calculate_slope_weight({x, y}, {ms_x, ms_y}) do
diff = %{x: x - ms_x, y: y - ms_y}
cond do
diff.x >= 0 and diff.y < 0 -> {0, abs(diff.x / diff.y)}
diff.x > 0 and diff.y >= 0 -> {1, diff.y / diff.x}
diff.x <= 0 and diff.y > 0 -> {2, abs(diff.x / diff.y)}
diff.x < 0 and diff.y <= 0 -> {3, diff.y / diff.x}
end
end
defp transform_slope_weights_into_map(weights, map \\ %{})
defp transform_slope_weights_into_map([], map), do: map
defp transform_slope_weights_into_map([{weight, coords} | weights], map) do
map = Map.update(map, weight, [coords], &[coords | &1])
transform_slope_weights_into_map(weights, map)
end
end
|
lib/day10.ex
| 0.695028
| 0.60996
|
day10.ex
|
starcoder
|
defmodule GoogleMaps do
@moduledoc """
Provides various map-related functionality.
Unless otherwise noted, all the functions take the required Google
parameters as their own parameters, and all optional ones in an
`options` keyword list.
The `options` keyword list can also take special entries for `headers` and
`options`, which are passed to the underlying `Request`. See the
documentation of `HTTPoison` for details.
"""
alias GoogleMaps.{Request, Response}
@typedoc """
An address that will be geocoded and converted to latitude/longitude
coordinate.
"""
@type address :: String.t
@type latitude :: number
@type longitude :: number
@typedoc """
A latitude/longitude pair in tuple or comma-separated string format.
"""
@type coordinate :: {latitude(), longitude()} | String.t
@typedoc """
A tagged tuple with an ID of a known place.
"""
@type place_id :: {:place_id, String.t}
@typedoc """
A specific point, which can be an address, a latitude/longitude coordinate,
or a place ID tuple.
"""
@type waypoint :: address() | coordinate() | place_id()
@type options :: keyword()
@type mode :: String.t
@doc """
Retrieves the directions from one point to the other.
## Args:
* `origin` - The address, textual latitude/longitude value, or
place ID from which you wish to calculate directions. If you pass
an address, the Directions service geocodes the string and
converts it to a latitude/longitude coordinate to calculate
directions. This coordinate may be different from that returned
by the Google Maps Geocoding API, for example a building entrance
rather than its center. Place IDs must be prefixed with
`place_id:`. The place ID may only be specified if the request
includes an API key or a Google Maps APIs Premium Plan client ID.
You can retrieve place IDs from the Google Maps Geocoding API and
the Google Places API (including Place Autocomplete).
* `destination` - The address, textual latitude/longitude value, or
place ID to which you wish to calculate directions. The options
for the destination parameter are the same as for the origin
parameter, described above.
## Options:
* `mode` (defaults to "driving") - Specifies the mode of transport
to use when calculating directions. Valid values and other
request details are specified in Travel Modes section.
* `waypoints` - Specifies an array of waypoints. Waypoints alter a
route by routing it through the specified location(s). A waypoint
is specified as a latitude/longitude coordinate, an encoded
polyline, a place ID, or an address which will be geocoded.
Encoded polylines must be prefixed with enc: and followed by a
colon (:). Place IDs must be prefixed with place_id:. The place
ID may only be specified if the request includes an API key or
a Google Maps APIs Premium Plan client ID. Waypoints are only
supported for driving, walking and bicycling directions.
* `alternatives` - If set to true, specifies that the Directions
service may provide more than one route alternative in the
response. Note that providing route alternatives may increase the
response time from the server.
* `avoid` - Indicates that the calculated route(s) should avoid the
indicated features. Supports the following arguments:
* `tolls` indicates that the calculated route should avoid toll
roads/bridges.
* `highways` indicates that the calculated route should avoid
highways.
* `ferries` indicates that the calculated route should avoid
ferries.
* `indoor` indicates that the calculated route should avoid
indoor steps for walking and transit directions. Only requests
that include an API key or a Google Maps APIs Premium Plan
client ID will receive indoor steps by default.
* `language` - The language in which to return results.
* See the list of [supported languages](https://developers.google.com/maps/faq#languagesupport).
* If `language` is not supplied, the API attempts to use the
preferred language as specified in the `language` config, or
the native language of the domain from which request is sent.
* If a name is not available in the preferred language, the API
uses the closest match.
* The preferred language has a small influence on the set of
results that the API chooses to return, and the order in which
they are returned. The geocoder interprets abbreviations
differently depending on language, such as the abbreviations
for street types, or synonyms that may be valid in one
language but not in another. For example, utca and tΓ©r are
synonyms for street in Hungarian.
* `units` - Specifies the unit system to use when displaying results.
* `region` - Specifies the region code, specified as a ccTLD
("top-level domain") two-character value.
* `arrival_time` - Specifies the desired time of arrival for
transit directions, in seconds since midnight, January 1, 1970
UTC. You can specify either `departure_time` or `arrival_time`,
but not both. Note that arrival_time must be specified as an
integer.
* `departure_time` - Specifies the desired time of departure. You
can specify the time as an integer in seconds since midnight,
January 1, 1970 UTC. Alternatively, you can specify a value of
`now`, which sets the departure time to the current time (correct
to the nearest second). The departure time may be specified in
two cases:
* For requests where the travel mode is transit: You can
optionally specify one of `departure_time` or `arrival_time`.
If neither time is specified, the `departure_time` defaults to
now (that is, the departure time defaults to the current time).
* For requests where the travel mode is driving: You can specify
the `departure_time` to receive a route and trip duration
(response field: `duration_in_traffic`) that take traffic
conditions into account. This option is only available if the
request contains a valid API key, or a valid Google Maps APIs
Premium Plan client ID and signature. The `departure_time` must
be set to the current time or some time in the future. It
cannot be in the past.
* `traffic_model` (defaults to `best_guess`) - Specifies the
assumptions to use when calculating time in traffic. This setting
affects the value returned in the `duration_in_traffic` field in
the response, which contains the predicted time in traffic based
on historical averages. The `traffic_model` parameter may only be
specified for driving directions where the request includes a
`departure_time`, and only if the request includes an API key or
a Google Maps APIs Premium Plan client ID. The available values
for this parameter are:
* `best_guess` (default) indicates that the returned
`duration_in_traffic` should be the best estimate of travel
time given what is known about both historical traffic
conditions and live traffic. Live traffic becomes more
important the closer the `departure_time` is to now.
* `pessimistic` indicates that the returned `duration_in_traffic`
should be longer than the actual travel time on most days,
though occasional days with particularly bad traffic conditions
may exceed this value.
* `optimistic` indicates that the returned `duration_in_traffic`
should be shorter than the actual travel time on most days,
though occasional days with particularly good traffic
conditions may be faster than this value.
The default value of `best_guess` will give the most useful
predictions for the vast majority of use cases. The `best_guess`
travel time prediction may be shorter than `optimistic`, or
alternatively, longer than `pessimistic`, due to the way the
`best_guess` prediction model integrates live traffic information.
This function returns `{:ok, body}` if the request is successful, and
Google returns data. It returns `{:error, error}` when there are HTTP
errors, or `{:error, status, error_message}` when the request is successful but
Google returns a status code other than "OK", i.e.:
* "NOT_FOUND"
* "ZERO_RESULTS"
* "MAX_WAYPOINTS_EXCEEDED"
* "INVALID_REQUEST"
* "OVER_QUERY_LIMIT"
* "REQUEST_DENIED"
* "UNKNOWN_ERROR"
## Examples
# Driving directions with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.directions("Toronto", "Montreal", key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
# Driving directions from Toronto, Ontario to Montreal, Quebec.
iex> {:ok, result} = GoogleMaps.directions("Toronto", "Montreal")
iex> [route] = result["routes"]
iex> match?(%{
...> "northeast" => %{"lat" => _, "lng" => _},
...> "southwest" => %{"lat" => _, "lng" => _}
...> }, route["bounds"])
true
# Directions for a scenic bicycle journey that avoids major highways.
iex> {:ok, result} = GoogleMaps.directions("Toronto", "Montreal", [
...> avoid: "highway",
...> mode: "bicycling"
...> ])
iex> [route] = result["routes"]
iex> match?(%{
...> "northeast" => %{"lat" => _, "lng" => _},
...> "southwest" => %{"lat" => _, "lng" => _}
...> }, route["bounds"])
true
# Transit directions from Brooklyn, New York to Queens, New York.
# The request does not specify a `departure_time`, so the
# departure time defaults to the current time:
iex> {:ok, result} = GoogleMaps.directions("Brooklyn", "Queens", [
...> mode: "transit"
...> ])
iex> Enum.count(result["routes"])
1
# Driving directions from Glasgow, UK to Perth, UK using place IDs.
iex> {:ok, result} = GoogleMaps.directions("place_id:ChIJ685WIFYViEgRHlHvBbiD5nE", "place_id:ChIJA01I-8YVhkgRGJb0fW4UX7Y")
iex> Enum.count(result["routes"])
1
# Same driving directions above but using place ID tuples.
iex> {:ok, result} = GoogleMaps.directions({:place_id, "ChIJ685WIFYViEgRHlHvBbiD5nE"}, {:place_id, "ChIJA01I-8YVhkgRGJb0fW4UX7Y"})
iex> Enum.count(result["routes"])
1
"""
@spec directions(waypoint(), waypoint(), options()) :: Response.t()
def directions(origin, destination, options \\ []) do
params = options
|> Keyword.merge([origin: origin, destination: destination])
GoogleMaps.get("directions", params)
end
@doc """
Finds the distance between two addresses.
## Args:
* `origins` - The starting point for calculating travel distance and time.
* `destinations` - The finishing point for calculating travel distance and time.
## Options:
* `mode` (defaults to `driving`) - Specifies the mode of transport to use
when calculating distance.
* `language` - The language in which to return results.
* `avoid` - Introduces restrictions to the route. Valid values are specified
in the Restrictions section of this document. Only one restriction can be
specified.
* `units` - Specifies the unit system to use when expressing distance as
text. See the Unit Systems section of this document for more information.
* `arrival_time` - Specifies the desired time of arrival for transit
requests, in seconds since midnight, January 1, 1970 UTC. You can specify
either `departure_time` or `arrival_time`, but not both. Note that
`arrival_time` must be specified as an integer.
* `departure_time` - The desired time of departure. You can specify the time
as an integer in seconds since midnight, January 1, 1970 UTC.
Alternatively, you can specify a value of `now`, which sets the departure
time to the current time (correct to the nearest second).
* `traffic_model` (defaults to `best_guess`) - Specifies the assumptions to
use when calculating time in traffic.
* `transit_mode` - Specifies one or more preferred modes of transit.
* `transit_routing_preference` - Specifies preferences for transit requests.
This function returns `{:ok, body}` if the request is successful, and
Google returns data. It returns `{:error, error}` when there are HTTP
errors, or `{:error, status, error_message}` when the request is successful but
Google returns a status code other than "OK", i.e.:
* "NOT_FOUND"
* "ZERO_RESULTS"
* "MAX_WAYPOINTS_EXCEEDED"
* "INVALID_REQUEST"
* "OVER_QUERY_LIMIT"
* "REQUEST_DENIED"
* "UNKNOWN_ERROR"
## Examples
# Distance with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.distance("Place d'Armes, 78000 Versailles", "Champ de Mars, 5 Avenue Anatole", key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
# Distance from Eiffel Tower to Palace of Versailles.
iex> {:ok, result} = GoogleMaps.distance("Place d'Armes, 78000 Versailles", "Champ de Mars, 5 Avenue Anatole")
iex> match?(%{
...> "destination_addresses" => _,
...> "origin_addresses" => _,
...> "rows" => [
...> %{"elements" => [%{"distance" => %{"text" => _, "value" => _}}]}
...> ]
...> }, result)
true
# Distance from coordinate A to coordinate B
iex> {:ok, result2} = GoogleMaps.distance({27.5119772, -109.9409902}, {19.4156207, -99.171256517})
iex> match?(%{
...> "destination_addresses" => _,
...> "origin_addresses" => _,
...> "rows" => [
...> %{"elements" => [%{"distance" => %{"text" => _, "value" => _}}]}
...> ]
...> }, result2)
true
"""
def distance(origin, destination, options \\ [])
@spec distance(address(), address(), options()) :: Response.t()
def distance(origin, destination, options) when is_binary(origin) and is_binary(destination) do
params = options
|> Keyword.merge([origins: origin, destinations: destination])
GoogleMaps.get("distancematrix", params)
end
@spec distance(coordinate(), coordinate(), options()) :: Response.t()
def distance({lat1, lng1}, {lat2, lng2}, options) do
distance("#{lat1},#{lng1}", "#{lat2},#{lng2}", options)
end
@doc """
Converts between addresses and geographic coordinates.
**Geocoding** is the process of converting addresses (like "1600
Amphitheatre Parkway, Mountain View, CA") into geographic coordinates
(like latitude 37.423021 and longitude -122.083739), which you can
use to place markers on a map, or position the map.
**Reverse geocoding** is the process of converting geographic
coordinates into a human-readable address. The Google Maps
Geocoding API's reverse geocoding service also lets you find the
address for a given place ID.
## Args:
* `address` - The street address that you want to geocode, in the
format used by the national postal service of the country
concerned. Additional address elements such as business names and
unit, suite or floor numbers should be avoided.
* ** or **
* `components` - A component filter for which you wish to obtain a
geocode. The `components` filter will also be accepted as an
optional parameter if an address is provided.
* --- Reverse geocoding ---
* `latlng` - The latitude and longitude values specifying the
location for which you wish to obtain the closest, human-readable
address.
* ** or **
* `place_id` - The place ID of the place for which you wish to
obtain the human-readable address. The place ID is a unique
identifier that can be used with other Google APIs.
## Options:
* `bounds` - The bounding box of the viewport within which to bias
geocode results more prominently. This parameter will only
influence, not fully restrict, results from the geocoder.
* `language` - The language in which to return results.
* `region` - The region code, specified as a ccTLD ("top-level
domain") two-character value. This parameter will only influence,
not fully restrict, results from the geocoder.
* `components` - The component filters, separated by a pipe (|).
Each component filter consists of a component:value pair and will
fully restrict the results from the geocoder. For more
information see Component Filtering.
* `result_type` - One or more address types, separated by a pipe
(`|`). Examples of address types: `country`, `street_address`,
`postal_code`. For a full list of allowable values, see the
address types. **Note** for reverse geocoding requests.
* `location_type` - One or more location types, separated by a pipe
(`|`). Specifying a type will restrict the results to this type.
If multiple types are specified, the API will return all
addresses that match any of the types. **Note** for reverse
geocoding requests. The following values are supported:
* "ROOFTOP" restricts the results to addresses for which we
have location information accurate down to street address
precision.
* "RANGE_INTERPOLATED" restricts the results to those that
reflect an approximation (usually on a road) interpolated
between two precise points (such as intersections). An
interpolated range generally indicates that rooftop geocodes
are unavailable for a street address.
* "GEOMETRIC_CENTER" restricts the results to geometric centers
of a location such as a polyline (for example, a street) or
polygon (region).
* "APPROXIMATE" restricts the results to those that are
characterized as approximate.
If both `result_type` and `location_type` restrictions are present
then the API will return only those results that matches both the
`result_type` and the `location_type` restrictions.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. The returned body is a map that contains two root
elements:
* `status` contains metadata on the request.
* `results` contains an array of geocoded address information and
geometry information.
Generally, only one entry in the `results` array is returned for
address lookups, though the geocoder may return several results when
address queries are ambiguous. Reverse geocoder returns more than one
result, from most specific to least specific.
A typical result is made up of the following fields:
* The `types[]` array indicates the *type* of the returned result.
This array contains a set of zero or more tags identifying the
type of feature returned in the result. For example, a geocode
of "Chicago" returns "locality" which indicates that "Chicago"
is a city, and also returns "political" which indicates it is a
political entity.
* `formatted_address` is a string containing the human-readable
address of this location. Often this address is equivalent to
the "postal address," which sometimes differs from country to
country. (Note that some countries, such as the United Kingdom,
do not allow distribution of true postal addresses due to
licensing restrictions.) This address is generally composed of
one or more address components. For example, the address "111
8th Avenue, New York, NY" contains separate address components
for "111" (the street number), "8th Avenue" (the route), "New
York" (the city) and "NY" (the US state). These address
components contain additional information as noted below.
* `address_components[]` is an array containing the separate
address components, as explained above. **Note** that
`address_components[]` may contain more address components than
noted within the `formatted_address`. Each `address_component`
typically contains:
* `types[]` is an array indicating the type of the address
component.
* `long_name` is the full text description or name of the address
component as returned by the Geocoder.
* `short_name` is an abbreviated textual name for the address
component, if available. For example, an address component for
the state of Alaska may have a `long_name` of "Alaska" and a
`short_name` of "AK" using the 2-letter postal abbreviation.
* `postcode_localities[]` is an array denoting all the localities
contained in a postal code. This is only present when the result
is a postal code that contains multiple localities.
* `geometry` contains the following information:
* `location` contains the geocoded latitude,longitude value. For
normal address lookups, this field is typically the most
important.
* `location_type` stores additional data about the specified
location. The following values are currently supported:
* "ROOFTOP" indicates that the returned result is a precise
geocode for which we have location information accurate down
to street address precision.
* "RANGE_INTERPOLATED" indicates that the returned result
reflects an approximation (usually on a road) interpolated
between two precise points (such as intersections).
Interpolated results are generally returned when rooftop
geocodes are unavailable for a street address.
* "GEOMETRIC_CENTER" indicates that the returned result is the
geometric center of a result such as a polyline (for example,
a street) or polygon (region).
* "APPROXIMATE" indicates that the returned result is
approximate.
* `viewport` contains the recommended viewport for displaying the
returned result, specified as two latitude,longitude values
defining the southwest and northeast corner of the viewport
bounding box. Generally the viewport is used to frame a result
when displaying it to a user.
* `bounds` (optionally returned) stores the bounding box which
can fully contain the returned result. Note that these bounds
may not match the recommended viewport. (For example, San
Francisco includes the Farallon islands, which are technically
part of the city, but probably should not be returned in the
viewport.)
* `partial_match` indicates that the geocoder did not return an
exact match for the original request, though it was able to match
part of the requested address. You may wish to examine the
original request for misspellings and/or an incomplete address.
Partial matches most often occur for street addresses that do not
exist within the locality you pass in the request. Partial
matches may also be returned when a request matches two or more
locations in the same locality. For example, "21 Henr St,
Bristol, UK" will return a partial match for both Henry Street
and Henrietta Street. Note that if a request includes a
misspelled address component, the geocoding service may suggest
an alternative address. Suggestions triggered in this way will
also be marked as a partial match.
* `place_id` is a unique identifier that can be used with other
Google APIs. For example, you can use the place_id in a Google
Places API request to get details of a local business, such as
phone number, opening hours, user reviews, and more.
## Examples
# Geocode with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.geocode("1600 Amphitheatre Parkway, Mountain View, CA", key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
iex> {:ok, %{"results" => [result]}} =
...> GoogleMaps.geocode("1600 Amphitheatre Parkway, Mountain View, CA")
iex> match?(%{
...> "formatted_address" => _,
...> "geometry" => %{"location" => %{"lat" => _, "lng" => _}}
...> }, result)
true
iex> {:ok, %{"results" => [result|_]}} =
...> GoogleMaps.geocode({40.714224,-73.961452})
iex> match?(%{
...> "formatted_address" => _,
...> "geometry" => %{"location" => %{"lat" => _, "lng" => _}}
...> }, result)
true
iex> {:ok, %{"results" => [result|_]}} =
...> GoogleMaps.geocode("place_id:ChIJd8BlQ2BZwokRAFUEcm_qrcA")
iex> match?(%{
...> "formatted_address" => _,
...> "geometry" => %{"location" => %{"lat" => _, "lng" => _}}
...> }, result)
true
iex> {:ok, %{"results" => [result|_]}} =
...> GoogleMaps.geocode({:place_id, "ChIJd8BlQ2BZwokRAFUEcm_qrcA"})
iex> match?(%{
...> "formatted_address" => _,
...> "geometry" => %{"location" => %{"lat" => _, "lng" => _}}
...> }, result)
true
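
Component filters can also be passed as a map, which is joined into
Google's `component:value|component:value` format (a sketch, not a doctest;
results depend on Google's data):

    GoogleMaps.geocode(%{country: "FR", locality: "Paris"})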
"""
@spec geocode(map() | String.t | coordinate() | place_id, options()) :: Response.t()
def geocode(input, options \\ [])
# Reverse geo-coding
def geocode({lat, lng}, options) when is_number(lat) and is_number(lng) do
params = Keyword.merge(options, [latlng: "#{lat},#{lng}"])
GoogleMaps.get("geocode", params)
end
def geocode({:place_id, place_id}, options) do
params = Keyword.merge(options, [place_id: place_id])
GoogleMaps.get("geocode", params)
end
def geocode("place_id:" <> place_id, options) do
params = Keyword.merge(options, [place_id: place_id])
GoogleMaps.get("geocode", params)
end
# Geocode using components.
def geocode(components, options) when is_map(components) do
components = Enum.map_join(components, "|", fn({k, v}) -> "#{k}:#{v}" end)
params = Keyword.merge(options, [components: components])
GoogleMaps.get("geocode", params)
end
def geocode(address, options) when is_binary(address) do
params = Keyword.merge(options, [address: address])
GoogleMaps.get("geocode", params)
end
@doc """
Automatically fills in the name and/or address of a place.
The Place Autocomplete service is a web service that returns place
predictions in response to an HTTP request. The request specifies a
textual search string and optional geographic bounds. The service
can be used to provide autocomplete functionality for text-based
geographic searches, by returning places such as businesses,
addresses and points of interest as a user types.
The Place Autocomplete service can match on full words as well as
substrings. Applications can therefore send queries as the user
types, to provide on-the-fly place predictions.
The returned predictions are designed to be presented to the user to
aid them in selecting the desired place. You can send a Place Details
request for more information about any of the places returned.
## Args:
* `input` - The text string on which to search. The Place
Autocomplete service will return candidate matches based on this
string and order results based on their perceived relevance.
## Options:
* `offset` - The position, in the input term, of the last character
that the service uses to match predictions. For example, if the
input is 'Google' and the `offset` is 3, the service will match
on 'Goo'. The string determined by the offset is matched against
the first word in the input term only. For example, if the input
term is 'Google abc' and the `offset` is 3, the service will
attempt to match against 'Goo abc'. If no offset is supplied, the
service will use the whole term. The offset should generally be
set to the position of the text caret.
* `location` - The point around which you wish to retrieve place
information. Must be specified as *latitude,longitude*.
* `radius` - The distance (in meters) within which to return place
results. Note that setting a `radius` biases results to the
indicated area, but may not fully restrict results to the
specified area. See Location Biasing below.
* `language` - The language code, indicating in which language the
results should be returned, if possible. Searches are also biased
to the selected language; results in the selected language may be
given a higher ranking. See the [list of supported languages](https://developers.google.com/maps/faq#languagesupport)
and their codes. Note that we often update supported languages so
this list may not be exhaustive. If language is not supplied, the
Place Autocomplete service will attempt to use the native
language of the domain from which the request is sent.
* `types` - The types of place results to return. See Place Types
below. If no type is specified, all types will be returned.
* `components` - A grouping of places to which you would like to
restrict your results. Currently, you can use `components` to
filter by country. The country must be passed as a two character,
ISO 3166-1 Alpha-2 compatible country code. For example:
`components=country:fr` would restrict your results to places
within France.
## Location Biasing
You may bias results to a specified circle by passing a `location` &
a `radius` parameter. This instructs the Place Autocomplete service
to *prefer* showing results within that circle. Results outside of
the defined area may still be displayed. You can use the `components`
parameter to filter results to show only those places within a
specified country.
**Note**: If you do not supply the location and radius, the API will
attempt to detect the server's location from its IP address, and
will bias the results to that location. If you would prefer to have
no location bias, set the location to '0,0' and radius to '20000000'
(20 thousand kilometers), to encompass the entire world.
*Tip*: Establishment results generally do not rank highly enough to
show in results when the search area is large. If you want
establishments to appear in mixed establishment/geocode results, you
can specify a smaller radius. Alternatively, use `types=establishment`
to restrict results to establishments only.
## Place Types
You may restrict results from a Place Autocomplete request to be of
a certain type by passing a `types` parameter. The parameter specifies
a type or a type collection, as listed in the supported types below.
If nothing is specified, all types are returned. In general only a
single type is allowed. The exception is that you can safely mix the
`geocode` and `establishment` types, but note that this will have the
same effect as specifying no types. The supported types are:
* `geocode` instructs the Place Autocomplete service to return only
geocoding results, rather than business results. Generally, you
use this request to disambiguate results where the location
specified may be indeterminate.
* `address` instructs the Place Autocomplete service to return only
geocoding results with a precise address. Generally, you use this
request when you know the user will be looking for a fully
specified address.
* `establishment` instructs the Place Autocomplete service to
return only business results.
* the `(regions)` type collection instructs the Places service to
return any result matching the following types:
* `locality`
* `sublocality`
* `postal_code`
* `country`
* `administrative_area_level_1`
* `administrative_area_level_2`
* the `(cities)` type collection instructs the Places service to
return results that match `locality` or
`administrative_area_level_3`.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. The returned body is a map that contains two root
elements:
* `status` contains metadata on the request.
* `predictions` contains an array of places, with information about
the place. See Place Autocomplete Results for information about
these results. The Google API returns up to 5 results.
Of particular interest within the results are the place_id elements,
which can be used to request more specific details about the place
via a separate query. See Place Details Requests.
It returns `{:error, error}` when there are HTTP
errors, or `{:error, status, error_message}` when the request is successful but
Google returns a status code other than "OK", i.e.:
* "NOT_FOUND"
* "ZERO_RESULTS"
* "MAX_WAYPOINTS_EXCEEDED"
* "INVALID_REQUEST"
* "OVER_QUERY_LIMIT"
* "REQUEST_DENIED"
* "UNKNOWN_ERROR"
## Place Autocomplete Results
Each prediction result contains the following fields:
* `description` contains the human-readable name for the returned
result. For `establishment` results, this is usually the business
name.
* `place_id` is a textual identifier that uniquely identifies a
place. To retrieve information about the place, pass this
identifier in the `placeId` field of a Google Places API request.
* `terms` contains an array of terms identifying each section of
the returned description (a section of the description is
generally terminated with a comma). Each entry in the array has
a value field, containing the text of the term, and an `offset`
field, defining the start position of this term in the
description, measured in Unicode characters.
* `types` contains an array of types that apply to this place. For
example: [ "political", "locality" ] or [ "establishment",
"geocode" ].
* `matched_substrings` contains an array with offset value and
length. These describe the location of the entered term in the
prediction result text, so that the term can be highlighted if
desired.
**Note**: The Place Autocomplete response does not include the `scope`
or `alt_ids` fields that you may see in search results or place
details. This is because Autocomplete returns only Google-scoped
place IDs. It does not return app-scoped place IDs that have not yet
been accepted into the Google Places database. For more details about
Google-scoped and app-scoped place IDs, see the documentation on
[adding places](https://developers.google.com/places/web-service/add-place).
## Examples
# Searching with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.place_autocomplete("Paris France", key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
# Searching for "Paris"
iex> {:ok, result} = GoogleMaps.place_autocomplete("Paris France")
iex> Enum.count(result["predictions"]) > 0
true
iex> [paris | _rest] = result["predictions"]
iex> paris["description"]
"Paris, France"
iex> paris["place_id"]
"ChIJD7fiBh9u5kcRYJSMaMOCCwQ"
iex> paris["types"]
[ "locality", "political", "geocode" ]
# Establishments containing the string "Amoeba" within an area
# centered in San Francisco, CA:
iex> {:ok, result} = GoogleMaps.place_autocomplete("Amoeba", [
...> types: "establishment",
...> location: "37.76999,-122.44696",
...> radius: 500
...> ])
iex> Enum.count(result["predictions"])
5
# Addresses containing "Vict" with results in French:
iex> {:ok, result} = GoogleMaps.place_autocomplete("Vict", [
...> types: "geocode",
...> language: "fr"
...> ])
iex> Enum.count(result["predictions"])
5
# Cities containing "Vict" with results in Brazilian Portuguese:
iex> {:ok, result} = GoogleMaps.place_autocomplete("Vict", [
...> types: "(cities)",
...> language: "pt_BR"
...> ])
iex> Enum.count(result["predictions"])
5
"""
@spec place_autocomplete(String.t, options()) :: Response.t()
def place_autocomplete(input, options \\ []) do
params = options
|> Keyword.merge([input: input])
GoogleMaps.get("place/autocomplete", params)
end
@doc """
Provides a query prediction for text-based geographic searches.
The Query Autocomplete service allows you to add on-the-fly
geographic query predictions to your application. Instead of
searching for a specific location, a user can type in a categorical
search, such as "pizza near New York" and the service responds with
a list of suggested queries matching the string. As the Query
Autocomplete service can match on both full words and substrings,
applications can send queries as the user types to provide
on-the-fly predictions.
## Args:
* `input` - The text string on which to search. The Places
service will return candidate matches based on this
string and order results based on their perceived relevance.
## Options:
* `offset` - The character position in the input term at which the
service uses text for predictions. For example, if the input is
'Googl' and the completion point is 3, the service will match
on 'Goo'. The `offset` should generally be set to the position of
the text caret. If no offset is supplied, the service will use
the entire term.
* `location` - The point around which you wish to retrieve place
information. Must be specified as *latitude,longitude*.
* `radius` - The distance (in meters) within which to return place
results. Note that setting a `radius` biases results to the
indicated area, but may not fully restrict results to the
specified area. See Location Biasing below.
* `language` - The language code, indicating in which language the
results should be returned, if possible. Searches are also biased
to the selected language; results in the selected language may be
given a higher ranking. See the [list of supported languages](https://developers.google.com/maps/faq#languagesupport)
and their codes. Note that we often update supported languages so
this list may not be exhaustive. If language is not supplied, the
Places service will attempt to use the native language of the
domain from which the request is sent.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. The returned body is a map that contains two root
elements:
* `status` contains metadata on the request.
* `predictions` contains an array of query predictions.
Each prediction result contains the following fields:
* `description` contains the human-readable name for the returned
result. For `establishment` results, this is usually the business
name.
* `terms` contains an array of terms identifying each section of
the returned description (a section of the description is
generally terminated with a comma). Each entry in the array has
a `value` field, containing the text of the term, and an `offset`
field, defining the start position of this term in the
description, measured in Unicode characters.
* `matched_substring` contains an `offset` value and a `length`.
These describe the location of the entered term in the prediction
result text, so that the term can be highlighted if desired.
Note that some of the predictions may be places, and the `place_id`,
`reference` and `type` fields will be included with those
predictions. See Place Autocomplete Results for information about
these results.
## Examples
# A request with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.place_query("Pizza near Par", key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
# A request "Pizza near Par":
iex> {:ok, result} = GoogleMaps.place_query("Pizza near Par")
iex> is_list(result["predictions"])
true
# A request "Pizza near Par", with results in French:
iex> {:ok, result} = GoogleMaps.place_query("Pizza near Par", [language: "fr"])
iex> is_list(result["predictions"])
true
"""
@spec place_query(String.t, options()) :: Response.t()
def place_query(input, options \\ []) do
params = options
|> Keyword.merge([input: input])
GoogleMaps.get("place/queryautocomplete", params)
end
@doc """
Searches for nearby places based on location and radius.
The Google Places API Web Service allows you to query
for place information on a variety of categories,
such as: establishments, prominent points of interest,
geographic locations, and more. You can search for places
either by proximity or a text string. A Place Search
returns a list of places along with summary information
about each place; additional information is available
via a Place Details query.
## Args:
* `location` - The latitude/longitude around which to
retrieve place information. Can be in string format:
`"123.456,-123.456"` or tuple format: `{123.456, -123.456}`
* `radius` - Defines the distance (in meters) within which
to return place results. The maximum allowed radius is 50,000 meters.
Note that radius must not be included if `rankby=distance`
(described under Optional parameters below) is specified
## Options:
* `keyword` - The text string on which to search. The Places
service will return candidate matches based on this
string and order results based on their perceived relevance.
* `language` - The language code, indicating in which language the
results should be returned, if possible. Searches are also biased
to the selected language; results in the selected language may be
given a higher ranking. See the [list of supported languages](https://developers.google.com/maps/faq#languagesupport)
and their codes. Note that we often update supported languages so
this list may not be exhaustive. If language is not supplied, the
Places service will attempt to use the native language of the
domain from which the request is sent.
* `minprice` and `maxprice` - Restricts results to only those places
within the specified price level. Valid values are in the range
from `0` (most affordable) to `4` (most expensive), inclusive.
The exact amount indicated by a specific value will vary from
region to region.
* `opennow` - Returns only those places that are open for business at
the time the query is sent. Places that do not specify opening hours
in the Google Places database will not be returned if you include
this parameter in your query.
* `name` - A term to be matched against all content that Google has indexed for this place.
Equivalent to keyword. The name field is no longer restricted to place names.
Values in this field are combined with values in the keyword field and passed
as part of the same search string. We recommend using only the keyword parameter
for all search terms.
* `type` - Restricts the results to places matching the specified type.
Only one type may be specified (if more than one type is provided,
all types following the first entry are ignored).
See the [list of supported types](https://developers.google.com/places/web-service/supported_types).
* `rankby` - Specifies the order in which results are listed.
Note that `rankby` must not be included if `radius` (described under Args above) is specified.
Possible values are:
* `prominence` - (default). This option sorts results based on their importance.
Ranking will favor prominent places within the specified area.
Prominence can be affected by a place's ranking in Google's index,
global popularity, and other factors.
* `distance` - This option biases search results in ascending order by
their distance from the specified location. When distance is specified,
one or more of keyword, name, or type is required.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. The returned body is a map that contains four root
elements:
* `status` contains metadata on the request.
* `results` contains an array of nearby places.
* `html_attributions` contains a set of attributions about this listing which must be displayed to the user.
* `next_page_token` contains a token that can be used to return up to 20 additional results.
A `next_page_token` will not be returned if there are no additional results to display.
The maximum number of results that can be returned is 60. There is a short delay between when a
`next_page_token` is issued, and when it will become valid.
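A sketch of requesting the next page via Google's `pagetoken` request
parameter (allow a short delay before the token becomes valid):

    {:ok, page1} = GoogleMaps.place_nearby("38.8990252802915,-77.0351808197085", 500)
    Process.sleep(2_000)
    {:ok, page2} = GoogleMaps.get("place/nearbysearch", pagetoken: page1["next_page_token"])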
Each result contains the following fields:
* `geometry` contains geometry information about the result, generally including the location (geocode)
of the place and (optionally) the viewport identifying its general area of coverage.
* `icon` contains the URL of a recommended icon which may be displayed to the user when indicating this result.
* `name` contains the human-readable name for the returned result. For establishment results, this is usually the business name.
* `opening_hours` may contain the following information:
* `open_now` is a boolean value indicating if the place is open at the current time.
* `photos[]` - an array of photo objects, each containing a reference to an image.
A Place Search will return at most one photo object. Performing a Place Details request on the place
may return up to ten photos. More information about Place Photos and how you can use the images in your
application can be found in the [Place Photos](https://developers.google.com/places/web-service/photos) documentation.
A photo object is described as:
* `photo_reference` - a string used to identify the photo when you perform a Photo request.
* `height` - the maximum height of the image.
* `width` - the maximum width of the image.
* `html_attributions[]` - contains any required attributions. This field will always be present, but may be empty.
* `place_id` - a textual identifier that uniquely identifies a place. To retrieve information about the place,
pass this identifier in the placeId field of a Places API request. For more information about place IDs,
see the [place ID overview](https://developers.google.com/places/web-service/place-id).
* `scope` - Indicates the scope of the `place_id`. The possible values are:
* `APP`: The place ID is recognised by your application only. This is because your application added the place,
and the place has not yet passed the moderation process.
* `GOOGLE`: The place ID is available to other applications and on Google Maps.
* `alt_ids` - An array of zero, one or more alternative place IDs for the place,
with a scope related to each alternative ID. Note: This array may be empty or not present.
If present, it contains the following fields:
* `place_id` - The most likely reason for a place to have an alternative place ID is if your application
adds a place and receives an application-scoped place ID, then later receives a Google-scoped place ID
after passing the moderation process.
* `scope` - The scope of an alternative place ID will always be APP, indicating that the alternative
place ID is recognised by your application only.
* `price_level` - The price level of the place, on a scale of `0` to `4`. The exact amount indicated by a
specific value will vary from region to region. Price levels are interpreted as follows:
* `0` - Free
* `1` - Inexpensive
* `2` - Moderate
* `3` - Expensive
* `4` - Very Expensive
* `rating` contains the place's rating, from 1.0 to 5.0, based on aggregated user reviews
* `types` contains an array of feature types describing the given result.
See the [list of supported types](https://developers.google.com/places/web-service/supported_types#table2).
* `vicinity` contains a feature name of a nearby location. Often this feature refers to a street or
neighborhood within the given results.
* `permanently_closed` is a boolean flag indicating whether the place has permanently shut down (value true).
If the place is not permanently closed, the flag is absent from the response.
## Examples
# Search with an invalid API key
iex> {:error, status, error_message} = GoogleMaps.place_nearby("38.8990252802915,-77.0351808197085", 500, key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
# Search for museums 500 meters around the White house
iex> {:ok, response} = GoogleMaps.place_nearby("38.8990252802915,-77.0351808197085", 500)
iex> is_list(response["results"])
true
# Search for museums by the white house but rank by distance
iex> {:ok, response} = GoogleMaps.place_nearby(
...> "38.8990252802915,-77.0351808197085",
...> 500,
...> [rankby: "distance",
...> keyword: "museum"])
iex> Enum.any?(response["results"],
...> fn result -> result["name"] == "National Museum of Women in the Arts" end)
true
"""
@spec place_nearby(coordinate(), integer, options()) :: Response.t()
def place_nearby(location, radius, options \\ [])
def place_nearby(location, radius, options) when is_binary(location) do
params =
if options[:rankby] == "distance" do
Keyword.merge(options, [location: location])
else
Keyword.merge(options, [location: location, radius: radius])
end
GoogleMaps.get("place/nearbysearch", params)
end
def place_nearby({latitude, longitude}, radius, options) when is_number(latitude) and is_number(longitude) do
place_nearby("#{latitude},#{longitude}", radius, options)
end
@doc """
A Place Details request returns more comprehensive information about the indicated place
such as its complete address, phone number, user rating and reviews.
## Args:
* `place_id` - A textual identifier that uniquely identifies a place,
returned from a [Place Search](https://developers.google.com/places/web-service/search).
For more information about place IDs, see the [place ID overview](https://developers.google.com/places/web-service/place-id).
Can be in the following formats:
* tuple: `{:place_id, "ChIJy5RYvL23t4kR3U1oXsAxEzs"}`
* place_id string: `"place_id:ChIJy5RYvL23t4kR3U1oXsAxEzs"`
* string: `"ChIJy5RYvL23t4kR3U1oXsAxEzs"`
## Options:
* `language` - The language code, indicating in which language the
results should be returned, if possible. Searches are also biased
to the selected language; results in the selected language may be
given a higher ranking. See the [list of supported languages](https://developers.google.com/maps/faq#languagesupport)
and their codes. Note that we often update supported languages so
this list may not be exhaustive. If language is not supplied, the
Places service will attempt to use the native language of the
domain from which the request is sent.
* `region` - The region code, specified as a [ccTLD](https://en.wikipedia.org/wiki/CcTLD) (country code top-level domain)
two-character value. Most ccTLD codes are identical to ISO 3166-1 codes,
with some exceptions. This parameter will only influence, not fully restrict,
results. If more relevant results exist outside of the specified region,
they may be included. When this parameter is used, the country name is
omitted from the resulting `formatted_address` for results in the specified region.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. The returned body is a map that contains three root
elements:
* `status` contains metadata on the request.
* `result` contains the detailed information about the place requested
* `html_attributions` contains a set of attributions about this listing which must be displayed to the user.
Each result contains the following fields:
* `address_components[]` is an array containing the separate components applicable to this address.
Each address component typically contains the following fields:
* `types[]` is an array indicating the type of the address component.
* `long_name` is the full text description or name of the address component as returned by the Geocoder.
* `short_name` is an abbreviated textual name for the address component, if available.
For example, an address component for the state of Alaska may have a `long_name` of
"Alaska" and a `short_name` of "AK" using the 2-letter postal abbreviation.
Note the following facts about the address_components[] array:
* The array of address components may contain more components than the `formatted_address`.
* The array does not necessarily include all the political entities that contain an address,
apart from those included in the formatted_address. To retrieve all the political entities
that contain a specific address, you should use reverse geocoding, passing the
latitude/longitude of the address as a parameter to the request
* The format of the response is not guaranteed to remain the same between requests.
In particular, the number of `address_components` varies based on the address requested
and can change over time for the same address. A component can change position in the array.
The type of the component can change. A particular component may be missing in a later response.
* `formatted_address` is a string containing the human-readable address of this place.
Often this address is equivalent to the postal address. Note that some countries,
such as the United Kingdom, do not allow distribution of true postal addresses due to licensing restrictions.
The formatted address is logically composed of one or more address components. For example, the address
"111 8th Avenue, New York, NY" consists of the following components: "111" (the street number),
"8th Avenue" (the route), "New York" (the city) and "NY" (the US state).
Do not parse the formatted address programmatically. Instead you should use the individual address components,
which the API response includes in addition to the formatted address field
* `formatted_phone_number` contains the place's phone number in its [local format](http://en.wikipedia.org/wiki/Local_conventions_for_writing_telephone_numbers).
For example, the `formatted_phone_number` for Google's Sydney, Australia office is `(02) 9374 4000`.
* `adr_address` is a representation of the place's address in the [adr microformat](http://microformats.org/wiki/adr).
* `geometry` contains the following information:
* `location` contains the geocoded latitude,longitude value for this place.
* `viewport` contains the preferred viewport when displaying this place on a map as a `LatLngBounds` if it is known.
* `icon` contains the URL of a suggested icon which may be displayed to the user when indicating this result on a map.
* `international_phone_number` contains the place's phone number in international format.
International format includes the country code, and is prefixed with the plus (+) sign.
For example, the `international_phone_number` for Google's Sydney, Australia office is `+61 2 9374 4000`
* `name` contains the human-readable name for the returned result.
For `establishment` results, this is usually the canonicalized business name.
* `opening_hours` contains the following information:
* `open_now` is a boolean value indicating if the place is open at the current time.
* `periods[]` is an array of opening periods covering seven days, starting from Sunday, in chronological order.
Each period contains:
* `open` contains a pair of day and time objects describing when the place opens:
* `day` a number from 0-6, corresponding to the days of the week, starting on Sunday. For example, 2 means Tuesday.
* `time` may contain a time of day in 24-hour hhmm format. Values are in the range 0000-2359.
The `time` will be reported in the place's time zone.
* `close` may contain a pair of day and time objects describing when the place closes.
Note: If a place is always open, the close section will be missing from the response.
Clients can rely on always-open being represented as an open period containing day
with value 0 and time with value 0000, and no close.
* `weekday_text` is an array of seven strings representing the formatted opening hours for each day of the week.
If a language parameter was specified in the Place Details request, the Places Service will format and localize
the opening hours appropriately for that language. The ordering of the elements in this array depends on the
language parameter. Some languages start the week on Monday while others start on Sunday.
* `permanently_closed` is a boolean flag indicating whether the place has permanently shut down (value `true`).
If the place is not permanently closed, the flag is absent from the response.
* `photos[]` - an array of photo objects, each containing a reference to an image.
A Place Details request may return up to ten photos.
More information about place photos and how you can use the images in your application can be found in the [Place Photos documentation](https://developers.google.com/places/web-service/photos).
A photo object is described as:
* `photo_reference` - a string used to identify the photo when you perform a Photo request.
* `height` - the maximum height of the image.
* `width` - the maximum width of the image.
* `html_attributions[]` - contains any required attributions. This field will always be present, but may be empty.
* `place_id`: A textual identifier that uniquely identifies a place. To retrieve information about the place, pass this
identifier in the placeId field of a Places API request. For more information about place IDs, see the [place ID overview](https://developers.google.com/places/web-service/place-id).
* `scope`: Indicates the scope of the place_id. The possible values are:
* `APP`: The place ID is recognised by your application only. This is because your application added the place,
and the place has not yet passed the moderation process.
* `GOOGLE`: The place ID is available to other applications and on Google Maps.
* `alt_ids` - An array of zero, one or more alternative place IDs for the place, with a scope related to each alternative ID.
Note: This array may be empty or not present. If present, it contains the following fields:
* `place_id` - The most likely reason for a place to have an alternative place ID is if your application
adds a place and receives an application-scoped place ID, then later receives a Google-scoped place
ID after passing the moderation process.
* `scope` - The scope of an alternative place ID will always be APP, indicating that the alternative
place ID is recognised by your application only.
* `price_level` - The price level of the place, on a scale of `0` to `4`.
The exact amount indicated by a specific value will vary from region to region.
Price levels are interpreted as follows:
* `0` - Free
* `1` - Inexpensive
* `2` - Moderate
* `3` - Expensive
* `4` - Very Expensive
* `rating` contains the place's rating, from 1.0 to 5.0, based on aggregated user reviews.
* `reviews[]` a JSON array of up to five reviews. If a language parameter was specified in
the Place Details request, the Places Service will bias the results to prefer reviews written in that language.
Each review consists of several components:
* `aspects` contains a collection of AspectRating objects, each of which provides a rating of a
single attribute of the establishment. The first object in the collection is considered the primary aspect.
Each AspectRating is described as:
* `type` the name of the aspect that is being rated.
The following types are supported: `appeal`, `atmosphere`, `decor`, `facilities`, `food`, `overall`, `quality` and `service`.
* `rating` the user's rating for this particular aspect, from 0 to 3.
* `author_name` the name of the user who submitted the review. Anonymous reviews are attributed to "A Google user".
* `author_url` the URL to the user's Google Maps Local Guides profile, if available.
* `language` an IETF language code indicating the language used in the user's review. This field contains the main
language tag only, and not the secondary tag indicating country or region. For example, all the English reviews
are tagged as 'en', and not 'en-AU' or 'en-UK' and so on.
* `rating` the user's overall rating for this place. This is a whole number, ranging from 1 to 5.
* `text` the user's review. When reviewing a location with Google Places, text reviews are considered optional.
Therefore, this field may be empty. Note that this field may include simple HTML markup.
For example, the entity reference `&` may represent an ampersand character.
* `time` the time that the review was submitted, measured in the number of seconds since midnight, January 1, 1970 UTC.
* `types[]` contains an array of feature types describing the given result. See the [list of supported types](https://developers.google.com/places/web-service/supported_types#table2).
* `url` contains the URL of the official Google page for this place. This will be the Google-owned page that contains the
best available information about the place. Applications must link to or embed this page on any screen that shows
detailed results about the place to the user.
* `utc_offset` contains the number of minutes this place's current timezone is offset from UTC.
For example, for places in Sydney, Australia during daylight saving time this would be 660 (+11 hours from UTC),
and for places in California outside of daylight saving time this would be -480 (-8 hours from UTC).
* `vicinity` lists a simplified address for the place, including the street name, street number, and locality,
but not the province/state, postal code, or country. For example, Google's Sydney,
Australia office has a vicinity value of 48 Pirrama Road, Pyrmont.
* `website` lists the authoritative website for this place, such as a business' homepage.
## Examples
iex> {:error, status, error_message} = GoogleMaps.place_details({:place_id, "ChIJy5RYvL23t4kR3U1oXsAxEzs"}, key: "invalid key")
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
iex> {:ok, response} = GoogleMaps.place_details({:place_id, "ChIJy5RYvL23t4kR3U1oXsAxEzs"})
iex> is_map(response["result"])
true
iex> {:ok, response} = GoogleMaps.place_details("place_id:ChIJy5RYvL23t4kR3U1oXsAxEzs")
iex> response["result"]["name"]
"719-751 Madison Pl NW"
iex> {:ok, response} = GoogleMaps.place_details("ChIJy5RYvL23t4kR3U1oXsAxEzs")
iex> response["result"]["formatted_address"]
"719-751 Madison Pl NW, Washington, DC 20005, USA"
"""
@spec place_details(place_id, options()) :: Response.t()
def place_details(place_id, options \\ [])
def place_details({:place_id, place_id}, options) do
params =
options
|> Keyword.merge([place_id: place_id])
GoogleMaps.get("place/details", params)
end
def place_details("place_id:" <> place_id, options) do
place_details({:place_id, place_id}, options)
end
def place_details(place_id, options) do
place_details({:place_id, place_id}, options)
end
@doc """
A Timezone request returns timezone information for the given location.
## Args:
* `location` - A comma-separated latitude/longitude pair (e.g. `location=-33.86,151.20`),
which represents the location to be searched.
* `timestamp` - The desired time, in seconds since midnight, January 1, 1970 UTC.
The Google Maps Time Zone API uses the timestamp to determine whether daylight
saving time should be applied. Times before 1970 can be expressed as negative values.
## Options:
* `language` β The language code, indicating in which language the
results should be returned, if possible. Searches are also biased
to the selected language; results in the selected language may be
given a higher ranking. See the [list of supported languages](https://developers.google.com/maps/faq#languagesupport)
and their codes. Note that we often update supported languages so
this list may not be exhaustive. If language is not supplied, the
Places service will attempt to use the native language of the
domain from which the request is sent.
## Returns
This function returns `{:ok, body}` if the request is successful, and
Google returns data. It returns either `{:error, status}` or `{:error, status, error_message}`
when there is an error, depending if there's an error message or not.
The returned body is a map that contains five root elements:
* `dstOffset` - The offset for daylight saving time, in seconds.
This value will be zero if the time zone is not in daylight saving time
during the specified timestamp.
* `rawOffset` - The offset from UTC (in seconds) for the given location.
This does not take daylight saving time into account.
* `timeZoneId` - A string containing the ID of the time zone in the "tz"
database, such as "America/Los_Angeles" or "Australia/Sydney".
* `timeZoneName` - A string containing the long-form name of the time zone.
This field is localized if the `language` parameter is supplied;
e.g. "Pacific Daylight Time" or "Australian Eastern Daylight Time".
* `status` contains metadata on the request.
## Examples
iex> {:ok, response} = GoogleMaps.timezone({8.6069305,104.7196242})
iex> is_map(response)
true
iex> {:ok, response} = GoogleMaps.timezone({8.6069305,104.7196242})
iex> response["timeZoneId"]
"Asia/Saigon"
"""
@spec timezone(coordinate(), options()) :: Response.t()
def timezone(input, options \\ [])
def timezone(location, options) when is_binary(location) do
params = Keyword.merge(options, [location: location, timestamp: :os.system_time(:seconds)])
GoogleMaps.get("timezone", params)
end
def timezone({lat, lng}, options) when is_number(lat) and is_number(lng) do
timezone("#{lat},#{lng}", options)
end
@doc """
Direct request to Google Maps API endpoint.
Instead of relying on the functionality this module provides, you can
use this function to make direct request to the Google Maps API.
It takes an endpoint string, and a keyword list of parameters.
## Examples
iex> {:error, status, error_message} = GoogleMaps.get("directions", [
...> origin: "Disneyland",
...> destination: "Universal Studios Hollywood",
...> key: "invalid key",
...> ])
iex> status
"REQUEST_DENIED"
iex> error_message
"The provided API key is invalid."
iex> {:ok, result} = GoogleMaps.get("directions", [
...> origin: "Disneyland",
...> destination: "Universal Studios Hollywood"
...> ])
iex> [route] = result["routes"]
iex> match?(%{
...> "northeast" => %{"lat" => _, "lng" => _},
...> "southwest" => %{"lat" => _, "lng" => _}
...> }, route["bounds"])
true
iex> {:ok, result} = GoogleMaps.get("place/autocomplete", [input: "Paris, France"])
iex> Enum.count(result["predictions"]) > 0
true
iex> [paris | _rest] = result["predictions"]
iex> paris["description"]
"Paris, France"
iex> paris["place_id"]
"ChIJD7fiBh9u5kcRYJSMaMOCCwQ"
iex> paris["types"]
[ "locality", "political", "geocode" ]
# A request "Pizza near Par":
iex> {:ok, result} = GoogleMaps.get("place/queryautocomplete", [input: "Pizza near Par"])
iex> is_list(result["predictions"])
true
# Passing request headers and/or options
iex> {:ok, result} = GoogleMaps.get("directions", [
...> origin: "Disneyland",
...> destination: "Universal Studios Hollywood",
...> headers: [{"Accept-Language", "vi"}]
...> ])
iex> [route] = result["routes"]
iex> Regex.match?(~r(Dữ liệu bản đồ ©[\\d]{4} Google), route["copyrights"])
true
iex> {:error, error} = GoogleMaps.get("directions", [
...> origin: "Disneyland",
...> destination: "Universal Studios Hollywood",
...> headers: [{"Accept-Language", "vi"}],
...> options: [timeout: 0]
...> ])
...> error.reason
:connect_timeout
# Uses insecure HTTP request (no API key will be used.)
iex> {:ok, %{"results" => [result]}} =
...> GoogleMaps.get("geocode", [
...> address: "1600 Amphitheatre Parkway, Mountain View, CA",
...> secure: false
...> ])
iex> result["formatted_address"]
"Google Building 41, 1600 Amphitheatre Pkwy, Mountain View, CA 94043, USA"
"""
@spec get(String.t, options()) :: Response.t()
def get(endpoint, params) do
Request.get(endpoint, params)
|> Response.wrap
end
end
|
lib/google_maps.ex
| 0.955827
| 0.758399
|
google_maps.ex
|
starcoder
|
defmodule Plug.Builder do
@moduledoc """
Conveniences for building plugs.
This module can be used in another module in order to build
a plug stack:
defmodule MyApp do
use Plug.Builder
plug :hello, upper: true
def hello(conn, opts) do
body = if opts[:upper], do: "WORLD", else: "world"
send_resp(conn, 200, body)
end
end
`Plug.Builder` will define an `init/1` function (which is overridable)
and a `call/2` function with the compiled stack. By implementing the
Plug API, `Plug.Builder` guarantees this module can be handed to a web
server or used as part of another stack.
Note this module also exports a `compile/1` function for those willing
to collect and compile their plugs manually.
"""
@type plug :: module | atom
@doc false
defmacro __using__(_) do
quote do
@behaviour Plug
def init(opts) do
opts
end
defoverridable [init: 1]
import Plug.Builder, only: [plug: 1, plug: 2]
Module.register_attribute(__MODULE__, :plugs, accumulate: true)
@before_compile Plug.Builder
end
end
@doc false
defmacro __before_compile__(env) do
plugs = Module.get_attribute(env.module, :plugs)
{conn, body} = Plug.Builder.compile(plugs)
quote do
def call(unquote(conn), _), do: unquote(body)
end
end
@doc """
A macro that stores a new plug.
"""
defmacro plug(plug, opts \\ []) do
quote do
@plugs {unquote(plug), unquote(opts)}
end
end
@doc """
Compiles a plug stack.
It expects a reversed stack (with the last plug coming first)
and returns a tuple containing the reference to the connection
as first argument and the compiled quote stack.
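## Example

    # A minimal sketch (plug names are placeholders): compiling a stack by
    # hand inside a module body, then injecting the result with unquote
    # fragments, as `__before_compile__/1` above does.
    defmodule ManualPipeline do
      stack = [{:announce, []}]
      {conn, body} = Plug.Builder.compile(stack)

      def call(unquote(conn), _opts), do: unquote(body)

      defp announce(conn, _opts) do
        Plug.Conn.send_resp(conn, 200, "compiled by hand")
      end
    end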
"""
@spec compile([{plug, Plug.opts}]) :: {Macro.t, Macro.t}
def compile(stack) do
conn = quote do: conn
{conn, Enum.reduce(stack, conn, "e_plug(init_plug(&1), &2))}
end
defp init_plug({plug, opts}) do
case Atom.to_charlist(plug) do
'Elixir.' ++ _ ->
init_module_plug(plug, opts)
_ ->
init_fun_plug(plug, opts)
end
end
defp init_module_plug(plug, opts) do
opts = plug.init(opts)
call? = function_exported?(plug, :call, 2)
wrap? = function_exported?(plug, :wrap, 3)
cond do
call? and wrap? ->
raise ArgumentError,
message: "#{inspect plug} plug implements both call/2 and wrap/3"
call? ->
{:call, plug, opts}
wrap? ->
{:wrap, plug, opts}
true ->
raise ArgumentError,
message: "#{inspect plug} plug must implement call/2 or wrap/3"
end
end
defp init_fun_plug(plug, opts) do
{:fun, plug, opts}
end
defp quote_plug({:wrap, plug, opts}, acc) do
quote do
unquote(plug).wrap(conn, unquote(Macro.escape(opts)), fn conn ->
unquote(acc)
end)
end
end
defp quote_plug({:call, plug, opts}, acc) do
quote do
case unquote(plug).call(conn, unquote(Macro.escape(opts))) do
%Plug.Conn{} = conn -> unquote(acc)
_ -> raise "expected #{unquote(inspect plug)}.call/2 to return a Plug.Conn"
end
end
end
defp quote_plug({:fun, plug, opts}, acc) do
quote do
case unquote(plug)(conn, unquote(Macro.escape(opts))) do
%Plug.Conn{} = conn -> unquote(acc)
_ -> raise "expected #{unquote(plug)}/2 to return a Plug.Conn"
end
end
end
end
|
lib/plug/builder.ex
| 0.789153
| 0.428861
|
builder.ex
|
starcoder
|
defmodule WeePub.Broadcaster do
@moduledoc """
A `GenServer` that manages distribution of messages to interested clients
"""
use GenServer
@broadcaster __MODULE__
@registry WeePub.Registry
@topic @broadcaster
@doc false
def child_spec(options) do
%{
id: @broadcaster,
start: {__MODULE__, :start, [options]},
type: :worker,
}
end
@doc false
def start(options \\ []) do
GenServer.start_link(__MODULE__, options, name: @broadcaster)
end
@doc false
def init(_options) do
{:ok, %{}}
end
@doc """
Registers the caller process as a subscriber to broadcasts.
**Options**
* `filter:` A function that accepts a single parameter and returns a boolean.
Defaults to all messages
* `topic:` A narrow cast topic atom. The subscriber's filter will only be evaluated
if the topic matches the topic registered with. **Note:** `WeePub.Subscriber`
does not currently support generating clients with narrow cast topics.
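## Example

    # A minimal sketch: called from a GenServer's init/1 so that process
    # receives matching messages via handle_cast/2.
    WeePub.Broadcaster.subscribe(filter: fn
      {:metric, _value} -> true
      _other -> false
    end)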
"""
def subscribe(options \\ []) do
options = Keyword.merge [topic: @topic, filter: (fn _ -> true end)], options
Registry.register(
@registry,
options[:topic],
%{filter: options[:filter]}
)
end
@doc """
Publish a message
* `message` The message to be sent to subscribers if their `filter:` matches
**Options**
* `topic:` A narrow cast topic atom. The message will only be evaluated for subscribers
registered with a matching topic registration. **Note:** `WeePub.Subscriber`
does not currently support generating clients with narrow cast topics.
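## Example

    # A minimal sketch: broadcast to every subscriber whose filter matches.
    WeePub.Broadcaster.publish({:metric, %{cpu: 0.42}})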
"""
def publish(message, options \\ []) do
options = Keyword.merge [topic: @topic], options
GenServer.call(@broadcaster, {:publish, %{message: message, topic: options[:topic]}})
end
@doc false
def handle_call({:publish, %{message: _, topic: _} = message}, _caller, state) do
{:reply, broadcast(message), state}
end
defp broadcast(%{message: message, topic: topic}) do
Registry.dispatch(@registry, topic, &propagate(message, &1), parallel: true)
end
defp propagate(message, entries) do
stream = entries
|> Stream.map(fn ({pid, %{filter: filter}}) -> {pid, filter} end)
|> Stream.filter(&divulge?(&1, message))
|> Stream.map(&divulge(&1, message))
Stream.run(stream)
end
defp divulge?({_, filter}, message) do
filter.(message)
end
defp divulge({pid, _}, message) do
GenServer.cast(pid, message)
end
end
|
lib/wee_pub/broadcaster.ex
| 0.788746
| 0.460653
|
broadcaster.ex
|
starcoder
|
defmodule Tint.Distance.CIEDE2000 do
@moduledoc """
A module that implements the CIEDE2000 color distance algorithm.
(http://www2.ece.rochester.edu/~gsharma/ciede2000/ciede2000noteCRNA.pdf)
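## Example

    # A minimal sketch, assuming the `Tint.Lab` struct whose `lightness`,
    # `a` and `b` fields this module reads. The inputs are pair 1 of
    # Sharma's CIEDE2000 test data (reference delta E00: 2.0425).
    color = %Tint.Lab{lightness: 50.0, a: 2.6772, b: -79.7751}
    other = %Tint.Lab{lightness: 50.0, a: 0.0, b: -82.7485}
    Tint.Distance.CIEDE2000.distance(color, other, [])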
"""
@behaviour Tint.Distance
alias Tint.Utils.Math
@deg_6_in_rad Math.deg_to_rad(6)
@deg_25_in_rad Math.deg_to_rad(25)
@deg_30_in_rad Math.deg_to_rad(30)
@deg_63_in_rad Math.deg_to_rad(63)
@deg_275_in_rad Math.deg_to_rad(275)
@pow_25_7 :math.pow(25, 7)
@impl true
def distance(color, other_color, opts) do
color = Tint.to_lab(color)
other_color = Tint.to_lab(other_color)
# 2)
c_star_1 = calc_c_star_i(color.a, color.b)
c_star_2 = calc_c_star_i(other_color.a, other_color.b)
# 3)
c_star_dash = calc_c_star_dash(c_star_1, c_star_2)
# 4)
g = calc_g(c_star_dash)
# 5)
a_apo_1 = calc_a_apo(g, color.a)
a_apo_2 = calc_a_apo(g, other_color.a)
# 6)
c_apo_1 = calc_c_apo(a_apo_1, color.b)
c_apo_2 = calc_c_apo(a_apo_2, other_color.b)
# 7)
h_apo_1 = calc_h_apo(a_apo_1, color.b)
h_apo_2 = calc_h_apo(a_apo_2, other_color.b)
# 8)
delta_l_apo = other_color.lightness - color.lightness
# 9)
delta_c_apo = c_apo_2 - c_apo_1
# 10)
delta_h_apo = calc_delta_h_apo(c_apo_1, c_apo_2, h_apo_1, h_apo_2)
# 11)
delta_cap_h_apo = calc_delta_cap_h_apo(c_apo_1, c_apo_2, delta_h_apo)
# 12)
l_apo_dash = calc_cl_apo_dash(color.lightness, other_color.lightness)
# 13)
c_apo_dash = calc_cl_apo_dash(c_apo_1, c_apo_2)
# 14)
h_apo_dash = calc_h_apo_dash(c_apo_1, c_apo_2, h_apo_1, h_apo_2)
# 15)
t = calc_t(h_apo_dash)
# 16)
delta_theta = calc_delta_theta(h_apo_dash)
# 17)
rc = calc_rc(c_apo_dash)
# 18)
sl = calc_sl(l_apo_dash)
# 19)
sc = calc_sc(c_apo_dash)
# 20)
sh = calc_sh(c_apo_dash, t)
# 21)
rt = calc_rt(delta_theta, rc)
# 22)
delta_e =
calc_delta_e00(
delta_l_apo,
delta_c_apo,
delta_cap_h_apo,
rt,
sl,
sc,
sh,
opts[:weights]
)
delta_e
end
defp calc_c_star_i(a, b) do
:math.sqrt(:math.pow(a, 2) + :math.pow(b, 2))
end
defp calc_c_star_dash(c_star_1, c_star_2) do
(c_star_1 + c_star_2) / 2
end
defp calc_g(c_star_dash) do
c_star_dash_pow_7 = :math.pow(c_star_dash, 7)
0.5 * (1 - :math.sqrt(c_star_dash_pow_7 / (c_star_dash_pow_7 + @pow_25_7)))
end
defp calc_a_apo(g, a) do
(1 + g) * a
end
defp calc_c_apo(a_apo, b) do
:math.sqrt(:math.pow(a_apo, 2) + :math.pow(b, 2))
end
defp calc_h_apo(a_apo, b) do
if a_apo == 0 && b == 0 do
0
else
b
|> :math.atan2(a_apo)
|> Math.rad_to_deg()
end
end
defp calc_delta_h_apo(c_apo_1, c_apo_2, h_apo_1, h_apo_2) do
if c_apo_1 == 0 && c_apo_2 == 0 do
0
else
diff = h_apo_2 - h_apo_1
cond do
abs(diff) <= 180 -> diff
diff > 180 -> diff - 360
diff < -180 -> diff + 360
end
end
end
defp calc_delta_cap_h_apo(c_apo_1, c_apo_2, delta_h_apo) do
2 * :math.sqrt(c_apo_1 * c_apo_2) *
:math.sin(Math.deg_to_rad(delta_h_apo) / 2)
end
defp calc_cl_apo_dash(v1, v2) do
(v1 + v2) / 2
end
defp calc_h_apo_dash(c_apo_1, c_apo_2, h_apo_1, h_apo_2) do
sum = h_apo_1 + h_apo_2
if c_apo_1 == 0 && c_apo_2 == 0 do
sum
else
abs_diff = abs(h_apo_1 - h_apo_2)
cond do
abs_diff <= 180 -> sum / 2
sum < 360 -> (sum + 360) / 2
sum >= 360 -> (sum - 360) / 2
end
end
end
defp calc_t(h_apo_dash) do
h_apo_dash_rad = Math.deg_to_rad(h_apo_dash)
1 - 0.17 * :math.cos(h_apo_dash_rad - @deg_30_in_rad) +
0.24 * :math.cos(2 * h_apo_dash_rad) +
0.32 * :math.cos(3 * h_apo_dash_rad + @deg_6_in_rad) -
0.2 * :math.cos(4 * h_apo_dash_rad - @deg_63_in_rad)
end
defp calc_delta_theta(h_apo_dash) do
@deg_30_in_rad *
:math.exp(
-:math.pow(
(Math.deg_to_rad(h_apo_dash) - @deg_275_in_rad) / @deg_25_in_rad,
2
)
)
end
defp calc_rc(c_apo_dash) do
c_apo_dash_pow_7 = :math.pow(c_apo_dash, 7)
2 * :math.sqrt(c_apo_dash_pow_7 / (c_apo_dash_pow_7 + @pow_25_7))
end
defp calc_sl(l_apo_dash) do
v = :math.pow(l_apo_dash - 50, 2)
1 + 0.015 * v / :math.sqrt(20 + v)
end
defp calc_sc(c_apo_dash) do
1 + 0.045 * c_apo_dash
end
defp calc_sh(c_apo_dash, t) do
1 + 0.015 * c_apo_dash * t
end
defp calc_rt(delta_theta, rc) do
-:math.sin(2 * delta_theta) * rc
end
defp calc_delta_e00(
delta_l_apo,
delta_c_apo,
delta_h_apo,
rt,
sl,
sc,
sh,
weights
) do
# The weights kL, kC, and kH are usually unity
{kl, kc, kh} = weights || {1, 1, 1}
d_l_apo_sl_div = delta_l_apo / (kl * sl)
d_c_apo_sc_div = delta_c_apo / (kc * sc)
d_h_apo_sh_div = delta_h_apo / (kh * sh)
:math.sqrt(
:math.pow(d_l_apo_sl_div, 2) +
:math.pow(d_c_apo_sc_div, 2) +
:math.pow(d_h_apo_sh_div, 2) +
rt * d_c_apo_sc_div * d_h_apo_sh_div
)
end
end
|
lib/tint/distance/ciede2000.ex
| 0.775137
| 0.579817
|
ciede2000.ex
|
starcoder
|
defmodule Wasmex.Instance do
@moduledoc """
Instantiates a WebAssembly module and allows calling exported functions on it.
# Read a WASM file and compile it into a WASM module
{:ok, bytes } = File.read("wasmex_test.wasm")
{:ok, module} = Wasmex.Module.compile(bytes)
# Instantiates the WASM module.
{:ok, instance } = Wasmex.start_link(%{module: module})
# Call a function on it.
{:ok, [result]} = Wasmex.call_function(instance, "sum", [1, 2])
IO.puts result # 3
All exported functions are accessible via `call_exported_function`.
Arguments of these functions are automatically casted to WebAssembly values.
Note that WebAssembly only knows number datatypes (floats and integers of various sizes).
You can pass arbitrary data to WebAssembly by writing data into an instance's memory. The `memory/3` function returns a `Wasmex.Memory` struct representing the memory of an instance, e.g.:
```elixir
{:ok, memory} = Wasmex.Instance.memory(instance, :uint8, 0)
```
This module, especially `call_exported_function/4`, is assumed to be called within a GenServer context.
Usually, functions defined here are called through the `Wasmex` module API to satisfy this assumption.
"""
@type t :: %__MODULE__{
resource: binary(),
reference: reference()
}
defstruct resource: nil,
# The actual NIF instance resource.
# Normally the compiler will happily do stuff like inlining the
# resource in attributes. This will convert the resource into an
# empty binary with no warning. This will make that harder to
# accidentally do.
reference: nil
@deprecated "Compile the module with Wasmex.Module.compile/1 and then use new/2 instead"
@spec from_bytes(binary(), %{optional(binary()) => (... -> any())}) ::
{:ok, __MODULE__.t()} | {:error, binary()}
def from_bytes(bytes, imports) do
case Wasmex.Module.compile(bytes) do
{:ok, module} -> new(module, imports)
error -> error
end
end
@spec new(Wasmex.Module.t(), %{optional(binary()) => (... -> any())}) ::
{:ok, __MODULE__.t()} | {:error, binary()}
def new(%Wasmex.Module{resource: memory_resource}, imports) when is_map(imports) do
case Wasmex.Native.instance_new(memory_resource, imports) do
{:ok, resource} -> {:ok, wrap_resource(resource)}
{:error, err} -> {:error, err}
end
end
@deprecated "Compile the module with Wasmex.Module.compile/1 and then use new_wasi/3 instead"
@spec wasi_from_bytes(binary(), %{optional(binary()) => (... -> any())}, %{
optional(:args) => [String.t()],
optional(:env) => %{String.t() => String.t()},
optional(:stdin) => Wasmex.Pipe.t(),
optional(:stdout) => Wasmex.Pipe.t(),
optional(:stderr) => Wasmex.Pipe.t()
}) ::
{:ok, __MODULE__.t()} | {:error, binary()}
def wasi_from_bytes(bytes, imports, wasi) do
case Wasmex.Module.compile(bytes) do
{:ok, module} -> new_wasi(module, imports, wasi)
error -> error
end
end
@spec new_wasi(Wasmex.Module.t(), %{optional(binary()) => (... -> any())}, %{
optional(:args) => [String.t()],
optional(:env) => %{String.t() => String.t()},
optional(:stdin) => Wasmex.Pipe.t(),
optional(:stdout) => Wasmex.Pipe.t(),
optional(:stderr) => Wasmex.Pipe.t()
}) ::
{:ok, __MODULE__.t()} | {:error, binary()}
def new_wasi(%Wasmex.Module{resource: memory_resource}, imports, wasi)
when is_map(imports) and is_map(wasi) do
args = Map.get(wasi, "args", [])
env = Map.get(wasi, "env", %{})
{opts, _} = Map.split(wasi, ["stdin", "stdout", "stderr", "preopen"])
case Wasmex.Native.instance_new_wasi(memory_resource, imports, args, env, opts) do
{:ok, resource} -> {:ok, wrap_resource(resource)}
{:error, err} -> {:error, err}
end
end
defp wrap_resource(resource) do
%__MODULE__{
resource: resource,
reference: make_ref()
}
end
@spec function_export_exists(__MODULE__.t(), binary()) :: boolean()
def function_export_exists(%__MODULE__{resource: resource}, name) when is_binary(name) do
Wasmex.Native.instance_function_export_exists(resource, name)
end
@doc """
Calls a function with the given `name` and `params` on the WebAssembly `instance`.
This function assumes to be called within a GenServer context, it expects a `from` argument
as given by `handle_call` etc.
The WebAssembly function will be invoked asynchronously in a new OS thread.
The calling process will receive a `{:returned_function_call, result, from}` message once
the execution finished.
The result either is an `{:error, reason}` or the `:ok` atom.
A BadArg exception may be thrown when given unexpected input data.
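## Example

    # A minimal sketch of the expected wiring inside a GenServer
    # (callback names and state shape are hypothetical):
    def handle_call({:call_function, name, params}, from, %{instance: instance} = state) do
      :ok = Wasmex.Instance.call_exported_function(instance, name, params, from)
      {:noreply, state}
    end

    def handle_info({:returned_function_call, result, from}, state) do
      GenServer.reply(from, result)
      {:noreply, state}
    end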
"""
@spec call_exported_function(__MODULE__.t(), binary(), [any()], GenServer.from()) ::
:ok | {:error, binary()}
def call_exported_function(%__MODULE__{resource: resource}, name, params, from)
when is_binary(name) do
Wasmex.Native.instance_call_exported_function(resource, name, params, from)
end
@spec memory(__MODULE__.t(), atom(), pos_integer()) ::
{:ok, Wasmex.Memory.t()} | {:error, binary()}
def memory(%__MODULE__{} = instance, size, offset)
when size in [:uint8, :int8, :uint16, :int16, :uint32, :int32] do
Wasmex.Memory.from_instance(instance, size, offset)
end
end
defimpl Inspect, for: Wasmex.Instance do
import Inspect.Algebra
def inspect(dict, opts) do
concat(["#Wasmex.Instance<", to_doc(dict.reference, opts), ">"])
end
end
|
lib/wasmex/instance.ex
| 0.850375
| 0.822225
|
instance.ex
|
starcoder
|
defmodule Treex.Tree do
@moduledoc """
Primarily a struct for representing tree data to be processed with the `Tree.Traverse` module.
Also includes implementations necessary for the `Enumerable` protocol and conversion functions
to/from ordinary `t:Map.t/0` representations of the same data.
One important note is the notion of a "virtual root." In order to represent arbitrary maps, any map
that has multiple top-level keys (i.e. has multiple roots and is a forest, not a tree) will be
converted to a tree by inserting a root node whose value defaults to `:root` and can be specified
as the second parameter to `Treex.Tree.from_map/2`.
"""
@moduledoc since: "0.1.0"
defstruct value: nil, children: [], key: nil
@type acc:: {:cont, term} | {:halt, term} | {:suspend, term}
@type continuation:: (acc -> result)
@type element:: {term, list(term) | Map.t | term}
@type length:: pos_integer
@type reducer:: (term, term -> acc)
@type result:: {:done, term} | {:halted, term} | {:suspended, term, continuation}
@type size:: non_neg_integer
@type start:: non_neg_integer
@type slicing_fun :: (start, length -> list(term))
@type t:: %Treex.Tree{value: any, children: [t], key: term}
@doc """
Counts the number of nodes in the given tree.
Returns: `t:integer`
"""
@doc since: "0.1.0"
@spec count(t):: {:ok, size} | {:error, module}
def count(_tree), do: 42
@doc """
Checks whether the given element is a member of the tree at any depth, performed breadth-first.
Returns: `t:boolean`
"""
@doc since: "0.1.0"
@spec member?(t, term) :: {:ok, boolean} | {:error, module}
def member?(_tree, _element), do: true
@doc """
Reduces the given tree into the given accumulator by invoking the given `t:Treex.Tree.reducer/0`
function on each node, traversed breadth-first. The return is tagged tuple following the
`Enumerable` protocol.
Returns: `Treex.Tree.acc/0` (where `term` is the same type as the `acc` parameter)
"""
@doc since: "0.1.0"
@spec reduce(t, acc, reducer) :: result
def reduce(_tree, acc, _fun), do: {:cont, acc}
@doc """
Generates a function that contiguously slices the given tree. See `Enumerable.slice/1`
Returns
`{:ok, t:non_neg_integer/0, t:Treex.Tree.slicing_fun/0}` when successful
`{:error, t:module/0}` when there is an error
"""
@doc since: "0.1.0"
@spec slice(t) :: {:ok, size, slicing_fun} | {:error, module}
def slice(_tree), do: &(&1 + &2)
@doc """
List the leaf nodes of the given `t:Treex.Tree.t/0`
Returns: `[t:Treex.Tree.t/0]`
"""
@doc since: "0.1.0"
@spec leaves(t):: list(t) | []
def leaves(_tree), do: []
@doc """
Convert the given map into a `t:Treex.Tree.t/0`. If the given map has more than one top-level key,
the optional `root` parameter specifies what value to give a virtual root node that will be
inserted at the top of the tree and contain the given `map`'s top-level keys as children.
Returns: `t:Treex.Tree.t/0`
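## Example

    # A minimal sketch of the virtual-root behaviour described above:
    Treex.Tree.from_map(%{a: 1, b: 2})
    # yields a tree whose root node has value :root and whose children are
    # the nodes for :a and :b, each wrapping its value as a leaf child.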
"""
@doc since: "0.1.0"
@spec from_map(Map.t):: t
@spec from_map(Map.t, term):: t
def from_map(map, root \\ :root)
def from_map(map = %{}, root) when map |> map_size == 0,
do: %Treex.Tree{value: root, children: []}
def from_map(map = %{}, _root) when map |> map_size == 1,
do: map |> Enum.reduce(%Treex.Tree{}, &node_from_element/2)
def from_map(map = %{}, root), do: %{root => map} |> from_map(root)
@doc """
Convert the given tree to a `t:Map.t/0`. Optionally, remove the root node by passing
`pop_root: true` as a keyword option. This is useful, for example, when the caller knows the given
tree has a virtual root.
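## Example

    # A minimal sketch: dropping the virtual root inserted by `from_map/2`.
    tree = Treex.Tree.from_map(%{a: 1, b: 2})
    Treex.Tree.to_map(tree, pop_root: true)
    # returns a map keyed by the former root's children (:a and :b)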
"""
@doc since: "0.1.0"
@spec to_map(t | list(t)):: Map.t
@spec to_map(t | list(t), [pop_root: boolean]):: Map.t
def to_map(tree, opts \\ [pop_root: false])
def to_map(%Treex.Tree{value: nil, children: []}, _), do: %{}
def to_map([], [pop_root: false]), do: nil
def to_map(trees, [pop_root: false]) when is_list(trees) do
trees
|> Enum.map(&to_map/1)
|> Enum.reduce(%{}, &Map.merge/2)
end
def to_map(tree, [pop_root: false]), do: %{tree.value => tree.children |> to_map}
def to_map(tree, [pop_root: true]), do: tree.children |> to_map
@doc """
Checks whether the given node is a leaf node or not.
Returns: `t:boolean/0`
"""
@doc since: "0.1.0"
@spec leaf_node?(t):: boolean
def leaf_node?(node), do: node.children |> Enum.empty?
@doc false
@spec node_from_element(element):: t
defp node_from_element(element)
defp node_from_element({root, children = %{}}) when children |> map_size == 0,
do: %Treex.Tree{value: root, children: []}
defp node_from_element({root, children = %{}}),
do: %Treex.Tree{value: root, children: children |> Enum.map(&node_from_element/1)}
defp node_from_element({root, children}),
do: %Treex.Tree{value: root, children: [%Treex.Tree{value: children, children: []}]}
@doc false
@spec node_from_element(element, term):: t
defp node_from_element(element, _)
defp node_from_element(pair, _), do: pair |> node_from_element
end
|
lib/treex/tree.ex
| 0.94064
| 0.806738
|
tree.ex
|
starcoder
|
defmodule Rajska.ObjectScopeAuthorization do
@moduledoc """
Absinthe Phase to perform object scoping.
Authorizes all Absinthe's [objects](https://hexdocs.pm/absinthe/Absinthe.Schema.Notation.html#object/3) requested in a query by checking the underlying struct.
## Usage
[Create your Authorization module and add it and ObjectScopeAuthorization to your Absinthe Pipeline](https://hexdocs.pm/rajska/Rajska.html#module-usage). Then set the scope of an object:
```elixir
object :user do
# Turn on Object and Field scoping, but if the FieldAuthorization middleware is not included, this is the same as using `scope_object?`
meta :scope?, true
field :id, :integer
field :email, :string
field :name, :string
field :company, :company
end
object :company do
meta :scope_object?, true
field :id, :integer
field :user_id, :integer
field :name, :string
field :wallet, :wallet
end
object :wallet do
meta :scope?, true
meta :rule, :object_authorization
field :total, :integer
end
```
To define custom rules for the scoping, use [has_user_access?/3](https://hexdocs.pm/rajska/Rajska.Authorization.html#c:has_user_access?/3). For example:
```elixir
defmodule Authorization do
use Rajska,
valid_roles: [:user, :admin],
super_role: :admin
@impl true
def has_user_access?(%{role: :admin}, %User{}, _rule), do: true
def has_user_access?(%{id: user_id}, %User{id: id}, _rule) when user_id === id, do: true
def has_user_access?(_current_user, %User{}, _rule), do: false
def has_user_access?(%{id: user_id}, %Wallet{user_id: id}, :object_authorization), do: user_id == id
def has_user_access?(%{id: user_id}, %Wallet{user_id: id}, :always_block), do: false
end
```
This way different rules can be set to the same struct.
See `Rajska.Authorization` for `rule` default settings.
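One way to add the phase to a pipeline (a sketch assuming `Absinthe.Plug`'s
default pipeline; adapt to your setup):

```elixir
def pipeline(config, pipeline_opts) do
  config
  |> Absinthe.Plug.default_pipeline(pipeline_opts)
  |> Absinthe.Pipeline.insert_after(
    Absinthe.Phase.Document.Execution.Resolution,
    Rajska.ObjectScopeAuthorization
  )
end
```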
"""
alias Absinthe.{Blueprint, Phase, Type}
alias Rajska.Introspection
use Absinthe.Phase
@spec run(Blueprint.t() | Phase.Error.t(), Keyword.t()) :: {:ok, map}
def run(%Blueprint{execution: execution} = bp, _options \\ []) do
{:ok, %{bp | execution: process(execution)}}
end
defp process(%{validation_errors: [], result: result} = execution), do: %{execution | result: result(result, execution.context)}
defp process(execution), do: execution
# Introspection
defp result(%{emitter: %{schema_node: %{identifier: identifier}}} = result, _context)
when identifier in [:query_type, nil] do
result
end
# Root
defp result(%{fields: fields, emitter: %{schema_node: %{identifier: identifier}}} = result, context)
when identifier in [:query, :mutation, :subscription] do
%{result | fields: walk_result(fields, context)}
end
# Object
defp result(%{fields: fields, emitter: %{schema_node: schema_node} = emitter, root_value: root_value} = result, context) do
type = Introspection.get_object_type(schema_node.type)
scope? = get_scope!(type)
default_rule = Rajska.apply_auth_mod(context, :default_rule)
rule = Type.meta(type, :rule) || default_rule
case authorized?(scope?, context, root_value, rule) do
true -> %{result | fields: walk_result(fields, context)}
false -> Map.put(result, :errors, [error(emitter)])
end
end
# List
defp result(%{values: values} = result, context) do
%{result | values: walk_result(values, context)}
end
# Leafs
defp result(result, _context), do: result
defp walk_result(fields, context, new_fields \\ [])
defp walk_result([], _context, new_fields), do: Enum.reverse(new_fields)
defp walk_result([field | fields], context, new_fields) do
new_fields = [result(field, context) | new_fields]
walk_result(fields, context, new_fields)
end
defp get_scope!(object) do
scope? = Type.meta(object, :scope?)
scope_object? = Type.meta(object, :scope_object?)
case {scope?, scope_object?} do
{nil, nil} -> true
{nil, scope_object?} -> scope_object?
{scope?, nil} -> scope?
{_, _} -> raise "Error in #{inspect object.identifier}. If scope_object? is defined, then scope? must not be defined"
end
end
defp authorized?(false, _context, _scoped_struct, _rule), do: true
defp authorized?(true, context, scoped_struct, rule) do
Rajska.apply_auth_mod(context, :context_user_authorized?, [context, scoped_struct, rule])
end
defp error(%{source_location: location, schema_node: %{type: type}}) do
%Phase.Error{
phase: __MODULE__,
message: "Not authorized to access object #{Introspection.get_object_type(type).identifier}",
locations: [location]
}
end
end
|
lib/middlewares/object_scope_authorization.ex
| 0.803328
| 0.808143
|
object_scope_authorization.ex
|
starcoder
|
defmodule ESpec.ExampleHelpers do
@moduledoc """
Defines macros 'example' and 'it'.
These macros define a function with a random name which will be called when the example runs.
`%ESpec.Example` structs are accumulated in the `@examples` attribute.
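A minimal sketch of the resulting DSL:

    defmodule SomethingSpec do
      use ESpec

      it "adds numbers" do
        expect(1 + 1) |> to(eq 2)
      end
    end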
"""
@aliases ~w(it specify)a
@skipped ~w(xit xexample xspecify)a
@focused ~w(fit fexample fspecify focus)a
@doc """
Adds example to @examples and defines function to wrap the spec.
Sends `shared`' variable to the example block.
"""
defmacro example(description, opts, do: block) do
block =
quote do
unquote(block)
end
|> Macro.escape(unquote: true)
quote bind_quoted: [description: description, opts: opts, block: block] do
f_name = random_atom(description)
context = Enum.reverse(@context)
@examples %ESpec.Example{
description: description,
module: __MODULE__,
function: f_name,
opts: opts,
file: __ENV__.file,
line: __ENV__.line,
context: context,
shared: @shared
}
def unquote(f_name)(var!(shared)) do
var!(shared)
unquote(block)
end
end
end
defmacro example(opts, do: block) when is_list(opts) do
quote do: example("", unquote(opts), do: unquote(block))
end
defmacro example(description, do: block) do
quote do: example(unquote(description), [], do: unquote(block))
end
defmacro example(do: block) do
quote do: example("", [], do: unquote(block))
end
@doc "Aliases for `example`"
Enum.each(@aliases, fn func ->
defmacro unquote(func)(description, opts, do: block) do
quote do: example(unquote(description), unquote(opts), do: unquote(block))
end
defmacro unquote(func)(description_or_opts, do: block) do
quote do: example(unquote(description_or_opts), do: unquote(block))
end
defmacro unquote(func)(do: block) do
quote do: example(do: unquote(block))
end
end)
@doc "Macros for skipped examples."
Enum.each(@skipped, fn func ->
defmacro unquote(func)(description, opts, do: block) do
reason = "`#{unquote(func)}`"
quote do:
example(
unquote(description),
Keyword.put(unquote(opts), :skip, unquote(reason)),
do: unquote(block)
)
end
defmacro unquote(func)(description, do: block) when is_binary(description) do
reason = "`#{unquote(func)}`"
quote do: example(unquote(description), [skip: unquote(reason)], do: unquote(block))
end
defmacro unquote(func)(opts, do: block) when is_list(opts) do
reason = "`#{unquote(func)}`"
quote do: example(Keyword.put(unquote(opts), :skip, unquote(reason)), do: unquote(block))
end
defmacro unquote(func)(do: block) do
reason = "`#{unquote(func)}`"
quote do: example([skip: unquote(reason)], do: unquote(block))
end
end)
@doc "Macros for focused examples."
Enum.each(@focused, fn func ->
defmacro unquote(func)(description, opts, do: block) do
quote do:
example(
unquote(description),
Keyword.put(unquote(opts), :focus, true),
do: unquote(block)
)
end
defmacro unquote(func)(description, do: block) when is_binary(description) do
quote do: example(unquote(description), [focus: true], do: unquote(block))
end
defmacro unquote(func)(opts, do: block) when is_list(opts) do
quote do: example(Keyword.put(unquote(opts), :focus, true), do: unquote(block))
end
defmacro unquote(func)(do: block) do
quote do: example([focus: true], do: unquote(block))
end
end)
@doc "Macros for pending exaples."
Enum.each([:example, :pending] ++ @aliases, fn func ->
defmacro unquote(func)(description) when is_binary(description) do
quote do: example(unquote(description), [pending: unquote(description)], do: nil)
end
end)
@doc "Defines examples using another module."
defmacro it_behaves_like(module, lets \\ []) when is_list(lets) do
quote do
let unquote(lets)
Enum.each(unquote(module).examples, fn example ->
new_context = ESpec.ExampleHelpers.__assign_shared_lets__(example.context, @context)
context = Enum.reverse(@context) ++ new_context
@examples %ESpec.Example{
description: example.description,
module: example.module,
function: example.function,
opts: example.opts,
file: __ENV__.file,
line: __ENV__.line,
context: context,
shared: false
}
end)
end
end
@doc false
def __assign_shared_lets__(example_context, module_context) do
Enum.map(example_context, fn context ->
case context do
%ESpec.Let{shared: true, var: var} -> assign_shared(context, module_context, var)
_ -> context
end
end)
end
defp assign_shared(context, module_context, var) do
module_let =
Enum.find(module_context, fn module_let ->
case module_let do
%ESpec.Let{var: ^var, shared: false} -> module_let
_ -> false
end
end)
if module_let,
do: %{module_let | shared_module: context.shared_module, shared: true},
else: context
end
@doc "alias for include_examples"
defmacro include_examples(module, lets \\ []) when is_list(lets) do
quote do: it_behaves_like(unquote(module), unquote(lets))
end
def random_atom(arg) do
String.to_atom("example_#{ESpec.Support.word_chars(arg)}_#{ESpec.Support.random_string()}")
end
end
|
lib/espec/example_helpers.ex
| 0.700997
| 0.578627
|
example_helpers.ex
|
starcoder
|
defmodule HXL.Evaluator do
@moduledoc """
Defines the behaviour for custom evaluation of the AST returned from `HXL.Parser`
See `c:eval/2` for more information
"""
@type t :: module()
defmacro __using__(_) do
quote do
@behaviour unquote(__MODULE__)
end
end
@doc ~S"""
This function is invoked for every AST node. There's a base implementation of an evaluator
which is expected to be used by implementers for handling nodes they're not interested in.
## Example
An example would be to implement Terraform specific behaviours such as modules:
hcl = ""\"
module "servers" {
source = "./servers"
servers = 5
}
""\"
Using `HXL.Evaluator.Base` the result would be:
%{"module" => %{"servers" => %{"servers" => 5, "source" => "./servers"}}}
Using a different evaluator we can change how we interpret the AST:
defmodule Terraform.Evaluator do
use HXL.Evaluator
alias HXL.Ast.{Block, Body, Attr}
alias HXL.Evaluator.Base
@impl true
def eval(%Block{type: "module" = t, labels: [mod_name], body: body}, %HXL.Eval{} = ctx) do
mod = mod_from_body(body)
# We're not interesed to put anything in the document but to populate the
# ctx with the ability to lookup values from the module
symbol_table = put_in(ctx.symbol_table, [Access.key(t, %{}), mod_name], mod)
{:ignore, %{ctx | symbol_table: symbol_table}}
end
def eval(ast, ctx), do: Base.eval(ast, ctx)
defp mod_from_body(%Body{statements: stmts}) do
case Enum.reduce(stmts, {nil, []}, &mod_args/2) do
{nil, _} ->
              raise ArgumentError, message: "the `source` argument is required for Terraform modules"
{source, args} ->
HXL.decode_file!(source <> ".tf", evaluator: __MODULE__, variables: mod_variables(args))
end
end
defp mod_args(%Attr{name: "source", expr: expr}, {_, args}) do
source =
expr
|> HXL.Evaluator.Base.eval(%{})
|> elem(0)
{source, args}
end
defp mod_args(arg, {source, args}) do
{source, [arg | args]}
end
defp mod_variables(args) do
for arg <- args, into: %{} do
arg
|> HXL.Evaluator.Base.eval(%HXL.Eval{})
|> elem(0)
end
end
end
Given the following HCL document:
hcl = ""\"
module "servers" {
source = "./servers"
servers = 5
}
instance = module.servers.instances
""\"
And a module file `servers.tf`:
instances = ["127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"]
Eval using the `Terraform.Evaluator`
      iex> HXL.decode!(hcl, evaluator: Terraform.Evaluator)
%{"instance" => ["127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5"]}
"""
@callback eval(HXL.Ast.t(), HXL.Eval.t()) :: {term(), HXL.Eval.t()}
end
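# A minimal custom-evaluator sketch (not from the original docs): delegate
# every node to the base implementation, which is the natural starting point
# before overriding specific clauses as in the `Terraform.Evaluator` example.
#
#     defmodule PassThrough.Evaluator do
#       use HXL.Evaluator
#
#       @impl true
#       def eval(ast, ctx), do: HXL.Evaluator.Base.eval(ast, ctx)
#     end
#
#     HXL.decode!(hcl, evaluator: PassThrough.Evaluator)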
|
lib/hxl/evaluator.ex
| 0.858985
| 0.597344
|
evaluator.ex
|
starcoder
|
defmodule QuizServer.Boundary.Validator do
@moduledoc """
Functions to validate that the API inputs are properly configured
"""
@type result_error :: {atom(), String.t()}
@type validation_result :: :ok | {:error, String.t()}
@spec required(list(result_error), map(), list() | atom()) :: list(result_error)
@doc """
Goes through a list of required fields and validate that they are present.
It is fed with a list of errors, fields, and list of fields to validate
and returns updated errors if there is something missing.
"""
def required(errors, fields, validate_list) when is_list(validate_list) do
validate_list
|> Enum.reduce(errors, &required(&2, fields, &1))
end
# Validates that a single field is present and adds an error if it is not
def required(errors, fields, required_field) do
if Map.has_key?(fields, required_field) do
add_error(:ok, errors, required_field)
else
add_error({:error, "must be present"}, errors, required_field)
end
end
@spec validate_with_function(list(result_error), map(), list() | atom(), fun) ::
list(result_error)
@doc """
  Calls a function that returns validation_result to see if the field or fields are conformant with specifications.
Adds the errors to the error list
"""
def validate_with_function(errors, fields, field_list, f)
when is_function(f) and is_list(field_list) do
field_list
|> Enum.reduce(errors, &validate_with_function(&2, fields, &1, f))
end
def validate_with_function(errors, fields, field_name, f) when is_function(f) do
result =
if Map.has_key?(fields, field_name) do
Map.fetch!(fields, field_name)
|> f.()
else
:ok
end
add_error(result, errors, field_name)
end
@spec validate_is_atom(any) :: validation_result()
@doc """
Validation to make sure that the field is an atom
"""
def validate_is_atom(input) when is_atom(input), do: :ok
def validate_is_atom(_input), do: {:error, "must be an atom"}
@spec validate_is_string(any) :: validation_result()
@doc """
Validation to make sure that the field is a string
"""
def validate_is_string(input) when is_binary(input), do: :ok
def validate_is_string(_input), do: {:error, "must be a string"}
def validate_is_list(input) when is_list(input), do: :ok
def validate_is_list(_input), do: {:error, "must be a list"}
@doc """
Validation to make sure that the field is a function
"""
@spec validate_is_function(any, integer()) :: validation_result()
def validate_is_function(input, arity \\ 1)
def validate_is_function(input, arity) when is_function(input, arity), do: :ok
def validate_is_function(_input, _), do: {:error, "must be a function"}
# If the validation is ok, then return the errors (no changes)
defp add_error(:ok, errors, _field_name), do: errors
# If there is a list of errors, add them all
  # TODO: find if there is any real requirement to do this
  # defp add_error({:error, messages}, errors, field_name) when is_list(messages) do
# errors ++ Enum.map(messages, &{field_name, &1})
# end
# If there is a single error, add it directly.
defp add_error({:error, message}, errors, field_name) do
errors ++ [{field_name, message}]
end
end
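# A hedged usage sketch of the validator pipeline above; the :title and
# :generator fields and the input map are made up for illustration.
#
#     alias QuizServer.Boundary.Validator
#
#     fields = %{title: "Multiplication tables"}
#
#     errors =
#       []
#       |> Validator.required(fields, [:title, :generator])
#       |> Validator.validate_with_function(fields, :title, &Validator.validate_is_string/1)
#
#     #=> [generator: "must be present"]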
|
apps/quiz_server/lib/boundary/validator.ex
| 0.766818
| 0.480844
|
validator.ex
|
starcoder
|
defmodule ExRPC do
@moduledoc """
ExRPC is an out-of band RPC application and library that uses multiple TCP
ports to send and receive data between Elixir nodes. It behaves mostly like Erlang's
`RPC` module but uses different ports and processes for different nodes,
effectively spreading the load to X processes for X nodes, instead of pushing data
from every remote node/RPC call into a single `rex` server.
You can generally use `ExRPC.call` and `ExRPC.cast` the same way you use
the `RPC` library, in the following manner:
iex> ExRPC.call(node, :erlang, :is_atom, [:ok], 1000)
true
iex> ExRPC.cast(node, :os, :timestamp)
true
iex> ExRPC.safe_cast(node, :os, :timestamp)
true
iex> ExRPC.safe_cast(:'random_node@127.0.0.1', :os, :timestamp)
{:badrpc, :nodedown}
ExRPC will try to detect possible issues with the TCP channel on which
it operates, both by closely monitoring `gen_tcp` timeouts and by testing
connectivity through the Erlang VM for `every single request`, thus ensuring
proper response to changes in channel state.
"""
# ===================================================
# Public API
# ===================================================
@doc """
Performs an ExRPC `call`, by automatically connecting to a remote `node`,
performing a "protected" {`m`,`f`,`a`} call and returning the result within
`recv_to` milliseconds.
It is important to understand that receiving {:badrpc, :timeout} does not guarantee
  that the RPC call failed, just that it took longer than it was expected to execute.
  If the RPC call finishes and sends the result back to the client, that result will be dropped.
"""
@spec call(atom, atom, atom, list, timeout | nil, timeout | nil) :: {:badtcp | :badrpc, any} | any
def call(node, m, f, a \\ [], recv_to \\ nil, send_to \\ nil)
when is_atom(node) and is_atom(m) and
is_atom(f) and is_list(a) and
(is_nil(recv_to) or is_integer(recv_to) or recv_to === :infinity) and
(is_nil(send_to) or is_integer(send_to) or send_to === :infinity)
do
ExRPC.Client.call(node, m, f, a, recv_to, send_to)
end
@doc """
Performs an ExRPC `cast`, by automatically connecting to a remote `node` and
sending a "protected" {`m`,`f`,`a`} call that will execute but never return the result
(an asynchronous cast).
"""
@spec cast(atom, atom, atom, list, timeout | nil) :: true
def cast(node, m, f, a \\ [], send_to \\ nil)
when is_atom(node) and is_atom(m) and
is_atom(f) and is_list(a) and
(is_nil(send_to) or is_integer(send_to) or send_to === :infinity)
do
ExRPC.Client.cast(node, m, f, a, send_to)
end
@doc """
Performs an ExRPC `safe_cast`, by automatically connecting to a remote `node` and
sending a "protected" {`m`,`f`,`a`} call that will execute but never return the result
  (an asynchronous cast). In contrast to the simple `cast` function, this function will
return an error if the connection to the remote node fails (hence the `safe` prefix).
"""
@spec safe_cast(atom, atom, atom, list, timeout | nil) :: {:badtcp | :badrpc, any} | true
def safe_cast(node, m, f, a \\ [], send_to \\ nil)
when is_atom(node) and is_atom(m) and
is_atom(f) and is_list(a) and
(is_nil(send_to) or is_integer(send_to) or send_to === :infinity)
do
ExRPC.Client.safe_cast(node, m, f, a, send_to)
end
end
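# A defensive-call sketch, pattern-matching the error tuples documented in the
# specs above (the node name is illustrative):
#
#     case ExRPC.call(:"worker@127.0.0.1", Kernel, :+, [1, 2], 5_000) do
#       {:badrpc, reason} -> {:error, reason}
#       {:badtcp, reason} -> {:error, reason}
#       result -> {:ok, result}
#     end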
|
lib/exrpc.ex
| 0.794345
| 0.611802
|
exrpc.ex
|
starcoder
|
defmodule Asteroid.ObjectStore.AuthenticationEvent.Mnesia do
@moduledoc """
Mnesia implementation of the `Asteroid.ObjectStore.AuthenticationEvent` behaviour
## Options
The options (`Asteroid.ObjectStore.AuthenticationEvent.opts()`) are:
- `:table_name`: an `atom()` for the table name. Defaults to `:asteroid_authentication_event`
- `:tab_def`: Mnesia's table definitions of the `:mnesia.create_table/2` function. Defaults to
the options below. User-defined `:tab_def` will be merged on a key basis, i.e. defaults will
not be erased. One can use it to add additional indexes for clients or devices, e.g.:
  `tab_def: [index: [:refresh_token, :authenticated_session_id, :client_id]]`
- `:purge_interval`: the `integer()` interval in seconds the purge process will be triggered,
  or `:no_purge` to disable purge. Defaults to `60` (1 minute)
## Default Mnesia table definition
```elixir
[
attributes: [:id, :authenticated_session_id, :data],
index: [:authenticated_session_id]
]
```
## Purge process
The purge process uses the `Singleton` library. Therefore the purge process will be unique
per cluster (and that's probably what you want if you use Mnesia).
"""
require Logger
alias Asteroid.OIDC.AuthenticationEvent
@behaviour Asteroid.ObjectStore.AuthenticationEvent
@impl true
def install(opts) do
:mnesia.stop()
:mnesia.create_schema([node()])
:mnesia.start()
table_name = opts[:table_name] || :asteroid_authentication_event
tab_def =
[
attributes: [:id, :authenticated_session_id, :data],
index: [:authenticated_session_id]
]
|> Keyword.merge(opts[:tab_def] || [])
case :mnesia.create_table(table_name, tab_def) do
{:atomic, :ok} ->
Logger.info("#{__MODULE__}: created authentication event store #{table_name}")
:ok
{:aborted, {:already_exists, _}} ->
Logger.info("#{__MODULE__}: authentication event store #{table_name} already exists")
:ok
{:aborted, reason} ->
Logger.error(
"#{__MODULE__}: failed to create authentication event store #{table_name} " <>
"(reason: #{inspect(reason)})"
)
{:error, reason}
end
end
@impl true
def start_link(opts) do
case :mnesia.start() do
:ok ->
opts = Keyword.merge([purge_interval: 60], opts)
# we launch the process anyway because we need to return a process
# but the singleton will do nothing if the value is `:no_purge`
Singleton.start_child(__MODULE__.Purge, opts, __MODULE__)
{:error, _} = error ->
error
end
end
@impl true
def get(authentication_event_id, opts) do
table_name = opts[:table_name] || :asteroid_authentication_event
case :mnesia.dirty_read(table_name, authentication_event_id) do
[] ->
Logger.debug(
"#{__MODULE__}: getting authentication event `#{authentication_event_id}`, " <>
"value: `nil`"
)
{:ok, nil}
[{^table_name, ^authentication_event_id, authenticated_session_id, data}] ->
authentication_event = %AuthenticationEvent{
id: authentication_event_id,
authenticated_session_id: authenticated_session_id,
data: data
}
Logger.debug(
"#{__MODULE__}: getting authentication event `#{authentication_event_id}`, " <>
"value: `#{inspect(authentication_event)}`"
)
{:ok, authentication_event}
_ ->
{:error, "Multiple results from Mnesia"}
end
catch
:exit, reason ->
{:error, reason}
end
@impl true
def get_from_authenticated_session_id(authenticated_session_id, opts) do
table_name = opts[:table_name] || :asteroid_authentication_event
{:ok,
for {_table_name, authentication_event_id, _authenticated_session_id, _data} <-
:mnesia.dirty_match_object({table_name, :_, authenticated_session_id, :_}) do
authentication_event_id
end}
catch
:exit, reason ->
{:error, reason}
end
@impl true
def put(authentication_event, opts) do
table_name = opts[:table_name] || :asteroid_authentication_event
record = {
table_name,
authentication_event.id,
authentication_event.authenticated_session_id,
authentication_event.data
}
:mnesia.dirty_write(table_name, record)
Logger.debug(
"#{__MODULE__}: stored authentication event `#{authentication_event.id}`, " <>
"value: `#{inspect(authentication_event)}`"
)
:ok
catch
:exit, reason ->
{:error, reason}
end
@impl true
def delete(authentication_event_id, opts) do
table_name = opts[:table_name] || :asteroid_authentication_event
:mnesia.dirty_delete(table_name, authentication_event_id)
Logger.debug("#{__MODULE__}: deleted authentication event `#{authentication_event_id}`")
:ok
catch
:exit, reason ->
{:error, reason}
end
end
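# A rough end-to-end sketch (ids and session values are made up): install the
# table, start the purge singleton, then write and read back an event.
#
#     alias Asteroid.ObjectStore.AuthenticationEvent.Mnesia, as: Store
#
#     :ok = Store.install([])
#     {:ok, _pid} = Store.start_link(purge_interval: 60)
#
#     event = %Asteroid.OIDC.AuthenticationEvent{
#       id: "ae-123",
#       authenticated_session_id: "as-456",
#       data: %{}
#     }
#
#     :ok = Store.put(event, [])
#     {:ok, %Asteroid.OIDC.AuthenticationEvent{id: "ae-123"}} = Store.get("ae-123", [])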
|
lib/asteroid/object_store/authentication_event/mnesia.ex
| 0.903126
| 0.835752
|
mnesia.ex
|
starcoder
|
defmodule RefInspector.Config do
@moduledoc """
Module to simplify access to configuration values with default values.
There should be no configuration required to start using `:ref_inspector` if
you rely on the default values:
config :ref_inspector,
database_files: ["referers.yml"],
database_path: Application.app_dir(:ref_inspector, "priv"),
http_opts: [],
remote_urls: [{"referers.yml", "https://s3-eu-west-1.amazonaws.com/snowplow-hosted-assets/third-party/referer-parser/referers-latest.yaml"}],
startup_silent: false,
startup_sync: true,
yaml_file_reader: {:yamerl_constr, :file, [[:str_node_as_binary]]}
The default `:database_path` is evaluated at runtime and not compiled into
a release!
## How to Configure
There are two ways to change the configuration values with the preferred way
depending on your environment and personal taste.
### Static Configuration
If you can ensure the configuration are static and not dependent on i.e. the
server your application is running on, you can use a static approach by
modifying your `config.exs` file:
config :ref_inspector,
database_files: ["referers_search.yml", "referers_social.yml"],
database_path: "/path/to/ref_inspector/database_files"
### Dynamic Configuration
If a compile time configuration is not possible or does not match the usual
approach taken in your application you can use a runtime approach.
This is done by defining an initializer module that will automatically be
called by `RefInspector.Supervisor` upon startup/restart. The configuration
is expected to consist of a `{mod, fun}` or `{mod, fun, args}` tuple:
# {mod, fun}
config :ref_inspector,
init: {MyInitModule, :my_init_mf}
# {mod, fun, args}
config :ref_inspector,
init: {MyInitModule, :my_init_mfargs, [:foo, :bar]}
defmodule MyInitModule do
@spec my_init_mf() :: :ok
def my_init_mf(), do: my_init_mfargs(:foo, :bar)
@spec my_init_mfargs(atom, atom) :: :ok
def my_init_mfargs(:foo, :bar) do
priv_dir = Application.app_dir(:my_app, "priv")
Application.put_env(:ref_inspector, :database_path, priv_dir)
end
end
The function is required to always return `:ok`.
## Startup Behaviour
Databases are loaded synchronously when starting the application.
You can change this behaviour to have the application force an asynchronous
database loading during the initial startup:
config :ref_inspector,
startup_sync: false
  This can lead to the first parsing calls working with an empty database
  and therefore not returning the results you expect.
### Starting Silently
When starting the application you will receive warnings if the database is
not available. If you want to hide these messages you can configure the
  startup to be completely silent:
config :ref_inspector,
startup_silent: true
## Database Configuration
Configuring the database to use can be done using three related values:
- `:database_files`
- `:database_path`
- `:remote_urls`
The `:database_path` is the directory to look for when loading the databases.
It is also the place where `RefInspector.Downloader` stores a copy of the
configured files.
For the actual files loaded there is `:database_files`, a list of filenames
to load in the order specified. All files are expected to be inside the
configured database path.
When downloading the databases through `RefInspector.Downloader` the value
`:remote_urls` is of utmost importance. It defines where each file is located.
config :ref_inspector,
remote_urls: [
"http://example.com/database.yml",
{"database_local.yml", "http://example.com/database_remote.yml"}
]
  To configure a remote database you can define a plain URL. It will
  be stored locally under the filename that is extracted from the URL. In the above
  example that would be `"database.yml"`.
If the remote and local names match you can configure a `{local, remote}`
tuple to deactivate the automatic name extraction.
### Internal Domains
To exclude some domains from parsing you can mark
them as `:internal` using your configuration:
config :ref_inspector,
internal: ["www.example.com", "www.example.org"]
If a referer matches one of the configured
domains (== ends with, paths ignored!), it will return a result with
the medium `:internal`. Both `:source` and `:term` will be left at the
initial/unknown state not intended for further processing.
## Download Configuration
Using the default configuration all download requests for your database files
are done using [`:hackney`](https://hex.pm/packages/hackney). To pass custom
configuration values to hackney you can use the key `:http_opts`:
config :ref_inspector,
http_opts: [proxy: "http://mycompanyproxy.com"]
Please see
[`:hackney.request/5`](https://hexdocs.pm/hackney/hackney.html#request-5)
for a complete list of available options.
If you want to change the library used to download the databases you can
configure a module implementing the `RefInspector.Downloader.Adapter`
behaviour:
config :ref_inspector,
downloader_adapter: MyDownloaderAdapter
## YAML File Reader Configuration
By default the library [`:yamerl`](https://hex.pm/packages/yamerl) will
be used to read and decode the yaml database files. You can configure this
reader to be a custom module:
config :ref_inspector,
yaml_file_reader: {module, function}
config :ref_inspector,
yaml_file_reader: {module, function, extra_args}
The configured module will receive the file to read as the first argument with
any optionally configured extra arguments after that.
"""
@upstream_remote "https://s3-eu-west-1.amazonaws.com/snowplow-hosted-assets/third-party/referer-parser/referers-latest.yaml"
@default_files ["referers.yml"]
@default_urls [{"referers.yml", @upstream_remote}]
@default_downloader_adapter RefInspector.Downloader.Adapter.Hackney
@default_yaml_reader {:yamerl_constr, :file, [[:str_node_as_binary]]}
@doc """
Provides access to configuration values with optional environment lookup.
"""
@spec get(atom, term) :: term
def get(key, default \\ nil) do
Application.get_env(:ref_inspector, key, default)
end
@doc """
Returns the list of configured database files.
"""
@spec database_files() :: [binary]
def database_files do
case get(:database_files) do
nil -> default_files()
files when is_list(files) -> files
end
end
@doc """
Returns the configured database path.
If the path is not defined the `priv` dir of `:ref_inspector`
as returned by `Application.app_dir(:ref_inspector, "priv")` will be used.
"""
@spec database_path() :: String.t()
def database_path do
case get(:database_path) do
nil -> Application.app_dir(:ref_inspector, "priv")
path -> path
end
end
@doc """
Returns the default list of database files.
"""
@spec default_files() :: [binary]
def default_files, do: @default_files
@doc """
Returns the default list of database urls.
"""
@spec default_urls() :: [{binary, binary}]
def default_urls, do: @default_urls
@doc """
Returns whether the remote database matches the default.
"""
@spec default_remote_database?() :: boolean
def default_remote_database?, do: yaml_urls() == default_urls()
@doc """
Returns the configured downloader adapter module.
  The module is expected to adhere to the behaviour defined in
`RefInspector.Downloader.Adapter`.
"""
@spec downloader_adapter() :: module
def downloader_adapter, do: get(:downloader_adapter, @default_downloader_adapter)
@doc """
Calls the optionally configured init method.
"""
@spec init_env() :: :ok
def init_env do
case get(:init) do
nil -> :ok
{mod, fun} -> apply(mod, fun, [])
{mod, fun, args} -> apply(mod, fun, args)
end
end
@doc """
Returns the `{mod, fun, extra_args}` to be used when reading a yaml file.
"""
@spec yaml_file_reader() :: {module, atom, [term]}
def yaml_file_reader do
case get(:yaml_file_reader) do
{_, _, _} = mfargs -> mfargs
{mod, fun} -> {mod, fun, []}
_ -> @default_yaml_reader
end
end
@doc """
Returns the remote urls of the database file.
"""
@spec yaml_urls() :: [String.t() | {String.t(), String.t()}]
def yaml_urls do
case get(:remote_urls) do
files when is_list(files) and 0 < length(files) -> files
_ -> default_urls()
end
end
end
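# Resolution sketch: with no application config set, the accessors above fall
# back to the documented defaults.
#
#     RefInspector.Config.database_files()
#     #=> ["referers.yml"]
#
#     RefInspector.Config.yaml_file_reader()
#     #=> {:yamerl_constr, :file, [[:str_node_as_binary]]}
#
#     RefInspector.Config.default_remote_database?()
#     #=> true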
|
lib/ref_inspector/config.ex
| 0.896591
| 0.534673
|
config.ex
|
starcoder
|
defmodule ExSaga.Hook do
@moduledoc """
"""
alias ExSaga.{DryRun, Event, Stage, Stepable}
@typedoc """
"""
@type opt ::
{:override, boolean}
| {:cascade_depth, non_neg_integer}
@typedoc """
"""
@type opts :: [opt]
@typedoc """
"""
@type hook_state :: term
@typedoc """
"""
@type hook_result ::
:ok
| {:ok, hook_state}
| {:error, reason :: term}
| {:error, reason :: term, hook_state}
@typedoc """
"""
@type hook_context :: {Event.t(), hook_result()} | Event.t()
@typedoc """
"""
@type accumulator :: %{
:hooks => [t],
:hooks_left => [t],
:effects_so_far => Stage.effects(),
optional(term) => term
}
defstruct name: nil,
filter: nil,
fun: nil
@type t :: %__MODULE__{
name: Stage.name(),
filter: (Event.t(), hook_state -> boolean),
fun: (Event.t(), hook_state -> hook_result)
}
@doc """
"""
@spec merge_hooks(accumulator, Stepable.opts()) :: [t]
def merge_hooks(acc, opts \\ []) do
acc
|> Map.get(:hooks, [])
|> Enum.map(fn
{h, _} -> h
h -> h
end)
|> reduce_hooks(opts)
end
@doc false
@spec reduce_hooks([t], Stepable.opts()) :: [t]
defp reduce_hooks(stage_hooks, opts) do
opts
|> Keyword.get(:extra_hooks, [])
|> Enum.reduce(stage_hooks, fn
{h, hopts}, hs -> add_hook(h, hs, hopts)
h, hs -> add_hook(h, hs)
end)
end
@doc false
@spec add_hook(t, [t], opts) :: [t]
defp add_hook(new_hook, hooks, opts \\ []) do
with %__MODULE__{} <- Enum.find(hooks, fn h -> h.name == new_hook.name end),
{false, _} <- Keyword.pop(opts, :override?, false) do
hooks
else
_ -> [new_hook | hooks]
end
end
@doc """
"""
@spec step(Event.t(), accumulator, Stepable.opts()) :: {Event.t() | nil, accumulator}
def step(event, acc, opts \\ [])
def step(event, %{hooks_left: []} = acc, opts) do
case maybe_update_hook_state(event.context, acc) do
{:ok, new_state} ->
{nil, new_state}
{:error, _reason, new_state} ->
handle_hook_error(event.context, new_state, opts)
end
end
def step(event, %{hooks_left: [h | hs]} = acc, opts) do
case maybe_update_hook_state(event.context, acc) do
{:ok, new_state} ->
{maybe_execute_hook(h, event, new_state, opts), %{new_state | hooks_left: hs}}
{:error, _reason, new_state} ->
handle_hook_error(event.context, new_state, opts)
end
end
def step(_event, acc, _opts), do: {nil, acc}
@doc false
@spec maybe_update_hook_state(hook_context, accumulator) ::
{:ok, accumulator}
| {:error, reason :: term, accumulator}
defp maybe_update_hook_state({_, hook_result}, acc) do
update_hook_state(hook_result, acc)
end
defp maybe_update_hook_state(_, acc), do: {:ok, acc}
@doc false
@spec update_hook_state(hook_result, accumulator) ::
{:ok, accumulator}
| {:error, reason :: term, accumulator}
defp update_hook_state(:ok, acc), do: {:ok, acc}
defp update_hook_state({:ok, hook_state}, acc) do
{:ok, put_in(acc, [Access.key(:effects_so_far), :__hookstate__], hook_state)}
end
defp update_hook_state({:error, reason}, acc),
do: {:error, reason, acc}
defp update_hook_state({:error, reason, hook_state}, acc) do
{:error, reason, put_in(acc, [Access.key(:effects_so_far), :__hookstate__], hook_state)}
end
defp update_hook_state(hook_result, acc) do
{:error, {:invalid_hook_result, hook_result}, acc}
end
@doc false
@spec handle_hook_error(hook_context, accumulator, Stepable.opts()) :: {Event.t() | nil, accumulator}
defp handle_hook_error({event, {:error, reason, _}}, acc, opts),
do: handle_hook_error({event, {:error, reason}}, acc, opts)
defp handle_hook_error({%Event{name: [_, _, :compensation]}, _}, %{hooks_left: []} = acc, _opts) do
{nil, acc}
end
defp handle_hook_error({%Event{name: [_, _, :compensation]} = e, _}, %{hooks_left: [h | hs]} = s, opts) do
{maybe_execute_hook(h, e, s, opts), %{s | hooks_left: hs}}
end
defp handle_hook_error({event, {:error, reason}}, acc, _opts) do
# TODO: compensation or error handler after error w/ hooks
%{effects_so_far: effects_so_far} = acc
{
Event.update(event,
context: {:error, reason, Event.get_effect(event, effects_so_far), effects_so_far},
name: nil
),
acc
}
end
@doc """
"""
@spec maybe_execute_hook(t, Event.t(), accumulator, Stepable.opts()) :: Event.t()
def maybe_execute_hook(hook, %Event{name: [:completed, :hook, _]} = event, acc, opts) do
{inner_event, _} = event.context
maybe_execute_hook(hook, inner_event, acc, opts)
end
def maybe_execute_hook(hook, %Event{name: [:skipped, :hook, _]} = event, acc, opts) do
maybe_execute_hook(hook, event.context, acc, opts)
end
def maybe_execute_hook(hook, event, acc, opts) do
hook_state = get_in(acc, [Access.key(:effects_so_far), :__hookstate__])
opts = DryRun.from_stepable(event, opts, false)
case DryRun.maybe_execute(hook.filter, [event, hook_state], opts ++ [hook: :filter]) do
true ->
result = execute_hook(hook, event, hook_state, opts)
Event.update(event,
name: [:completed, :hook, hook.name],
context: {event, result}
)
_ ->
Event.update(event,
name: [:skipped, :hook, hook.name],
context: event
)
end
end
@doc false
@spec execute_hook(t, Event.t(), hook_state, DryRun.execution_opts()) :: hook_result
defp execute_hook(hook, event, state, opts) do
opts = Keyword.put(opts, :dry_run_result_default, :ok)
case DryRun.maybe_execute(hook.fun, [event, state], opts ++ [hook: :hook]) do
:ok -> :ok
{:ok, hook_state} -> {:ok, hook_state}
{:error, reason} -> {:error, reason}
{:error, reason, hook_state} -> {:error, reason, hook_state}
otherwise -> {:error, {:unsupported_hook_result_form, otherwise}}
end
end
end
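# A hedged sketch of building a hook per the struct/typespecs above: a logging
# hook that fires on every event (the :logger name is illustrative). It would
# typically be passed to a stage via the :extra_hooks option consumed by
# merge_hooks/2.
#
#     log_hook = %ExSaga.Hook{
#       name: :logger,
#       filter: fn _event, _hook_state -> true end,
#       fun: fn event, _hook_state ->
#         IO.inspect(event.name, label: "saga event")
#         :ok
#       end
#     }
#
#     opts = [extra_hooks: [log_hook]]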
|
lib/ex_saga/hook.ex
| 0.701509
| 0.420719
|
hook.ex
|
starcoder
|
defmodule QRCode.DataMasking do
@moduledoc """
A mask pattern changes which modules are dark and which are light
according to a particular rule. The purpose of this step is to
modify the QR code to make it as easy for a QR code reader to scan
as possible.
"""
use Bitwise
alias MatrixReloaded.Matrix
alias QRCode.QR
import QRCode.QR, only: [version: 1]
@spec apply(QR.t()) :: QR.t()
def apply(%QR{matrix: matrix, version: version} = qr)
when version(version) do
{index, masked_matrix} =
matrix
|> masking_matrices()
|> total_penalties()
|> best_mask()
%{qr | matrix: masked_matrix, mask_num: index}
end
@spec masking_matrices(Matrix.t()) :: Enumerable.t()
def masking_matrices(matrix) do
Stream.map(0..7, fn num -> {num, make_mask_pattern(matrix, num)} end)
end
@spec total_penalties(Enumerable.t()) :: Enumerable.t()
def total_penalties(matrices) do
Stream.map(matrices, fn {num, matrix} -> {num, total_penalty(matrix), matrix} end)
end
@spec best_mask(Enumerable.t()) :: {non_neg_integer(), Matrix.t()}
def best_mask(matrices) do
[{index, _, masked_matrix} | _] =
matrices
|> Enum.sort(fn {_, p1, _}, {_, p2, _} -> p1 <= p2 end)
{index, masked_matrix}
end
@spec total_penalty(Matrix.t()) :: pos_integer()
def total_penalty(matrix) do
Enum.reduce(1..4, 0, fn pen, sum -> penalty(matrix, pen) + sum end)
end
@spec penalty(Matrix.t(), 1 | 2 | 3 | 4) :: non_neg_integer()
def penalty(matrix, 1) do
row_pen =
matrix
|> compute_penalty_1()
col_pen =
matrix
|> Matrix.transpose()
|> compute_penalty_1()
row_pen + col_pen
end
def penalty(matrix, 2) do
matrix
|> compute_penalty_2()
end
def penalty(matrix, 3) do
row_pen =
matrix
|> compute_penalty_3()
col_pen =
matrix
|> Matrix.transpose()
|> compute_penalty_3()
row_pen + col_pen
end
def penalty(matrix, 4) do
{rs, cs} = Matrix.size(matrix)
dark_modules =
matrix
|> Enum.reduce(0, fn row, acc -> Enum.sum(row) + acc end)
percent_of_dark = Kernel.floor(dark_modules * 100 / (rs * cs))
    remainder =
      percent_of_dark
      |> Kernel.rem(5)
    Kernel.trunc(
      Kernel.min(
        Kernel.abs(percent_of_dark - remainder - 50) / 5,
        Kernel.abs(percent_of_dark - remainder - 45) / 5
      ) * 10
    )
end
defp make_mask_pattern(matrix, mask_num) do
matrix
|> Enum.with_index()
|> Enum.map(fn {row, i} ->
row
|> Enum.with_index()
|> Enum.map(fn {val, j} -> mask_pattern(val, i, j, mask_num) end)
end)
end
defp compute_penalty_1(matrix) do
matrix
|> Enum.reduce(0, fn [h | _] = row, acc ->
row
|> Enum.reduce({h, 0, acc}, &evaluate_cond_1/2)
|> Kernel.elem(2)
end)
end
defp evaluate_cond_1(val, {selected, sum, acc}) when val == selected and sum < 4 do
{val, sum + 1, acc}
end
defp evaluate_cond_1(val, {selected, 4, acc}) when val == selected do
{val, 5, acc + 3}
end
defp evaluate_cond_1(val, {selected, sum, acc}) when val == selected and sum > 4 do
{val, sum + 1, acc + 1}
end
defp evaluate_cond_1(val, {_val, _sum, acc}) do
{val, 1, acc}
end
defp compute_penalty_2(rows, acc \\ 0)
defp compute_penalty_2(rows, acc) when length(rows) == 1 do
acc
end
defp compute_penalty_2([row1, row2 | rows], acc) do
acc_row =
row1
|> Enum.zip(row2)
|> Enum.map(fn {v1, v2} -> v1 + v2 end)
|> evaluate_cond_2()
compute_penalty_2([row2] ++ rows, acc + acc_row)
end
defp evaluate_cond_2(row, sum \\ 0)
defp evaluate_cond_2(row, sum) when length(row) == 1 do
sum
end
defp evaluate_cond_2([v1, v2 | tl], sum) when v1 + v2 == 0 or v1 + v2 == 4 do
evaluate_cond_2([v2] ++ tl, sum + 3)
end
defp evaluate_cond_2([_v1 | tl], sum) do
evaluate_cond_2(tl, sum)
end
defp compute_penalty_3(matrix) do
matrix
|> Enum.reduce(0, fn row, acc -> evaluate_cond_3(row, acc) end)
end
defp evaluate_cond_3(row, sum) when length(row) < 11 do
sum
end
defp evaluate_cond_3([a, b, c, d, e, f, g, h, i, j, k | tl], sum) do
check = [a, b, c, d, e, f, g, h, i, j, k]
patt_1 = [1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0]
patt_2 = [0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1]
pen =
if check == patt_1 or check == patt_2 do
40
else
0
end
evaluate_cond_3([b, c, d, e, f, g, h, i, j, k] ++ tl, sum + pen)
end
defp mask_pattern(val, row, col, 0) when rem(row + col, 2) == 0, do: val ^^^ 1
defp mask_pattern(val, row, _col, 1) when rem(row, 2) == 0, do: val ^^^ 1
defp mask_pattern(val, _row, col, 2) when rem(col, 3) == 0, do: val ^^^ 1
defp mask_pattern(val, row, col, 3) when rem(row + col, 3) == 0, do: val ^^^ 1
defp mask_pattern(val, row, col, 4)
when rem(floor(row / 2) + floor(col / 3), 2) == 0,
do: val ^^^ 1
defp mask_pattern(val, row, col, 5)
when rem(row * col, 2) + rem(row * col, 3) == 0,
do: val ^^^ 1
defp mask_pattern(val, row, col, 6)
when rem(rem(row * col, 2) + rem(row * col, 3), 2) == 0,
do: val ^^^ 1
defp mask_pattern(val, row, col, 7)
when rem(rem(row + col, 2) + rem(row * col, 3), 2) == 0,
do: val ^^^ 1
defp mask_pattern(val, _row, _col, _mask_num), do: val
end
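# Flow sketch for the module above: apply/1 enumerates all eight mask
# patterns, scores each with total_penalty/1 (penalty conditions 1..4), and
# keeps the lowest-scoring mask (assumes a QR.t() built elsewhere in the
# library).
#
#     %QRCode.QR{matrix: _masked, mask_num: best} = QRCode.DataMasking.apply(qr)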
|
lib/qr_code/data_masking.ex
| 0.796134
| 0.788298
|
data_masking.ex
|
starcoder
|
defmodule TimeZoneInfo do
@moduledoc """
`TimeZoneInfo` provides a time zone database for
[Elixir](https://elixir-lang.org/) using the data from the
[the Internet Assigned Numbers Authority (IANA)](https://www.iana.org/time-zones).
Therefore `TimeZoneInfo` contains an implementation of the
`Calendar.TimeZoneDatabase` behaviour under `TimeZoneInfo.TimeZoneDatabase`.
"""
alias TimeZoneInfo.{
DataConfig,
DataPersistence,
DataStore,
ExternalTermFormat,
FileArchive,
IanaParser,
Transformer,
Transformer.Abbr,
Worker
}
@typedoc "Seconds since year 0 in the gregorian calendar."
@type gregorian_seconds :: integer()
@typedoc "The data structure containing all informations for `TimeZoneInfo`."
@type data :: %{
required(:version) => String.t(),
required(:time_zones) => %{Calendar.time_zone() => [transition()]},
required(:rules) => %{TimeZoneInfo.rule_name() => [rule()]},
required(:links) => %{Calendar.time_zone() => Calendar.time_zone()},
required(:config) => data_config()
}
@typedoc "The name of a rule set that can be found in the IANA data."
@type rule_name :: String.t()
@typedoc """
A transition marks a point in time when one or more of the values `utc-offset`,
`std_offset` or `zone-abbr` change.
"""
@type transition :: {gregorian_seconds() | NaiveDateTime.t(), zone_state}
  @typedoc "The `zone_state` is either a `time_zone_period` or a `rules_ref`."
@type zone_state :: time_zone_period | rules_ref
@typedoc """
  A reference to a rule set. The reference also contains `utc_offset` and
  `format` because these values are needed to apply a `rule`.
"""
@type rules_ref :: {Calendar.utc_offset(), rule_name(), Abbr.format()}
@typedoc """
The wall time period represented in a tuple with two naive date times.
"""
@type wall_period :: {NaiveDateTime.t(), NaiveDateTime.t()}
@typedoc """
A period where a certain combination of UTC offset, standard offset and zone
abbreviation is in effect. The `wall_period` contains the start and end of the
time zone period in wall time.
"""
@type time_zone_period :: {
Calendar.utc_offset(),
Calendar.std_offset(),
Calendar.zone_abbr(),
wall_period
}
@typedoc "A rule representation."
@type rule :: {
{
Calendar.month(),
IanaParser.day(),
{Calendar.hour(), Calendar.minute(), Calendar.second()}
},
time_standard,
Calendar.std_offset(),
Abbr.letters()
}
@typedoc "The time standards used by IANA."
@type time_standard :: :wall | :standard | :gmt | :utc | :zulu
@type time_zones :: :all | [Calendar.time_zone()]
@typedoc """
The configuration for data generation.
- `:files`: The list of files from the IANA DB download.
- `:time_zones`: The list of time zones that will be used. The atom `:all`
indicates that all time zones from the IANA DB will be used.
- `:lookahead`: Number of years for which data are precalculated.
See `Config` page for more information.
"""
@type data_config :: [
files: [String.t()],
time_zones: time_zones(),
lookahead: non_neg_integer()
]
@doc """
Returns the list of all available time zones with or without links. The option
`:links` can be used to customize the list.
Values for `:links`:
- `:ignore` just the time zone names will be returned
- `:only` just the link names will be returned
- `:include` the time zone and link names will be returned (default)
The list will be sorted.
"""
@spec time_zones(links: :ignore | :only | :include) :: [Calendar.time_zone()]
def time_zones(opts \\ [links: :include]), do: DataStore.get_time_zones(opts)
@doc """
Returns the version of the IANA database.
"""
@spec iana_version :: String.t()
def iana_version, do: DataStore.version()
@doc """
  Triggers the update process. With the opt `:force` the update will be
  forced.
"""
@spec update(opt :: :run | :force) :: :ok | {:next, non_neg_integer()} | {:error, term()}
def update(opt \\ :run) when opt in [:run, :force], do: Worker.update(opt)
@doc """
  Returns the date time in UTC for the next update. Returns `:never` if the
  automated update is disabled.
"""
@spec next_update :: DateTime.t() | :never | :error
def next_update do
case Worker.next() do
{:next, value} -> value
_ -> :error
end
end
@doc """
Returns the state of `TimeZoneInfo`.
Returns
  - `:ok` if everything runs normally and the automated update is disabled.
  - `{:next, seconds}` if everything runs normally.
- `{:error, reason}` in case of an error.
"""
@spec state :: :ok | {:next, non_neg_integer()} | {:error, term()}
def state, do: Worker.state()
@doc """
Returns infos about persisted and stored data.
"""
def info do
%{
store: DataStore.info(),
persistence: DataPersistence.info(),
worker: Worker.state()
}
end
@doc """
Generates `TimeZoneInfo.data` from the given `iana_data_archive`.
"""
@spec data(binary(), data_config()) :: {:ok, binary() | data(), String.t()} | {:error, term()}
def data(iana_data_archive, config \\ []) do
with {:ok, config} <- validate(config),
{:ok, files} <- FileArchive.extract(iana_data_archive, config[:files]),
{:ok, version, content} <- content(files),
{:ok, parsed} <- IanaParser.parse(content),
data <- Transformer.transform(parsed, version, config),
{:ok, data} <- DataConfig.update_time_zones(data, config[:time_zones]),
{:ok, checksum} <- ExternalTermFormat.checksum(data),
{:ok, data} <- encode(data, config[:encode]) do
{:ok, data, checksum}
end
end
defp encode(data, true), do: ExternalTermFormat.encode(data)
defp encode(data, _), do: {:ok, data}
defp content(files) do
case Map.pop(files, "version") do
{nil, _} -> {:error, :version_not_found}
{version, files} -> {:ok, String.trim(version), join(files)}
end
end
defp join(files) do
files |> Enum.map(fn {_name, content} -> content end) |> Enum.join("\n")
end
defp validate(config) do
with {:ok, config} <- validate(:lookahead, config),
{:ok, config} <- validate(:time_zones, config),
{:ok, config} <- validate(:files, config),
{:ok, config} <- validate(:version_file, config) do
{:ok, config}
end
end
defp validate(:time_zones, config) do
case config[:time_zones] do
nil -> {:ok, Keyword.put(config, :time_zones, :all)}
time_zones when is_list(time_zones) -> {:ok, config}
value -> {:error, {:invalid_config, [time_zones: value]}}
end
end
defp validate(:version_file, config) do
files = config[:files]
case Enum.member?(files, "version") do
true -> {:ok, config}
false -> {:ok, Keyword.put(config, :files, ["version" | files])}
end
end
defp validate(:files, config) do
case config[:files] do
nil -> {:ok, Keyword.put(config, :files, files())}
files when is_list(files) -> {:ok, config}
value -> {:error, {:invalid_config, [files: value]}}
end
end
defp validate(:lookahead, config) do
case config[:lookahead] do
nil -> {:ok, Keyword.put(config, :lookahead, lookahead())}
years when is_integer(years) -> {:ok, config}
value -> {:error, {:invalid_config, [lookahead: value]}}
end
end
defp lookahead, do: Application.get_env(:time_zone_info, :lookahead)
defp files, do: Application.get_env(:time_zone_info, :files)
end
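# Usage sketch: TimeZoneInfo.TimeZoneDatabase implements
# Calendar.TimeZoneDatabase, so it can back standard DateTime conversions
# (the zone name is only an example).
#
#     {:ok, ny} =
#       DateTime.shift_zone(
#         ~U[2020-06-01 12:00:00Z],
#         "America/New_York",
#         TimeZoneInfo.TimeZoneDatabase
#       )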
|
lib/time_zone_info.ex
| 0.917985
| 0.669674
|
time_zone_info.ex
|
starcoder
|
defmodule Cure.Server do
use GenEvent
alias Cure.Queue, as: Queue
require Logger
@moduledoc """
The server is responsible for the communication between Elixir and C/C++.
The communication is based on Erlang Ports.
"""
@port_options [:binary, :use_stdio, packet: 2]
defmodule State do
defstruct port: nil, mgr: nil, queue: Queue.new, subs: []
end
## API
@doc """
Starts a Cure.Server process and opens a Port that can communicate with a
C/C++ program.
"""
@spec start(String.t) :: GenEvent.on_start
def start(program_name) when program_name |> is_binary do
    {:ok, mgr} = GenEvent.start
    mgr |> GenEvent.add_handler(__MODULE__, [program_name, mgr])
    {:ok, mgr}
end
@doc """
Starts a Cure.Server process, links it to the calling process and opens a
Port that can communicate with a C/C++ program.
"""
@spec start_link(String.t) :: GenEvent.on_start
def start_link(program_name) when program_name |> is_binary do
    {:ok, mgr} = GenEvent.start_link
    mgr |> GenEvent.add_handler(__MODULE__, [program_name, mgr])
    {:ok, mgr}
end
@doc """
Stops the server process.
"""
@spec stop(pid) :: :ok
def stop(mgr) when mgr |> is_pid do
mgr |> GenEvent.stop
end
@doc """
Subscribes the calling process to receive data events from the server process.
"""
@spec subscribe(pid) :: :ok
def subscribe(mgr) when mgr |> is_pid do
mgr |> GenEvent.sync_notify({:subscribe, self()})
end
@doc """
Adds an extra callback function to the server that is triggered on all
incoming data.
"""
@spec subscribe(pid, ((binary) -> any)) :: :ok
def subscribe(mgr, fun) when mgr |> is_pid and fun |> is_function(1) do
mgr |> GenEvent.sync_notify({:subscribe_callback, fun})
end
@doc """
Unsubscribes the calling process from receiving further data events coming
from the server process.
"""
@spec unsubscribe(pid) :: :ok
def unsubscribe(mgr) do
mgr |> GenEvent.sync_notify({:unsubscribe, self()})
end
@doc """
Removes a callback that was applied to all incoming data events.
NOTE: this has to be the exact same callback function that was registered
earlier with subscribe in order for this function to work properly.
"""
@spec unsubscribe(pid, ((binary) -> any)) :: :ok
def unsubscribe(mgr, fun) do
mgr |> GenEvent.sync_notify({:unsubscribe_callback, fun})
end
@doc """
Sends binary data to the C/C++ program that the server is connected with. A
callback-function (arity 1) can be added to handle the incoming response of
the program. If no callback is added, the response will be sent to the
calling process of this function.
The third argument indicates how the response should be handled. Possible
modes for handling the response are the following:
:once -> callback function is only applied once.
:permanent -> callback function is applied to all following events
  :sync -> the server waits to process further events until the response is handled
  (no timeout specified = :infinity).
"""
@spec send_data(pid, binary, :once | :permanent | :sync,
((binary) -> any) | timeout) :: :ok
def send_data(mgr, data, :once, callback)
when mgr |> is_pid
and data |> is_binary
and callback |> is_function(1) do
mgr |> GenEvent.sync_notify({:data, data, :once, {:function, callback}})
end
def send_data(mgr, data, :permanent, callback)
when mgr |> is_pid
and data |> is_binary
and callback |> is_function(1) do
mgr |> subscribe(callback)
mgr |> send_data(data, :noreply)
end
def send_data(mgr, data, :sync, callback)
when mgr |> is_pid
and data |> is_binary
and callback |> is_function(1) do
mgr |> send_data(data, :sync, callback, :infinity)
end
def send_data(mgr, data, :sync, timeout)
when mgr |> is_pid
and data |> is_binary
and (timeout == :infinity or (timeout |> is_number and timeout >= 0)) do
mgr |> GenEvent.sync_notify({:data, data, :sync, timeout, {:pid, self()}})
receive do
{:cure_data, msg} -> msg
end
end
@doc """
Sends binary data to the C/C++ program that the server is connected with.
  The server waits to process further events until the response for this
function is handled.
"""
@spec send_data(pid, binary, :sync, ((binary) -> any), timeout) :: :ok
def send_data(mgr, data, :sync, callback, timeout)
when mgr |> is_pid
and data |> is_binary
and callback |> is_function(1)
and (timeout == :infinity or (timeout |> is_number and timeout > 0)) do
mgr |> GenEvent.sync_notify({:data, data, :sync, timeout,
{:function, callback}})
end
@doc """
Sends binary data to the C/C++ program that the server is connected with.
The result is sent back to the process that called this function. The third
argument indicates how the response should be handled. Possible modes for
handling the response are the following:
:once -> Only the first event will be sent back to the calling process.
:noreply -> No event will be sent back to the calling process.
:permanent -> All following events will be sent back to the calling process.
  :sync -> the server waits to process further events until the response is
sent back to the calling process (timeout = :infinity unless specified).
"""
  @spec send_data(pid, binary, :once | :noreply | :permanent | :sync) :: :ok | {:error, term}
def send_data(mgr, data, :once) when mgr |> is_pid
and data |> is_binary do
mgr |> GenEvent.sync_notify({:data, data, :once, {:pid, self()}})
end
def send_data(mgr, data, :noreply) when mgr |> is_pid
and data |> is_binary do
mgr |> GenEvent.sync_notify({:data, data, :noreply})
end
def send_data(mgr, data, :permanent) when mgr |> is_pid
and data |> is_binary do
mgr |> subscribe
mgr |> send_data(data, :noreply)
end
def send_data(mgr, data, :sync) when mgr |> is_pid
and data |> is_binary do
mgr |> send_data(data, :sync, :infinity)
end
## Callbacks
@doc false
def init([program_name, mgr]) do
Process.flag(:trap_exit, true)
port = Port.open({:spawn, program_name}, @port_options)
{:ok, %State{port: port, mgr: mgr}}
end
@doc false
def terminate(:stop, %State{port: port}) do
port |> Port.close
:ok
end
@doc false
def handle_event({:subscribe, pid}, state = %State{subs: subs}) do
new_subs = subs |> add_sub({:pid, pid})
{:ok, %State{state | subs: new_subs}}
end
def handle_event({:unsubscribe, pid}, state = %State{subs: subs}) do
{:ok, %State{state | subs: List.delete(subs, {:pid, pid})}}
end
def handle_event({:subscribe_callback, fun}, state = %State{subs: subs}) do
new_subs = subs |> add_sub({:function, fun})
{:ok, %State{state | subs: new_subs}}
end
def handle_event({:unsubscribe_callback, fun}, state = %State{subs: subs}) do
{:ok, %State{state | subs: List.delete(subs, {:function, fun})}}
end
def handle_event({:data, data, :once, callback},
state = %State{port: port, queue: queue}) do
new_state = %State{state | queue: Queue.push(queue, callback)}
port |> Port.command(data)
{:ok, new_state}
end
def handle_event({:data, data, :noreply},
state = %State{port: port, queue: queue}) do
new_state = %State{state | queue: Queue.push(queue, :noreply)}
port |> Port.command(data)
{:ok, new_state}
end
def handle_event({:data, data, :sync, timeout, cb},
state = %State{port: port}) do
port |> Port.command(data)
result = receive do
{^port, {:data, value}} -> value
after timeout -> :timeout
end
cb |> handle_msg(result)
{:ok, state}
end
## Port related callbacks
@doc false
def handle_info({_port, {:data, msg}}, state = %State{queue: {[], []},
subs: subs}) do
spawn fn ->
subs |> Enum.map(fn(sub) ->
sub |> handle_msg(msg)
end)
end
{:ok, state}
end
def handle_info({_port, {:data, msg}}, state = %State{queue: queue,
subs: subs}) do
{remaining, value: oldest} = Queue.pop(queue)
state = %State{state | queue: remaining}
oldest |> handle_msg(msg)
spawn fn ->
subs |> Enum.map(fn(sub) ->
sub |> handle_msg(msg)
end)
end
{:ok, state}
end
def handle_info({:EXIT, _port, reason}, state = %State{mgr: mgr}) do
Logger.debug "Cure Server: Port closed, reason: #{reason}."
mgr |> stop
{:ok, state}
end
# Helper functions:
defp handle_msg({:pid, pid}, msg) do
pid |> send({:cure_data, msg})
end
defp handle_msg({:function, callback}, msg) do
spawn fn ->
apply(callback, [msg])
end
end
defp handle_msg(:noreply, _msg), do: :ok
defp add_sub(subs, new_sub) do
if new_sub in subs, do: subs, else: [new_sub | subs]
end
end
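# A minimal usage sketch; the external program path is hypothetical.
#
#     {:ok, server} = Cure.Server.start_link("./c_src/program")
#
#     # Fire-and-forget:
#     :ok = Cure.Server.send_data(server, <<1, 2, 3>>, :noreply)
#
#     # One response, handled by a callback:
#     Cure.Server.send_data(server, <<1, 2, 3>>, :once, fn reply ->
#       IO.inspect(reply, label: "from port")
#     end)
#
#     Cure.Server.stop(server)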
|
lib/cure_server.ex
| 0.659734
| 0.485905
|
cure_server.ex
|
starcoder
|
defmodule Extractly.Toc.Options do
@moduledoc false
defstruct format: :markdown,
gh_links: false,
max_level: 7,
min_level: 1,
remove_gaps: false,
start: 1,
type: :ul
# %Extractly.Toc.Options{format: :markdown, gh_links: false, max_level: 7, min_level: 1, remove_gaps: false, start: 1, type: :ul}
  # This only works because no values are strings, so none contain ",", "{", or "}"
@parse_rgx ~r< \{ (.*) \} >x
def from_string!(str) do
case Regex.run(@parse_rgx, str) do
[_, content] -> _parse_str(content, new!())
_ -> raise "Illegal Options representation: #{str}"
end
end
def new(from \\ []) do
try do
{:ok, new!(from)}
rescue
ke in KeyError -> {:error, "Unsupported option #{ke.key}"}
end
end
def new!(from \\ []), do: struct!(__MODULE__, from)
def to_string(%__MODULE__{}=options), do: inspect(options)
@transformers %{
format: &__MODULE__._make_sym/1,
gh_links: &__MODULE__._make_bool/1,
max_level: &__MODULE__._make_int/1,
min_level: &__MODULE__._make_int/1,
remove_gaps: &__MODULE__._make_bool/1,
start: &__MODULE__._make_int/1,
type: &__MODULE__._make_sym/1
}
defp _add_parsed_option(key, value, options) do
Map.put(options, key, Map.fetch!(@transformers, key).(value))
end
def _make_bool(str) do
cond do
str == "true" -> true
str == "false" -> false
true -> raise "Illegal boolean value #{str}"
end
end
def _make_int(str) do
case Integer.parse(str) do
{value, ""} -> value
_ -> raise "Illegal integer value #{str}"
end
end
def _make_sym(str) do
str
|> String.trim_leading(":")
|> String.to_atom
end
  # This only works because no values are strings, so none contain ",", "{", or "}"
@elergx ~r{ (\w+): \s (\S+)(?:,|\z) }x
defp _parse_str(str, options) do
@elergx
|> Regex.scan(str)
|> Enum.reduce(options, fn [_, key, value], options_ -> _add_parsed_option(String.to_atom(key), value, options_) end)
end
end
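# Round-trip sketch: from_string!/1 expects the inspect/1-style rendering
# produced by to_string/1 above (the option values shown are illustrative).
#
#     str =
#       "%Extractly.Toc.Options{format: :html, gh_links: true, max_level: 3, " <>
#         "min_level: 2, remove_gaps: false, start: 1, type: :ol}"
#
#     opts = Extractly.Toc.Options.from_string!(str)
#     opts.format    #=> :html
#     opts.max_level #=> 3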
# SPDX-License-Identifier: Apache-2.0
|
lib/extractly/toc/options.ex
| 0.66072
| 0.456773
|
options.ex
|
starcoder
|
defmodule Mix.Tasks.Bmark.Cmp do
use Mix.Task
@shortdoc "Compare bmark results"
@moduledoc """
## Usage
mix bmark.cmp <result1> <result2>
Compares a pair of benchmark results.
result1 and result2 should be results files written by bmark for different runs of
the same benchmark. bmark.cmp will compare the two results and will report the statistical
significance of the difference in their means.
"""
defmodule Stats do
@moduledoc """
Stores statistics from a bmark result file
* `count` - The number of runs in the results file.
* `mean` - The arithmetic mean of the results from the file
* `stdev` - The standard deviation of the results from the file
"""
defstruct count: 0, mean: 0, stdev: 0
end
@doc """
The task run function processes the arguments into a report.
"""
def run(args) do
args
|> parse_args
|> load_results
|> report_results
|> compare_results
|> report_difference
end
  # Extracts exactly two file names from `args` or prints the usage and exits.
defp parse_args(args) do
case OptionParser.parse(args, strict: []) do
{[], [name1, name2], []} -> [name1, name2]
{_, _, _} -> usage
end
end
defp usage do
    Kernel.exit("Usage: mix bmark.cmp <results file 1> <results file 2>")
end
defp load_results(list_of_filenames) do
{
list_of_filenames |> Enum.map(&filename_path_to_header/1),
Enum.map(list_of_filenames, &load_single_result_file/1)
}
end
# Trims current working directory from the filename to produce a shorter string to be used as a
# column header for the report.
defp filename_path_to_header(filename) do
Path.relative_to_cwd(filename)
end
defp load_single_result_file(filename) do
File.stream!(filename)
|> Enum.map(&String.strip(&1))
end
defp report_results({list_of_headers, list_of_results}) do
Bmark.ComparisonFormatter.format(list_of_headers, list_of_results) |> IO.puts
list_of_results
end
defp compare_results(list_of_results) do
list_of_results
|> Enum.map(&convert_to_integer(&1))
|> Enum.map(&compute_stats(&1))
|> compute_t_value
end
defp convert_to_integer(results) do
results
|> Enum.map(&String.to_integer(&1))
end
defp compute_stats(results) do
    %Stats{
      count: Enum.count(results),
      mean: sample_mean(results),
      stdev: corrected_sample_stdev(results)
    }
end
defp sample_mean(samples) do
Enum.sum(samples) / Enum.count(samples)
end
defp corrected_sample_stdev(samples) do
mean = sample_mean(samples)
(Enum.map(samples, fn(x) -> (mean - x) * (mean - x) end) |> Enum.sum) / (Enum.count(samples) - 1)
|> :math.sqrt
end
defp compute_t_value(stats) do
a = compute_a(stats)
b = compute_b(stats)
t = compute_t(a, b, stats)
{stats, t}
end
defp compute_a([%Stats{count: n1}, %Stats{count: n2}]) do
(n1 + n2) / (n1 * n2)
end
defp compute_b([%Stats{count: n1, stdev: s1}, %Stats{count: n2, stdev: s2}]) do
( ((n1 - 1) * s1 * s1) + ((n2 - 1) * s2 * s2) ) / (n1 + n2 - 2)
end
defp compute_t(a, b, [%Stats{mean: u1}, %Stats{mean: u2}]) do
abs(u1 - u2) / :math.sqrt(a * b)
end
defp report_difference({[%Stats{mean: u1}, %Stats{mean: u2}] = stats, t}) do
IO.puts "#{u1} -> #{u2} (#{percent_increase(u1, u2)}) with p < #{t_dist(t, df(stats))}"
IO.puts "t = #{t}, #{df(stats)} degrees of freedom"
end
defp df([%Stats{count: n1}, %Stats{count: n2}]) do
n1 + n2 - 2
end
defp percent_increase(u1, u2) do
percent = 100 * (u2 - u1) / u1
percent_s = Float.to_string(percent, [decimals: 2])
cond do
percent < 0 -> "#{percent_s}%"
true -> "+#{percent_s}%"
end
end
defp t_dist(t, df) do
Bmark.Distribution.t(t, df)
end
end
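# Invocation sketch (file names are illustrative):
#
#     $ mix bmark.cmp results/run_a.results results/run_b.results
#
# The task prints both result columns, then the change in means with the
# t-statistic from the pooled two-sample t-test implemented in
# compute_a/compute_b/compute_t above.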
|
lib/mix/tasks/bmark_cmp.ex
| 0.725843
| 0.512693
|
bmark_cmp.ex
|
starcoder
|
defmodule Ueberauth.Strategy.W3ID do
use Ueberauth.Strategy
# Authorize Phase
@doc """
The initial redirect to the w3id authentication page.
"""
def handle_request!(conn) do
state = Map.get(conn.params, "state")
authorize_url = Ueberauth.Strategy.W3ID.OAuth.authorize_url!(state: state)
redirect!(conn, authorize_url)
end
# Callback Phase
@doc """
The callback phase exchanges the code for a valid token.
In addition to the standard token information, W3ID also returns an
`id_token` JWT which contains user data. This can be used instead of the
standard call to an OAuth2 access token introspection endpoint.
"""
def handle_callback!(%Plug.Conn{params: %{"code" => code}} = conn) do
response = Ueberauth.Strategy.W3ID.OAuth.get_token!(code: code)
parse_callback_response(conn, response)
end
def handle_callback!(conn) do
set_errors!(conn, [error("missing_code", "No code received")])
end
defp parse_callback_response(conn, %{token: data}) do
parse_callback_token(conn, data)
end
defp parse_callback_response(conn, _) do
    set_errors!(conn, [error("missing_token", "Server response did not include token")])
end
defp parse_callback_token(conn, %{other_params: %{"id_token" => id_token}} = token) do
user = parse_id_token(id_token)
conn
|> put_private(:w3id_token, token)
|> put_private(:w3id_user, user)
end
  defp parse_callback_token(conn, %{access_token: reason}) do
    set_errors!(conn, [error("missing_id_token_jwt", reason)])
end
defp parse_callback_token(conn, _) do
    set_errors!(conn, [error("missing_id_token_jwt", "Server response did not include id_token jwt")])
end
# Callback Phase - Struct Helpers
def credentials(conn) do
token = conn.private.w3id_token
scope_string = token.other_params["scope"] || ""
scopes = String.split(scope_string, ",")
%Ueberauth.Auth.Credentials{
expires: !!token.expires_at,
expires_at: token.expires_at,
other: token.other_params,
refresh_token: token.refresh_token,
scopes: scopes,
token: token.access_token,
token_type: token.token_type
}
end
def extra(conn) do
%Ueberauth.Auth.Extra{
raw_info: %{
id_token_data: conn.private.w3id_user,
state: conn.params["state"]
}
}
end
def info(conn) do
data = conn.private.w3id_user
%Ueberauth.Auth.Info{
email: data["emailAddress"],
first_name: data["firstName"],
last_name: data["lastName"],
name: URI.decode(data["cn"])
}
end
def uid(conn), do: conn.private.w3id_user["uid"]
# Cleanup Phase
@doc false
def handle_cleanup!(conn) do
conn
|> put_private(:w3id_token, nil)
|> put_private(:w3id_user, nil)
end
# Helpers
defp parse_id_token(id_token) do
id_token
|> String.split(".")
|> Enum.at(1)
|> Base.url_decode64!(padding: false)
|> Jason.decode!()
end
end
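# Sketch of what parse_id_token/1 does: decode the JWT's middle (payload)
# segment. The token below is illustrative; its payload is {"uid":"123"}.
#
#     "header.eyJ1aWQiOiIxMjMifQ.signature"
#     |> String.split(".")
#     |> Enum.at(1)
#     |> Base.url_decode64!(padding: false)
#     |> Jason.decode!()
#     #=> %{"uid" => "123"}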
|
apps/artemis_web/lib/ueberauth/strategy/w3id.ex
| 0.779364
| 0.432543
|
w3id.ex
|
starcoder
|
defmodule Cog.Config do
@type interval_type :: :ms | :sec | :min | :hour | :day | :week
@type typed_interval :: {integer, interval_type}
@doc """
Token lifetime configuration, converted into seconds. This is how
long after creation time a token is considered valid.
"""
def token_lifetime do
value = Application.fetch_env!(:cog, :token_lifetime)
convert(value, :sec)
end
@doc """
Token reap period configuration, converted into
milliseconds. Expired tokens will be reaped on this schedule.
"""
def token_reap_period do
value = Application.fetch_env!(:cog, :token_reap_period)
convert(value, :ms)
end
@doc """
Convert various tagged time durations into either seconds or
milliseconds, as desired.
Useful for allowing a readable configuration format that can still
easily be translated into the time units most frequently encountered
in Elixir / Erlang code.
  More general conversions (e.g., from days to minutes) and variable
  conversion units (a month can have 28, 29, 30, or 31 days, depending on
  the month and year) are explicitly not handled.
Units are specified as one of the following recognized atoms:
- :ms (millisecond)
- :sec (second)
- :min (minute)
- :hour
- :day
- :week
Examples:
iex> Cog.Config.convert({3, :day}, :sec)
259200
"""
def convert(from, into) do
from
|> convert_to_seconds
|> convert_from_seconds(into)
end
@doc "Returns the mythical Relay id used to execute embedded commands"
def embedded_relay(), do: "28a35f98-7ae1-4b8d-929a-3c716f6717c7"
defp convert_to_seconds({seconds, :sec}),
do: {seconds, :sec}
defp convert_to_seconds({minutes, :min}),
do: {minutes * 60, :sec}
defp convert_to_seconds({hours, :hour}),
    do: {hours * 60 * 60, :sec}
defp convert_to_seconds({days, :day}),
do: {days * 24 * 60 * 60, :sec}
defp convert_to_seconds({weeks, :week}),
do: {weeks * 7 * 24 * 60 * 60, :sec}
defp convert_from_seconds({seconds, :sec}, :ms),
do: seconds * 1000
defp convert_from_seconds({seconds, :sec}, :sec),
do: seconds
end
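# Further conversion examples in the spirit of the doctest above:
#
#     Cog.Config.convert({90, :sec}, :ms)   #=> 90000
#     Cog.Config.convert({15, :min}, :ms)   #=> 900000
#     Cog.Config.convert({2, :week}, :sec)  #=> 1209600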
|
lib/cog/config.ex
| 0.863118
| 0.540499
|
config.ex
|
starcoder
|
defmodule Phoenix.LiveView.Router do
@moduledoc """
Provides LiveView routing for Phoenix routers.
"""
@doc """
Defines a LiveView route.
## Layout
When a layout isn't explicitly set, a default layout is inferred similar to
controller actions. For example, the layout for the router `MyAppWeb.Router`
would be inferred as `MyAppWeb.LayoutView` and would use the `:app` template.
## Options
* `:session` - the optional list of keys to pull out of the Plug
connection session and into the LiveView session.
For example, the following would copy Plug's session current
user ID and the `remember_me` value into the LiveView session:
[:user_id, :remember_me]
* `:layout` - the optional tuple for specifying a layout to render the
LiveView. Defaults to `{LayoutView, :app}` where LayoutView is relative to
your application's namespace.
* `:container` - the optional tuple for the HTML tag and DOM attributes to
be used for the LiveView container. For example: `{:li, style: "color: blue;"}`
* `:as` - optionally configures the named helper. Defaults to `:live`.
## Examples
defmodule MyApp.Router
use Phoenix.Router
import Phoenix.LiveView.Router
scope "/", MyApp do
pipe_through [:browser]
live "/thermostat", ThermostatLive
live "/clock", ClockLive, session: [:user_id]
live "/dashboard", DashboardLive, layout: {MyApp.AlternativeView, "app.html"}
end
end
iex> MyApp.Router.Helpers.live_path(MyApp.Endpoint, MyApp.ThermostatLive)
"/thermostat"
"""
defmacro live(path, live_view, opts \\ []) do
quote bind_quoted: binding() do
Phoenix.Router.get(
path,
Phoenix.LiveView.Plug,
Phoenix.Router.scoped_alias(__MODULE__, live_view),
private: %{
phoenix_live_view:
Keyword.put_new(
opts,
:layout,
Phoenix.LiveView.Router.__layout_from_router_module__(__MODULE__)
)
},
as: opts[:as] || :live,
alias: false
)
end
end
@doc false
def __layout_from_router_module__(module) do
view =
module
|> Atom.to_string()
|> String.split(".")
|> Enum.drop(-1)
|> Kernel.++(["LayoutView"])
|> Module.concat()
{view, :app}
end
end
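
# The layout inference above, illustrated (module names are hypothetical):
#
#     iex> Phoenix.LiveView.Router.__layout_from_router_module__(MyAppWeb.Router)
#     {MyAppWeb.LayoutView, :app}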
|
lib/phoenix_live_view/router.ex
| 0.926312
| 0.465327
|
router.ex
|
starcoder
|
defprotocol Phoenix.Param do
@moduledoc """
A protocol that converts data structures into URL parameters.
This protocol is used by URL helpers and other parts of the
Phoenix stack. For example, when you write:
user_path(conn, :edit, @user)
Phoenix knows how to extract the `:id` from `@user` thanks
to this protocol.
By default, Phoenix implements this protocol for integers, binaries, atoms,
and structs. For structs, a key `:id` is assumed, but you may provide a
specific implementation.
Nil values cannot be converted to param.
## Custom parameters
In order to customize the parameter for any struct,
one can simply implement this protocol.
However, for convenience, this protocol can also be
derivable. For example:
defmodule User do
@derive Phoenix.Param
defstruct [:id, :username]
end
By default, the derived implementation will also use
the `:id` key. In case the user does not contain an
`:id` key, the key can be specified with an option:
defmodule User do
@derive {Phoenix.Param, key: :username}
defstruct [:username]
end
will automatically use `:username` in URLs.
When using Ecto, you must call `@derive` before
your `schema` call:
@derive {Phoenix.Param, key: :username}
schema "users" do
"""
@fallback_to_any true
@spec to_param(term) :: String.t
def to_param(term)
end
defimpl Phoenix.Param, for: Integer do
def to_param(int), do: Integer.to_string(int)
end
defimpl Phoenix.Param, for: BitString do
def to_param(bin) when is_binary(bin), do: bin
end
defimpl Phoenix.Param, for: Atom do
def to_param(nil) do
raise ArgumentError, "cannot convert nil to param"
end
def to_param(atom) do
Atom.to_string(atom)
end
end
defimpl Phoenix.Param, for: Map do
def to_param(map) do
raise ArgumentError,
"maps cannot be converted to_param. A struct was expected, got: #{inspect map}"
end
end
defimpl Phoenix.Param, for: Any do
defmacro __deriving__(module, struct, options) do
key = Keyword.get(options, :key, :id)
unless Map.has_key?(struct, key) do
raise ArgumentError, "cannot derive Phoenix.Param for struct #{inspect module} " <>
"because it does not have key #{inspect key}. Please pass " <>
"the :key option when deriving"
end
quote do
defimpl Phoenix.Param, for: unquote(module) do
def to_param(%{unquote(key) => nil}) do
raise ArgumentError, "cannot convert #{inspect unquote(module)} to param, " <>
"key #{inspect unquote(key)} contains a nil value"
end
def to_param(%{unquote(key) => key}) when is_integer(key), do: Integer.to_string(key)
def to_param(%{unquote(key) => key}) when is_binary(key), do: key
def to_param(%{unquote(key) => key}), do: Phoenix.Param.to_param(key)
end
end
end
def to_param(%{id: nil}) do
raise ArgumentError, "cannot convert struct to param, key :id contains a nil value"
end
def to_param(%{id: id}) when is_integer(id), do: Integer.to_string(id)
def to_param(%{id: id}) when is_binary(id), do: id
def to_param(%{id: id}), do: Phoenix.Param.to_param(id)
def to_param(map) when is_map(map) do
raise ArgumentError,
"structs expect an :id key when converting to_param or a custom implementation " <>
"of the Phoenix.Param protocol (read Phoenix.Param docs for more information), " <>
"got: #{inspect map}"
end
def to_param(data) do
raise Protocol.UndefinedError, protocol: @protocol, value: data
end
end
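
# Usage sketch (hypothetical struct):
#
#     defmodule Post do
#       @derive {Phoenix.Param, key: :slug}
#       defstruct [:slug]
#     end
#
#     Phoenix.Param.to_param(%Post{slug: "hello-world"})
#     #=> "hello-world"
#     Phoenix.Param.to_param(42)
#     #=> "42"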
|
lib/phoenix/param.ex
| 0.910689
| 0.627923
|
param.ex
|
starcoder
|
defmodule Twirp do
@moduledoc """
Twirp provides an elixir implementation of the [twirp rpc framework](https://github.com/twitchtv/twirp)
developed by Twitch. The protocol defines semantics for routing and
serialization of RPCs based on protobufs.
## Example
The canonical Twirp example is a Haberdasher service. Here's the protobuf
description for the service.
```protobuf
syntax = "proto3";
package example;
// Haberdasher service makes hats for clients.
service Haberdasher {
// MakeHat produces a hat of mysterious, randomly-selected color!
rpc MakeHat(Size) returns (Hat);
}
// Size of a Hat, in inches.
message Size {
int32 inches = 1; // must be > 0
}
// A Hat is a piece of headwear made by a Haberdasher.
message Hat {
int32 inches = 1;
string color = 2; // anything but "invisible"
string name = 3; // i.e. "bowler"
}
```
We'll assume for now that this proto file lives in `priv/protos/service.proto`
### Code generation
We can now use `protoc` to generate the files we need. You can run this command
from the root directory of your project.
$ protoc --proto_path=./priv/protos --elixir_out=./lib/example --twirp_elixir_out=./lib/example ./priv/protos/service.proto
After running this command there should be 2 files located in `lib/example`.
The message definitions:
```elixir
defmodule Example.Size do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
inches: integer
}
defstruct [:inches]
field :inches, 1, type: :int32
end
defmodule Example.Hat do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
inches: integer,
color: String.t(),
name: String.t()
}
defstruct [:inches, :color, :name]
field :inches, 1, type: :int32
field :color, 2, type: :string
field :name, 3, type: :string
end
```
The service and client definition:
```elixir
defmodule Example.HaberdasherService do
@moduledoc false
use Twirp.Service
package "example"
service "Haberdasher"
rpc :MakeHat, Example.Size, Example.Hat, :make_hat
end
defmodule Example.HaberdasherClient do
@moduledoc false
use Twirp.Client, service: Example.HaberdasherService
end
```
### Implementing the server
Now that we've generated the service definition we can implement a "handler"
module that will implement each "method".
```elixir
defmodule Example.HaberdasherHandler do
@colors ~w|white black brown red blue|
@names ["bowler", "baseball cap", "top hat", "derby"]
def make_hat(_ctx, size) do
      if size.inches <= 0 do
Twirp.Error.invalid_argument("I can't make a hat that small!")
else
        %Example.Hat{
inches: size.inches,
color: Enum.random(@colors),
name: Enum.random(@names)
}
end
end
end
```
Separating the service and handler like this may seem a little odd but there are
good reasons to do this. The most important is that it allows the service to be
autogenerated again in the future. The second reason is that it allows us to
easily mock service implementations for testing.
### Running the server
To serve traffic Twirp provides a Plug. We use this plug to attach our service
definition with our handler.
```elixir
defmodule Example.Router do
use Plug.Router
plug Twirp.Plug,
    service: Example.HaberdasherService,
    handler: Example.HaberdasherHandler
end
```
```elixir
defmodule Example.Application do
use Application
def start(_type, _args) do
children = [
Plug.Cowboy.child_spec(scheme: :http, plug: Example.Router, options: [port: 4040]),
]
opts = [strategy: :one_for_one, name: Example.Supervisor]
Supervisor.start_link(children, opts)
end
end
```
If you start your application your plug will now be available on port 4040.
### Using the client
Client definitions are generated alongside the service definition. This allows
you to generate clients for your services in other applications. You can make
RPC calls like so:
```elixir
defmodule AnotherService.GetHats do
alias Example.HaberdasherClient, as: Client
  alias Example.{Size, Hat}
  require Logger
def make_a_hat(inches) do
case Client.make_hat(Size.new(inches: inches)) do
{:ok, %Hat{}=hat} ->
hat
{:error, %Twirp.Error{msg: msg}} ->
Logger.error(msg)
end
end
end
```
"""
end
|
lib/twirp.ex
| 0.909365
| 0.886813
|
twirp.ex
|
starcoder
|
defmodule LQueue do
@moduledoc """
Functions that work on the double-ended queue with limited length.
`[1, 2, 3]` queue has the front at the element `1` and the rear at `3`.
By pushing a new element to the queue (`4`), and assuming that the max
length of the queue is 3, we will get `[2, 3, 4]` (the `push/2` function).
Also, it is possible to push to the front of the queue. In this case, by
pushing `4` to the front (`push_front/2`) we will get `[4, 1, 2]`.
Due to efficiency reasons, the limited length queue is implemented as two
lists - the front and the rear list. The rear end is reversed and becomes
the new front when the front is empty.
"""
defstruct count: 0, max_count: 1, r: [], f: []
@typedoc """
The current number of elements in the queue.
The number is an integer between `0` and `max_count`
"""
@type count :: non_neg_integer
@typedoc """
The max number of elements in the queue.
"""
@type max_count :: pos_integer
@typedoc """
The queue can hold any type of elements.
"""
@type element :: term
@typedoc """
The structure representing the limited length queue.
It includes the current number of elements, max number of elements, rear
list and front list. Note that this structure should be considered as
opaque by other modules.
"""
@type t :: %LQueue{
count: count,
max_count: max_count,
r: [element],
f: [element]
}
@doc """
Creates a new limited length queue.
## Examples
iex> LQueue.new(1)
%LQueue{count: 0, max_count: 1, r: [], f: []}
iex> LQueue.new(5)
%LQueue{count: 0, max_count: 5, r: [], f: []}
"""
@spec new(max_count) :: t
def new(max_count)
def new(max_count) when max_count > 0, do: %LQueue{max_count: max_count}
@doc """
Checks if the queue is full.
## Examples
iex> [1, 2] |> LQueue.from_list(3) |> LQueue.full?()
false
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.full?()
true
"""
@spec full?(t) :: boolean
def full?(lqueue)
def full?(%LQueue{count: max_count, max_count: max_count}), do: true
def full?(_lq), do: false
@doc """
Removes all the elements from the queue.
## Examples
iex> [] |> LQueue.from_list(5) |> LQueue.clear() |> Enum.to_list == []
true
iex> [1, 2, 3] |> LQueue.from_list(5) |> LQueue.clear() |>
...> Enum.to_list == []
true
"""
@spec clear(t) :: t
def clear(lqueue)
def clear(%LQueue{max_count: max_count}), do: %LQueue{max_count: max_count}
@doc """
Returns the max number of elements the queue can hold.
## Examples
iex> LQueue.new(10) |> LQueue.max_count()
10
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.max_count()
2
"""
@spec max_count(t) :: max_count
def max_count(lqueue)
def max_count(%LQueue{max_count: max_count}), do: max_count
@doc """
Pushes a new element to the rear of the queue.
When pushing to a full queue, the front element will be discarded.
## Examples
iex> [1, 2] |> LQueue.from_list(3) |> LQueue.push(10) |>
...> Enum.to_list()
[1, 2, 10]
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.push(10) |> Enum.to_list()
[2, 10]
"""
@spec push(t, element) :: t
def push(lqueue, element)
def push(%LQueue{count: count, max_count: max_count, r: r} = lq, elem)
when count < max_count do
%{lq | count: count + 1, r: [elem | r]}
end
def push(%LQueue{count: max_count, max_count: max_count, r: r, f: [_fh | ft]} = lq, elem) do
%{lq | r: [elem | r], f: ft}
end
def push(%LQueue{count: max_count, max_count: max_count, r: r, f: []} = lq, elem) do
[_fh | ft] = Enum.reverse(r)
%{lq | r: [elem], f: ft}
end
@doc """
Pushes a new element to the front of the queue.
When pushing to a full queue, the rear element will be discarded.
## Examples
iex> [1, 2] |> LQueue.from_list(3) |> LQueue.push_front(5) |>
...> Enum.to_list()
[5, 1, 2]
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.push_front(5) |>
...> Enum.to_list()
[5, 1]
"""
@spec push_front(t, element) :: t
def push_front(lqueue, element)
def push_front(%LQueue{count: count, max_count: max_count, f: f} = lq, elem)
when count < max_count do
%{lq | count: count + 1, f: [elem | f]}
end
def push_front(%LQueue{count: max_count, max_count: max_count, r: [_rh | rt], f: f} = lq, elem) do
%{lq | r: rt, f: [elem | f]}
end
def push_front(%LQueue{count: max_count, max_count: max_count, r: [], f: f} = lq, elem) do
[_rh | rt] = Enum.reverse(f)
%{lq | r: rt, f: [elem]}
end
@doc """
Pops an element from the front of the queue.
If the queue is empty, `nil` is returned.
## Examples
iex> {nil, lqueue} = [] |> LQueue.from_list(5) |> LQueue.pop()
{nil, %LQueue{count: 0, max_count: 5, r: [], f: []}}
iex> lqueue |> Enum.to_list() == []
true
iex> {1, lqueue} = [1, 2] |> LQueue.from_list(2) |> LQueue.pop()
{1, %LQueue{count: 1, max_count: 2, r: [], f: [2]}}
iex> lqueue |> Enum.to_list() == [2]
true
"""
@spec pop(t) :: {element | nil, t}
def pop(lqueue)
def pop(%LQueue{count: 0, r: [], f: []} = lq) do
{nil, lq}
end
def pop(%LQueue{count: count, f: [fh | ft]} = lq) do
{fh, %{lq | count: count - 1, f: ft}}
end
def pop(%LQueue{count: count, r: r, f: []} = lq) do
[fh | ft] = Enum.reverse(r)
{fh, %{lq | count: count - 1, r: [], f: ft}}
end
@doc """
Pops an element from the rear of the queue.
If the queue is empty, `nil` is returned.
## Examples
iex> {nil, lqueue} = [] |> LQueue.from_list(5) |> LQueue.pop_rear()
{nil, %LQueue{count: 0, max_count: 5, r: [], f: []}}
iex> lqueue |> Enum.to_list() == []
true
iex> {2, lqueue} = [1, 2] |> LQueue.from_list(2) |> LQueue.pop_rear()
{2, %LQueue{count: 1, max_count: 2, r: [1], f: []}}
iex> lqueue |> Enum.to_list() == [1]
true
"""
@spec pop_rear(t) :: {element | nil, t}
def pop_rear(lqueue)
def pop_rear(%LQueue{count: 0, r: [], f: []} = lq) do
{nil, lq}
end
def pop_rear(%LQueue{count: count, r: [rh | rt]} = lq) do
{rh, %{lq | count: count - 1, r: rt}}
end
def pop_rear(%LQueue{count: count, r: [], f: f} = lq) do
[rh | rt] = Enum.reverse(f)
{rh, %{lq | count: count - 1, r: rt, f: []}}
end
@doc """
Gets the front element of the queue.
It does not change the queue. When the queue is empty, `nil` is returned.
## Examples
iex> [] |> LQueue.from_list(5) |> LQueue.get()
nil
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.get()
1
"""
@spec get(t) :: element | nil
def get(lqueue)
def get(%LQueue{count: 0, r: [], f: []}), do: nil
def get(%LQueue{r: r, f: f}), do: get(r, f)
@doc """
Gets the rear element of the queue.
It does not change the queue. When the queue is empty, `nil` is returned.
## Examples
iex> [] |> LQueue.from_list(5) |> LQueue.get_rear()
nil
iex> [1, 2] |> LQueue.from_list(2) |> LQueue.get_rear()
2
"""
@spec get_rear(t) :: element | nil
def get_rear(lqueue)
def get_rear(%LQueue{count: 0, r: [], f: []}), do: nil
def get_rear(%LQueue{r: r, f: f}), do: get_rear(r, f)
@doc """
Drops the front element of the queue.
When the queue is empty, it is not changed.
## Examples
iex> [] |> LQueue.from_list(5) |> LQueue.drop() |> Enum.to_list()
[]
iex> [1, 2, 3] |> LQueue.from_list(5) |> LQueue.drop() |>
...> Enum.to_list()
[2, 3]
"""
@spec drop(t) :: t
def drop(lqueue)
def drop(%LQueue{count: 0, r: [], f: []} = lq) do
lq
end
def drop(%LQueue{count: count, f: [_fh | ft]} = lq) do
%{lq | count: count - 1, f: ft}
end
def drop(%LQueue{count: count, r: r, f: []} = lq) do
[_fh | ft] = Enum.reverse(r)
%{lq | count: count - 1, r: [], f: ft}
end
@doc """
Drops the rear element of the queue.
When the queue is empty, it is not changed.
## Examples
      iex> [] |> LQueue.from_list(5) |> LQueue.drop_rear() |> Enum.to_list()
[]
iex> [1, 2, 3] |> LQueue.from_list(5) |> LQueue.drop_rear() |>
...> Enum.to_list()
[1, 2]
"""
@spec drop_rear(t) :: t
def drop_rear(lqueue)
def drop_rear(%LQueue{count: 0, r: [], f: []} = lq) do
lq
end
def drop_rear(%LQueue{count: count, r: [_rf | rt]} = lq) do
%{lq | count: count - 1, r: rt}
end
def drop_rear(%LQueue{count: count, r: [], f: f} = lq) do
[_rh | rt] = Enum.reverse(f)
%{lq | count: count - 1, r: rt, f: []}
end
@doc """
Converts the list to a queue.
The elements are pushed into the queue starting with the list head. If the
list has more elements than the `max_count` of the queue, those that can't
fit in the queue (at the front) are discarded.
## Examples
iex> [] |> LQueue.from_list(3) |> Enum.to_list()
[]
iex> [1, 2, 3] |> LQueue.from_list(3) |> Enum.to_list()
[1, 2, 3]
iex> [1, 2, 3, 4, 5] |> LQueue.from_list(3) |> Enum.to_list()
[3, 4, 5]
"""
@spec from_list([element], max_count) :: t
def from_list(list, max_count)
def from_list([], max_count) when max_count > 0, do: new(max_count)
def from_list(list, max_count) when is_list(list) and max_count > 0 do
count = list |> length |> min(max_count)
r = list |> Enum.reverse() |> Enum.take(max_count)
%LQueue{count: count, max_count: max_count, r: r, f: []}
end
defp get(_, [fh | _]), do: fh
defp get([rh], []), do: rh
defp get([_ | rt], []), do: List.last(rt)
defp get_rear([rh | _], _), do: rh
defp get_rear([], [fh]), do: fh
defp get_rear([], [_ | ft]), do: List.last(ft)
end
defimpl Enumerable, for: LQueue do
@spec count(LQueue.t()) :: {:ok, non_neg_integer}
def count(%LQueue{count: count}), do: {:ok, count}
@spec member?(LQueue.t(), term) :: {:ok, boolean}
def member?(%LQueue{count: count, r: r, f: f}, value) when count > 0 do
{:ok, Enum.member?(f, value) or Enum.member?(r, value)}
end
def member?(%LQueue{count: 0, r: [], f: []}, _value) do
{:ok, false}
end
@spec reduce(LQueue.t(), Enumerable.acc(), Enumerable.reducer()) :: Enumerable.result()
def reduce(%LQueue{}, {:halt, acc}, _fun) do
{:halted, acc}
end
def reduce(%LQueue{} = lq, {:suspend, acc}, fun) do
{:suspended, acc, &reduce(lq, &1, fun)}
end
def reduce(%LQueue{count: count, f: [fh | ft]} = lq, {:cont, acc}, fun) do
reduce(%{lq | count: count - 1, f: ft}, fun.(fh, acc), fun)
end
def reduce(%LQueue{count: count, r: r, f: []} = lq, {:cont, acc}, fun)
when r != [] do
[fh | ft] = Enum.reverse(r)
reduce(%{lq | count: count - 1, r: [], f: ft}, fun.(fh, acc), fun)
end
def reduce(%LQueue{r: [], f: []}, {:cont, acc}, _fun) do
{:done, acc}
end
@type slicing_fun :: (start :: non_neg_integer, length :: pos_integer -> [term()])
@spec slice(LQueue.t()) ::
{:ok, size :: non_neg_integer(), slicing_fun()}
| {:error, module()}
def slice(%LQueue{count: count} = lq) do
{:ok, count, &Enumerable.List.slice(Enum.to_list(lq), &1, &2)}
end
end
defimpl Collectable, for: LQueue do
@spec into(LQueue.t()) :: {term, (term, Collectable.command() -> LQueue.t() | term)}
def into(%LQueue{} = lq), do: {lq, &into(&1, &2)}
defp into(%LQueue{} = lq, {:cont, elem}), do: LQueue.push(lq, elem)
defp into(%LQueue{} = lq, :done), do: lq
defp into(_lq, :halt), do: :ok
end
defimpl Inspect, for: LQueue do
@spec inspect(LQueue.t(), Keyword.t()) :: String.t()
def inspect(%LQueue{} = lq, _opts) do
"#LQueue<#{lq |> Enum.to_list() |> inspect}>"
end
end
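
# Usage sketch showing the Collectable and Enumerable implementations:
#
#     iex> lq = Enum.into([1, 2, 3, 4], LQueue.new(3))
#     iex> Enum.to_list(lq)
#     [2, 3, 4]
#     iex> LQueue.get(lq)
#     2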
|
lib/lqueue.ex
| 0.896883
| 0.675015
|
lqueue.ex
|
starcoder
|
defmodule CCSP.Chapter5.SendMoreMoney do
alias __MODULE__, as: T
@moduledoc """
Corresponds to CCSP in Python, Chapter 5, titled "Genetic Algorithms"
"""
@type t :: %T{
letters: list(String.t())
}
defstruct [
:letters
]
@spec new(list(String.t())) :: t
def new(letters) do
%T{letters: letters}
end
@spec random_instance() :: t
def random_instance() do
["S", "E", "N", "D", "M", "O", "R", "Y", " ", " "]
|> Enum.shuffle()
|> new
end
@spec fitness_difference(t) ::
{non_neg_integer, non_neg_integer, non_neg_integer, non_neg_integer}
def fitness_difference(c) do
s = Enum.find_index(c.letters, &(&1 == "S"))
e = Enum.find_index(c.letters, &(&1 == "E"))
n = Enum.find_index(c.letters, &(&1 == "N"))
d = Enum.find_index(c.letters, &(&1 == "D"))
m = Enum.find_index(c.letters, &(&1 == "M"))
o = Enum.find_index(c.letters, &(&1 == "O"))
r = Enum.find_index(c.letters, &(&1 == "R"))
y = Enum.find_index(c.letters, &(&1 == "Y"))
send = s * 1_000 + e * 100 + n * 10 + d
more = m * 1_000 + o * 100 + r * 10 + e
money = m * 10_000 + o * 1_000 + n * 100 + e * 10 + y
difference = abs(money - (send + more))
{send, more, money, difference}
end
end
defimpl CCSP.Chapter5.Chromosome, for: CCSP.Chapter5.SendMoreMoney do
alias CCSP.Chapter5.SendMoreMoney
  @type t :: SendMoreMoney.t()
@spec fitness(t) :: float
def fitness(c) do
{_, _, _, difference} = SendMoreMoney.fitness_difference(c)
1 / (difference + 1)
end
@spec crossover(t, t) :: {t, t}
def crossover(c1, c2) do
[idx1, idx2] = Enum.take_random(0..(length(c1.letters) - 1), 2)
l1 = Enum.at(c1.letters, idx1)
l2 = Enum.at(c2.letters, idx2)
new_c1_letters =
c1.letters
|> List.replace_at(
Enum.find_index(c1.letters, &(&1 == l2)),
Enum.at(c1.letters, idx2)
)
|> List.replace_at(idx2, l2)
new_c2_letters =
c2.letters
|> List.replace_at(
Enum.find_index(c2.letters, &(&1 == l1)),
Enum.at(c2.letters, idx1)
)
|> List.replace_at(idx1, l1)
{
%SendMoreMoney{c1 | :letters => new_c1_letters},
%SendMoreMoney{c2 | :letters => new_c2_letters}
}
end
@spec mutate(t) :: t
def mutate(c) do
[idx1, idx2] = Enum.take_random(0..(length(c.letters) - 1), 2)
c_letters =
c.letters
|> List.replace_at(idx1, Enum.at(c.letters, idx2))
|> List.replace_at(idx2, Enum.at(c.letters, idx1))
%SendMoreMoney{c | :letters => c_letters}
end
end
defimpl Inspect, for: CCSP.Chapter5.SendMoreMoney do
alias CCSP.Chapter5.SendMoreMoney
def inspect(c, _opts) do
{send, more, money, difference} = SendMoreMoney.fitness_difference(c)
"#{send} + #{more} = #{money} Difference: #{difference}"
end
end
defimpl String.Chars, for: CCSP.Chapter5.SendMoreMoney do
alias CCSP.Chapter5.SendMoreMoney
def to_string(c) do
{send, more, money, difference} = SendMoreMoney.fitness_difference(c)
"#{send} + #{more} = #{money} Difference: #{difference}"
end
end
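
# Quick demonstration of the protocol in action:
#
#     iex> c = CCSP.Chapter5.SendMoreMoney.random_instance()
#     iex> {_send, _more, _money, diff} = CCSP.Chapter5.SendMoreMoney.fitness_difference(c)
#     iex> CCSP.Chapter5.Chromosome.fitness(c) == 1 / (diff + 1)
#     true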
|
lib/ccsp/chapter5/send_more_money.ex
| 0.680772
| 0.452838
|
send_more_money.ex
|
starcoder
|
defmodule Elasticsearch.Index do
@moduledoc """
Functions for manipulating Elasticsearch indexes.
"""
alias Elasticsearch.{
Cluster.Config,
Index.Bulk
}
@doc """
Creates an index using a zero-downtime hot-swap technique.
1. Build an index for the given `alias`, with a timestamp: `alias-12323123`
2. Bulk upload data to that index using `store` and `sources`.
3. Alias the `alias` to `alias-12323123`.
4. Remove old indexes beginning with `alias`.
5. Refresh `alias-12323123`.
This allows an old index to be served while a new index for `alias` is built.
## Example
iex> Index.hot_swap(Cluster, "posts")
:ok
"""
@spec hot_swap(Cluster.t(), alias :: String.t() | atom) ::
:ok | {:error, Elasticsearch.Exception.t()}
def hot_swap(cluster, alias) do
alias = alias_to_atom(alias)
name = build_name(alias)
config = Config.get(cluster)
%{settings: settings_file} = index_config = config[:indexes][alias]
with :ok <- create_from_file(config, name, settings_file),
:ok <- Bulk.upload(config, name, index_config),
:ok <- __MODULE__.alias(config, name, alias),
:ok <- clean_starting_with(config, alias, 2),
:ok <- refresh(config, name) do
:ok
end
end
defp alias_to_atom(atom) when is_atom(atom), do: atom
defp alias_to_atom(str) when is_binary(str), do: String.to_existing_atom(str)
@doc """
Returns all indexes which start with a given string.
## Example
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.starting_with(Cluster, "posts")
{:ok, ["posts-1"]}
"""
@spec starting_with(Cluster.t(), String.t() | atom) ::
{:ok, [String.t()]}
| {:error, Elasticsearch.Exception.t()}
def starting_with(cluster, prefix) do
with {:ok, indexes} <- Elasticsearch.get(cluster, "/_cat/indices?format=json") do
prefix = to_string(prefix)
indexes =
indexes
|> Enum.map(& &1["index"])
|> Enum.filter(&String.starts_with?(&1, prefix))
|> Enum.sort()
{:ok, indexes}
end
end
@doc """
Assigns an alias to a given index, simultaneously removing it from prior
indexes, with zero downtime.
## Example
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.alias(Cluster, "posts-1", "posts")
:ok
"""
@spec alias(Cluster.t(), String.t(), String.t()) ::
:ok
| {:error, Elasticsearch.Exception.t()}
def alias(cluster, name, alias) do
with {:ok, indexes} <- starting_with(cluster, alias),
indexes = Enum.reject(indexes, &(&1 == name)) do
remove_actions =
Enum.map(indexes, fn index ->
%{"remove" => %{"index" => index, "alias" => alias}}
end)
actions = %{
"actions" => remove_actions ++ [%{"add" => %{"index" => name, "alias" => alias}}]
}
with {:ok, _response} <- Elasticsearch.post(cluster, "/_aliases", actions), do: :ok
end
end
@doc """
Gets the most recent index name with the given prefix.
## Examples
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.create_from_file(Cluster, "posts-2", "test/support/settings/posts.json")
...> Index.latest_starting_with(Cluster, "posts")
{:ok, "posts-2"}
If there are no indexes matching that prefix:
iex> Index.latest_starting_with(Cluster, "nonexistent")
{:error, :not_found}
"""
@spec latest_starting_with(Cluster.t(), String.t() | atom) ::
{:ok, String.t()}
| {:error, :not_found}
| {:error, Elasticsearch.Exception.t()}
def latest_starting_with(cluster, prefix) do
with {:ok, indexes} <- starting_with(cluster, prefix) do
index =
indexes
|> Enum.sort()
|> List.last()
case index do
nil -> {:error, :not_found}
index -> {:ok, index}
end
end
end
@doc """
Refreshes a given index with recently added data.
## Example
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.refresh(Cluster, "posts-1")
:ok
"""
@spec refresh(Cluster.t(), String.t()) :: :ok | {:error, Elasticsearch.Exception.t()}
def refresh(cluster, name) do
with {:ok, _} <- Elasticsearch.post(cluster, "/#{name}/_forcemerge?max_num_segments=5", %{}),
{:ok, _} <- Elasticsearch.post(cluster, "/#{name}/_refresh", %{}),
do: :ok
end
@doc """
Same as `refresh/1`, but raises an error on failure.
## Examples
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.refresh!(Cluster, "posts-1")
:ok
iex> Index.refresh!(Cluster, "nonexistent")
** (Elasticsearch.Exception) (index_not_found_exception) no such index
"""
@spec refresh!(Cluster.t(), String.t()) :: :ok
def refresh!(cluster, name) do
case refresh(cluster, name) do
:ok ->
:ok
{:error, error} ->
raise error
end
end
@doc """
Removes indexes starting with the given prefix, keeping a certain number.
Can be used to garbage collect old indexes that are no longer used.
## Examples
If there is only one index, and `num_to_keep` is >= 1, the index is not deleted.
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.clean_starting_with(Cluster, "posts", 1)
...> Index.starting_with(Cluster, "posts")
{:ok, ["posts-1"]}
If `num_to_keep` is less than the number of indexes, the older indexes are
deleted.
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
...> Index.clean_starting_with(Cluster, "posts", 0)
...> Index.starting_with(Cluster, "posts")
{:ok, []}
"""
@spec clean_starting_with(Cluster.t(), String.t(), integer) ::
:ok
| {:error, [Elasticsearch.Exception.t()]}
def clean_starting_with(cluster, prefix, num_to_keep) when is_integer(num_to_keep) do
with {:ok, indexes} <- starting_with(cluster, prefix) do
total = length(indexes)
num_to_delete = total - num_to_keep
num_to_delete = if num_to_delete >= 0, do: num_to_delete, else: 0
errors =
indexes
|> Enum.sort()
|> Enum.take(num_to_delete)
|> Enum.map(&Elasticsearch.delete(cluster, "/#{&1}"))
|> Enum.filter(&(elem(&1, 0) == :error))
|> Enum.map(&elem(&1, 1))
if length(errors) > 0 do
{:error, errors}
else
:ok
end
end
end
@doc """
Creates an index with the given name from either a JSON string or Elixir map.
## Examples
iex> Index.create(Cluster, "posts-1", "{}")
:ok
"""
@spec create(Cluster.t(), String.t(), map | String.t()) ::
:ok
| {:error, Elasticsearch.Exception.t()}
def create(cluster, name, settings) do
with {:ok, _response} <- Elasticsearch.put(cluster, "/#{name}", settings), do: :ok
end
@doc """
Creates an index with the given name, with settings loaded from a JSON file.
## Example
iex> Index.create_from_file(Cluster, "posts-1", "test/support/settings/posts.json")
:ok
iex> Index.create_from_file(Cluster, "posts-1", "nonexistent.json")
{:error, :enoent}
The `posts.json` file contains regular index settings as described in the
Elasticsearch [documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html#_example_mapping):
{
"mappings": {
"post": {
"properties": {
"title": {
"type": "string"
},
"author": {
"type": "string"
}
}
}
}
}
"""
@spec create_from_file(Cluster.t(), String.t(), Path.t()) ::
:ok
| {:error, File.posix()}
| {:error, Elasticsearch.Exception.t()}
def create_from_file(cluster, name, file) do
with {:ok, settings} <- File.read(file) do
create(cluster, name, settings)
end
end
@doc """
Generates a name for an index that will be aliased to a given `alias`.
Similar to migrations, the name will contain a timestamp.
## Example
Index.build_name("main")
# => "main-1509581256"
"""
@spec build_name(String.t() | atom) :: String.t()
def build_name(alias) do
"#{alias}-#{system_timestamp()}"
end
defp system_timestamp do
DateTime.to_unix(DateTime.utc_now())
end
end
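
# End-to-end sketch (assumes a configured `MyApp.Cluster` with a :posts
# index in its configuration):
#
#     :ok = Elasticsearch.Index.hot_swap(MyApp.Cluster, :posts)
#     {:ok, name} = Elasticsearch.Index.latest_starting_with(MyApp.Cluster, "posts")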
|
lib/elasticsearch/indexing/index.ex
| 0.901884
| 0.593963
|
index.ex
|
starcoder
|
defmodule Xandra.ConnectionError do
@moduledoc """
An exception struct that represents an error in the connection to the
Cassandra server.
For more information on when this error is returned or raised, see the
documentation for the `Xandra` module.
The `:action` field represents the action that was being performed when the
connection error occurred. The `:reason` field represents the reason of the
connection error: for network errors, this is usually a POSIX reason (like
`:econnrefused`). The following Xandra-specific reasons are supported:
* `{:unsupported_compression, algorithm}` - this happens when a
`:compressor` module has been specified in `Xandra.start_link/1`, but
negotiating the connection algorithm fails because such compressor module
uses an algorithm that the Cassandra server does not support.
* `{:cluster, :not_connected}` - this happens when a `Xandra.Cluster`-based
connection is not connected to any node (for example, because all the
specified nodes are currently down). See the documentation for
`Xandra.Cluster` for more information.
Since this struct is an exception, it is possible to raise it with
`Kernel.raise/1`. If the intent is to format connection errors as strings (for
example, for logging purposes), it is possible to use `Exception.message/1` to
get a formatted version of the error.
"""
defexception [:action, :reason]
@type t :: %__MODULE__{
action: String.t(),
reason: term
}
@spec new(String.t(), term) :: t
def new(action, reason) when is_binary(action) do
%__MODULE__{action: action, reason: reason}
end
def message(%__MODULE__{action: action, reason: reason}) do
"action \"#{action}\" failed with reason: #{format_reason(reason)}"
end
defp format_reason({:unsupported_compression, algorithm}) do
"unsupported compression algorithm #{inspect(algorithm)}"
end
defp format_reason(:closed) do
"socket is closed"
end
defp format_reason({:cluster, :not_connected}) do
"not connected to any of the nodes"
end
defp format_reason(reason) do
case :inet.format_error(reason) do
'unknown POSIX error' -> inspect(reason)
formatted -> List.to_string(formatted)
end
end
end
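
# Formatting a connection error for logging:
#
#     iex> error = Xandra.ConnectionError.new("connect", :econnrefused)
#     iex> Exception.message(error)
#     "action \"connect\" failed with reason: connection refused"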
|
lib/xandra/connection_error.ex
| 0.919227
| 0.584864
|
connection_error.ex
|
starcoder
|
defmodule Credo.Check.Refactor.WithClauses do
use Credo.Check,
base_priority: :high,
explanations: [
check: ~S"""
`with` statements are useful when you need to chain a sequence
of pattern matches, stopping at the first one that fails.
But sometimes, we go a little overboard with them (pun intended).
If the first or last clause in a `with` statement is not a `<-` clause,
it still compiles and works, but is not really utilizing what the `with`
macro provides and can be misleading.
with ref = make_ref(),
{:ok, user} <- User.create(ref),
:ok <- send_email(user),
Logger.debug("Created user: #{inspect(user)}") do
user
end
Here, both the first and last clause are actually not matching anything.
If we move them outside of the `with` (the first ones) or inside the body
      of the `with` (the last ones), the code becomes more focused and readable.
This `with` should be refactored like this:
ref = make_ref()
with {:ok, user} <- User.create(ref),
:ok <- send_email(user) do
Logger.debug("Created user: #{inspect(user)}")
user
end
"""
]
alias Credo.Code
@message_first_clause_not_pattern "`with` doesn't start with a <- clause, move the non-pattern <- clauses outside of the `with`"
@message_last_clause_not_pattern "`with` doesn't end with a <- clause, move the non-pattern <- clauses inside the body of the `with`"
@doc false
@impl true
def run(%SourceFile{} = source_file, params) do
issue_meta = IssueMeta.for(source_file, params)
Code.prewalk(source_file, &traverse(&1, &2, issue_meta))
end
# TODO: consider for experimental check front-loader (ast)
defp traverse({:with, meta, [_, _ | _] = clauses_and_body} = ast, issues, issue_meta)
when is_list(clauses_and_body) do
# If clauses_and_body is a list with at least two elements in it, we think
# this might be a call to the special form "with". To be sure of that,
# we get the last element of clauses_and_body and check that it's a keyword
# list with a :do key in it (the body).
# We can hard-match on [maybe_body] here since we know that clauses_and_body
# has at least two elements.
{maybe_clauses, [maybe_body]} = Enum.split(clauses_and_body, -1)
if Keyword.keyword?(maybe_body) and Keyword.has_key?(maybe_body, :do) do
{ast, issues_for_with(maybe_clauses, meta[:line], issue_meta) ++ issues}
else
{ast, issues}
end
end
defp traverse(ast, issues, _issue_meta) do
{ast, issues}
end
defp issues_for_with(clauses, line, issue_meta) do
issue_if_not_starting_with_pattern_clause(clauses, line, issue_meta) ++
issue_if_not_ending_with_pattern_clause(clauses, line, issue_meta)
end
defp issue_if_not_starting_with_pattern_clause(
[{:<-, _meta, _args} | _rest],
_line,
_issue_meta
) do
[]
end
defp issue_if_not_starting_with_pattern_clause(_clauses, line, issue_meta) do
[format_issue(issue_meta, message: @message_first_clause_not_pattern, line_no: line)]
end
defp issue_if_not_ending_with_pattern_clause(clauses, line, issue_meta) do
if length(clauses) > 1 and not match?({:<-, _, _}, Enum.at(clauses, -1)) do
[format_issue(issue_meta, message: @message_last_clause_not_pattern, line_no: line)]
else
[]
end
end
end
|
lib/credo/check/refactor/with_clauses.ex
| 0.526465
| 0.423935
|
with_clauses.ex
|
starcoder
|
defmodule McProtocol.Util.GenerateRSA do
@moduledoc """
Utilities for generating RSA keys.
"""
@doc """
  Generates an RSA key of the given `key_size` (in bits).
  Calls out to the openssl command-line executable for key generation.
Returns an RSA private key in the form of a :RSAPrivateKey record.
"""
def gen(key_size) do
{command, args} = gen_command(key_size)
{output, 0} = System.cmd(command, args)
split_output = output
|> String.split("\n")
{_, raw_values} = Enum.reduce(split_output, {nil, %{}}, fn(line, {mode, map}) ->
case is_key(line) do
:skip -> {mode, map}
false ->
{mode, Map.put(map, mode, [line | Map.fetch!(map, mode)])}
key ->
{short_key, list_beginning} = decode_key(key)
{short_key, Map.put(map, short_key, list_beginning)}
end
end)
values = raw_values
|> Enum.map(fn {k, v} ->
value = v
|> Enum.reverse
      |> Enum.map(&String.trim/1)
|> Enum.join
|> String.replace(":", "")
{k, value}
end)
|> Enum.into(%{})
[pub_exp_text, _] = values["publicExponent"] |> String.split(" ")
{pub_exp, ""} = pub_exp_text |> Integer.parse
modulus = values["modulus"] |> Base.decode16!(case: :lower) |> as_num
priv_exp = values["privateExponent"] |> Base.decode16!(case: :lower) |> as_num
prime_1 = values["prime1"] |> Base.decode16!(case: :lower) |> as_num
prime_2 = values["prime2"] |> Base.decode16!(case: :lower) |> as_num
exp_1 = values["exponent1"] |> Base.decode16!(case: :lower) |> as_num
exp_2 = values["exponent2"] |> Base.decode16!(case: :lower) |> as_num
coeff = values["coefficient"] |> Base.decode16!(case: :lower) |> as_num
{:RSAPrivateKey, :"two-prime",
modulus, pub_exp, priv_exp,
prime_1, prime_2, exp_1, exp_2, coeff,
:asn1_NOVALUE}
end
defp as_num(bin) do
size = byte_size(bin)
<<num::integer-unit(8)-size(size)>> = bin
num
end
defp gen_command(bits) when is_number(bits) do
{"openssl",
["genpkey", "-algorithm", "RSA", "-pkeyopt", "rsa_keygen_bits:#{bits}", "-text"]}
end
defp decode_key(key) do
[key, list_first] = String.split(key, ":")
{key, [list_first]}
end
defp is_key("-----BEGIN PRIVATE KEY-----"), do: "privateKeyBlock:"
defp is_key("-----END PRIVATE KEY-----"), do: :skip
defp is_key(""), do: :skip
defp is_key(str) do
cond do
String.starts_with?(str, " ") -> false
match?({:ok, _}, Base.decode64(str)) -> false
      true -> str
end
end
end
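
# Usage sketch (requires the `openssl` executable on the PATH; generation
# can take a moment for larger key sizes):
#
#     key = McProtocol.Util.GenerateRSA.gen(2048)
#     # key is an :RSAPrivateKey record usable with :public_key, e.g.
#     # :public_key.sign("data", :sha256, key)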
|
lib/util/generate_rsa.ex
| 0.762026
| 0.419143
|
generate_rsa.ex
|
starcoder
|
defmodule Vox do
@moduledoc """
Functions for working with voxels.
"""
@doc """
Create vox data from some data.
"""
@spec new(any) :: Vox.Data.t | nil
def new(data), do: new(data, format(data))
@doc """
Create vox data from some data that should be loaded by the specified
module.
"""
@spec new(any, module) :: Vox.Data.t
@spec new(any, nil) :: nil
def new(_, nil), do: nil
def new(data, module), do: module.new(data)
@default_formats [
Vox.Format.VOX
]
@doc """
Get the format that will handle this voxel data format.
"""
@spec format(any) :: module | nil
def format(data), do: Enum.find(Application.get_env(:vox, :formats, []), &(&1.format?(data))) || Enum.find(@default_formats, &(&1.format?(data)))
@doc """
Get all the models represented by the voxel data.
"""
@spec models(Vox.Data.t) :: [Vox.Model.t]
def models(data), do: Vox.Data.models(data)
@doc """
Get a specific model represented by the voxel data.
"""
@spec model(Vox.Data.t, Vox.Model.id) :: Vox.Model.t | nil
def model(data, index) do
case Vox.Data.impl(data, :model) do
nil -> Enum.at(models(data), index)
fun -> fun.(data, index)
end
end
@doc """
Get the number of models represented by the voxel data.
"""
@spec model_count(Vox.Data.t) :: non_neg_integer
def model_count(data) do
case Vox.Data.impl(data, :model_count) do
nil -> Enum.count(models(data))
fun -> fun.(data)
end
end
@doc """
Get a voxel from a model in the voxel data.
"""
@spec voxel(Vox.Data.t, Vox.Model.id, Vox.Model.point) :: { :ok, Vox.Voxel.t | nil } | Vox.Model.error(Vox.Model.bounds_error | Vox.Model.unknown_error)
def voxel(data, index, point) do
case Vox.Data.impl(data, :voxel) do
nil ->
case Enum.at(models(data), index) do
nil -> { :error, { :model, :unknown } }
model -> Vox.Model.voxel(model, point)
end
fun -> fun.(data, index, point)
end
end
@doc """
Get a voxel from a model in the voxel data.
"""
@spec voxel(Vox.Data.t, Vox.Model.id, Vox.Model.point, Vox.Model.axis, Vox.Model.axis) :: { :ok, Vox.Voxel.t | nil } | Vox.Model.error(Vox.Model.bounds_error | Vox.Model.unknown_error)
def voxel(data, index, x, y, z), do: voxel(data, index, { x, y, z })
defmodule NoModelError do
defexception [:data, :id]
@impl Exception
def exception({ data, id }) do
%NoModelError{
data: data,
id: id
}
end
@impl Exception
def message(%{ id: id }), do: "no model with id: #{inspect id}"
end
@doc """
Get a voxel from a model in the voxel data.
"""
@spec voxel!(Vox.Data.t, Vox.Model.id, Vox.Model.point) :: Vox.Voxel.t | nil | no_return
def voxel!(data, index, point) do
case voxel(data, index, point) do
{ :ok, result } -> result
{ :error, { :model, :unknown } } -> raise NoModelError, { data, index }
{ :error, { :model, :bounds } } -> raise Vox.Model.BoundsError, { model(data, index), point }
end
end
@doc """
Get a voxel from a model in the voxel data.
"""
@spec voxel!(Vox.Data.t, Vox.Model.id, Vox.Model.point, Vox.Model.axis, Vox.Model.axis) :: Vox.Voxel.t | nil | no_return
def voxel!(data, index, x, y, z), do: voxel!(data, index, { x, y, z })
@doc """
  Transform the voxel data so its coordinate system is re-oriented.
"""
@spec transform(Vox.Data.t, Vox.Data.origin) :: Vox.Data.t
def transform(data, origin), do: Vox.Transform.new(data, origin)
@doc """
  Transform the voxel data so its coordinate system is re-oriented.
"""
@spec transform(Vox.Data.t, Vox.Data.face, Vox.Data.face, Vox.Data.face) :: Vox.Data.t
def transform(data, x, y, z), do: transform(data, { x, y, z })
end
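
# Usage sketch (assumes "model.vox" is a MagicaVoxel file recognized by the
# built-in Vox.Format.VOX format):
#
#     data = Vox.new(File.read!("model.vox"))
#     Vox.model_count(data)
#     Vox.voxel!(data, 0, {0, 0, 0})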
|
lib/vox.ex
| 0.913286
| 0.598547
|
vox.ex
|
starcoder
|
defmodule GuessWho.Contenders.Seancribbs do
@behaviour GuessWho.Contender
def name do
"Sean's Guesstimator"
end
  # After each guess result, eliminate the characters and attributes that have been excluded
defmodule State do
defstruct guesses: [],
characters:
GuessWho.Attributes.characters()
|> Enum.map(&{&1, GuessWho.Attributes.character_attributes(&1)})
|> Map.new(),
attributes:
GuessWho.Attributes.attributes()
|> Enum.map(&{&1, GuessWho.Attributes.characters_with_attribute(&1)})
|> Map.new()
def cleanup({:has_attribute?, result}, %{guesses: [attribute | _]} = state) do
suspects = GuessWho.Attributes.characters_with_attribute(attribute)
chars =
if result do
Map.take(state.characters, suspects)
else
Map.drop(state.characters, suspects)
end
%{state | characters: chars, attributes: Map.delete(state.attributes, attribute)}
end
def cleanup({:name_looks_like?, result}, %{guesses: [pattern | _]} = state) do
suspects =
state.characters
|> Map.keys()
|> Enum.filter(fn c ->
{_, test} = GuessWho.Attributes.character_matches?(c, pattern)
result == test
end)
new_chars =
if result do
Map.take(state.characters, suspects)
else
Map.drop(state.characters, suspects)
end
new_attributes =
if result do
# If the name does match, we only care about the attributes that are
# associated with those characters
keep =
new_chars
|> Map.values()
|> List.flatten()
|> Enum.uniq()
state.attributes
|> Map.take(keep)
|> Enum.map(fn {a, cs} -> {a, Enum.filter(cs, &(&1 in suspects))} end)
|> Map.new()
else
state.attributes
|> Enum.map(fn {a, cs} -> {a, Enum.reject(cs, &(&1 in suspects))} end)
|> Enum.reject(fn {_a, cs} -> cs == [] end)
|> Map.new()
end
%{state | characters: new_chars, attributes: new_attributes}
end
def cleanup({:name_guessed?, false}, %{guesses: [name | _]} = state) do
new_chars = Map.delete(state.characters, name)
%{
state
| characters: new_chars,
attributes:
state.attributes
|> Enum.map(fn {a, cs} -> {a, cs -- [name]} end)
|> Enum.reject(fn {_a, cs} -> cs == [] end)
|> Map.new()
}
end
end
def turn(nil, nil) do
# always guess male/female first
{"male", %State{guesses: ["male"]}}
end
def turn(result, state) do
state = State.cleanup(result, state)
cond do
Enum.count(state.characters) == 1 ->
# It's this dude(tte)
char = hd(Map.keys(state.characters))
{char, %{state | guesses: [char | state.guesses]}}
Enum.count(state.attributes) == 1 ->
# Fallback to linear guessing because I can't be arsed to build a trie
char = hd(hd(Map.values(state.attributes)))
{char, %{state | guesses: [char | state.guesses]}}
true ->
# Find the largest attribute to partition on, hopefully eliminating or
# selecting the largest group
{attr, _} = Enum.max_by(state.attributes, fn {_a, cs} -> Enum.count(cs) end)
{attr, %{state | guesses: [attr | state.guesses]}}
end
end
end
|
lib/guess_who/contenders/seancribbs.ex
| 0.523908
| 0.477493
|
seancribbs.ex
|
starcoder
|
defmodule Nerves.Package do
@moduledoc """
Defines a Nerves package struct and helper functions.
A Nerves package is an application which defines a Nerves package
configuration file at the root of the application path. The configuration
file is `nerves.exs` and uses Mix.Config to list configuration values.
## Example Configuration
```
use Mix.Config
version =
Path.join(__DIR__, "VERSION")
|> File.read!
|> String.trim
  pkg = :nerves_system_bbb # hypothetical: your package's OTP application name
config pkg, :nerves_env,
type: :system,
version: version,
compiler: :nerves_package,
artifact_url: [
"https://github.com/nerves-project/\#{pkg}/releases/download/v\#{version}/\#{pkg}-v\#{version}.tar.gz",
],
platform: Nerves.System.BR,
platform_config: [
defconfig: "nerves_defconfig",
],
checksum: [
"linux",
"rootfs_overlay",
"uboot",
"bbb-busybox.config",
"fwup.conf",
"nerves_defconfig",
"nerves.exs",
"post-createfs.sh",
"uboot-script.cmd",
"VERSION"
]
```
## Keys
  **Required**
* `:type` - The Nerves package type. Can be any one of the following
* `:system` - A Nerves system.
* `:system_platform` - A set of build tools for a Nerves system.
* `:toolchain` - A Nerves toolchain
* `:toolchain_platform` - A set of build tools for a Nerves toolchain.
* `:version` - The package version
  **Optional**
* `:compiler` - The Mix.Project compiler for the package. Example: `:nerves_package`
* `:platform` - The application which is the packages build platform.
* `:checksum` - A list of files and top level folders to expand paths for use when calculating the checksum of the package source.
"""
defstruct [app: nil, path: nil, dep: nil, type: nil, version: nil, platform: nil, provider: nil, compiler: nil, config: []]
alias __MODULE__
alias Nerves.Package.{Artifact, Providers}
alias Nerves.Package
@type t :: %__MODULE__{app: atom,
path: binary,
type: :system |
:package |
:toolchain,
dep: :project |
:path |
:hex |
:git,
platform: atom,
provider: atom,
compiler: atom,
version: Version.t,
config: Keyword.t}
@package_config "nerves.exs"
@checksum "CHECKSUM"
@required [:type, :version, :platform]
@doc """
Builds the package and produces an artifact. See Nerves.Package.Artifact
for more information.
"""
@spec artifact(Nerves.Package.t, Nerves.Package.t) :: :ok
def artifact(pkg, toolchain) do
ret =
case pkg.provider do
{mod, opts} -> mod.artifact(pkg, toolchain, opts)
providers when is_list(providers) ->
Enum.reduce(providers, nil, fn ({mod, opts}, ret) ->
if ret != :ok do
mod.artifact(pkg, toolchain, opts)
else
ret
end
end)
end
case ret do
:ok -> Path.join(Artifact.dir(pkg, toolchain), @checksum)
|> File.write!(checksum(pkg))
_ -> :error
end
end
@doc """
Loads the package config and parses it into a `%Package{}`
"""
@spec load_config({app :: atom, path :: String.t}) :: Nerves.Package.t
def load_config({app, path}) do
load_nerves_config(path)
config = Application.get_env(app, :nerves_env)
version = config[:version]
unless version do
Mix.shell.error "The Nerves package #{app} does not define a version.\n\n" <>
"Verify that the key exists in '#{config_path(path)}'\n" <>
"and that the package name is correct."
exit({:shutdown, 1})
end
type = config[:type]
unless type do
Mix.shell.error "The Nerves package #{app} does not define a type.\n\n" <>
"Verify that the key exists in '#{config_path(path)}'.\n"
exit({:shutdown, 1})
end
platform = config[:platform]
provider = provider(app, type)
compiler = config[:compiler]
config = Enum.reject(config, fn({k, _v}) -> k in @required end)
%Package{
app: app,
type: type,
platform: platform,
provider: provider,
dep: dep_type(app),
path: path,
version: version,
compiler: compiler,
config: config}
end
@doc """
Produce a base16 encoded checksum for the package from the list of files
and expanded folders listed in the checksum config key.
"""
@spec checksum(Nerves.Package.t) :: String.t
def checksum(pkg) do
blob =
(pkg.config[:checksum] || [])
|> expand_paths(pkg.path)
|> Enum.map(& File.read!/1)
|> Enum.map(& :crypto.hash(:sha256, &1))
|> Enum.join
:crypto.hash(:sha256, blob)
|> Base.encode16
end
@doc """
Cleans the artifacts for the package providers of all packages
"""
@spec clean(Nerves.Package.t) :: :ok | {:error, term}
def clean(pkg) do
Mix.shell.info("Cleaning Nerves Package #{pkg.app}")
Enum.each(pkg.provider, fn({provider, _}) -> provider.clean(pkg) end)
end
@doc """
Determines if the artifact for a package is stale and needs to be rebuilt.
"""
@spec stale?(Nerves.Package.t, Nerves.Package.t) :: boolean
def stale?(pkg, toolchain) do
if Artifact.env_var?(pkg) do
false
else
exists = Artifact.exists?(pkg, toolchain)
checksum = match_checksum?(pkg, toolchain)
!(exists and checksum)
end
end
@doc """
Starts an interactive shell with the working directory set
to the package path
"""
@spec shell(Nerves.Package.t) :: :ok
def shell(nil) do
Mix.raise "Package is not loaded in your Nerves Environment."
end
def shell(%{platform: nil, app: app}) do
Mix.raise "Cannot start shell for #{app}"
end
def shell(pkg) do
pkg.provider.shell(pkg)
end
@doc """
Takes the path to the package and returns the path to its package config.
"""
@spec config_path(String.t) :: String.t
def config_path(path) do
Path.join(path, @package_config)
end
defp match_checksum?(pkg, toolchain) do
artifact_checksum =
Path.join(Artifact.dir(pkg, toolchain), @checksum)
|> File.read
case artifact_checksum do
{:ok, checksum} ->
checksum == Package.checksum(pkg)
_ ->
false
end
end
defp provider(app, type) do
config = Mix.Project.config[:artifacts] || []
case Keyword.get(config, app) do
nil -> provider_mod(type)
opts -> provider_opts(opts)
end
end
defp provider_mod(:system_platform), do: []
defp provider_mod(:toolchain_platform), do: []
defp provider_mod(:toolchain) do
mod =
case :os.type do
{_, :linux} -> Providers.HTTP
{_, :darwin} -> Providers.HTTP
_ -> Providers.Docker
end
[{Providers.HTTP, []}, {mod, []}]
end
defp provider_mod(_) do
mod =
case :os.type do
{_, :linux} -> Providers.Local
_ -> Providers.Docker
end
[{Providers.HTTP, []}, {mod, []}]
end
defp provider_opts(mod) when is_atom(mod), do: {mod, []}
defp provider_opts(opts) when is_list(opts) do
mod =
cond do
opts[:path] != nil -> Providers.Path
opts[:url] != nil -> Providers.HTTP
true -> Mix.raise "Invalid artifact options"
end
{mod, opts}
end
defp load_nerves_config(path) do
config_path(path)
|> Mix.Config.read!
|> Mix.Config.persist
end
defp dep_type(pkg) do
deps_paths = Mix.Project.deps_paths()
case Map.get(deps_paths, pkg) do
nil ->
:project
path ->
deps_path =
File.cwd!
|> Path.join(Mix.Project.config[:deps_path])
|> Path.expand
if String.starts_with?(path, deps_path) do
:hex
else
:path
end
end
end
defp expand_paths(paths, dir) do
expand_dir = Path.expand(dir)
paths
|> Enum.map(&Path.join(dir, &1))
|> Enum.flat_map(&Path.wildcard/1)
|> Enum.flat_map(&dir_files/1)
|> Enum.map(&Path.expand/1)
|> Enum.filter(&File.regular?/1)
|> Enum.uniq
|> Enum.map(&Path.relative_to(&1, expand_dir))
end
defp dir_files(path) do
if File.dir?(path) do
Path.wildcard(Path.join(path, "**"))
else
[path]
end
end
end
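
# Usage sketch (hypothetical package and path; assumes a nerves.exs at the
# package root):
#
#     pkg = Nerves.Package.load_config({:nerves_system_rpi0, "deps/nerves_system_rpi0"})
#     Nerves.Package.checksum(pkg)
#     # => Base16-encoded SHA-256 over the files listed under :checksum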
|
lib/nerves/package.ex
| 0.850267
| 0.779248
|
package.ex
|
starcoder
|
defmodule Militerm.ECS.Entity do
@moduledoc """
An entity is composed of components. Each entity has a process that coordinates event
messages with the components if the entity responds to events.
The entity module holds any event handlers that are specific to the entity type. Entity
modules can delegate some events to other modules as-needed.
Generally, if a process isn't running for the entity and the entity receives events, the
system will start a process for the entity. So the system doesn't have to spin up processes
for everything at start up or when an item is created. It's done automatically later when the
first event for the entity is queued.
Each entity has an identifier as well as the module defining the entity's behavior.
Events are used to respond to commands and environmental activities. They are not the same
as an entities heartbeat or component-based processes and systems.
"""
@callback preprocess(term) :: term
@callback handle_event(term, String.t(), String.t(), map) :: term
@callback can?(term, String.t(), String.t(), map) :: true | false | nil
@callback is?(term, String.t(), map) :: true | false | nil
@callback calculates?(term, String.t()) :: true | false | nil
@callback validates?(term, String.t()) :: true | false | nil
@callback calculate(term, String.t(), map) :: term
@callback validate(term, String.t(), term, map) :: term
defmacro __using__(opts) do
based_on = Keyword.get(opts, :based_on)
abilities = Keyword.get(opts, :abilities, [])
components = Keyword.get(opts, :components, [])
quote do
import Militerm.ECS.Entity
import Militerm.ECS.Ability
@behaviour Militerm.ECS.Entity
@based_on unquote(based_on)
@components unquote(components)
@abilities unquote(abilities)
def defaults(), do: @components
def create(entity_id, archetype, component_data) do
Militerm.ECS.Entity.create_entity(
__MODULE__,
entity_id,
archetype,
__MODULE__.preprocess(component_data),
@components
)
end
def create(entity_id, archetype) when is_binary(archetype) do
create(entity_id, archetype, [])
end
def create(entity_id, component_data) do
Militerm.ECS.Entity.create_entity(
__MODULE__,
entity_id,
__MODULE__.preprocess(component_data),
@components
)
end
def delete(entity_id), do: Militerm.ECS.Entity.delete_entity(entity_id)
def preprocess(component_data), do: component_data
      defoverridable preprocess: 1
end
end
@doc false
def create_entity(entity_module, entity_id, archetype \\ nil, component_data, defaults) do
    # We want to make sure the defaults are applied and components are added,
    # but not all components have to be listed in the defaults.
archetype_data = get_archetype_data(archetype)
component_mapping = Militerm.Config.master().components()
data =
defaults
|> with_string_keys()
|> merge(with_string_keys(archetype_data))
|> merge(with_string_keys(component_data))
for {module_key, module_data} <- data do
module = Map.get(component_mapping, as_atom(module_key), nil)
if not is_nil(module), do: module.set(entity_id, module_data)
end
Militerm.Components.Entity.register(entity_id, entity_module, archetype)
entity_id
end
def get_archetype_data(nil), do: %{}
def get_archetype_data(archetype) do
case Militerm.Services.Archetypes.get(archetype) do
%{data: data} -> data
_ -> %{}
end
end
def delete_entity(entity_id) do
component_mapping = Militerm.Config.master().components()
# make sure the entity isn't running
Militerm.Systems.Entity.shutdown(entity_id)
for {_, module} <- component_mapping do
module.remove(entity_id)
end
Militerm.Components.Entity.remove(entity_id)
end
defp merge(a, b) when is_map(a) and is_map(b) do
Map.merge(a, b, fn _, sa, sb -> merge(sa, sb) end)
end
defp merge(a, b) when is_map(a) and is_list(b) do
if Keyword.keyword?(b), do: merge(a, Map.new(b)), else: a
end
defp merge(a, b) when is_list(a) and is_map(b) do
if Keyword.keyword?(a), do: merge(Map.new(a), b), else: a
end
defp merge(a, b) when is_list(a) and is_list(b) do
if Keyword.keyword?(a) and Keyword.keyword?(b),
do: Keyword.merge(a, b),
else: Enum.uniq(a ++ b)
end
defp merge(nil, b), do: b
defp merge(a, nil), do: a
defp merge(_, b), do: b
defp as_atom(atom) when is_atom(atom), do: atom
defp as_atom(bin) when is_binary(bin), do: String.to_atom(bin)
defp with_string_keys(nil), do: %{}
defp with_string_keys([]), do: %{}
defp with_string_keys(list) when is_list(list) do
if Keyword.keyword?(list) do
list
|> Enum.map(fn {k, v} -> {to_string(k), v} end)
|> Enum.into(%{})
else
list
end
end
defp with_string_keys(map) when is_map(map) do
map
|> Enum.map(fn
{k, v} when is_atom(k) -> {to_string(k), v}
otherwise -> otherwise
end)
|> Enum.into(%{})
end
defp with_string_keys(otherwise), do: otherwise
end
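
# Minimal sketch of defining an entity type with this module (component
# data is hypothetical):
#
#     defmodule MyGame.Sword do
#       use Militerm.ECS.Entity,
#         components: %{"identity" => %{"name" => "sword"}}
#     end
#
#     MyGame.Sword.create("sword:excalibur", "std:weapon")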
|
lib/militerm/ecs/entity.ex
| 0.802942
| 0.527742
|
entity.ex
|
starcoder
|
defmodule DocuSign.Api.BulkEnvelopes do
@moduledoc """
API calls for all endpoints tagged `BulkEnvelopes`.
"""
alias DocuSign.Connection
import DocuSign.RequestBuilder
@doc """
Gets the status of a specified bulk send operation.
Retrieves the status information of a single bulk recipient batch. A bulk recipient batch is the set of envelopes sent from a single bulk recipient file.
## Parameters
- connection (DocuSign.Connection): Connection to server
- account_id (String.t): The external account number (int) or account ID Guid.
- batch_id (String.t):
- opts (KeywordList): [optional] Optional parameters
- :count (String.t): Specifies the number of entries to return.
- :include (String.t): Specifies which entries are included in the response. Multiple entries can be included by using commas in the query string (example: ?include=\"failed,queued\") Valid values: * all - Returns all entries. If present, overrides all other query settings. This is the default if no query string is provided. * failed - Entries with a failed status. * processing - Entries with a processing status. * queued - Entries with a queued status. * sent - Entries with a sent status.
- :start_position (String.t): Specifies the location in the list of envelopes from which to start.
## Returns
{:ok, %DocuSign.Model.BulkEnvelopeStatus{}} on success
{:error, info} on failure
"""
@spec bulk_envelopes_get_bulk_envelopes_batch_id(
Tesla.Env.client(),
String.t(),
String.t(),
keyword()
) :: {:ok, DocuSign.Model.BulkEnvelopeStatus.t()} | {:error, Tesla.Env.t()}
def bulk_envelopes_get_bulk_envelopes_batch_id(connection, account_id, batch_id, opts \\ []) do
optional_params = %{
count: :query,
include: :query,
start_position: :query
}
%{}
|> method(:get)
|> url("/v2/accounts/#{account_id}/bulk_envelopes/#{batch_id}")
|> add_optional_params(optional_params, opts)
|> Enum.into([])
|> (&Connection.request(connection, &1)).()
|> decode(%DocuSign.Model.BulkEnvelopeStatus{})
end
@doc """
Gets status information about bulk recipient batches.
Retrieves status information about all the bulk recipient batches. A bulk recipient batch is the set of envelopes sent from a single bulk recipient file. The response includes general information about each bulk recipient batch. The response returns information about the envelopes sent with bulk recipient batches, including the `batchId` property, which can be used to retrieve a more detailed status of individual bulk recipient batches.
## Parameters
- connection (DocuSign.Connection): Connection to server
- account_id (String.t): The external account number (int) or account ID Guid.
- opts (KeywordList): [optional] Optional parameters
- :count (String.t): The number of results to return. This can be 1 to 20.
- :include (String.t): Specifies which entries are included in the response. Multiple entries can be included by using commas in the query string (example: ?include=\"failed,queued\") Valid values: * all - Returns all entries. If present, overrides all other query settings. This is the default if no query string is provided. * failed - Entries with a failed status. * processing - Entries with a processing status. * queued - Entries with a queued status. * sent - Entries with a sent status.
- :start_position (String.t): The position of the bulk envelope items in the response. This is used for repeated calls, when the number of bulk envelopes returned is too large for one return. The default value is 0.
## Returns
{:ok, %DocuSign.Model.BulkEnvelopes{}} on success
{:error, info} on failure
"""
@spec bulk_envelopes_get_envelopes_bulk(Tesla.Env.client(), String.t(), keyword()) ::
{:ok, DocuSign.Model.BulkEnvelopes.t()} | {:error, Tesla.Env.t()}
def bulk_envelopes_get_envelopes_bulk(connection, account_id, opts \\ []) do
optional_params = %{
count: :query,
include: :query,
start_position: :query
}
%{}
|> method(:get)
|> url("/v2/accounts/#{account_id}/bulk_envelopes")
|> add_optional_params(optional_params, opts)
|> Enum.into([])
|> (&Connection.request(connection, &1)).()
|> decode(%DocuSign.Model.BulkEnvelopes{})
end
end
|
lib/docusign/api/bulk_envelopes.ex
| 0.889846
| 0.485234
|
bulk_envelopes.ex
|
starcoder
|
defmodule Bolt.Sips.Internals.PackStream.Message.Encoder do
@moduledoc false
_module_doc = """
Manages the message encoding.
A message is a tuple formatted as:
`{message_type, data}`
with:
- message_type: an atom among the valid message types (:init, :discard_all, :pull_all,
:ack_failure, :reset, :run)
- data: a list of data to be used by the message
Messages are passed in one or more chunks. The structure of a chunk is as follows: `chunk_size` `data`
with `chunk_size` being a 16-bit integer.
A message always ends with the end marker `0x00 0x00`.
The possible message layouts are thus:
- One-chunk message:
`chunk_size` `message_data` `end_marker`
- multiple-chunk message:
`chunk_1_size` `message_data` `chunk_n_size` `message_data`...`end_marker`
More documentation on message transfer encoding:
[https://boltprotocol.org/v1/#message_transfer_encoding](https://boltprotocol.org/v1/#message_transfer_encoding)
All messages are serialized structures. See `Bolt.Sips.Internals.PackStream.EncoderV1` for
more information about structure encoding.
An extensive documentation on messages can be found here:
[https://boltprotocol.org/v1/#messages](https://boltprotocol.org/v1/#messages)
"""
alias Bolt.Sips.Internals.BoltVersionHelper
alias Bolt.Sips.Internals.PackStreamError
@max_chunk_size 65_535
@end_marker <<0x00, 0x00>>
@available_bolt_versions BoltVersionHelper.available_versions()
@doc """
Return client name (based on bolt_sips version)
"""
def client_name() do
"BoltSips/" <> to_string(Application.spec(:bolt_sips, :vsn))
end
@doc """
Return the valid message signatures depending on the Bolt version
"""
@spec valid_signatures(integer()) :: [integer()]
def valid_signatures(bolt_version) when bolt_version <= 2 do
Bolt.Sips.Internals.PackStream.Message.EncoderV1.valid_signatures()
end
def valid_signatures(3) do
Bolt.Sips.Internals.PackStream.Message.EncoderV3.valid_signatures()
end
@doc """
Check if the encoder for the given bolt version is capable of encoding the given message
If it is the case, the encoding function will be called
If not, fallback to previous bolt version
If encoding function is not present in any of the bolt version, an error will be raised
"""
@spec encode({atom(), list()}, integer()) :: binary() | Bolt.Sips.Internals.PackStreamError.t()
def encode(data, bolt_version)
when is_integer(bolt_version) and bolt_version in @available_bolt_versions do
call_encode(data, bolt_version, bolt_version)
end
def encode(data, bolt_version) when is_integer(bolt_version) do
if bolt_version > BoltVersionHelper.last() do
encode(data, BoltVersionHelper.last())
else
raise PackStreamError,
data: data,
bolt_version: bolt_version,
message: "[Message] Unsupported encoder version"
end
end
def encode(data, bolt_version) do
raise PackStreamError,
data: data,
bolt_version: bolt_version,
message: "[Message] Unsupported encoder version"
end
@spec call_encode({atom(), list()}, integer(), nil | integer()) ::
binary() | PackStreamError.t()
defp call_encode(data, original_bolt_version, used_bolt_version)
when used_bolt_version not in @available_bolt_versions do
raise(PackStreamError,
data: data,
bolt_version: original_bolt_version,
message: "[Message] Encoder not implemented for"
)
end
defp call_encode(data, original_version, used_version) do
module = Module.concat(["Bolt.Sips.Internals.PackStream.Message", "EncoderV#{used_version}"])
with true <- Code.ensure_loaded?(module),
true <- Kernel.function_exported?(module, :encode, 2),
result <- Kernel.apply(module, :encode, [data, original_version]),
{:ok, result} <- ok_result(result) do
result
else
{:error, :not_implemented} ->
call_encode(data, original_version, BoltVersionHelper.previous(used_version))
_ ->
call_encode(data, original_version, -1)
end
end
# Wrap the result in an ok-tuple if valid.
# This eases the error pattern matching in a `with` statement.
defp ok_result(result) when is_binary(result) do
{:ok, result}
end
defp ok_result(result) do
result
end
@doc """
Perform the final message:
- add header
- manage chunk if necessary
- add end marker
"""
@spec encode_message(
Bolt.Sips.Internals.PackStream.Message.out_signature(),
integer(),
list(),
integer()
) ::
Bolt.Sips.Internals.PackStream.Message.encoded()
def encode_message(message_type, signature, data, bolt_version) do
Bolt.Sips.Internals.Logger.log_message(:client, message_type, data)
encoded =
{signature, data}
|> Bolt.Sips.Internals.PackStream.encode(bolt_version)
|> generate_chunks()
Bolt.Sips.Internals.Logger.log_message(:client, message_type, encoded, :hex)
encoded
end
@spec generate_chunks(Bolt.Sips.Internals.PackStream.value() | <<>>, list()) ::
Bolt.Sips.Internals.PackStream.Message.encoded()
defp generate_chunks(data, chunks \\ [])
defp generate_chunks(data, chunks) when byte_size(data) > @max_chunk_size do
<<chunk::binary-@max_chunk_size, rest::binary>> = data
generate_chunks(rest, [format_chunk(chunk) | chunks])
end
defp generate_chunks(<<>>, chunks) do
[@end_marker | chunks]
|> Enum.reverse()
|> Enum.join()
end
defp generate_chunks(data, chunks) do
generate_chunks(<<>>, [format_chunk(data) | chunks])
end
@spec format_chunk(Bolt.Sips.Internals.PackStream.value()) ::
Bolt.Sips.Internals.PackStream.Message.encoded()
defp format_chunk(chunk) do
<<byte_size(chunk)::16>> <> chunk
end
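# Worked example with a hypothetical 3-byte payload: format_chunk(<<1, 2, 3>>)
# yields <<0, 3, 1, 2, 3>>, i.e. a 16-bit big-endian size prefix followed by
# the data. generate_chunks/2 then appends the end marker, so the fully framed
# message is <<0, 3, 1, 2, 3, 0, 0>>.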
end
|
lib/bolt_sips/internals/pack_stream/message/encoder.ex
| 0.845385
| 0.61025
|
encoder.ex
|
starcoder
|
defmodule Makeup.Lexer do
@moduledoc """
A lexer turns raw source code into a list of tokens.
"""
alias Makeup.Lexer.Types, as: T
alias Makeup.Lexer.Postprocess
@doc """
Parses the smallest number of tokens that make sense.
It's a `parsec`.
"""
@callback root_element(String.t) :: T.parsec_result
@doc """
Parses the given string into a `parsec` result that includes a list of tokens.
"""
@callback root(String.t) :: T.parsec_result
@doc """
Postprocesses a list of tokens before matching the contained groups.
"""
@callback postprocess([T.token()], list()) :: [T.token()]
@doc """
Matches groups in a list of tokens.
"""
@callback match_groups([T.token()], String.t) :: [T.token()]
@doc """
Lexes a string into a list of tokens
"""
@callback lex(String.t(), list()) :: [T.token()]
@doc """
Merges the token values into the original string.
Inverts the output of a lexer. That is, if `lexer` is a lexer, then:
string |> lexer.lex() |> Makeup.Lexer.unlex() == string
This only works for a correctly implemented lexer, of course.
The above identity can be treated as a lexer invariant for newly implemented lexers.
"""
@spec unlex(list(T.token())) :: String.t()
def unlex(tokens) do
tokens
|> Enum.map(&Postprocess.token_value_to_binary/1)
|> Enum.map(fn {_tag, _meta, value} -> value end)
|> Enum.join()
end
@doc """
Splits a list of tokens on newline characters (`\\n`).
The result is a list of lists of tokens with no newlines.
"""
@spec split_into_lines(list(T.token())) :: list(list(T.token()))
def split_into_lines(tokens) do
{lines, last_line} =
Enum.reduce tokens, {[], []}, (fn token, {lines, line} ->
{ttype, meta, text} = Postprocess.token_value_to_binary(token)
case String.split(text, "\n") do
[_] -> {lines, [token | line]}
[part | parts] ->
first_line = [{ttype, meta, part} | line] |> :lists.reverse
all_but_last_line =
parts
|> Enum.slice(0..-2)
|> Enum.map(fn tok_text -> [{ttype, meta, tok_text}] end)
|> :lists.reverse
last_line = [{ttype, meta, Enum.at(parts, -1)}]
{all_but_last_line ++ [first_line | lines], last_line}
end
end)
:lists.reverse([last_line | lines])
end
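# Example with a hypothetical token whose text spans a newline; the token is
# split into one token per line, preserving its type and metadata:
#   split_into_lines([{:string, %{}, "a\nb"}])
#   # => [[{:string, %{}, "a"}], [{:string, %{}, "b"}]]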
@doc """
Merge adjacent tokens of the same type and with the same attributes.
Doing this will require iterating over the list of tokens again,
so only do this if you have a good reason.
"""
@spec merge(list(T.token())) :: list(T.token())
def merge([{tag, meta, value1}, {tag, meta, value2} | rest]),
do: merge [{tag, meta, value1 <> value2} | rest]
def merge([token | rest]),
do: [token | merge(rest)]
def merge([]),
do: []
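# Example (hypothetical tokens): adjacent tokens with the same tag and meta
# are collapsed into one:
#   merge([{:text, %{}, "foo"}, {:text, %{}, "bar"}])  # => [{:text, %{}, "foobar"}]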
end
|
lib/makeup/lexer.ex
| 0.788502
| 0.421552
|
lexer.ex
|
starcoder
|
defmodule Adap.Piper do
@moduledoc ~S"""
Piper proposes an implementation of `Adap.Stream.Emitter` where
the distributed processing of each element is defined as a
succession of matching rules.
Each rule can use external data to process the element or emit new
ones. When external data is needed, a process is spawned on the node
containing it; this process receives the element and continues to apply rules.
The principle is to make each element hop from node to node in
order to be processed using the locally present data.
The element will go to the stream sink when no more rule matches.
The `Adap.Stream` stream data by chunk, so that the
construction of the external state server can take as much time as
necessary without congestion: never more than the chunk size number
of elements will be queued.
Let's see a processing pipe example:
- the input is a product stream : stream of `{:product,%{field1: value1, field2: value2}}`
- `user@jsonserver1` contains a json file "/color.json" containing a COLOR mapping
- `user@jsonserver2` contains a json file "/size.json" containing a SIZE mapping
- you want to map product color and size according to these mappings
- you want to add a field "deleted" when the mapped color is red
This can be implemented using:
iex> Adap.Piper.defpipe ColorPipe, [{ColorPipe.Rules,[]}]
iex> defmodule JSONMap do
iex> use Adap.StateServer, ttl: 1_000
iex> def init(mapping) do
iex> {:ok,File.read!("/#{mapping}.json") |> JSON.decode!}
iex> end
iex> def node("color") do :"user@jsonserver1" end
iex> def node("size") do :"user@jsonserver2" end
iex> end
iex> defmodule ColorPipe.Rules do
iex> use Adap.Piper, for: :product
iex> defrule map_color(%{color: color}=prod,_) do
iex> {JSONMap,"color"},color_map->
iex> %{prod| color: color_map[color]}
iex> end
iex> defrule map_size(%{size: size}=prod,_) do
iex> {JSONMap,"size"},size_map->
iex> %{prod| size: size_map[size]}
iex> end
iex> defrule red_is_deleted(%{color: "red"}=prod,_) do
iex> Dict.put(prod,:deleted,true)
iex> end
iex> end
iex> [
iex> {:product,%{gender: "male", category: "ipad"}},
iex> {:product,%{color: "carmine", category: "shirt"}},
iex> {:product,%{color: "periwinkle", size: "xxl"}}
iex> ] |> Adap.Stream.new(ColorPipe) |> Enum.to_list
[{:product,%{gender: "male", category: "ipad"}},
{:product,%{color: "red", category: "shirt", deleted: true}},
{:product,%{color: "blue", size: "large"}}]
"""
@doc false
def next(type, elem, [{next, args} | nexts], sink) do
next.pipe(type, elem, args, nexts, sink)
end
def next(type, elem, [], sink) do
Adap.Stream.done(sink, {type, elem})
end
@doc false
def wrap_result(sink, {:emit, elems}, prev_elem, prev_state) do
Adap.Stream.emit(sink, elems)
{prev_elem, prev_state}
end
def wrap_result(sink, {:emit, elems, elem}, _prev_elem, prev_state) do
Adap.Stream.emit(sink, elems)
{elem, prev_state}
end
def wrap_result(sink, {:emit, elems, elem, state}, _prev_elem, _prev_state) do
Adap.Stream.emit(sink, elems)
{elem, state}
end
def wrap_result(_sink, {:newstate, state}, prev_elem, _prev_state) do
{prev_elem, state}
end
def wrap_result(_sink, {:newstate, state, elem}, _prev_elem, _prev_state) do
{elem, state}
end
def wrap_result(_sink, elem, _prev_elem, prev_state) do
{elem, prev_state}
end
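# Summary of the rule return values handled by the clauses above:
#   {:emit, elems}               - emit extra elements; keep element and state
#   {:emit, elems, elem}         - emit extra elements and replace the element
#   {:emit, elems, elem, state}  - emit, replace the element and the state
#   {:newstate, state}           - replace the pipe state only
#   {:newstate, state, elem}     - replace both the state and the element
#   any other value              - becomes the new element; state is unchanged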
defmacro defpipe(alias, pipers) do
quote do
defmodule unquote(alias) do
use Adap.Stream.Emitter
def do_emit(sink, {type, elem}) do
Adap.Piper.next(type, elem, unquote(pipers), sink)
end
end
end
end
defmacro __using__(opts) do
quote do
import Adap.Piper
@behaviour Adap.Piper
@rules []
@rules_for unquote(opts[:for])
@before_compile Adap.Piper
def pipe(type, elem, args, nexts, sink) do
{elem, pipe_state} = init(elem, args)
pipe(type, init_apply_map(), elem, pipe_state, nexts, sink)
end
def init(e, arg), do: {e, arg}
defoverridable [init: 2]
end
end
@callback init(elem :: term,args :: term) :: {elem :: term,pipe_state :: term}
defmacro __before_compile__(_env) do # injected at the end of the using module (after parsing, before compilation)
quote do
def pipe(type,_apply_map,elem,_pipe_state,nexts,sink) do
Adap.Piper.next(type,elem,nexts,sink)
end
def init_apply_map do
(@rules |> Enum.map(&{&1, false}) |> Enum.into(%{}))
end
end
end
defmacro defrule(sig,blocks) do
{name, [elem_q, pipestate_q], guards_q} = sig_normalizer(sig)
quote do
@rules [unquote(name) | @rules]
def pipe(@rules_for, %{unquote(name) => false}=apply_map, unquote(elem_q)=prev_elem, unquote(pipestate_q)=prev_state, nexts, sink) when unquote(guards_q) do
unquote(rule_body(blocks, name))
end
end
end
defp sig_normalizer({:when ,_, [{name, _, params}, guards]}), do: {name, params, guards}
defp sig_normalizer({name, _, params}), do: {name, params, true}
defp rule_body([do: [{:->, _, [[server_spec | args], body]}]], name) do
quote do
Adap.Unit.Router.cast(unquote(server_spec), fn unquote_splicing(args) ->
spawn(fn ->
{elem, state} = Adap.Piper.wrap_result(sink, unquote(body), prev_elem, prev_state)
pipe(@rules_for, %{apply_map | unquote(name) => true}, elem, state, nexts, sink)
end)
end)
end
end
defp rule_body([do: body], name) do
quote do
{elem, state} = Adap.Piper.wrap_result(sink, unquote(body), prev_elem, prev_state)
pipe(@rules_for, %{apply_map | unquote(name) => true}, elem, state, nexts, sink)
end
end
end
|
lib/piper.ex
| 0.773516
| 0.562537
|
piper.ex
|
starcoder
|
defmodule Timex.Parse.Timezones.Posix do
@moduledoc """
Parses POSIX-style timezones:
## Format
POSIX-style timezones are of the format: `local_timezone,date/time,date/time`
Where `date` is in the `Mm.n.d` format, and where:
- `Mm` (1-12) for 12 months
- `n` (1-5) 1 for the first week and 5 for the last week in the month
- `d` (0-6) 0 for Sunday and 6 for Saturday
## Example
TZ = `CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00`
This represents a change to daylight saving time at 2:00 AM on the second Sunday
in March, a change back at 2:00 AM on the first Sunday in November, and a constant
6-hour offset from GMT throughout the year. The breakdown of the string is:
- `CST6CDT` is the timezone name
- `CST` is the standard abbreviation
- `6` is the hours of time difference from GMT
- `CDT` is the DST abbreviation
- `,M3` is the third month
- `.2` is the second occurrence of the day in the month
- `.0` is Sunday
- `/2:00:00` is the time
- `,M11` is the eleventh month
- `.1` is the first occurrence of the day in the month
- `.0` is Sunday
- `/2:00:00` is the time
"""
defmodule PosixTimezone do
@doc"""
## Spec
## dst_start/dst_end
- `month`: 1-12
- `week`: week of the month
- `day_of_week`: 0-6, 0 is Sunday
- `time`: {hour, minute, second}
"""
defstruct name: "",
std_name: "",
dst_name: "",
diff: 0,
dst_start: nil,
dst_end: nil
end
alias PosixTimezone, as: TZ
# Start parsing provided zone name
def parse(tz) when is_binary(tz) do
case parse_posix(tz, :std_name, %TZ{:diff => "0"}) do
{:ok, %TZ{:std_name => std, :dst_name => dst, :diff => diff} = res} ->
{:ok, %{res | :name => "#{std}#{diff}#{dst}"}}
{:error, _} = err ->
err
{:error, :invalid_time, :dst_start} ->
{:error, :invalid_dst_start_time}
{:error, :invalid_time, :dst_end} ->
{:error, :invalid_dst_end_time}
end
end
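# Example, using the string from the module doc:
#   parse("CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00")
#   # => {:ok, %PosixTimezone{name: "CST6CDT", std_name: "CST", diff: 6,
#   #      dst_name: "CDT",
#   #      dst_start: %{month: 3, week: 2, day_of_week: 0, time: {2, 0, 0}},
#   #      dst_end: %{month: 11, week: 1, day_of_week: 0, time: {2, 0, 0}}}}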
# Alpha character for standard name
defp parse_posix(<<c::utf8, rest::binary>>, :std_name, %TZ{:std_name => acc} = result) when c in ?A..?Z do
parse_posix(rest, :std_name, %{result | :std_name => <<acc::binary, c::utf8>>})
end
# Transition from standard name to diff from UTC
defp parse_posix(<<c::utf8, rest::binary>>, :std_name, %TZ{:diff => acc} = result) when c in ?0..?9 do
parse_posix(rest, :diff, %{result | :diff => <<acc::binary, c::utf8>>})
end
# Digit for diff from UTC
defp parse_posix(<<c::utf8, rest::binary>>, :diff, %TZ{:diff => acc} = result) when c in ?0..?9 do
parse_posix(rest, :diff, %{result | :diff => <<acc::binary, c::utf8>>})
end
# Transition from diff to DST name
defp parse_posix(<<c::utf8, rest::binary>>, :diff, %TZ{:diff => diff, :dst_name => acc} = result) when c in ?A..?Z do
# Convert diff to integer value
parse_posix(rest, :dst_name, %{result | :diff => String.to_integer(diff), :dst_name => <<acc::binary, c::utf8>>})
end
# Alpha character for DST name
defp parse_posix(<<c::utf8, rest::binary>>, :dst_name, %{:dst_name => acc} = result) when c in ?A..?Z do
parse_posix(rest, :dst_name, %{result | :dst_name => <<acc::binary, c::utf8>>})
end
# Times
defp parse_posix(<<?,, ?M, ?1, c::utf8, rest::binary>>, :dst_name, result) when c in ?0..?2 do
start = %{month: String.to_integer(<<?1, c::utf8>>), week: nil, day_of_week: nil, time: nil}
parse_week(rest, :dst_start, %{result | :dst_start => start})
end
defp parse_posix(<<?,, ?M, ?1, c::utf8, rest::binary>>, :dst_start, result) when c in ?0..?2 do
new_end = %{month: String.to_integer(<<?1, c::utf8>>), week: nil, day_of_week: nil, time: nil}
parse_week(rest, :dst_end, %{result | :dst_end => new_end})
end
defp parse_posix(<<?,, ?M, c::utf8, rest::binary>>, :dst_name, result) when c in ?1..?9 do
start = %{month: String.to_integer(<<c::utf8>>), week: nil, day_of_week: nil, time: nil}
parse_week(rest, :dst_start, %{result | :dst_start => start})
end
defp parse_posix(<<?,, ?M, c::utf8, rest::binary>>, :dst_start, result) when c in ?1..?9 do
new_end = %{month: String.to_integer(<<c::utf8>>), week: nil, day_of_week: nil, time: nil}
parse_week(rest, :dst_end, %{result | :dst_end => new_end})
end
# Reached end of input with all parts parsed
defp parse_posix(<<>>, :dst_name, result), do: {:ok, result}
defp parse_posix(<<>>, :dst_end, result), do: {:ok, result}
# Invalid character for current state
defp parse_posix(<<_c::utf8, _rest::binary>>, _, _result), do: {:error, :not_posix}
# Empty before all parts are processed
defp parse_posix(<<>>, _, _result), do: {:error, :not_posix}
defp parse_week(<<?., c::utf8, rest::binary>>, :dst_start, %{:dst_start => start} = result) when c in ?1..?5 do
new_start = %{start | :week => String.to_integer(<<c::utf8>>)}
parse_weekday(rest, :dst_start, %{result | :dst_start => new_start})
end
defp parse_week(<<?., c::utf8, rest::binary>>, :dst_end, %{:dst_end => dst_end} = result) when c in ?1..?5 do
new_end = %{dst_end | :week => String.to_integer(<<c::utf8>>)}
parse_weekday(rest, :dst_end, %{result | :dst_end => new_end})
end
defp parse_week(_rest, state, _result), do: {:error, :"invalid_#{state}_week"}
defp parse_weekday(<<?., c::utf8, rest::binary>>, :dst_start, %{:dst_start => start} = result) when c in ?0..?6 do
new_start = %{start | :day_of_week => String.to_integer(<<c::utf8>>)}
parse_time(rest, :dst_start, %{result | :dst_start => new_start})
end
defp parse_weekday(<<?., c::utf8, rest::binary>>, :dst_end, %{:dst_end => dst_end} = result) when c in ?0..?6 do
new_end = %{dst_end | :day_of_week => String.to_integer(<<c::utf8>>)}
parse_time(rest, :dst_end, %{result | :dst_end => new_end})
end
defp parse_weekday(_rest, state, _result), do: {:error, :"invalid_#{state}_weekday"}
defp parse_time(<<?/, h1::utf8, h2::utf8, ?:, m1::utf8, m2::utf8, ?:, s1::utf8, s2::utf8, rest::binary>>, state, result)
when h1 in ?0..?9 and h2 in ?0..?9 and m1 in ?0..?9 and m2 in ?0..?9 and s1 in ?0..?9 and s2 in ?0..?9 do
parse_time(<<h1::utf8, h2::utf8>>, <<m1::utf8, m2::utf8>>, <<s1::utf8, s2::utf8>>, rest, state, result)
end
defp parse_time(<<?/, h::utf8, ?:, m1::utf8, m2::utf8, ?:, s1::utf8, s2::utf8, rest::binary>>, state, result)
when h in ?1..?9 and m1 in ?0..?9 and m2 in ?0..?9 and s1 in ?0..?9 and s2 in ?0..?9 do
parse_time(<<h::utf8>>, <<m1::utf8, m2::utf8>>, <<s1::utf8, s2::utf8>>, rest, state, result)
end
defp parse_time(_rest, _state, _result), do: {:error, :not_posix}
defp parse_time(hs, ms, ss, rest, state, result) do
hour = String.to_integer(hs)
mins = String.to_integer(ms)
secs = String.to_integer(ss)
case {hour, mins, secs} do
{h,m,s} when h > 0 and h < 25 and m >= 0 and m < 60 and s >= 0 and s < 60 ->
case state do
:dst_start ->
new_start = %{result.dst_start | :time => {h,m,s}}
parse_posix(rest, :dst_start, %{result | :dst_start => new_start})
:dst_end ->
new_end = %{result.dst_end | :time => {h,m,s}}
parse_posix(rest, :dst_end, %{result | :dst_end => new_end})
end
_ ->
{:error, :invalid_time, state}
end
end
end
|
lib/parse/posix/parser.ex
| 0.808635
| 0.700056
|
parser.ex
|
starcoder
|
defmodule Enumancer do
@moduledoc """
Macros to effortlessly define highly optimized `Enum` pipelines
## Overview
`Enumancer` provides a `defenum/2` macro, which will convert a pipeline of `Enum`
function calls to an optimized tail-recursive function.
defmodule BlazingFast do
import Enumancer
defenum sum_squares(numbers) do
numbers
|> map(& &1 * &1)
|> sum()
end
end
1..10_000_000 |> BlazingFast.sum_squares() # very fast
1..10_000_000 |> Enum.map(& &1 * &1) |> Enum.sum() # super slow
1..10_000_000 |> Stream.map(& &1 * &1) |> Enum.sum() # super slow
There is no need to add the `Enum.` prefix; `map/2` will be interpreted as `Enum.map/2`
within `defenum/2`.
In order to see the actual functions that are being generated, you can just
replace `defenum/2` by `defenum_explain/2` and the code will be printed in the
console.
The `defenum_explain/2` approach can be useful if you don't want to take the risk of
using `Enumancer` and macros in your production code, but it can inspire the
implementation of your optimized recursive functions.
## Available functions
Most functions taking an `Enumerable` and returning a list can be used anywhere
in the pipeline (e.g. `map/2`, `filter/2`, `with_index/2`...).
On the other hand, functions taking an `Enumerable` and returning some non-list
accumulator (e.g. `sum/1`, `join/2`, `max/1`...) can only be used at the end of
the pipeline. There are other cases like `sort/1` or `reduce/1` which cannot work
without the full list and are also limited to the end of the pipeline.
Functions that need to stop without reducing the `Enumerable` completely, such as
`take/2` or `any?/1`, are not available at this point, but might be implemented in the future.
Also, please note that many functions from the `Enum` module accept optional
callbacks to add an extra map or filter step.
By design, `Enumancer` does **not** implement these.
For a very simple reason: the available primitives can be combined at will to
reproduce them, without any runtime overhead.
See examples below:
### Replacing some "composed" functions
- Instead of `|> map_join("-", fun)`, just use `|> map(fun) |> join("-")`
- Instead of `|> map_intersperse(sep, fun)`, just use `|> map(fun) |> intersperse(sep)`
- Instead of `|> count(&has_valid_foo?/1)`, just use `|> filter(&has_valid_foo?/1) |> count()`
- Instead of `|> with_index(fn x, i -> foo(x, i) end)`, just use `|> with_index() |> map(fn {x, i} -> foo(x, i) end)`
- Instead of `|> Map.new(fn x -> {x.k, x.v} end)`, just use `|> map(fn x -> {x.k, x.v} end) |> Map.new()`
### Anywhere in the pipeline
- `Enum.map/2`
- `Enum.filter/2`
- `Enum.reject/2`
- `Enum.with_index/1`
- `Enum.with_index/2` (only accepts integer `offset`)
- `Enum.uniq/1`
- `Enum.uniq_by/2`
- `Enum.dedup/1`
- `Enum.dedup_by/2`
- `Enum.scan/2`
- `Enum.map_reduce/3` + `hd/1` (not plain `Enum.map_reduce/3`; see explanation below)
`|> map_reduce(acc, fun)` by itself returns a tuple and cannot be piped any further.
But `|> map_reduce(acc, fun) |> hd()` can be piped if you only need the mapped list.
### Only at the end of the pipeline
- `Enum.reduce/2`
- `Enum.reduce/3`
- `Enum.max/1`
- `Enum.max/2` (only with a `module` argument)
- `Enum.min/1`
- `Enum.min/2` (only with a `module` argument)
- `Enum.count/1`
- `Enum.sum/1`
- `Enum.product/1`
- `Enum.reverse/1`
- `Enum.join/1`
- `Enum.join/2`
- `Enum.intersperse/2`
- `Enum.sort/1`
- `Enum.sort/2`
- `Enum.sort_by/2`
- `Enum.sort_by/3`
- `Enum.map_reduce/3` (without being followed by `|> hd()`)
- `Enum.frequencies/1`
- `Enum.frequencies_by/2`
- `Enum.group_by/2`
- `Map.new/1`
- `MapSet.new/1`
"""
@doc """
A macro transforming a pipeline of `Enum` transformations to an optimized
recursive function at compile time.
See `Enumancer` documentation for available functions.
## Examples
defmodule BlazingFast do
import Enumancer
defenum sum_odd_squares(numbers) do
numbers
|> filter(&rem(&1, 2) == 1)
|> map(& &1 * &1)
|> sum()
end
end
"""
defmacro defenum(head, do: body) do
do_defenum(head, body, __CALLER__)
end
@doc """
Same as `defenum/2`, but will print the generated code in the console.
Useful for debug or learning purpose.
"""
defmacro defenum_explain(head, do: body) do
ast = do_defenum(head, body, __CALLER__)
Macro.to_string(ast) |> IO.puts()
ast
end
defp do_defenum(head, body, caller) do
{fun_name, args, guards} = parse_fun_head(head)
[{enum_arg_name, _, nil} | rest_args] = args
enum_fun_name = :"do_#{fun_name}_enum"
{spec, extra_args_spec} = parse_body(body, enum_arg_name, caller, [], [])
{extra_args, extra_initial} = Enum.unzip(extra_args_spec)
spec_last = List.last(spec)
acc_value = initial_acc(spec_last)
vars = %{
rec_fun_name: :"do_#{fun_name}_list",
head: Macro.unique_var(:head, nil),
tail: Macro.unique_var(:tail, nil),
acc: Macro.unique_var(:acc, nil),
rest_args: rest_args,
extra_args: extra_args
}
main_body =
quote do
unquote(vars.acc) =
case to_list_if_efficient(unquote(hd(args))) do
list when is_list(list) ->
unquote(vars.rec_fun_name)(
list,
unquote_splicing(rest_args),
unquote_splicing(extra_initial),
unquote(acc_value)
)
_ ->
unquote(enum_fun_name)(unquote_splicing(args))
end
unquote(wrap_result(spec_last, vars.acc))
end
quote do
unquote(def_main(fun_name, args, guards, main_body))
defp unquote(vars.rec_fun_name)(
[],
unquote_splicing(wildcards(vars.rest_args)),
unquote_splicing(wildcards(vars.extra_args)),
acc
) do
acc
end
defp unquote(vars.rec_fun_name)(
[unquote(vars.head) | unquote(vars.tail)],
unquote_splicing(vars.rest_args),
unquote_splicing(vars.extra_args),
unquote(vars.acc)
) do
unquote(define_next_acc(spec, vars))
unquote(vars.rec_fun_name)(
unquote(vars.tail),
unquote_splicing(vars.rest_args),
unquote_splicing(vars.extra_args),
unquote(vars.acc)
)
end
defp unquote(enum_fun_name)(enum, unquote_splicing(vars.rest_args)) do
unquote(to_tuple_if_extras(vars.acc, wildcards(vars.extra_args))) =
Enum.reduce(
enum,
unquote(to_tuple_if_extras(acc_value, extra_initial)),
fn unquote(vars.head), unquote(composite_acc(vars)) ->
unquote(define_next_acc(spec, vars))
unquote(composite_acc(vars))
end
)
unquote(vars.acc)
end
end
end
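# Rough shape of the generated code for `defenum sum_squares(numbers)` from the
# module doc (simplified sketch; the real output, visible via defenum_explain/2,
# also threads extra accumulators and rest arguments):
#
#   def sum_squares(numbers) do
#     case to_list_if_efficient(numbers) do
#       list when is_list(list) -> do_sum_squares_list(list, 0)
#       _ -> do_sum_squares_enum(numbers)
#     end
#   end
#
#   defp do_sum_squares_list([], acc), do: acc
#   defp do_sum_squares_list([head | tail], acc) do
#     head = head * head
#     do_sum_squares_list(tail, head + acc)
#   end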
defp to_tuple_if_extras(ast, []), do: ast
defp to_tuple_if_extras(ast, [_ | _] = asts), do: {:{}, [], [ast | asts]}
defp composite_acc(vars) do
to_tuple_if_extras(vars.acc, vars.extra_args)
end
@dialyzer :no_opaque
@doc false
def to_list_if_efficient(enum)
def to_list_if_efficient(list) when is_list(list), do: list
def to_list_if_efficient(map) when is_map(map) and not is_struct(map), do: Map.to_list(map)
def to_list_if_efficient(map_set = %MapSet{}), do: MapSet.to_list(map_set)
def to_list_if_efficient(enum), do: enum
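# Only enumerables with a cheap list representation are converted; anything
# else falls through to the Enum.reduce-based path:
#   to_list_if_efficient([1, 2])             # => [1, 2]
#   to_list_if_efficient(%{a: 1})            # => [a: 1]
#   to_list_if_efficient(MapSet.new([1]))    # => [1]
#   to_list_if_efficient(1..3)               # => 1..3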
defp wildcards(args) do
for _ <- args, do: Macro.var(:_, nil)
end
defp def_main(fun_name, args, _guards = nil, body) do
quote do
def unquote(fun_name)(unquote_splicing(args)) do
unquote(body)
end
end
end
defp def_main(fun_name, args, {:guards, guards}, body) do
quote do
def unquote(fun_name)(unquote_splicing(args)) when unquote(guards) do
unquote(body)
end
end
end
defp parse_fun_head({:when, _, [{fun_name, _ctx, args}, guards]}) do
{fun_name, args, {:guards, guards}}
end
defp parse_fun_head({fun_name, _ctx, args}) do
{fun_name, args, nil}
end
defp parse_body({enum_arg_name, _, nil}, enum_arg_name, _caller, acc, extra_args) do
{acc, extra_args}
end
defp parse_body({:|>, _, _} = pipe, enum_arg_name, caller, acc, extra_args) do
Macro.expand_once(pipe, caller) |> parse_body(enum_arg_name, caller, acc, extra_args)
end
defp parse_body(
{:hd, ctx, [{:map_reduce, _, args}]},
enum_arg_name,
caller,
acc,
extra_args
)
when is_list(args) do
parse_body({:map_reduce_no_acc, ctx, args}, enum_arg_name, caller, acc, extra_args)
end
defp parse_body(
{fun_name, _, [enum | rest_args] = args},
enum_arg_name,
caller,
acc,
extra_args
)
when is_list(args) do
case {parse_call(fun_name, rest_args), acc} do
{{:last_only, _parsed}, [_ | _]} ->
raise "#{fun_name}/#{length(args)} must be the final call in defenum"
{{_, parsed}, _} ->
parse_body(enum, enum_arg_name, caller, [parsed | acc], extra_args)
{{:extra, parsed, extra_arg}, _} ->
parse_body(enum, enum_arg_name, caller, [parsed | acc], [extra_arg | extra_args])
end
end
defp parse_call(:map, [fun]) do
{:anywhere, {:map, fun}}
end
defp parse_call(:filter, [fun]) do
{:anywhere, {:filter, fun}}
end
defp parse_call(:reject, [fun]) do
{:anywhere, {:reject, fun}}
end
defp parse_call(:uniq, []) do
uniq_acc = Macro.unique_var(:uniq_acc, nil)
{:extra, {:uniq, uniq_acc}, {uniq_acc, Macro.escape(%{})}}
end
defp parse_call(:uniq_by, [fun]) do
uniq_acc = Macro.unique_var(:uniq_acc, nil)
{:extra, {:uniq_by, uniq_acc, fun}, {uniq_acc, Macro.escape(%{})}}
end
defp parse_call(:dedup, []) do
last = Macro.unique_var(:last, nil)
{:extra, {:dedup, last}, {last, :__ENUMANCER_RESERVED__}}
end
defp parse_call(:dedup_by, [fun]) do
last = Macro.unique_var(:last, nil)
{:extra, {:dedup_by, last, fun}, {last, :__ENUMANCER_RESERVED__}}
end
defp parse_call(:with_index, []) do
parse_call(:with_index, [0])
end
defp parse_call(:with_index, [offset]) do
index = Macro.unique_var(:index, nil)
{:extra, {:with_index, index}, {index, offset}}
end
defp parse_call(:drop, [count]) do
index = Macro.unique_var(:index, nil)
initial_ast =
quote do
case unquote(count) do
count when is_integer(count) and count >= 0 -> 0
end
end
{:extra, {:drop, index, count}, {index, initial_ast}}
end
defp parse_call(:scan, [initial, fun]) do
scan_acc = Macro.unique_var(:scan_acc, nil)
{:extra, {:scan, scan_acc, fun}, {scan_acc, initial}}
end
defp parse_call(:map_reduce_no_acc, [initial, fun]) do
mr_acc = Macro.unique_var(:mr_acc, nil)
{:extra, {:map_reduce_no_acc, mr_acc, fun}, {mr_acc, initial}}
end
defp parse_call(:max, []) do
max_ast =
quote do
fn
x, acc when acc >= x -> acc
x, acc -> x
end
end
{:last_only, {:reduce, max_ast}}
end
defp parse_call(:max, [module_ast = {:__aliases__, _, _}]) do
max_ast =
quote do
fn x, acc ->
case unquote(module_ast).compare(acc, x) do
:lt -> x
_ -> acc
end
end
end
{:last_only, {:reduce, max_ast}}
end
defp parse_call(:min, []) do
max_ast =
quote do
fn
x, acc when acc <= x -> acc
x, acc -> x
end
end
{:last_only, {:reduce, max_ast}}
end
defp parse_call(:min, [module_ast = {:__aliases__, _, _}]) do
max_ast =
quote do
fn x, acc ->
case unquote(module_ast).compare(acc, x) do
:gt -> x
_ -> acc
end
end
end
{:last_only, {:reduce, max_ast}}
end
defp parse_call(:reduce, [fun]) do
{:last_only, {:reduce, fun}}
end
defp parse_call(:reduce, [acc, fun]) do
{:last_only, {:reduce, acc, fun}}
end
defp parse_call(:map_reduce, [acc, fun]) do
{:last_only, {:map_reduce, acc, fun}}
end
defp parse_call(:reverse, []) do
{:last_only, {:reverse, []}}
end
defp parse_call(:reverse, [acc]) do
{:last_only, {:reverse, acc}}
end
defp parse_call(:each, [fun]) do
{:last_only, {:each, fun}}
end
defp parse_call(:count, []) do
{:last_only, :count}
end
defp parse_call(:sum, []) do
{:last_only, :sum}
end
defp parse_call(:product, []) do
{:last_only, :product}
end
defp parse_call(:join, []) do
{:last_only, :join}
end
defp parse_call(:join, [joiner]) do
{:last_only, {:join, joiner}}
end
defp parse_call(:intersperse, [joiner]) do
{:last_only, {:intersperse, joiner}}
end
defp parse_call(:frequencies, []) do
{:last_only, :frequencies}
end
defp parse_call(:frequencies_by, [fun]) do
{:last_only, {:frequencies_by, fun}}
end
defp parse_call(:group_by, [fun]) do
{:last_only, {:group_by, fun}}
end
defp parse_call(:sort, []) do
{:last_only, :sort}
end
defp parse_call(:sort, [fun]) do
{:last_only, {:sort, fun}}
end
defp parse_call(:sort_by, [mapper]) do
{:last_only, {:sort_by, mapper, &<=/2}}
end
defp parse_call(:sort_by, [mapper, sorter]) do
{:last_only, {:sort_by, mapper, sorter}}
end
defp parse_call({:., _, [{:__aliases__, _, [:Map]}, :new]}, []) do
{:last_only, Map}
end
defp parse_call({:., _, [{:__aliases__, _, [:MapSet]}, :new]}, []) do
{:last_only, MapSet}
end
defp define_next_acc([{:map, fun} | rest], vars) do
quote do
unquote(vars.head) = unquote(fun).(unquote(vars.head))
unquote(define_next_acc(rest, vars))
end
end
defp define_next_acc([{:filter, fun} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
if unquote(fun).(unquote(vars.head)) do
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
else
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:reject, fun} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
if unquote(fun).(unquote(vars.head)) do
unquote(composite_acc(vars))
else
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:uniq, uniq_acc} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
case unquote(uniq_acc) do
%{^unquote(vars.head) => _} ->
unquote(composite_acc(vars))
_ ->
unquote(uniq_acc) = Map.put(unquote(uniq_acc), unquote(vars.head), [])
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:uniq_by, uniq_acc, fun} | rest], vars) do
quote do
key = unquote(fun).(unquote(vars.head))
unquote(composite_acc(vars)) =
case unquote(uniq_acc) do
%{^key => _} ->
unquote(composite_acc(vars))
_ ->
unquote(uniq_acc) = Map.put(unquote(uniq_acc), key, [])
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:dedup, last} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
case unquote(vars.head) do
^unquote(last) ->
unquote(composite_acc(vars))
_ ->
unquote(last) = unquote(vars.head)
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:dedup_by, last, fun} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
case unquote(fun).(unquote(vars.head)) do
^unquote(last) ->
unquote(composite_acc(vars))
new_last ->
unquote(last) = new_last
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:with_index, index} | rest], vars) do
quote do
unquote(vars.head) = {unquote(vars.head), unquote(index)}
unquote(index) = unquote(index) + 1
unquote(define_next_acc(rest, vars))
end
end
defp define_next_acc([{:drop, index, count} | rest], vars) do
quote do
unquote(composite_acc(vars)) =
case unquote(count) do
^unquote(index) ->
unquote(define_next_acc(rest, vars))
unquote(composite_acc(vars))
_ ->
unquote(index) = unquote(index) + 1
unquote(composite_acc(vars))
end
end
end
defp define_next_acc([{:scan, scan_acc, fun} | rest], vars) do
quote do
unquote(scan_acc) = unquote(fun).(unquote(vars.head), unquote(scan_acc))
unquote(vars.head) = unquote(scan_acc)
unquote(define_next_acc(rest, vars))
end
end
defp define_next_acc([{:map_reduce_no_acc, mr_acc, fun} | rest], vars) do
quote do
{unquote(vars.head), unquote(mr_acc)} = unquote(fun).(unquote(vars.head), unquote(mr_acc))
unquote(define_next_acc(rest, vars))
end
end
defp define_next_acc(spec, vars) do
quote do
unquote(vars.acc) = unquote(reduce_acc(spec, vars))
end
end
defp reduce_acc([], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([{:reduce, fun}], vars) do
quote do
case unquote(vars.acc) do
:__ENUMANCER_RESERVED__ ->
unquote(vars.head)
acc ->
unquote(fun).(unquote(vars.head), acc)
end
end
end
defp reduce_acc([{:reduce, _acc, fun}], vars) do
quote do
unquote(fun).(unquote(vars.head), unquote(vars.acc))
end
end
defp reduce_acc([{:map_reduce, _acc, fun}], vars) do
quote do
{values, acc} = unquote(vars.acc)
{value, new_acc} = unquote(fun).(unquote(vars.head), acc)
{[value | values], new_acc}
end
end
defp reduce_acc([{:reverse, _acc}], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([{:each, fun}], vars) do
quote do
unquote(fun).(unquote(vars.head))
:ok
end
end
defp reduce_acc([:count], vars) do
quote do
unquote(vars.acc) + 1
end
end
defp reduce_acc([:sum], vars) do
quote do
unquote(vars.head) + unquote(vars.acc)
end
end
defp reduce_acc([:product], vars) do
quote do
unquote(vars.head) * unquote(vars.acc)
end
end
defp reduce_acc([:join], vars) do
quote do
[unquote(vars.acc) | to_string(unquote(vars.head))]
end
end
defp reduce_acc([{:join, joiner}], vars) do
quote do
[unquote(joiner), to_string(unquote(vars.head)) | unquote(vars.acc)]
end
end
defp reduce_acc([{:intersperse, joiner}], vars) do
quote do
[unquote(joiner), unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([:frequencies], vars) do
quote do
key = unquote(vars.head)
value =
case unquote(vars.acc) do
%{^key => value} -> value
_ -> 0
end
Map.put(unquote(vars.acc), key, value + 1)
end
end
defp reduce_acc([{:frequencies_by, fun}], vars) do
quote do
key = unquote(fun).(unquote(vars.head))
value =
case unquote(vars.acc) do
%{^key => value} -> value
_ -> 0
end
Map.put(unquote(vars.acc), key, value + 1)
end
end
defp reduce_acc([{:group_by, fun}], vars) do
quote do
key = unquote(fun).(unquote(vars.head))
list =
case unquote(vars.acc) do
%{^key => list} -> list
_ -> []
end
Map.put(unquote(vars.acc), key, [unquote(vars.head) | list])
end
end
defp reduce_acc([:sort], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([{:sort, _}], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([{:sort_by, _, _}], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp reduce_acc([Map], vars) do
quote do
{key, value} = unquote(vars.head)
Map.put(unquote(vars.acc), key, value)
end
end
defp reduce_acc([MapSet], vars) do
quote do
[unquote(vars.head) | unquote(vars.acc)]
end
end
defp initial_acc(:count), do: 0
defp initial_acc(:sum), do: 0
defp initial_acc(:product), do: 1
defp initial_acc({:reduce, _fun}), do: :__ENUMANCER_RESERVED__
defp initial_acc({:reduce, acc, _fun}), do: acc
defp initial_acc({:map_reduce, acc, _fun}), do: {[], acc}
defp initial_acc({:reverse, acc}), do: acc
defp initial_acc({:each, _fun}), do: :ok
defp initial_acc(:join), do: ""
defp initial_acc({:join, _}), do: []
defp initial_acc(:frequencies), do: Macro.escape(%{})
defp initial_acc({:frequencies_by, _}), do: Macro.escape(%{})
defp initial_acc({:group_by, _}), do: Macro.escape(%{})
defp initial_acc(Map), do: Macro.escape(%{})
defp initial_acc(MapSet), do: []
defp initial_acc(_), do: []
defp wrap_result(:count, acc_ast), do: acc_ast
defp wrap_result(:sum, acc_ast), do: acc_ast
defp wrap_result(:product, acc_ast), do: acc_ast
defp wrap_result({:reduce, _}, acc_ast) do
quote do
case unquote(acc_ast) do
:__ENUMANCER_RESERVED__ -> raise Enum.EmptyError
acc -> acc
end
end
end
defp wrap_result({:reduce, _, _}, acc_ast), do: acc_ast
defp wrap_result({:map_reduce, _, _}, acc_ast) do
quote do
{list, acc} = unquote(acc_ast)
{:lists.reverse(list), acc}
end
end
defp wrap_result({:reverse, _}, acc_ast), do: acc_ast
defp wrap_result({:each, _}, _), do: :ok
defp wrap_result(:frequencies, acc_ast), do: acc_ast
defp wrap_result({:frequencies_by, _}, acc_ast), do: acc_ast
defp wrap_result({:group_by, _}, acc_ast), do: acc_ast
defp wrap_result(Map, acc_ast), do: acc_ast
defp wrap_result(MapSet, acc_ast) do
quote do
MapSet.new(unquote(acc_ast))
end
end
defp wrap_result(:sort, acc_ast) do
quote do
unquote(acc_ast) |> :lists.sort()
end
end
defp wrap_result({:sort, fun}, acc_ast) do
quote do
unquote(acc_ast) |> Enum.sort(unquote(fun))
end
end
defp wrap_result({:sort_by, mapper, sorter}, acc_ast) do
quote do
unquote(acc_ast) |> Enum.sort_by(unquote(mapper), unquote(sorter))
end
end
defp wrap_result(:join, acc_ast) do
quote do
unquote(acc_ast) |> IO.iodata_to_binary()
end
end
defp wrap_result({:join, _}, acc) do
quote do
case unquote(acc) do
[] -> ""
[_joiner | rest] -> :lists.reverse(rest) |> IO.iodata_to_binary()
end
end
end
defp wrap_result({:intersperse, _}, acc_ast) do
quote do
case unquote(acc_ast) do
[] -> []
[_joiner | rest] -> :lists.reverse(rest)
end
end
end
defp wrap_result(_, acc_ast) do
quote do
:lists.reverse(unquote(acc_ast))
end
end
end
|
lib/enumancer.ex
| 0.91204
| 0.70609
|
enumancer.ex
|
starcoder
|
defmodule EpicenterWeb.Test.Pages.DemographicsEdit do
import Euclid.Test.Extra.Assertions
import ExUnit.Assertions
import Phoenix.LiveViewTest
alias Epicenter.Cases.Person
alias Epicenter.Test
alias EpicenterWeb.Test.Pages
alias Phoenix.LiveViewTest.View
def visit(%Plug.Conn{} = conn, %Person{id: person_id}) do
conn |> Pages.visit("/people/#{person_id}/edit-demographics")
end
def assert_employment_selections(%View{} = view, expected_employment_statuses) do
assert Pages.actual_selections(view, "demographic-form-employment", "radio") == expected_employment_statuses
view
end
def assert_here(view_or_conn_or_html) do
view_or_conn_or_html |> Pages.assert_on_page("demographics-edit")
end
def assert_gender_identity_selections(%View{} = view, expected_selections) do
assert Pages.actual_selections(view, "demographic-form-gender-identity", ["checkbox", "radio"]) == expected_selections
view
end
def assert_gender_identity_other(%View{} = view, expected) do
view
|> Pages.parse()
|> Test.Html.attr("[data-role=demographic-form-gender-identity] input[type=text]", "value")
|> assert_eq([expected], returning: view)
end
def assert_major_ethnicity_selections(%View{} = view, expected_selections) do
assert Pages.actual_selections(view, "demographic-form-ethnicity", ["checkbox", "radio"]) == expected_selections
view
end
def assert_detailed_ethnicity_selections(%View{} = view, expected_selections) do
assert Pages.actual_selections(view, "demographic-form-ethnicity-hispanic-latinx-or-spanish-origin", ["checkbox", "radio"]) == expected_selections
view
end
def assert_major_ethnicity_selected(%View{} = view, expected_major_ethnicity) do
actual_selected_major_ethnicity =
view
|> Pages.actual_selections("demographic-form-ethnicity", "radio")
|> Enum.filter(fn {_, value} -> value end)
if actual_selected_major_ethnicity != [{expected_major_ethnicity, true}] do
actual_selected_major_ethnicity = actual_selected_major_ethnicity |> Enum.into([], &Kernel.elem(&1, 0))
"""
Expected to only find major ethnicity “#{expected_major_ethnicity}” selected, but found:
#{inspect(actual_selected_major_ethnicity)}
"""
|> flunk()
end
view
end
def assert_detailed_ethnicities_selected(%View{} = view, expected_detailed_ethnicities) do
expected_detailed_ethnicities_tuple = expected_detailed_ethnicities |> Enum.into([], &{&1, true})
actual_selected_detailed_ethnicities =
view |> Pages.actual_selections("detailed-ethnicity-label", "checkbox") |> Enum.filter(fn {_, value} -> value end)
if actual_selected_detailed_ethnicities != expected_detailed_ethnicities_tuple do
actual_selected_detailed_ethnicities = actual_selected_detailed_ethnicities |> Enum.into([], &Kernel.elem(&1, 0))
"""
Expected to find detailed ethnicities “#{inspect(expected_detailed_ethnicities)}” selected, but found:
#{inspect(actual_selected_detailed_ethnicities)}
"""
|> flunk()
end
view
end
def assert_marital_status_selection(%View{} = view, expected_marital_statuses) do
assert Pages.actual_selections(view, "demographic-form-marital-status", "radio") == expected_marital_statuses
view
end
def assert_notes(%View{} = view, expected_notes) do
assert view |> Pages.parse() |> Test.Html.text(~s|textarea[name="demographic_form[notes]"]|) |> String.trim_leading() == expected_notes
view
end
def assert_occupation(%View{} = view, occupation) do
assert view |> Pages.parse() |> Test.Html.attr(~s|input[name="demographic_form[occupation]"]|, "value") |> Euclid.Extra.List.first("") ==
occupation
view
end
def assert_race_selection(%View{} = view, expected_race) do
assert view |> Pages.actual_selections("demographic-form-race", ["radio", "checkbox"]) == expected_race
view
end
def assert_sex_at_birth_selection(%View{} = view, expected_sex_at_birth) do
view |> Pages.actual_selections("demographic-form-sex-at-birth", ["radio", "checkbox"]) |> assert_eq(expected_sex_at_birth)
view
end
def change_form(%View{} = view, person_params) do
view
|> form("#demographics-form", demographic_form: person_params)
|> render_change()
view
end
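# Typical chained usage in a LiveView test (hypothetical person and params):
#   conn
#   |> Pages.DemographicsEdit.visit(person)
#   |> Pages.DemographicsEdit.assert_here()
#   |> Pages.DemographicsEdit.change_form(%{"notes" => "updated"})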
end
|
test/support/pages/demographics_edit.ex
| 0.651909
| 0.690716
|
demographics_edit.ex
|
starcoder
|
defmodule Replug do
@moduledoc """
```
# ---- router.ex ----
plug Replug,
plug: Corsica,
opts: {MyAppWeb.PlugConfigs, :corsica}
# ---- plug_configs.ex ----
defmodule MyAppWeb.PlugConfigs do
def corsica do
[
max_age: System.get_env("CORSICA_MAX_AGE"),
expose_headers: ~w(X-Foo),
origins: System.get_env("VALID_ORIGINS")
]
end
end
```
"""
@behaviour Plug
@impl true
def init(opts) do
plug =
case Keyword.get(opts, :plug) do
nil ->
raise("Replug requires a :plug entry with a module or tuple value")
{plug_module, opts} when is_atom(plug_module) ->
{plug_module, opts}
plug_module when is_atom(plug_module) ->
{plug_module, :only_dynamic_opts}
end
%{
plug: plug,
opts: Keyword.get(opts, :opts) || raise("Replug requires an :opts entry")
}
end
@impl true
def call(conn, %{plug: {plug_module, :only_dynamic_opts}, opts: {opts_module, opts_function}}) do
opts =
opts_module
|> apply(opts_function, [])
|> plug_module.init()
plug_module.call(conn, opts)
end
def call(conn, %{plug: {plug_module, static_opts}, opts: {opts_module, opts_function}}) do
dynamic_opts = apply(opts_module, opts_function, [])
opts =
static_opts
|> merge_opts(dynamic_opts)
|> plug_module.init()
plug_module.call(conn, opts)
end
def call(conn, %{plug: {plug_module, :only_dynamic_opts}, opts: {opts_module, opts_function, opt_args}}) do
opts =
opts_module
|> apply(opts_function, [opt_args])
|> plug_module.init()
plug_module.call(conn, opts)
end
def call(conn, %{plug: {plug_module, static_opts}, opts: {opts_module, opts_function, opt_args}}) do
dynamic_opts = apply(opts_module, opts_function, [opt_args])
opts =
static_opts
|> merge_opts(dynamic_opts)
|> plug_module.init()
plug_module.call(conn, opts)
end
defp merge_opts(static_opts, dynamic_opts)
when is_list(static_opts) and is_list(dynamic_opts) do
Keyword.merge(static_opts, dynamic_opts)
end
defp merge_opts(static_opts, dynamic_opts) when is_map(static_opts) and is_map(dynamic_opts) do
Map.merge(static_opts, dynamic_opts)
end
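# Example (hypothetical opts): merge_opts([max_age: 600], [max_age: 300, origins: "*"])
# # => [max_age: 300, origins: "*"] (the dynamic opts win on key collisions)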
end
|
lib/replug.ex
| 0.647798
| 0.512327
|
replug.ex
|
starcoder
|
defmodule Tensorflow.DataClass do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
@type t ::
integer
| :DATA_CLASS_UNKNOWN
| :DATA_CLASS_SCALAR
| :DATA_CLASS_TENSOR
| :DATA_CLASS_BLOB_SEQUENCE
field(:DATA_CLASS_UNKNOWN, 0)
field(:DATA_CLASS_SCALAR, 1)
field(:DATA_CLASS_TENSOR, 2)
field(:DATA_CLASS_BLOB_SEQUENCE, 3)
end
defmodule Tensorflow.SummaryDescription do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
type_hint: String.t()
}
defstruct [:type_hint]
field(:type_hint, 1, type: :string)
end
defmodule Tensorflow.HistogramProto do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
min: float | :infinity | :negative_infinity | :nan,
max: float | :infinity | :negative_infinity | :nan,
num: float | :infinity | :negative_infinity | :nan,
sum: float | :infinity | :negative_infinity | :nan,
sum_squares: float | :infinity | :negative_infinity | :nan,
bucket_limit: [float | :infinity | :negative_infinity | :nan],
bucket: [float | :infinity | :negative_infinity | :nan]
}
defstruct [:min, :max, :num, :sum, :sum_squares, :bucket_limit, :bucket]
field(:min, 1, type: :double)
field(:max, 2, type: :double)
field(:num, 3, type: :double)
field(:sum, 4, type: :double)
field(:sum_squares, 5, type: :double)
field(:bucket_limit, 6, repeated: true, type: :double, packed: true)
field(:bucket, 7, repeated: true, type: :double, packed: true)
end
defmodule Tensorflow.SummaryMetadata.PluginData do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
plugin_name: String.t(),
content: binary
}
defstruct [:plugin_name, :content]
field(:plugin_name, 1, type: :string)
field(:content, 2, type: :bytes)
end
defmodule Tensorflow.SummaryMetadata do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
plugin_data: Tensorflow.SummaryMetadata.PluginData.t() | nil,
display_name: String.t(),
summary_description: String.t(),
data_class: Tensorflow.DataClass.t()
}
defstruct [:plugin_data, :display_name, :summary_description, :data_class]
field(:plugin_data, 1, type: Tensorflow.SummaryMetadata.PluginData)
field(:display_name, 2, type: :string)
field(:summary_description, 3, type: :string)
field(:data_class, 4, type: Tensorflow.DataClass, enum: true)
end
defmodule Tensorflow.Summary.Image do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
height: integer,
width: integer,
colorspace: integer,
encoded_image_string: binary
}
defstruct [:height, :width, :colorspace, :encoded_image_string]
field(:height, 1, type: :int32)
field(:width, 2, type: :int32)
field(:colorspace, 3, type: :int32)
field(:encoded_image_string, 4, type: :bytes)
end
defmodule Tensorflow.Summary.Audio do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
sample_rate: float | :infinity | :negative_infinity | :nan,
num_channels: integer,
length_frames: integer,
encoded_audio_string: binary,
content_type: String.t()
}
defstruct [
:sample_rate,
:num_channels,
:length_frames,
:encoded_audio_string,
:content_type
]
field(:sample_rate, 1, type: :float)
field(:num_channels, 2, type: :int64)
field(:length_frames, 3, type: :int64)
field(:encoded_audio_string, 4, type: :bytes)
field(:content_type, 5, type: :string)
end
defmodule Tensorflow.Summary.Value do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: {atom, any},
node_name: String.t(),
tag: String.t(),
metadata: Tensorflow.SummaryMetadata.t() | nil
}
defstruct [:value, :node_name, :tag, :metadata]
oneof(:value, 0)
field(:node_name, 7, type: :string)
field(:tag, 1, type: :string)
field(:metadata, 9, type: Tensorflow.SummaryMetadata)
field(:simple_value, 2, type: :float, oneof: 0)
field(:obsolete_old_style_histogram, 3, type: :bytes, oneof: 0)
field(:image, 4, type: Tensorflow.Summary.Image, oneof: 0)
field(:histo, 5, type: Tensorflow.HistogramProto, oneof: 0)
field(:audio, 6, type: Tensorflow.Summary.Audio, oneof: 0)
field(:tensor, 8, type: Tensorflow.TensorProto, oneof: 0)
end
defmodule Tensorflow.Summary do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
value: [Tensorflow.Summary.Value.t()]
}
defstruct [:value]
field(:value, 1, repeated: true, type: Tensorflow.Summary.Value)
end
|
lib/tensorflow/core/framework/summary.pb.ex
| 0.8288
| 0.590897
|
summary.pb.ex
|
starcoder
|
defmodule Money.Currency do
@moduledoc """
Functions to return lists of known, historic and
legal tender currencies.
"""
@current_currencies Cldr.Config.get_locale(Cldr.get_current_locale().cldr_locale_name)
|> Map.get(:currencies)
|> Enum.filter(fn {_code, currency} -> !is_nil(currency.iso_digits) end)
|> Enum.map(fn {code, _currency} -> code end)
|> Enum.sort()
@historic_currencies Cldr.Config.get_locale(Cldr.get_current_locale().cldr_locale_name)
|> Map.get(:currencies)
|> Enum.filter(fn {_code, currency} -> is_nil(currency.iso_digits) end)
|> Enum.map(fn {code, _currency} -> code end)
|> Enum.sort()
@tender_currencies Cldr.Config.get_locale(Cldr.get_current_locale().cldr_locale_name)
|> Map.get(:currencies)
|> Enum.filter(fn {_code, currency} -> currency.tender end)
|> Enum.map(fn {code, _currency} -> code end)
|> Enum.sort()
@doc """
Returns the list of currently active ISO 4217 currency codes.
## Example:
iex> Money.Currency.known_current_currencies
[:AED, :AFN, :ALL, :AMD, :ANG, :AOA, :ARS, :AUD, :AWG, :AZN, :BAM, :BBD, :BDT,
:BGN, :BHD, :BIF, :BMD, :BND, :BOB, :BOV, :BRL, :BSD, :BTN, :BWP, :BYN, :BZD,
:CAD, :CDF, :CHE, :CHF, :CHW, :CLF, :CLP, :CNY, :COP, :COU, :CRC, :CUC, :CUP,
:CVE, :CZK, :DJF, :DKK, :DOP, :DZD, :EGP, :ERN, :ETB, :EUR, :FJD, :FKP, :GBP,
:GEL, :GHS, :GIP, :GMD, :GNF, :GTQ, :GYD, :HKD, :HNL, :HRK, :HTG, :HUF, :IDR,
:ILS, :INR, :IQD, :IRR, :ISK, :JMD, :JOD, :JPY, :KES, :KGS, :KHR, :KMF, :KPW,
:KRW, :KWD, :KYD, :KZT, :LAK, :LBP, :LKR, :LRD, :LSL, :LYD, :MAD, :MDL, :MGA,
:MKD, :MMK, :MNT, :MOP, :MRU, :MUR, :MVR, :MWK, :MXN, :MXV, :MYR, :MZN, :NAD,
:NGN, :NIO, :NOK, :NPR, :NZD, :OMR, :PAB, :PEN, :PGK, :PHP, :PKR, :PLN, :PYG,
:QAR, :RON, :RSD, :RUB, :RWF, :SAR, :SBD, :SCR, :SDG, :SEK, :SGD, :SHP, :SLL,
:SOS, :SRD, :SSP, :STN, :SVC, :SYP, :SZL, :THB, :TJS, :TMT, :TND, :TOP, :TRY,
:TTD, :TWD, :TZS, :UAH, :UGX, :USD, :USN, :UYI, :UYU, :UZS, :VEF, :VND, :VUV,
:WST, :XAF, :XAG, :XAU, :XBA, :XBB, :XBC, :XBD, :XCD, :XDR, :XOF, :XPD, :XPF,
:XPT, :XSU, :XTS, :XUA, :XXX, :YER, :ZAR, :ZMW, :ZWL]
"""
def known_current_currencies do
@current_currencies
end
@doc """
Returns the list of historic ISO 4217 currency codes.
## Example:
iex> Money.Currency.known_historic_currencies
[:ADP, :AFA, :ALK, :AOK, :AON, :AOR, :ARA, :ARL, :ARM, :ARP, :ATS, :AZM, :BAD,
:BAN, :BEC, :BEF, :BEL, :BGL, :BGM, :BGO, :BOL, :BOP, :BRB, :BRC, :BRE, :BRN,
:BRR, :BRZ, :BUK, :BYB, :BYR, :CLE, :CNH, :CNX, :CSD, :CSK, :CYP, :DDM, :DEM,
:ECS, :ECV, :EEK, :ESA, :ESB, :ESP, :FIM, :FRF, :GEK, :GHC, :GNS, :GQE, :GRD,
:GWE, :GWP, :HRD, :IEP, :ILP, :ILR, :ISJ, :ITL, :KRH, :KRO, :LTL, :LTT, :LUC,
:LUF, :LUL, :LVL, :LVR, :MAF, :MCF, :MDC, :MGF, :MKN, :MLF, :MRO, :MTL, :MTP,
:MVP, :MXP, :MZE, :MZM, :NIC, :NLG, :PEI, :PES, :PLZ, :PTE, :RHD, :ROL, :RUR,
:SDD, :SDP, :SIT, :SKK, :SRG, :STD, :SUR, :TJR, :TMM, :TPE, :TRL, :UAK, :UGS,
:USS, :UYP, :VEB, :VNN, :XEU, :XFO, :XFU, :XRE, :YDD, :YUD, :YUM, :YUN, :YUR,
:ZAL, :ZMK, :ZRN, :ZRZ, :ZWD, :ZWR]
"""
def known_historic_currencies do
@historic_currencies
end
@doc """
Returns the list of legal tender ISO 4217 currency codes.
## Example:
iex> Money.Currency.known_tender_currencies
[:ADP, :AED, :AFA, :AFN, :ALK, :ALL, :AMD, :ANG, :AOA, :AOK, :AON, :AOR, :ARA,
:ARL, :ARM, :ARP, :ARS, :ATS, :AUD, :AWG, :AZM, :AZN, :BAD, :BAM, :BAN, :BBD,
:BDT, :BEC, :BEF, :BEL, :BGL, :BGM, :BGN, :BGO, :BHD, :BIF, :BMD, :BND, :BOB,
:BOL, :BOP, :BOV, :BRB, :BRC, :BRE, :BRL, :BRN, :BRR, :BRZ, :BSD, :BTN, :BUK,
:BWP, :BYB, :BYN, :BYR, :BZD, :CAD, :CDF, :CHE, :CHF, :CHW, :CLE, :CLF, :CLP,
:CNH, :CNX, :CNY, :COP, :COU, :CRC, :CSD, :CSK, :CUC, :CUP, :CVE, :CYP, :CZK,
:DDM, :DEM, :DJF, :DKK, :DOP, :DZD, :ECS, :ECV, :EEK, :EGP, :ERN, :ESA, :ESB,
:ESP, :ETB, :EUR, :FIM, :FJD, :FKP, :FRF, :GBP, :GEK, :GEL, :GHC, :GHS, :GIP,
:GMD, :GNF, :GNS, :GQE, :GRD, :GTQ, :GWE, :GWP, :GYD, :HKD, :HNL, :HRD, :HRK,
:HTG, :HUF, :IDR, :IEP, :ILP, :ILR, :ILS, :INR, :IQD, :IRR, :ISJ, :ISK, :ITL,
:JMD, :JOD, :JPY, :KES, :KGS, :KHR, :KMF, :KPW, :KRH, :KRO, :KRW, :KWD, :KYD,
:KZT, :LAK, :LBP, :LKR, :LRD, :LSL, :LTL, :LTT, :LUC, :LUF, :LUL, :LVL, :LVR,
:LYD, :MAD, :MAF, :MCF, :MDC, :MDL, :MGA, :MGF, :MKD, :MKN, :MLF, :MMK, :MNT,
:MOP, :MRO, :MRU, :MTL, :MTP, :MUR, :MVP, :MVR, :MWK, :MXN, :MXP, :MXV, :MYR,
:MZE, :MZM, :MZN, :NAD, :NGN, :NIC, :NIO, :NLG, :NOK, :NPR, :NZD, :OMR, :PAB,
:PEI, :PEN, :PES, :PGK, :PHP, :PKR, :PLN, :PLZ, :PTE, :PYG, :QAR, :RHD, :ROL,
:RON, :RSD, :RUB, :RUR, :RWF, :SAR, :SBD, :SCR, :SDD, :SDG, :SDP, :SEK, :SGD,
:SHP, :SIT, :SKK, :SLL, :SOS, :SRD, :SRG, :SSP, :STD, :STN, :SUR, :SVC, :SYP,
:SZL, :THB, :TJR, :TJS, :TMM, :TMT, :TND, :TOP, :TPE, :TRL, :TRY, :TTD, :TWD,
:TZS, :UAH, :UAK, :UGS, :UGX, :USD, :USN, :USS, :UYI, :UYP, :UYU, :UZS, :VEB,
:VEF, :VND, :VNN, :VUV, :WST, :XAF, :XAG, :XAU, :XBA, :XBB, :XBC, :XBD, :XCD,
:XDR, :XEU, :XFO, :XFU, :XOF, :XPD, :XPF, :XPT, :XRE, :XSU, :XTS, :XUA, :XXX,
:YDD, :YER, :YUD, :YUM, :YUN, :YUR, :ZAL, :ZAR, :ZMK, :ZMW, :ZRN, :ZRZ, :ZWD,
:ZWL, :ZWR]
"""
def known_tender_currencies do
@tender_currencies
end
end
|
lib/money/currency.ex
| 0.734024
| 0.600979
|
currency.ex
|
starcoder
|
defmodule Plymio.Ast.Form do
@moduledoc ~S"""
Utility Functions for Manipulating Asts (Quoted Forms)
Many functions return either `{:ok, value}` or `{:error, error}` where `error` will usually be an `ArgumentError`
"""
require Logger
@type error :: %ArgumentError{}
@doc ~S"""
Takes a maybe quoted value and returns the realised value as `{:ok, value}`.
Otherwise `{:error, error}` is returned.
Realisation in this context means extracting the underlying ("unquoted") value.
## Examples
iex> 1 |> maybe_form_realise
{:ok, 1}
iex> :atom |> maybe_form_realise
{:ok, :atom}
iex> "string" |> maybe_form_realise
{:ok, "string"}
iex> [1, :atom, "string"] |> maybe_form_realise
{:ok, [1, :atom, "string"]}
iex> {:x, 42} |> maybe_form_realise
{:ok, {:x, 42}}
iex> ast = {:x, 42} |> Macro.escape
...> ast |> maybe_form_realise
{:ok, {:x, 42}}
iex> %{a: 1, b: 2, c: 3} |> maybe_form_realise
{:ok, %{a: 1, b: 2, c: 3}}
iex> ast = %{a: 1, b: 2, c: 3} |> Macro.escape
...> ast |> maybe_form_realise
{:ok, %{a: 1, b: 2, c: 3}}
iex> fun = fn x -> x + 5 end
...> {:ok, fun} = fun |> maybe_form_realise
...> 42 |> fun.()
47
iex> ast = "fn x -> x + 5 end" |> Code.string_to_quoted!
...> {:ok, fun} = ast |> maybe_form_realise
...> 42 |> fun.()
47
A map's keys and values are recursively realised:
iex> ast = %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_form_realise
{:ok, %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}}
The elements of a tuple are recursively realised:
iex> ast = [{:x, 2, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}}] |> Macro.escape
iex> ast |> maybe_form_realise
{:ok, [{:x, 2, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}}]}
The elements of a list are recursively realised:
iex> ast = [{:x,:y,:z}, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}] |> Macro.escape
iex> ast |> maybe_form_realise
{:ok, [{:x,:y,:z}, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}]}
"""
@spec maybe_form_realise(any) :: {:ok, any} | {:error, error}
def maybe_form_realise(value)
def maybe_form_realise(value)
when is_atom(value)
or is_bitstring(value)
or is_boolean(value)
or is_function(value)
or is_number(value)
or is_pid(value)
or is_port(value)
or is_reference(value) do
{:ok, value}
end
# list with maybe quoted elements
def maybe_form_realise(value) when is_list(value) do
value
|> Enum.reduce_while([], fn
v, values ->
case v |> maybe_form_realise do
{:ok, value} -> {:cont, [value | values]}
{:error, _} = result -> {:halt, result}
end
end)
|> case do
{:error, _} = result -> result
values -> {:ok, values |> Enum.reverse}
end
end
# map with maybe quoted keys and/or values
def maybe_form_realise(value) when is_map(value) do
value
|> Enum.flat_map(fn{k,v} -> [k,v] end)
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, values} ->
{:ok, values |> Enum.chunk_every(2) |> Map.new(fn [k,v] -> {k,v} end)}
end
end
# quoted module attribute - leave alone
def maybe_form_realise({:@, _, [{attr_name, _, _}]} = value) when is_atom(attr_name) do
    {:ok, value}
end
# quoted map
def maybe_form_realise({:%{}, _, args} = _value) do
args
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, values} -> {:ok, values |> Enum.into(%{})}
end
end
# quoted tuple
def maybe_form_realise({:{}, _, args} = _value) do
args
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, values} -> {:ok, values |> List.to_tuple}
end
end
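  # A 3-tuple that validates as an AST is evaluated to recover the value;
  # otherwise it is treated as a plain tuple of (possibly quoted) elements.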
def maybe_form_realise({_, _, _} = value) do
case value |> Macro.validate do
:ok -> {:ok, value |> Code.eval_quoted([], __ENV__) |> elem(0)}
_ ->
value
|> Tuple.to_list
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, values} -> {:ok, values |> List.to_tuple}
end
end
end
# tuple with maybe quoted elements
def maybe_form_realise(value) when is_tuple(value) do
value
|> Tuple.to_list
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, values} -> {:ok, values |> List.to_tuple}
end
end
# default
def maybe_form_realise(value) do
{:ok, value}
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a `Map`, returns
`{:ok, map}`. Otherwise `{:error, error}` where `error` will be a `BadMapError`.
  If the realised value is a `Keyword`, it is converted to a map and
`{:ok, map}` returned.
The keys and values are recursively realised.
iex> %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> maybe_form_realise_map
{:ok, %{a: 1, b: %{b21: 21, b22: 22}, c: 3}}
iex> ast = %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_form_realise_map
{:ok, %{a: 1, b: %{b21: 21, b22: 22}, c: 3}}
iex> 42 |> maybe_form_realise_map
{:error, %BadMapError{term: 42}}
iex> {:x, 42} |> Macro.escape |> maybe_form_realise_map
{:error, %BadMapError{term: Macro.escape({:x, 42})}}
"""
@spec maybe_form_realise_map(any) :: {:ok, map} | {:error, error} | {:error, %BadMapError{}}
def maybe_form_realise_map(value) do
value
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, realise_value} ->
cond do
is_map(realise_value) -> {:ok, realise_value}
Keyword.keyword?(realise_value) -> {:ok, realise_value |> Enum.into(%{})}
true -> {:error, %BadMapError{term: realise_value}}
end
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_form_realise_map/1`, and if the result is `{:ok, map}`, returns the map,
else raises `error` in `{:error, error}`.
iex> %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> maybe_form_realise_map!
%{a: 1, b: %{b21: 21, b22: 22}, c: 3}
iex> ast = %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_form_realise_map!
%{a: 1, b: %{b21: 21, b22: 22}, c: 3}
iex> 42 |> maybe_form_realise_map!
** (BadMapError) expected a map, got: 42
iex> ast = {:x, 42} |> Macro.escape
iex> ast |> maybe_form_realise_map!
** (BadMapError) expected a map, got: {:x, 42}
"""
@spec maybe_form_realise_map!(any) :: map | no_return
def maybe_form_realise_map!(value) do
value
|> maybe_form_realise_map
|> case do
{:ok, map} when is_map(map) -> map
{:error, error} ->
Logger.error Exception.message(error)
raise error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a tuple, returns `{:ok, tuple}`.
Anything else returns `{:error, error}`.
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3} |> maybe_form_realise_tuple
{:ok, {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}}
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
...> |> Macro.escape
...> |> maybe_form_realise_tuple
{:ok, {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}}
iex> {:error, error} = 42 |> maybe_form_realise_tuple
...> match?(%ArgumentError{message: "expected a tuple; got: 42"}, error)
true
iex> {:error, error} = %{x: 42}
...> |> Macro.escape
...> |> maybe_form_realise_tuple
...> match?(%ArgumentError{message: "expected a tuple; got: %{x: 42}"}, error)
true
"""
@spec maybe_form_realise_tuple(any) :: {:ok, tuple} | {:error, error}
def maybe_form_realise_tuple(value) do
case maybe_ast_realise(value) do
tuple when is_tuple(tuple) -> {:ok, tuple}
x -> {:error, %ArgumentError{message: "expected a tuple; got: #{inspect x}"}}
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_form_realise_tuple/1`, and if the result is `{:ok, tuple}`, returns the tuple,
else raises an `ArgumentError` exception.
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3} |> maybe_form_realise_tuple!
{:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
...> |> Macro.escape
...> |> maybe_form_realise_tuple!
{:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
iex> 42 |> maybe_form_realise_tuple!
** (ArgumentError) expected a tuple; got: 42
iex> %{x: 42}
...> |> Macro.escape
...> |> maybe_form_realise_tuple!
** (ArgumentError) expected a tuple; got: %{x: 42}
"""
@spec maybe_form_realise_tuple!(any) :: tuple | no_return
def maybe_form_realise_tuple!(value) do
value
|> maybe_form_realise_tuple
|> case do
{:ok, tuple} when is_tuple(tuple) -> tuple
{:error, error} ->
Logger.error Exception.message(error)
raise error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a function, returns
`{:ok, function}`.
Anything else returns `{:error, error}` where `error` is a `BadFunctionError`.
iex> fun = fn x -> x end
...> result = fun |> maybe_form_realise_function
...> match?({:ok, ^fun}, result)
true
iex> quoted_fun = quote(do: fn x -> x end)
...> {:ok, fun} = quoted_fun |> maybe_form_realise_function
...> is_function(fun, 1)
true
iex> {:error, error} = 42 |> maybe_form_realise_function
...> match?(%BadFunctionError{term: 42}, error)
true
iex> {:error, error} = {:x, 42} |> Macro.escape
...> |> maybe_form_realise_function
...> match?(%BadFunctionError{term: {:x, 42}}, error)
true
"""
@spec maybe_form_realise_function(any) :: {:ok, fun} | {:error, error} | {:error, %BadFunctionError{}}
def maybe_form_realise_function(value) do
value
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, fun} ->
case fun |> is_function do
true -> {:ok, fun}
_ -> {:error, %BadFunctionError{term: fun}}
end
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_form_realise_function/1`, and, if `{:ok, function}`, returns the
function, else raises `error` in `{:error, error}`.
iex> fun = fn x -> x end
...> result = fun |> maybe_form_realise_function!
...> match?(^fun, result)
true
iex> quoted_fun = quote(do: fn x -> x end)
...> fun = quoted_fun |> maybe_form_realise_function!
...> is_function(fun, 1)
true
iex> 42 |> maybe_form_realise_function!
** (BadFunctionError) expected a function, got: 42
iex> {:x, 42}
...> |> Macro.escape
...> |> maybe_form_realise_function!
** (BadFunctionError) expected a function, got: {:x, 42}
"""
@spec maybe_form_realise_function!(any) :: fun | no_return
def maybe_form_realise_function!(value) do
value
|> maybe_form_realise_function
|> case do
{:ok, fun} -> fun
{:error, error} ->
Logger.error Exception.message(error)
raise error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a *compiled* module, returns `{:ok, module}`.
Tests whether the module's `__info__` function works to confirm an actual module.
Anything else returns `{:error, error}`.
iex> mod = (defmodule ABC1, do: nil) |> elem(1)
...> result = mod |> maybe_form_realise_module
...> match?({:ok, ^mod}, result)
true
iex> mod = (defmodule ABC2, do: nil) |> elem(1)
...> quoted_mod = mod |> Macro.escape
...> result = quoted_mod |> maybe_form_realise_module
...> match?({:ok, ^mod}, result)
true
iex> {:error, error} = 42 |> maybe_form_realise_module
...> match?(%ArgumentError{message: "expected a module; got: 42"}, error)
true
iex> {:error, error} = {:x, 42}
...> |> Macro.escape
...> |> maybe_form_realise_module
...> match?(%ArgumentError{message: "expected a module; got: {:x, 42}"}, error)
true
"""
@spec maybe_form_realise_module(any) :: {:ok, atom} | {:error, error}
def maybe_form_realise_module(value) do
value
|> maybe_form_realise
|> case do
{:error, _} = result -> result
{:ok, mod} when is_atom(mod) ->
try do
mod.__info__(:functions)
{:ok, mod}
rescue
_any ->
{:error, %ArgumentError{message: "expected a module; got: #{inspect mod}"}}
end
{:ok, value} ->
{:error, %ArgumentError{message: "expected a module; got: #{inspect value}"}}
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_form_realise_module/1`, and, if `{:ok, module}`, returns the
module, else raises `error` in `{:error, error}`.
iex> mod = (defmodule ABC3, do: nil) |> elem(1)
...> result = mod |> maybe_form_realise_module!
...> match?(^mod, result)
true
iex> mod = (defmodule ABC, do: nil) |> elem(1)
...> quoted_mod = mod |> Macro.escape
...> result = quoted_mod |> maybe_form_realise_module!
...> match?(^mod, result)
true
iex> :an_atom_but_not_a_module |> maybe_form_realise_module!
** (ArgumentError) expected a module; got: :an_atom_but_not_a_module
iex> 42 |> maybe_form_realise_module!
** (ArgumentError) expected a module; got: 42
iex> {:x, 42}
...> |> Macro.escape
...> |> maybe_form_realise_module!
** (ArgumentError) expected a module; got: {:x, 42}
"""
@spec maybe_form_realise_module!(any) :: atom | no_return
def maybe_form_realise_module!(value) do
value
|> maybe_form_realise_module
|> case do
{:ok, mod} -> mod
{:error, error} ->
Logger.error Exception.message(error)
raise error
end
end
@doc ~S"""
Takes a maybe quoted value and returns the realised value.
Realisation in this context means extracting the underlying ("unquoted") value.
## Examples
iex> 1 |> maybe_ast_realise
1
iex> :atom |> maybe_ast_realise
:atom
iex> "string" |> maybe_ast_realise
"string"
iex> [1, :atom, "string"] |> maybe_ast_realise
[1, :atom, "string"]
iex> {:x, 42} |> maybe_ast_realise
{:x, 42}
iex> ast = {:x, 42} |> Macro.escape
...> ast |> maybe_ast_realise
{:x, 42}
iex> %{a: 1, b: 2, c: 3} |> maybe_ast_realise
%{a: 1, b: 2, c: 3}
iex> ast = %{a: 1, b: 2, c: 3} |> Macro.escape
...> ast |> maybe_ast_realise
%{a: 1, b: 2, c: 3}
iex> fun = fn x -> x + 5 end
...> fun = fun |> maybe_ast_realise
...> 42 |> fun.()
47
iex> ast = "fn x -> x + 5 end" |> Code.string_to_quoted!
...> fun = ast |> maybe_ast_realise
...> 42 |> fun.()
47
A map's keys and values are recursively realised:
iex> ast = %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_ast_realise
%{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}
The elements of a tuple are recursively realised:
iex> ast = [{:x, 2, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}}] |> Macro.escape
iex> ast |> maybe_ast_realise
[{:x, 2, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}}]
The elements of a list are recursively realised:
iex> ast = [{:x,:y,:z}, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}] |> Macro.escape
iex> ast |> maybe_ast_realise
[{:x,:y,:z}, [1,2,3], %{a: %{a1: 1}, b: %{b21: 21, b22: 22}, c: 3}]
"""
@spec maybe_ast_realise(any) :: any
def maybe_ast_realise(value)
def maybe_ast_realise(value)
when is_atom(value)
or is_bitstring(value)
or is_boolean(value)
or is_function(value)
or is_number(value)
or is_pid(value)
or is_port(value)
or is_reference(value) do
value
end
# list with maybe quoted elements
def maybe_ast_realise(value) when is_list(value) do
value |> Enum.map(fn v -> v |> maybe_ast_realise end)
end
# map with maybe quoted keys and/or values
def maybe_ast_realise(value) when is_map(value) do
value
|> Stream.map(fn
{k,v} when is_atom(k) -> {k, v |> maybe_ast_realise}
{k,v} -> {k |> maybe_ast_realise, v |> maybe_ast_realise}
end)
|> Enum.into(%{})
end
# quoted module attribute - leave alone
def maybe_ast_realise({:@, _, [{attr_name, _, _}]} = value) when is_atom(attr_name) do
value
end
# quoted map
def maybe_ast_realise({:%{}, _, args} = _value) do
args
|> Enum.into(%{})
|> maybe_ast_realise
end
# quoted tuple
def maybe_ast_realise({:{}, _, args} = _value) do
args
|> Enum.map(fn v -> v |> maybe_ast_realise end)
|> List.to_tuple
end
def maybe_ast_realise({_, _, _} = value) do
case value |> Macro.validate do
:ok -> value |> Code.eval_quoted([], __ENV__) |> elem(0)
_ -> value
end
end
# tuple with maybe quoted elements
def maybe_ast_realise(value) when is_tuple(value) do
value
|> Tuple.to_list
|> Enum.map(fn v -> v |> maybe_ast_realise end)
|> List.to_tuple
end
# default
def maybe_ast_realise(value) do
value
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a `Map`, returns
`{:ok, map}`.
  If the realised value is a `Keyword`, it is converted to a map and
`{:ok, map}` returned.
Anything else returns `:error.`
The keys and values are recursively realised.
iex> %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> maybe_ast_realise_map
{:ok, %{a: 1, b: %{b21: 21, b22: 22}, c: 3}}
iex> ast = %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_ast_realise_map
{:ok, %{a: 1, b: %{b21: 21, b22: 22}, c: 3}}
iex> 42 |> maybe_ast_realise_map
:error
iex> ast = {:x, 42} |> Macro.escape
iex> ast |> maybe_ast_realise_map
:error
"""
@spec maybe_ast_realise_map(any) :: {:ok, map} | :error
def maybe_ast_realise_map(value) do
realise_value = maybe_ast_realise(value)
cond do
is_map(realise_value) -> {:ok, realise_value}
Keyword.keyword?(realise_value) -> {:ok, realise_value |> Enum.into(%{})}
true -> :error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_ast_realise_map/1`, and if the result is `{:ok, map}`, returns the map,
else raises a `BadMapError` exception.
iex> %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> maybe_ast_realise_map!
%{a: 1, b: %{b21: 21, b22: 22}, c: 3}
iex> ast = %{a: 1, b: %{b21: 21, b22: 22}, c: 3} |> Macro.escape
iex> ast |> maybe_ast_realise_map!
%{a: 1, b: %{b21: 21, b22: 22}, c: 3}
iex> 42 |> maybe_ast_realise_map!
** (BadMapError) expected a map, got: :error
iex> ast = {:x, 42} |> Macro.escape
iex> ast |> maybe_ast_realise_map!
** (BadMapError) expected a map, got: :error
"""
@spec maybe_ast_realise_map!(any) :: map | no_return
def maybe_ast_realise_map!(value) do
realised_value = maybe_ast_realise_map(value)
case realised_value do
{:ok, map} when is_map(map) -> map
:error ->
message =
"#{inspect __MODULE__}.maybe_ast_realise_map!: expected a map; got: #{inspect realised_value} value #{inspect value}"
Logger.error message
raise BadMapError, term: realised_value
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a tuple, returns `{:ok, tuple}`.
Anything else returns `:error`.
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3} |> maybe_ast_realise_tuple
{:ok, {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}}
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
...> |> Macro.escape
...> |> maybe_ast_realise_tuple
{:ok, {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}}
iex> 42 |> maybe_ast_realise_tuple
:error
iex> %{x: 42}
...> |> Macro.escape
...> |> maybe_ast_realise_tuple
:error
"""
@spec maybe_ast_realise_tuple(any) :: {:ok, tuple} | :error
def maybe_ast_realise_tuple(value) do
case maybe_ast_realise(value) do
tuple when is_tuple(tuple) -> {:ok, tuple}
_ -> :error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_ast_realise_tuple/1`, and if the result is `{:ok, tuple}`, returns the tuple,
else raises an `ArgumentError` exception.
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3} |> maybe_ast_realise_tuple!
{:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
iex> {:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
...> |> Macro.escape
...> |> maybe_ast_realise_tuple!
{:one, 1, %{"two1" => 21, :two2 => 22}, "tre", 3}
iex> 42 |> maybe_ast_realise_tuple!
** (ArgumentError) expected a tuple, got: :error
iex> %{x: 42}
...> |> Macro.escape
...> |> maybe_ast_realise_tuple!
** (ArgumentError) expected a tuple, got: :error
"""
@spec maybe_ast_realise_tuple!(any) :: tuple | no_return
def maybe_ast_realise_tuple!(value) do
realised_value = maybe_ast_realise_tuple(value)
case realised_value do
{:ok, tuple} when is_tuple(tuple) -> tuple
:error ->
message = "expected a tuple, got: #{inspect realised_value}"
Logger.error message
raise ArgumentError, message: message
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a function, returns
`{:ok, function}`. Anything else returns `:error`.
iex> fun = fn x -> x end
...> result = fun |> maybe_ast_realise_function
...> match?({:ok, ^fun}, result)
true
iex> quoted_fun = quote(do: fn x -> x end)
...> {:ok, fun} = quoted_fun |> maybe_ast_realise_function
...> is_function(fun, 1)
true
iex> 42 |> maybe_ast_realise_function
:error
iex> {:x, 42} |> Macro.escape
...> |> maybe_ast_realise_function
:error
"""
@spec maybe_ast_realise_function(any) :: {:ok, fun} | :error
def maybe_ast_realise_function(value) do
case maybe_ast_realise(value) do
fun when is_function(fun) -> {:ok, fun}
_ -> :error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_ast_realise_function/1`, and, if `{:ok, function}`, returns the
function, else raises a `BadFunctionError` exception.
iex> fun = fn x -> x end
...> result = fun |> maybe_ast_realise_function!
...> match?(^fun, result)
true
iex> quoted_fun = quote(do: fn x -> x end)
...> fun = quoted_fun |> maybe_ast_realise_function!
...> is_function(fun, 1)
true
iex> 42 |> maybe_ast_realise_function!
** (BadFunctionError) expected a function, got: :error
iex> {:x, 42}
...> |> Macro.escape
...> |> maybe_ast_realise_function!
** (BadFunctionError) expected a function, got: :error
"""
@spec maybe_ast_realise_function!(any) :: fun | no_return
def maybe_ast_realise_function!(value) do
realised_value = maybe_ast_realise_function(value)
    case realised_value do
{:ok, fun} when is_function(fun) -> fun
:error ->
message =
"#{inspect __MODULE__}.maybe_ast_realise_function!: expected a function; got: #{inspect realised_value} value #{inspect value}"
Logger.error message
raise BadFunctionError, term: realised_value
end
end
@doc ~S"""
Takes a maybe quoted value, realises it and, if a *compiled* module, returns `{:ok, module}`.
Tests whether the module's `__info__` function works to confirm an actual module.
Anything else returns `:error`.
iex> mod = (defmodule XYZ1, do: nil) |> elem(1)
...> result = mod |> maybe_ast_realise_module
...> match?({:ok, ^mod}, result)
true
iex> mod = (defmodule XYZ2, do: nil) |> elem(1)
...> quoted_mod = mod |> Macro.escape
...> result = quoted_mod |> maybe_ast_realise_module
...> match?({:ok, ^mod}, result)
true
iex> 42 |> maybe_ast_realise_module
:error
iex> {:x, 42}
...> |> Macro.escape
...> |> maybe_ast_realise_module
:error
"""
@spec maybe_ast_realise_module(any) :: {:ok, atom} | :error
def maybe_ast_realise_module(value) do
case maybe_ast_realise(value) do
mod when is_atom(mod) ->
try do
mod.__info__(:functions)
{:ok, mod}
rescue
_any ->
:error
end
_ -> :error
end
end
@doc ~S"""
Takes a maybe quoted value, realises it using
`maybe_ast_realise_module/1`, and, if `{:ok, module}`, returns the
module, else raises a `ArgumentError` exception.
iex> mod = (defmodule XYZ3, do: nil) |> elem(1)
...> result = mod |> maybe_ast_realise_module!
...> match?(^mod, result)
true
iex> mod = (defmodule XYZ4, do: nil) |> elem(1)
...> quoted_mod = mod |> Macro.escape
...> result = quoted_mod |> maybe_ast_realise_module!
...> match?(^mod, result)
true
iex> :an_atom_but_not_a_module |> maybe_ast_realise_module!
** (ArgumentError) expected a module, got: :error
iex> 42 |> maybe_ast_realise_module!
** (ArgumentError) expected a module, got: :error
iex> {:x, 42}
...> |> Macro.escape
...> |> maybe_ast_realise_module!
** (ArgumentError) expected a module, got: :error
"""
@spec maybe_ast_realise_module!(any) :: atom | no_return
def maybe_ast_realise_module!(value) do
realised_value = maybe_ast_realise_module(value)
case realised_value do
{:ok, mod} when is_atom(mod) -> mod
:error ->
message =
"#{inspect __MODULE__}.maybe_ast_realise_module!: expected a module; got: #{inspect value}"
Logger.error message
raise ArgumentError, message: "expected a module, got: #{inspect realised_value}"
end
end
@doc ~S"""
`maybe_ast_escape/1` escapes (`Macro.escape/1`) any value other than a module attribute or an existing ast (i.e. `Macro.validate/1` returns `:ok`)
## Examples
iex> 42 |> maybe_ast_escape
42
iex> :two |> maybe_ast_escape
:two
iex> %{a: 1} |> maybe_ast_escape
{:%{}, [], [a: 1]}
iex> %{a: 1} |> Macro.escape |> maybe_ast_escape
{:%{}, [], [a: 1]}
iex> [1, %{b: 2}, {:c, 2, :tre}] |> maybe_ast_escape
[1, {:%{}, [], [b: 2]}, {:{}, [], [:c, 2, :tre]}]
iex> [1, %{b: 2}, {:c, 2, :tre}] |> Macro.escape |> maybe_ast_escape
[1, {:%{}, [], [b: 2]}, {:{}, [], [:c, 2, :tre]}]
"""
@spec maybe_ast_escape(any) :: Macro.t
def maybe_ast_escape(value)
# quoted module attribute - leave alone
def maybe_ast_escape({:@, _, _} = value) do
value
end
# already a valid ast? if not, escape
def maybe_ast_escape(value) do
case value |> Macro.validate do
:ok -> value
_ -> value |> Macro.escape
end
end
end
|
lib/ast/form.ex
| 0.887229
| 0.529993
|
form.ex
|
starcoder
|
defmodule React do
use GenServer
defmodule InputCell do
defstruct [:name, :value]
@type t :: %InputCell{name: String.t(), value: any}
end
defmodule OutputCell do
defstruct [:name, :inputs, :compute, :value, callbacks: %{}]
@type t :: %OutputCell{
name: String.t(),
inputs: [String.t()],
compute: fun(),
value: any,
callbacks: %{String.t() => fun()}
}
end
# CLIENT SIDE
@opaque cells :: pid
@type cell :: {:input, String.t(), any} | {:output, String.t(), [String.t()], fun()}
@doc """
Start a reactive system
"""
@spec new(cells :: [cell]) :: {:ok, pid}
def new(cells) do
GenServer.start_link(React, cells)
end
@doc """
Return the value of an input or output cell
"""
@spec get_value(cells :: pid, cell_name :: String.t()) :: any()
def get_value(cells, cell_name) do
GenServer.call(cells, {:get_value, cell_name})
end
@doc """
Set the value of an input cell
"""
@spec set_value(cells :: pid, cell_name :: String.t(), value :: any) :: :ok
def set_value(cells, cell_name, value) do
GenServer.cast(cells, {:set_value, cell_name, value})
end
@doc """
Add a callback to an output cell
"""
@spec add_callback(
cells :: pid,
cell_name :: String.t(),
callback_name :: String.t(),
callback :: fun()
) :: :ok
def add_callback(cells, cell_name, callback_name, callback) do
GenServer.cast(cells, {:add_callback, cell_name, callback_name, callback})
end
@doc """
Remove a callback from an output cell
"""
@spec remove_callback(cells :: pid, cell_name :: String.t(), callback_name :: String.t()) :: :ok
def remove_callback(cells, cell_name, callback_name) do
GenServer.cast(cells, {:remove_callback, cell_name, callback_name})
end
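  # A minimal usage sketch of the client API above (cell names are
  # illustrative, not part of the module):
  #
  #   {:ok, cells} = React.new([
  #     {:input, "a", 1},
  #     {:output, "plus_one", ["a"], fn a -> a + 1 end}
  #   ])
  #   React.get_value(cells, "plus_one")  # => 2
  #   React.set_value(cells, "a", 5)
  #   React.get_value(cells, "plus_one")  # => 6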
# SERVER SIDE
defmodule State do
defstruct [:cells, :dependencies]
end
@impl true
def init(cells) do
cells =
Map.new(cells, fn
{:input, name, value} ->
{name, %InputCell{name: name, value: value}}
{:output, name, inputs, compute} ->
{name, %OutputCell{name: name, inputs: inputs, compute: compute}}
end)
initialized_cells =
Map.new(cells, fn {name, cell} -> {name, initialize_value(cell, cells)} end)
dependencies =
Enum.reduce(cells, %{}, fn
{name, %OutputCell{inputs: [a]}}, deps ->
Map.update(deps, a, [name], fn names -> [name | names] end)
{name, %OutputCell{inputs: [a, b]}}, deps ->
Map.update(deps, a, [name], fn names -> [name | names] end)
|> Map.update(b, [name], fn names -> [name | names] end)
_input, deps ->
deps
end)
{:ok, %State{cells: initialized_cells, dependencies: dependencies}}
end
@impl true
def handle_call({:get_value, name}, _from, %State{cells: cells} = state) do
{:reply, cells[name].value, state}
end
@impl true
def handle_cast({:set_value, name, value}, %State{cells: cells, dependencies: deps} = state) do
%InputCell{} = input = cells[name]
cells =
Map.put(cells, name, %{input | value: value})
|> update_dependencies(deps[name], deps)
{:noreply, %{state | cells: cells}}
end
@impl true
def handle_cast({:add_callback, name, callback_name, callback}, %State{cells: cells} = state) do
%OutputCell{callbacks: callbacks} = cell = cells[name]
callbacks = Map.put(callbacks, callback_name, callback)
{:noreply, %{state | cells: Map.put(cells, name, %{cell | callbacks: callbacks})}}
end
@impl true
def handle_cast({:remove_callback, name, callback_name}, %State{cells: cells} = state) do
%OutputCell{callbacks: callbacks} = cell = cells[name]
callbacks = Map.delete(callbacks, callback_name)
{:noreply, %{state | cells: Map.put(cells, name, %{cell | callbacks: callbacks})}}
end
defp initialize_value(%OutputCell{value: nil, inputs: [a], compute: f} = cell, cells) do
reference = initialize_value(cells[a], cells)
%{cell | value: f.(reference.value)}
end
defp initialize_value(%OutputCell{value: nil, inputs: [a, b], compute: f} = cell, cells) do
reference_a = initialize_value(cells[a], cells)
reference_b = initialize_value(cells[b], cells)
%{cell | value: f.(reference_a.value, reference_b.value)}
end
defp initialize_value(cell, _cells), do: cell
defp update_dependencies(cells, [name | to_update], dependencies) do
cell = cells[name]
value =
case cell do
%OutputCell{inputs: [a], compute: f} -> f.(cells[a].value)
%OutputCell{inputs: [a, b], compute: f} -> f.(cells[a].value, cells[b].value)
end
cells = Map.put(cells, name, %{cell | value: value})
    if value == cell.value do
      update_dependencies(cells, to_update, dependencies)
    else
Enum.each(cell.callbacks, fn {name, send} -> send.(name, value) end)
next = Map.get(dependencies, name, [])
update_dependencies(cells, to_update ++ next, dependencies)
end
end
defp update_dependencies(cells, _empty, _dependencies), do: cells
end
|
exercises/practice/react/.meta/example.ex
| 0.824991
| 0.513912
|
example.ex
|
starcoder
|
defmodule React do
use GenServer
@opaque cells :: pid
@typep input :: {:input, String.t(), any}
@typep output :: {:output, String.t(), [String.t()], fun()}
@type cell :: input | output
@doc """
Start a reactive system
"""
@spec new(cells :: [cell]) :: {:ok, pid}
def new(cells) do
GenServer.start_link(__MODULE__, cells)
end
@doc """
Return the value of an input or output cell
"""
@spec get_value(cells :: pid, cell_name :: String.t()) :: any()
def get_value(cells, cell_name) do
GenServer.call(cells, {:get_value, cell_name})
end
@doc """
Set the value of an input cell
"""
@spec set_value(cells :: pid, cell_name :: String.t(), value :: any) :: :ok
def set_value(cells, cell_name, value) do
GenServer.cast(cells, {:set_value, cell_name, value})
end
@doc """
Add a callback to an output cell
"""
@spec add_callback(
cells :: pid,
cell_name :: String.t(),
callback_name :: String.t(),
callback :: fun()
) :: :ok
def add_callback(cells, cell_name, callback_name, callback) do
GenServer.cast(cells, {:add_callback, cell_name, callback_name, callback})
end
@doc """
Remove a callback from an output cell
"""
@spec remove_callback(cells :: pid, cell_name :: String.t(), callback_name :: String.t()) :: :ok
def remove_callback(cells, cell_name, callback_name) do
GenServer.cast(cells, {:remove_callback, cell_name, callback_name})
end
@impl GenServer
def init(cells) do
initial_state =
cells
|> Enum.reduce(%{input: Map.new(), output: Map.new(), output_order: []}, fn
{:input, name, value}, acc ->
new_input_map = Map.put(acc.input, name, value)
%{acc | input: new_input_map}
{:output, name, deps, fun}, acc ->
output = %{name: name, deps: deps, value: 0, fun: fun, callbacks: Map.new()}
new_output_map = Map.put(acc.output, name, output)
new_output_order = [name | acc.output_order]
new_acc = %{acc | output: new_output_map, output_order: new_output_order}
compute_cell(new_acc, output)
end)
|> Map.update!(:output_order, &Enum.reverse/1)
{:ok, initial_state}
end
@impl GenServer
def handle_call({:get_value, cell_name}, _, state) do
{:reply, find_cell_value!(state, cell_name), state}
end
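  # On every set_value, all output cells are recomputed in their original
  # definition order (output_order), which assumes each output is defined
  # after the cells it depends on.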
@impl GenServer
def handle_cast({:set_value, cell_name, value}, state) do
new_state =
Enum.reduce(
state.output_order,
%{state | input: Map.put(state.input, cell_name, value)},
&compute_cell(&2, Map.fetch!(state.output, &1))
)
{:noreply, new_state}
end
def handle_cast({:add_callback, cell_name, callback_name, callback}, state) do
new_state =
update_in(state, [:output, cell_name, :callbacks], &Map.put(&1, callback_name, callback))
{:noreply, new_state}
end
def handle_cast({:remove_callback, cell_name, callback_name}, state) do
new_state = update_in(state, [:output, cell_name, :callbacks], &Map.delete(&1, callback_name))
{:noreply, new_state}
end
defp find_cell!(cells, cell_name) do
    # Look the cell up in the input map, then the output map; raise if absent.
with :error <- Map.fetch(cells.input, cell_name),
:error <- Map.fetch(cells.output, cell_name) do
raise KeyError
else
{:ok, value} -> value
end
end
defp find_cell_value!(cells, cell_name) do
case find_cell!(cells, cell_name) do
%{value: value} -> value
value -> value
end
end
defp compute_cell(cells, computable) do
args = Enum.map(computable.deps, &find_cell_value!(cells, &1))
new_value = apply(computable.fun, args)
if new_value != computable.value do
Enum.each(computable.callbacks, fn {name, cb} -> cb.(name, new_value) end)
update_in(cells, [:output, computable.name, :value], fn _ -> new_value end)
else
cells
end
end
end
|
exercism/elixir/react/lib/react.ex
| 0.786377
| 0.41481
|
react.ex
|
starcoder
|
defmodule Elasticfusion.Index do
defmacro __using__(_opts) do
quote do
import Elasticfusion.Index
@transforms []
@before_compile {Elasticfusion.Index.Compiler, :compile_index}
end
end
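  # A hypothetical index definition using the macros below (module and
  # field names are illustrative only):
  #
  #   defmodule MyApp.PostIndex do
  #     use Elasticfusion.Index
  #
  #     index_name "posts"
  #     document_type "post"
  #     mapping %{tags: %{type: :keyword}, date: %{type: :date}}
  #     keyword_field :tags
  #     serialize fn post -> %{tags: post.tags, date: post.date} end
  #   end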
@doc """
Defines the searchable index name.
This setting is required.
"""
defmacro index_name(name) do
quote do: @index_name unquote(name)
end
@doc """
Defines the document type.
See https://www.elastic.co/blog/index-vs-type for differences
between index names and document types.
This setting is required.
"""
defmacro document_type(type) do
quote do: @document_type unquote(type)
end
@doc """
Defines index settings applied on index (re)creation.
See https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html
for available settings.
Default settings for Elasticsearch indexes are:
```
%{
number_of_shards: 5,
number_of_replicas: 1
}
```
"""
defmacro index_settings(settings) do
quote do: @index_settings unquote(settings)
end
@doc """
Defines explicit mapping for the document type,
set on index (re)creation.
An example:
```
%{
tags: %{type: :keyword},
date: %{type: :date}
}
```
This setting is required.
"""
defmacro mapping(mapping) do
quote do: @mapping unquote(mapping)
end
@doc """
Defines a serialization function that produces a map
of indexed fields and their corresponding values
given a record struct.
See `Elasticfusion.Document` for more information
on indexing operations.
This setting is required.
"""
defmacro serialize(fun_ast) do
ast = Macro.escape(fun_ast)
quote do: @serialize_fun_ast unquote(ast)
end
@doc """
Defines the mapping field used for keyword queries.
An example:
Given `:tag_names` as the keyword field,
"tag one, tag two" is parsed as:
```
%{bool: %{must: [
%{term: %{tag_names: "tag one"}},
%{term: %{tag_names: "tag two"}}
]}}
```
"""
defmacro keyword_field(name) do
quote do: @keyword_field unquote(name)
end
@doc """
Defines fields that can occur in string queries
(e.g. "field: value"), specified as a keyword list of
`{:mapping_field, "text field"}`.
Depending on the type specified in `mapping`,
field values can be parsed as dates, numbers, or literals.
"""
defmacro queryable_fields(fields) do
quote do: @queryable_fields unquote(fields)
end
@doc """
Defines a custom field query transform that produces
an Elasticsearch query for a given field,
qualifier (if present), value, and /external context/.
The first argument specifies the field as encountered
in a textual query (field is the part before the ':',
e.g. "created by" for "created by: some user").
The second argument is a function that takes 3 arguments:
* a qualifier ("less than", "more than", "earlier than",
"later than", or `nil`),
* a value (value is the part after the ':' and an optional
qualifier, e.g. "5" for "stars: less than 5"),
* and /external context/ (see below),
returning an Elasticsearch query.
/external context/ is set by the caller of
`Elasticfusion.Search.Builder.parse_search_string/3`.
Consider the following examples:
```
# "uploaded by: Cool Username"
# =>
# %{term: %{created_by: "cool username"}}
def_transform "uploaded by", fn(_, username, _) ->
indexed_username = String.downcase(username)
%{term: %{created_by: indexed_username}}
end
# "found in: my favorites"
# (external context: %User{name: "cool username"})
# =>
# %{term: %{favorited_by: "cool username"}}
def_transform "found in", fn(_, "my favorites", %User{name: name}) ->
%{term: %{favorited_by: name}}
end
# "starred by: less than 5 people"
# =>
# %{range: %{stars: %{lt: "5"}}}
def_transform "starred by", fn
("less than", value, _) ->
[_, count] = Regex.run(~r/(\\d+) people/, value)
%{range: %{stars: %{lt: count}}}
("more than", value, _) ->
[_, count] = Regex.run(~r/(\\d+) people/, value)
%{range: %{stars: %{gt: count}}}
end
```
"""
defmacro def_transform(field, transform_fun_ast) do
ast = Macro.escape(transform_fun_ast)
quote do: @transforms [{unquote(field), unquote(ast)} | @transforms]
end
end
|
lib/elasticfusion/index.ex
| 0.808597
| 0.878001
|
index.ex
|
starcoder
|
defmodule Intcode do
require IEx
def build(s) do
s
|> parse_machine
|> assign_offsets(0, %{})
end
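  # A minimal usage sketch (the program below outputs 42 and then halts;
  # opcode 104 is "output immediate", 99 is "halt"):
  #
  #   "104,42,99"
  #   |> Intcode.build()
  #   |> Intcode.execute([])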
def parse_machine(s) do
s
|> String.split(",")
|> Enum.map(&(Integer.parse(&1) |> elem(0)))
end
def execute(machine, input \\ [1])
def execute(machine, {:mailbox, pid}), do: execute(machine, {0, 0}, {receive_fn(), send_fn(pid)})
def execute(machine, input), do: execute(machine, {0, 0}, {input_fn(input), puts_fn()})
defp execute(machine, {sp, rb} = ptrs, {input_f, output_f} = io) do
opcode = machine |> read(sp) |> rem(100)
case opcode do
1 -> machine |> op(ptrs, &Kernel.+/2) |> execute({sp + 4, rb}, io)
2 -> machine |> op(ptrs, &Kernel.*/2) |> execute({sp + 4, rb}, io)
3 ->
{val, input_f} = input_f.()
machine |> write_input(ptrs, val) |> execute({sp + 2, rb}, {input_f, output_f})
4 -> machine |> output(ptrs, output_f) |> execute({sp + 2, rb}, io)
5 ->
sp = machine |> jump_if_true(ptrs)
execute(machine, {sp, rb}, io)
6 ->
sp = machine |> jump_if_false(ptrs)
execute(machine, {sp, rb}, io)
7 -> machine |> less_than(ptrs) |> execute({sp + 4, rb}, io)
8 -> machine |> equal_to(ptrs) |> execute({sp + 4, rb}, io)
9 ->
[rb_adj] = args(machine, ptrs, 1)
execute(machine, {sp + 2, rb + rb_adj}, io)
99 -> machine
end
end
defp input_fn(l), do: fn -> {hd(l), input_fn(tl(l))} end
defp receive_fn() do
fn ->
receive do
a -> {a, receive_fn()}
end
end
end
defp send_fn(pid), do: fn(v) -> send(pid, v) end
defp puts_fn(), do: fn(v) -> IO.puts v end
defp equal_to(machine, {sp, rb} = ptrs) do
addr = write_addr(machine, ptrs, 3)
case args(machine, {sp, rb}, 2) do
[a, a] -> write(machine, addr, 1)
_ -> write(machine, addr, 0)
end
end
defp less_than(machine, ptrs) do
addr = write_addr(machine, ptrs, 3)
case args(machine, ptrs, 2) do
[a, b] when a < b -> write(machine, addr, 1)
_ -> write(machine, addr, 0)
end
end
defp jump_if_true(machine, {sp, _} = ptrs) do
case args(machine, ptrs, 2) do
[v, addr] when v != 0 -> addr
_ -> sp + 3
end
end
defp jump_if_false(machine, {sp, _} = ptrs) do
case args(machine, ptrs, 2) do
[0, addr] -> addr
_ -> sp + 3
end
end
defp op(machine, ptrs, f) do
[a, b] = args(machine, ptrs, 2)
addr = write_addr(machine, ptrs, 3)
write(machine, addr, f.(a, b))
end
defp args(machine, {sp, rb}, nargs) do
modes = machine |> read(sp) |> div(100)
args_from_modes(machine, {sp + 1, rb}, modes, nargs, [])
end
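  # Parameter modes, taken digit by digit from the opcode prefix:
  # 0 = position (the argument is an address), 1 = immediate (the argument
  # is the value itself), 2 = relative (an address offset by the relative
  # base `rb`).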
defp args_from_modes(_machine, _ptrs, _modes, 0, args), do: Enum.reverse(args)
defp args_from_modes(machine, {sp, rb}, modes, n, args) when rem(modes, 10) == 0 do
addr = read(machine, sp)
arg = read(machine, addr)
args_from_modes(machine, {sp + 1, rb}, div(modes, 10), n - 1, [arg | args])
end
defp args_from_modes(machine, {sp, rb}, modes, n, args) when rem(modes, 10) == 1 do
arg = read(machine, sp)
args_from_modes(machine, {sp + 1, rb}, div(modes, 10), n - 1, [arg | args])
end
defp args_from_modes(machine, {sp, rb}, modes, n, args) when rem(modes, 10) == 2 do
addr = read(machine, sp) + rb
arg = read(machine, addr)
args_from_modes(machine, {sp + 1, rb}, div(modes, 10), n - 1, [arg | args])
end
defp write_addr(machine, {sp, rb}, pos) do
base = (:math.pow(10, pos) * 10) |> round
mode = machine |> read(sp) |> div(base)
adjust = case mode do
2 -> rb
_ -> 0
end
read(machine, sp + pos) + adjust
end
defp write_input(machine, ptrs, input) do
addr = write_addr(machine, ptrs, 1)
write(machine, addr, input)
end
defp output(machine, ptrs, f) do
[arg] = args(machine, ptrs, 1)
f.(arg)
machine
end
def read(machine, addr) when addr >= 0, do: Map.get(machine, addr, 0)
def write(machine, addr, value), do: Map.put(machine, addr, value)
def assign_offsets([], _, map), do: map
def assign_offsets([op | rest], sp, map) do
assign_offsets(rest, sp + 1, write(map, sp, op))
end
end
|
year_2019/lib/intcode.ex
| 0.50293
| 0.611672
|
intcode.ex
|
starcoder
|
defmodule Mix.Releases.Checks.LoadedOrphanedApps do
@moduledoc """
This check determines whether or not any of the applications in the release
satisfy all three of the following conditions:
* Have a start type of `:load` or `:none`
* Are not included by any other application in the release (orphaned)
* Are expected to be started by at least one other application in the release,
i.e. are present in the dependent application's `:applications` list.
Such "loaded-orphaned" applications will result in a release which only partially boots,
except in the rare case where the loaded applications are started before `:init` attempts
to boot the dependents. If the loaded applications are _not_ started before this point, the
application controller will wait indefinitely for the loaded applications to be started, which
will never occur because the thing which might have started them isn't started itself yet.
In general this should be very rare, but has occurred, and can be very difficult to troubleshoot.
This check provides information on how to work around the case where this happens, but the solutions
are one of the following:
* Add these loaded applications, and their dependents, to included_applications in the releasing app.
This requires that the releasing app take over the lifecycle of these applications, namely starting them
during it's own start callback, generally by adding them to it's supervisor tree. Recommended only for
those cases where it is absolutely required that the application be started at a particular point in time.
* Remove the `:load` start type from the applications which are orphaned, effectively allowing them to be
started by `:init` when needed. This does imply that the application will be started, rather than simply
loaded, which may not be desired - in such cases you need to evaluate the dependent applications to see whether
  they truly need to have the dependency started, or whether they can be modified to remove it from their applications list.
If neither of those work, you will need to use included_applications.
"""
use Mix.Releases.Checks
alias Mix.Releases.Release
alias Mix.Releases.App
def run(%Release{applications: apps}) do
# Applications with start type :load or :none
loaded =
apps
|> Enum.filter(fn %App{start_type: type} -> type in [:none, :load] end)
|> Enum.map(fn %App{name: name} -> name end)
|> MapSet.new()
# Applications which are in some other application's :included_applications list
included_apps =
apps
|> Enum.flat_map(fn %App{included_applications: ia} -> ia end)
|> Enum.uniq()
|> MapSet.new()
# Applications which are in some other application's :applications list
required_apps =
apps
|> Enum.flat_map(fn %App{applications: a} -> a end)
|> Enum.uniq()
|> MapSet.new()
# Applications which have start type :load, but are not included applications
loaded_not_included =
loaded
|> MapSet.difference(included_apps)
# Applications which have start type :load, are not included,
# but are in some other application's :applications list
loaded_but_required =
loaded_not_included
|> MapSet.intersection(required_apps)
# A list of applications which require the `loaded_but_required` apps
requiring_apps =
apps
|> Enum.filter(fn %App{applications: a} ->
required_loaded =
a
|> MapSet.new()
|> MapSet.intersection(loaded_but_required)
|> MapSet.to_list()
required_loaded != []
end)
|> Enum.map(fn %App{name: a} -> a end)
# A list of applications which either directly or transitively require
# the applications which are loaded and required
required_transitively = require_transitively(apps, requiring_apps)
if Enum.empty?(loaded_but_required) do
:ok
else
warning = """
You have specified a start type of :load or :none for the following orphan applications:
#{Enum.join(Enum.map(loaded_but_required, fn a -> " #{inspect(a)}" end), "\n")}
These applications are considered orphaned because they are not included by another
application (i.e. present in the included_applications list). Since they are only loaded,
      neither the runtime nor any application is responsible for ensuring they are started.
This is a problem because the following applications - either directly or transitively -
depend on the above applications to be started before they can start; and this cannot
be guaranteed:
#{Enum.join(Enum.map(required_transitively, fn a -> " #{inspect(a)}" end), "\n")}
If you do not address this, your release may appear to start successfully, but may
in fact only be partially started, which can manifest as portions of your application
      not working as expected. For example, a Phoenix endpoint not binding to its configured port.
You should either add all of these applications to :included_applications, and ensure
they are started as part of your application; or you should change the start type of the
first set of applications to :permanent or leave the start type unspecified. The latter
is the best approach when possible.
"""
{:ok, warning}
end
end
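  # Breadth-first expansion: starting from the apps that directly require
  # the loaded-but-orphaned apps, repeatedly add any app whose :applications
  # list mentions an app already collected.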
defp require_transitively(all, requiring) do
require_transitively(all, requiring, requiring)
end
defp require_transitively(_all, [], acc), do: acc
defp require_transitively(all, [app | rest], acc) do
requiring =
all
|> Enum.filter(fn %App{applications: a} -> Enum.member?(a, app) end)
|> Enum.reject(fn %App{name: a} -> Enum.member?(acc, a) end)
|> Enum.map(fn %App{name: a} -> a end)
require_transitively(all, rest ++ requiring, acc ++ requiring)
end
end
|
lib/mix/lib/releases/checks/loaded_orphaned_apps.ex
| 0.791015
| 0.553385
|
loaded_orphaned_apps.ex
|
starcoder
|
defmodule Expected.MnesiaStore do
@moduledoc """
Stores login data in a Mnesia table.
To use this store, configure `:expected` accordingly and set the table name in
the application configuration:
config :expected,
store: :mnesia,
table: :logins,
...
This table is not created by the store. You can use helpers to create it (see
`Expected`) or create it yourself. In the latter case, you **must** ensure
that:
* the table is a `:bag`,
* it stores `Expected.MnesiaStore.LoginRecord`, *i.e.* the `record_name` is
set to `:login` and the attributes are the `Expected.Login` keys,
* `:serial` and `:last_login` must be indexed.
For instance:
:mnesia.start()
:mnesia.create_table(
:logins,
type: :bag,
record_name: :login,
attributes: Expected.Login.keys(),
index: [:serial, :last_login],
disc_copies: [node()]
)
For Mnesia to work properly, you need to add it to your extra applications:
def application do
[
mod: {MyApp.Application, []},
extra_applications: [:logger, :mnesia]
]
end
"""
@behaviour Expected.Store
import __MODULE__.LoginRecord
alias __MODULE__.LoginRecord
alias Expected.Login
alias Expected.ConfigurationError
alias Expected.MnesiaStoreError
@impl true
def init(opts) do
case Keyword.fetch(opts, :table) do
{:ok, table} -> table
:error -> raise ConfigurationError, reason: :no_mnesia_table
end
end
@impl true
def list_user_logins(username, table) do
t = fn ->
:mnesia.read(table, username)
end
case :mnesia.transaction(t) do
{:atomic, user_logins} ->
Enum.map(user_logins, &to_struct(&1))
{:aborted, {:no_exists, _}} ->
raise MnesiaStoreError, reason: :table_not_exists
end
end
@impl true
def get(username, serial, table) do
t = fn ->
do_get(username, serial, table)
end
case :mnesia.transaction(t) do
{:atomic, [login]} ->
{:ok, to_struct(login)}
{:atomic, []} ->
{:error, :no_login}
{:aborted, {:no_exists, _}} ->
raise MnesiaStoreError, reason: :table_not_exists
end
end
@impl true
def put(%Login{username: username, serial: serial} = login, table) do
t = fn ->
do_delete(username, serial, table)
:mnesia.write(table, from_struct(login), :write)
end
case :mnesia.transaction(t) do
{:atomic, _} ->
:ok
{:aborted, {:no_exists, _}} ->
raise MnesiaStoreError, reason: :table_not_exists
{:aborted, {:bad_type, _}} ->
raise MnesiaStoreError, reason: :invalid_table_format
end
end
@impl true
def delete(username, serial, table) do
t = fn ->
do_delete(username, serial, table)
end
case :mnesia.transaction(t) do
{:atomic, _} ->
:ok
{:aborted, {:no_exists, _}} ->
raise MnesiaStoreError, reason: :table_not_exists
end
end
@impl true
def clean_old_logins(max_age, table) do
native_max_age = System.convert_time_unit(max_age, :seconds, :native)
oldest_timestamp = System.os_time() - native_max_age
t = fn ->
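      # Match spec: select every login record whose :last_login field
      # (bound to :"$1") is older than oldest_timestamp, returning the
      # whole record (:"$_").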
old_logins =
:mnesia.select(table, [
{
login(last_login: :"$1"),
[{:<, :"$1", oldest_timestamp}],
[:"$_"]
}
])
for login <- old_logins do
:mnesia.delete_object(table, login, :write)
to_struct(login)
end
end
case :mnesia.transaction(t) do
{:atomic, deleted_logins} ->
deleted_logins
{:aborted, {:no_exists, _}} ->
raise MnesiaStoreError, reason: :table_not_exists
end
end
@spec do_get(String.t(), String.t(), atom()) :: [LoginRecord.t()]
defp do_get(username, serial, table) do
:mnesia.index_match_object(
table,
login(username: username, serial: serial),
1 + login(:serial),
:read
)
end
@spec do_delete(String.t(), String.t(), atom()) :: :ok
defp do_delete(username, serial, table) do
case do_get(username, serial, table) do
[login] -> :mnesia.delete_object(table, login, :write)
[] -> :ok
end
end
end
|
lib/expected/mnesia_store.ex
| 0.831109
| 0.506958
|
mnesia_store.ex
|
starcoder
|
defmodule Geometry.MultiPointM do
@moduledoc """
A set of points from type `Geometry.PointM`.
`MultiPointM` implements the protocols `Enumerable` and `Collectable`.
## Examples
iex> Enum.map(
...> MultiPointM.new([
...> PointM.new(1, 2, 4),
...> PointM.new(3, 4, 6)
...> ]),
...> fn [x, _y, _m] -> x end
...> )
[1, 3]
iex> Enum.into([PointM.new(1, 2, 4)], MultiPointM.new())
%MultiPointM{
points:
MapSet.new([
[1, 2, 4]
])
}
"""
alias Geometry.{GeoJson, MultiPointM, PointM, WKB, WKT}
defstruct points: MapSet.new()
@type t :: %MultiPointM{points: MapSet.t(Geometry.coordinate())}
@doc """
Creates an empty `MultiPointM`.
## Examples
iex> MultiPointM.new()
%MultiPointM{points: MapSet.new()}
"""
@spec new :: t()
def new, do: %MultiPointM{}
@doc """
Creates a `MultiPointM` from the given `Geometry.PointM`s.
## Examples
iex> MultiPointM.new([
...> PointM.new(1, 2, 4),
...> PointM.new(1, 2, 4),
...> PointM.new(3, 4, 6)
...> ])
%MultiPointM{points: MapSet.new([
[1, 2, 4],
[3, 4, 6]
])}
iex> MultiPointM.new([])
%MultiPointM{points: MapSet.new()}
"""
@spec new([PointM.t()]) :: t()
def new([]), do: %MultiPointM{}
def new(points) do
%MultiPointM{points: Enum.into(points, MapSet.new(), fn point -> point.coordinate end)}
end
@doc """
Returns `true` if the given `MultiPointM` is empty.
## Examples
iex> MultiPointM.empty?(MultiPointM.new())
true
iex> MultiPointM.empty?(
...> MultiPointM.new(
...> [PointM.new(1, 2, 4), PointM.new(3, 4, 6)]
...> )
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%MultiPointM{} = multi_point), do: Enum.empty?(multi_point.points)
@doc """
Creates a `MultiPointM` from the given coordinates.
## Examples
iex> MultiPointM.from_coordinates(
...> [[-1, 1, 1], [-2, 2, 2], [-3, 3, 3]]
...> )
%MultiPointM{
points: MapSet.new([
[-1, 1, 1],
[-2, 2, 2],
[-3, 3, 3]
])
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(coordinates), do: %MultiPointM{points: MapSet.new(coordinates)}
@doc """
Returns an `:ok` tuple with the `MultiPointM` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "MultiPoint",
...> "coordinates": [
...> [1.1, 1.2, 1.4],
...> [20.1, 20.2, 20.4]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> MultiPointM.from_geo_json()
{:ok, %MultiPointM{points: MapSet.new([
[1.1, 1.2, 1.4],
[20.1, 20.2, 20.4]
])}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_multi_point(json, MultiPointM)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_multi_point(json, MultiPointM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `MultiPointM`.
There are no guarantees about the order of points in the returned
`coordinates`.
## Examples
```elixir
MultiPointM.to_geo_json(
MultiPointM.new([
PointM.new(-1.1, -2.2, -4.4),
PointM.new(1.1, 2.2, 4.4)
])
)
# =>
# %{
# "type" => "MultiPoint",
# "coordinates" => [
# [-1.1, -2.2, -4.4],
# [1.1, 2.2, 4.4]
# ]
# }
```
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%MultiPointM{points: points}) do
%{
"type" => "MultiPoint",
"coordinates" => MapSet.to_list(points)
}
end
@doc """
Returns an `:ok` tuple with the `MultiPointM` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> MultiPointM.from_wkt(
...> "MultiPoint M (-5.1 7.8 1, 0.1 0.2 2)"
...> )
{:ok, %MultiPointM{
points: MapSet.new([
[-5.1, 7.8, 1],
[0.1, 0.2, 2]
])
}}
iex> MultiPointM.from_wkt(
...> "SRID=7219;MultiPoint M (-5.1 7.8 1, 0.1 0.2 2)"
...> )
{:ok, {
%MultiPointM{
points: MapSet.new([
[-5.1, 7.8, 1],
[0.1, 0.2, 2]
])
},
7219
}}
iex> MultiPointM.from_wkt("MultiPoint M EMPTY")
      {:ok, %MultiPointM{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, MultiPointM)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, MultiPointM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `MultiPointM`. With option `:srid` an
EWKT representation with the SRID is returned.
There are no guarantees about the order of points in the returned
WKT-string.
## Examples
```elixir
MultiPointM.to_wkt(MultiPointM.new())
# => "MultiPoint M EMPTY"
MultiPointM.to_wkt(
MultiPointM.new([
PointM.new(7.1, 8.1, 1),
PointM.new(9.2, 5.2, 2)
    ])
  )
# => "MultiPoint M (7.1 8.1 1, 9.2 5.2 2)"
MultiPointM.to_wkt(
MultiPointM.new([
PointM.new(7.1, 8.1, 1),
PointM.new(9.2, 5.2, 2)
]),
srid: 123
)
# => "SRID=123;MultiPoint M (7.1 8.1 1, 9.2 5.2 2)"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%MultiPointM{points: points}, opts \\ []) do
WKT.to_ewkt(
<<
"MultiPoint M ",
points |> MapSet.to_list() |> to_wkt_points()::binary()
>>,
opts
)
end
@doc """
Returns the WKB representation for a `MultiPointM`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointM.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%MultiPointM{} = multi_point, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(multi_point, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `MultiPointM` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
An example of a simpler geometry can be found in the description for the
`Geometry.PointM.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, MultiPointM)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, MultiPointM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the number of elements in `MultiPointM`.
## Examples
iex> MultiPointM.size(
...> MultiPointM.new([
...> PointM.new(11, 12, 14),
...> PointM.new(21, 22, 24)
...> ])
...> )
2
"""
@spec size(t()) :: non_neg_integer()
def size(%MultiPointM{points: points}), do: MapSet.size(points)
@doc """
  Checks if `MultiPointM` contains `point`.
## Examples
iex> MultiPointM.member?(
...> MultiPointM.new([
...> PointM.new(11, 12, 14),
...> PointM.new(21, 22, 24)
...> ]),
...> PointM.new(11, 12, 14)
...> )
true
iex> MultiPointM.member?(
...> MultiPointM.new([
...> PointM.new(11, 12, 14),
...> PointM.new(21, 22, 24)
...> ]),
...> PointM.new(1, 2, 4)
...> )
false
"""
@spec member?(t(), PointM.t()) :: boolean()
def member?(%MultiPointM{points: points}, %PointM{coordinate: coordinate}),
do: MapSet.member?(points, coordinate)
@doc """
Converts `MultiPointM` to a list.
## Examples
iex> MultiPointM.to_list(
...> MultiPointM.new([
...> PointM.new(11, 12, 14),
...> PointM.new(21, 22, 24)
...> ])
...> )
[
[11, 12, 14],
[21, 22, 24]
]
"""
@spec to_list(t()) :: [Geometry.coordinate()]
def to_list(%MultiPointM{points: points}), do: MapSet.to_list(points)
@compile {:inline, to_wkt_points: 1}
defp to_wkt_points([]), do: "EMPTY"
defp to_wkt_points([coordinate | points]) do
<<"(",
Enum.reduce(points, PointM.to_wkt_coordinate(coordinate), fn coordinate, acc ->
<<acc::binary(), ", ", PointM.to_wkt_coordinate(coordinate)::binary()>>
end)::binary(), ")">>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(t(), Geometry.srid(), Geometry.endian(), Geometry.mode()) :: Geometry.wkb()
def to_wkb(%MultiPointM{points: points}, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_points(MapSet.to_list(points), endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_points: 3}
defp to_wkb_points(points, endian, mode) do
Enum.reduce(points, WKB.length(points, endian, mode), fn point, acc ->
<<acc::binary(), PointM.to_wkb(point, nil, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
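# WKB geometry type 4 is MultiPoint; the EWKB flag 0x40000000 marks the M
# coordinate and 0x20000000 an embedded SRID (hence 0x60000004 with SRID).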
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "40000004"
{:ndr, false} -> "04000040"
{:xdr, true} -> "60000004"
{:ndr, true} -> "04000060"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x40000004::big-integer-size(32)>>
{:ndr, false} -> <<0x40000004::little-integer-size(32)>>
{:xdr, true} -> <<0x60000004::big-integer-size(32)>>
{:ndr, true} -> <<0x60000004::little-integer-size(32)>>
end
end
defimpl Enumerable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def count(multi_point) do
{:ok, MultiPointM.size(multi_point)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def member?(multi_point, val) do
{:ok, MultiPointM.member?(multi_point, val)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def slice(multi_point) do
size = MultiPointM.size(multi_point)
{:ok, size, &Enumerable.List.slice(MultiPointM.to_list(multi_point), &1, &2, size)}
end
# credo:disable-for-next-line Credo.Check.Readability.Specs
def reduce(multi_point, acc, fun) do
Enumerable.List.reduce(MultiPointM.to_list(multi_point), acc, fun)
end
end
defimpl Collectable do
# credo:disable-for-next-line Credo.Check.Readability.Specs
def into(%MultiPointM{points: points}) do
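# Accumulate collected points in a list, then merge their coordinates into
# the MapSet's internal map on :done (relies on MapSet internals).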
fun = fn
list, {:cont, x} ->
[{x, []} | list]
list, :done ->
new = Enum.into(list, %{}, fn {point, []} -> {point.coordinate, []} end)
%MultiPointM{points: %{points | map: Map.merge(points.map, Map.new(new))}}
_list, :halt ->
:ok
end
{[], fun}
end
end
end
|
lib/geometry/multi_point_m.ex
| 0.956604
| 0.81549
|
multi_point_m.ex
|
starcoder
|
defmodule TimeZoneInfo.DataStore.ErlangTermStorage do
@moduledoc false
# This module implements the `TimeZoneInfo.DataStore` and stores the data with
# [:ets](https://erlang.org/doc/man/ets.html).
@behaviour TimeZoneInfo.DataStore
@app :time_zone_info
@time_zones :time_zone_info_time_zones
@transitions :time_zone_info_transitions
@links :time_zone_info_links
@rules :time_zone_info_rules
@impl true
def put(data) do
create_tables([@app, @time_zones, @transitions, @links, @rules])
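# Note the table mapping: the :time_zones data (name => transitions) goes
# into the @transitions table, while the name lists live under @time_zones.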
with true <- put(data, :version, @app),
true <- put(data, :time_zones, @transitions),
true <- put(data, :rules, @rules),
true <- put(data, :links, @links),
true <- put(data, :time_zone_names, @time_zones) do
:ok
else
_ -> :error
end
end
@impl true
def delete! do
Enum.each([@app, @time_zones, @transitions, @links, @rules], fn table ->
case :ets.info(table) do
:undefined -> :ok
_ -> :ets.delete(table)
end
end)
end
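# Transitions are keyed by time zone name; on a miss, the name may be a link,
# so resolve it via the @links table and retry with the linked zone.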
@impl true
def get_transitions(:error), do: {:error, :transitions_not_found}
def get_transitions({:ok, time_zone}) do
with :error <- fetch(@transitions, time_zone) do
{:error, :transitions_not_found}
end
end
def get_transitions(time_zone) do
with :error <- fetch(@transitions, time_zone) do
@links |> fetch(time_zone) |> get_transitions()
end
end
@impl true
def get_rules(rules) do
with :error <- fetch(@rules, rules) do
{:error, :rules_not_found}
end
end
@impl true
def get_time_zones(links: select) when select in [:ignore, :only, :include] do
case fetch(@time_zones, select) do
{:ok, time_zones} -> time_zones
:error -> []
end
end
@impl true
def version do
case fetch(@app, :version) do
{:ok, version} -> version
:error -> nil
end
end
@impl true
def empty? do
case :ets.info(@app) do
:undefined -> true
_ -> false
end
end
@impl true
def info do
%{
version: version(),
tables: info([@app, @time_zones, @transitions, @links, @rules]),
time_zones: length(get_time_zones(links: :ignore)),
links: length(get_time_zones(links: :only))
}
end
defp info(tables) do
Enum.into(tables, %{}, fn table ->
case :ets.info(table) do
:undefined ->
{table, :undefined}
info ->
{table, [size: info[:size], memory: info[:memory]]}
end
end)
end
defp put(data, :version, table) do
:ets.insert(table, {:version, Map.fetch!(data, :version)})
end
defp put(data, :time_zone_names, table) do
time_zones = data |> Map.get(:time_zones) |> Map.keys() |> Enum.sort()
links = data |> Map.get(:links) |> Map.keys() |> Enum.sort()
all = time_zones |> Enum.concat(links) |> Enum.sort()
with true <- :ets.insert(table, {:ignore, time_zones}),
true <- :ets.insert(table, {:only, links}),
true <- :ets.insert(table, {:include, all}) do
true
end
end
defp put(data, key, table) do
data
|> Map.get(key, %{})
|> Enum.all?(fn value -> :ets.insert(table, value) end)
end
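# :ets.match/2 with {key, :"$1"} binds the stored value; a single [[value]]
# result means the key is present.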
defp fetch(table, key) do
case :ets.match(table, {key, :"$1"}) do
[] -> :error
[[value]] -> {:ok, value}
end
end
defp create_tables(tables), do: Enum.each(tables, &create_table/1)
defp create_table(table) do
case :ets.info(table) do
:undefined -> :ets.new(table, [:named_table, :set, :public, read_concurrency: true])
_ -> table
end
end
end
|
lib/time_zone_info/data_store/erlang_term_storage.ex
| 0.822368
| 0.570092
|
erlang_term_storage.ex
|
starcoder
|
defmodule SimpleCipher do
@moduledoc false
@doc """
Given a `plaintext` and `key`, encode each character of the `plaintext` by
shifting it by the corresponding letter in the alphabet shifted by the number
of letters represented by the `key` character, repeating the `key` if it is
shorter than the `plaintext`.
For example, for the letter 'd', the alphabet is rotated to become:
defghijklmnopqrstuvwxyzabc
You would encode the `plaintext` by taking the current letter and mapping it
to the letter in the same position in this rotated alphabet.
abcdefghijklmnopqrstuvwxyz
defghijklmnopqrstuvwxyzabc
"a" becomes "d", "t" becomes "w", etc...
Each letter in the `plaintext` will be encoded with the alphabet of the `key`
character in the same position. If the `key` is shorter than the `plaintext`,
repeat the `key`.
Example:
plaintext = "testing"
key = "abc"
The key should repeat to become the same length as the text, becoming
"abcabca". If the key is longer than the text, only use as many letters of it
as are necessary.
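For example (assuming all-lowercase input, since this cipher only handles
the letters a-z):
encode("testing", "abc")
# => "tfutjpg"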
"""
@spec encode(String.t(), String.t()) :: String.t()
def encode(plaintext, key) do
rotate(plaintext, key, :forward)
end
@doc """
Given a `ciphertext` and `key`, decode each character of the `ciphertext` by
finding the corresponding letter in the alphabet shifted by the number of
letters represented by the `key` character, repeating the `key` if it is
shorter than the `ciphertext`.
The same rules for key length and shifted alphabets apply as in `encode/2`,
but you will go the opposite way, so "d" becomes "a", "w" becomes "t",
etc..., depending on how much you shift the alphabet.
"""
@spec decode(String.t(), String.t()) :: String.t()
def decode(ciphertext, key) do
rotate(ciphertext, key, :backward)
end
@doc """
Generate a random key of a given length. It should contain lowercase letters only.
"""
@spec generate_key(integer()) :: String.t()
def generate_key(length) do
1..length |> Enum.map(fn _ -> Enum.random(?a..?z) end) |> List.to_string()
end
defp rotate(plaintext, key, direction) when is_binary(plaintext) and is_binary(key) do
key_stream = Stream.cycle(String.to_charlist(key))
plaintext
|> String.to_charlist()
|> Enum.zip_with(key_stream, &rotate(&1, &2, direction))
|> to_string()
end
defp rotate(plain_char, key_char, direction) do
p = plain_char - ?a
k = key_char - ?a
n = if direction == :forward, do: p + k, else: p - k
?a + Integer.mod(n, 26)
end
end
|
simple-cipher/lib/simple_cipher.ex
| 0.914853
| 0.779238
|
simple_cipher.ex
|
starcoder
|
defmodule Assertions do
@moduledoc """
Helpful assertions with great error messages to help you write better tests.
"""
alias Assertions.Comparisons
@type comparison :: (any, any -> boolean | no_return)
@doc """
Asserts that the return value of the given expression is `true`.
This is different than the normal behavior of `assert` since that will pass
for any value that is "truthy" (anything other than `false` or `nil`). This is
a stricter assertion, only passing if the value is `true`. This is very
helpful for testing values that are expected to only be booleans.
This will also check specifically for `nil` values when using `>`, `<`, `>=`
or `<=` since those frequently have unintended behavior.
iex> assert!(:a == :a)
true
iex> assert!(10 > 5)
true
iex> map = %{key: true}
iex> assert!(map.key)
true
"""
@spec assert!(Macro.expr()) :: true | no_return
defmacro assert!({operator, _, [left, right]} = assertion)
when operator in [:>, :<, :>=, :<=] do
expr = escape_quoted(:assert!, assertion)
{args, value} = extract_args(assertion, __CALLER__)
quote do
left = unquote(left)
right = unquote(right)
if is_nil(left) or is_nil(right) do
assert false,
left: left,
right: right,
expr: unquote(expr),
message:
"`nil` is not allowed as an argument to `#{unquote(operator)}` when using `assert!`"
else
value = unquote(value)
unless value == true do
raise ExUnit.AssertionError,
args: unquote(args),
expr: unquote(expr),
message: "Expected `true`, got #{inspect(value)}"
end
true
end
end
end
defmacro assert!(assertion) do
{args, value} = extract_args(assertion, __CALLER__)
quote do
value = unquote(value)
unless value == true do
raise ExUnit.AssertionError,
args: unquote(args),
expr: unquote(escape_quoted(:assert!, assertion)),
message: "Expected `true`, got #{inspect(value)}"
end
value
end
end
@doc """
Asserts that the return value of the given expression is `false`.
This is different than the normal behavior of `refute/1` since that will pass
if the value is either `false` or `nil`. This is a stricter assertion, only
passing if the value is `false`. This is very helpful for testing values that
are expected to only be booleans.
This will also check specifically for `nil` values when using `>`, `<`, `>=`
or `<=` since those frequently have unintended behavior.
iex> refute!(5 > 10)
true
iex> refute!("a" == "A")
true
"""
@spec refute!(Macro.expr()) :: true | no_return
defmacro refute!({operator, _, [left, right]} = assertion)
when operator in [:>, :<, :>=, :<=] do
expr = escape_quoted(:refute!, assertion)
{args, value} = extract_args(assertion, __CALLER__)
quote do
left = unquote(left)
right = unquote(right)
if is_nil(left) or is_nil(right) do
raise ExUnit.AssertionError,
args: unquote(args),
expr: unquote(expr),
left: left,
right: right,
message:
"`nil` is not allowed as an argument to `#{unquote(operator)}` when using `refute!`"
else
value = unquote(value)
unless value == false do
raise ExUnit.AssertionError,
args: unquote(args),
expr: unquote(expr),
left: left,
right: right,
message: "Expected `false`, got #{inspect(value)}"
end
true
end
end
end
defmacro refute!(assertion) do
{args, value} = extract_args(assertion, __CALLER__)
quote do
value = unquote(value)
unless value == false do
raise ExUnit.AssertionError,
args: unquote(args),
expr: unquote(escape_quoted(:refute!, assertion)),
message: "Expected `false`, got #{inspect(value)}"
end
true
end
end
@doc """
Asserts that a function should raise an exception, but without forcing the user to specify which
exception should be raised. This is essentially a less-strict version of `assert_raise/2`.
iex> assert_raise(fn -> String.to_existing_atom("asleimflisesliseli") end)
true
"""
@spec assert_raise(fun()) :: true | no_return
def assert_raise(func) do
try do
func.()
ExUnit.Assertions.flunk("Expected exception but nothing was raised")
rescue
e in ExUnit.AssertionError ->
raise e
_ ->
true
end
end
@doc """
Asserts that two lists contain the same elements without asserting they are
in the same order.
iex> assert_lists_equal([1, 2, 3], [1, 3, 2])
true
"""
@spec assert_lists_equal(list, list) :: true | no_return
defmacro assert_lists_equal(left, right) do
assertion =
assertion(
quote do
assert_lists_equal(unquote(left), unquote(right))
end
)
quote do
{left_diff, right_diff, equal?} = Comparisons.compare_lists(unquote(left), unquote(right))
if equal? do
true
else
raise ExUnit.AssertionError,
args: [unquote(left), unquote(right)],
left: left_diff,
right: right_diff,
expr: unquote(assertion),
message: "Comparison of each element failed!"
end
end
end
@doc """
Asserts that two lists contain the same elements without asserting they are
in the same order.
The given comparison function determines if the two lists are considered
equal.
iex> assert_lists_equal(["dog"], ["cat"], &(is_binary(&1) and is_binary(&2)))
true
"""
@spec assert_lists_equal(list, list, comparison) :: true | no_return
defmacro assert_lists_equal(left, right, comparison) do
assertion =
assertion(
quote do
assert_lists_equal(unquote(left), unquote(right), unquote(comparison))
end
)
quote do
{left_diff, right_diff, equal?} =
Comparisons.compare_lists(unquote(left), unquote(right), unquote(comparison))
if equal? do
true
else
raise ExUnit.AssertionError,
args: [unquote(left), unquote(right), unquote(comparison)],
left: left_diff,
right: right_diff,
expr: unquote(assertion),
message: "Comparison of each element failed!"
end
end
end
@doc """
Asserts that a `map` is in the given `list`.
This is either done by passing a list of `keys`, and the values at those keys
will be compared to determine if the map is in the list.
iex> map = %{first: :first, second: :second}
iex> list = [%{first: :first, second: :second, third: :third}]
iex> keys = [:first, :second]
iex> assert_map_in_list(map, list, keys)
true
Or this is done by passing a comparison function that determines if the map
is in the list.
If using a comparison function, the `map` is the first argument to that
function, and the elements in the list are the second argument.
iex> map = %{first: :first, second: :second}
iex> list = [%{"first" => :first, "second" => :second, "third" => :third}]
iex> comparison = &(&1.first == &2["first"] and &1.second == &2["second"])
iex> assert_map_in_list(map, list, comparison)
true
"""
@spec assert_map_in_list(map, [map], [any]) :: true | no_return
@spec assert_map_in_list(map, [map], comparison) :: true | no_return
defmacro assert_map_in_list(map, list, keys_or_comparison) do
assertion =
assertion(
quote do
assert_map_in_list(unquote(map), unquote(list), unquote(keys_or_comparison))
end
)
quote do
keys_or_comparison = unquote(keys_or_comparison)
{in_list?, map, list, message} =
if is_list(keys_or_comparison) do
keys = keys_or_comparison
map = Map.take(unquote(map), keys)
list = Enum.map(unquote(list), &Map.take(&1, keys))
keys = unquote(stringify_list(keys_or_comparison))
message = "Map matching the values for keys `#{keys}` not found"
{Enum.member?(list, map), map, list, message}
else
comparison = keys_or_comparison
map = unquote(map)
list = unquote(list)
message = "Map not found in list using given comparison"
{Enum.any?(list, &comparison.(map, &1)), map, list, message}
end
if in_list? do
true
else
raise ExUnit.AssertionError,
args: [unquote(map), unquote(list)],
left: map,
right: list,
expr: unquote(assertion),
message: message
end
end
end
@doc """
Asserts that two maps are equal.
Equality can be determined in two ways. First, by passing a list of keys. The
values at these keys will be used to determine if the maps are equal.
iex> left = %{first: :first, second: :second, third: :third}
iex> right = %{first: :first, second: :second, third: :fourth}
iex> keys = [:first, :second]
iex> assert_maps_equal(left, right, keys)
true
The second is to pass a comparison function that returns a boolean that
determines if the maps are equal. When using a comparison function, the first
argument to the function is the `left` map and the second argument is the
`right` map.
iex> left = %{first: :first, second: :second, third: :third}
iex> right = %{"first" => :first, "second" => :second, "third" => :fourth}
iex> comparison = &(&1.first == &2["first"] and &1.second == &2["second"])
iex> assert_maps_equal(left, right, comparison)
true
"""
@spec assert_maps_equal(map, map, [any]) :: true | no_return
@spec assert_maps_equal(map, map, comparison) :: true | no_return
defmacro assert_maps_equal(left, right, keys_or_comparison) do
assertion =
assertion(
quote do
assert_maps_equal(unquote(left), unquote(right), unquote(keys_or_comparison))
end
)
quote do
keys_or_comparison = unquote(keys_or_comparison)
left = unquote(left)
right = unquote(right)
{left_diff, right_diff, equal?, message} =
if is_list(keys_or_comparison) do
keys = keys_or_comparison
left = Map.take(left, keys)
right = Map.take(right, keys)
{left_diff, right_diff, equal?} = Comparisons.compare_maps(left, right)
message = "Values for #{unquote(stringify_list(keys_or_comparison))} not equal!"
{left_diff, right_diff, equal?, message}
else
comparison = keys_or_comparison
{left, right, comparison.(left, right), "Maps not equal using given comparison"}
end
if equal? do
true
else
raise ExUnit.AssertionError,
args: [unquote(left), unquote(right)],
left: left_diff,
right: right_diff,
expr: unquote(assertion),
message: message
end
end
end
@doc """
Asserts that the `struct` is present in the `list`.
There are two ways to make this comparison. First is to pass a list of keys
to use to compare the `struct` to the structs in the `list`.
iex> now = DateTime.utc_now()
iex> list = [DateTime.utc_now(), Date.utc_today()]
iex> keys = [:year, :month, :day]
iex> assert_struct_in_list(now, list, keys)
true
The second way to use this assertion is to pass a comparison function.
When using a comparison function, the `struct` is the first argument to that
function and the elements in the `list` will be the second argument.
iex> now = DateTime.utc_now()
iex> list = [DateTime.utc_now(), Date.utc_today()]
iex> assert_struct_in_list(now, list, &(&1.year == &2.year))
true
"""
@spec assert_struct_in_list(struct, [struct], [atom]) :: true | no_return
@spec assert_struct_in_list(struct, [struct], (struct, struct -> boolean)) :: true | no_return
defmacro assert_struct_in_list(struct, list, keys_or_comparison) do
assertion =
assertion(
quote do
assert_struct_in_list(unquote(struct), unquote(list), unquote(keys_or_comparison))
end
)
quote do
struct = unquote(struct)
list = unquote(list)
keys_or_comparison = unquote(keys_or_comparison)
{in_list?, message} =
if is_list(keys_or_comparison) do
keys = [:__struct__ | keys_or_comparison]
struct = Map.take(struct, keys)
list = Enum.map(list, &Map.take(&1, keys))
{struct in list,
"Struct matching the values for keys #{unquote(stringify_list(keys_or_comparison))} not found"}
else
comparison = keys_or_comparison
{Enum.any?(list, &comparison.(struct, &1)),
"Struct not found in list using the given comparison"}
end
if in_list? do
true
else
raise ExUnit.AssertionError,
args: [struct, list, keys_or_comparison],
left: struct,
right: list,
expr: unquote(assertion),
message: message
end
end
end
@doc """
Asserts that two structs are equal.
Equality can be determined in two ways. First, by passing a list of keys. The
values at these keys and the type of the structs will be used to determine if
the structs are equal.
iex> left = DateTime.utc_now()
iex> right = DateTime.utc_now()
iex> keys = [:year, :minute]
iex> assert_structs_equal(left, right, keys)
true
The second is to pass a comparison function that returns a boolean that
determines if the structs are equal. When using a comparison function, the
first argument to the function is the `left` struct and the second argument
is the `right` struct.
iex> left = DateTime.utc_now()
iex> right = DateTime.utc_now()
iex> comparison = &(&1.year == &2.year and &1.minute == &2.minute)
iex> assert_structs_equal(left, right, comparison)
true
"""
@spec assert_structs_equal(struct, struct, [atom]) :: true | no_return
@spec assert_structs_equal(struct, struct, (any, any -> boolean)) :: true | no_return
defmacro assert_structs_equal(left, right, keys_or_comparison) do
assertion =
assertion(
quote do
assert_structs_equal(unquote(left), unquote(right), unquote(keys_or_comparison))
end
)
quote do
left = unquote(left)
right = unquote(right)
keys_or_comparison = unquote(keys_or_comparison)
{left_diff, right_diff, equal?, message} =
if is_list(keys_or_comparison) do
keys = [:__struct__ | keys_or_comparison]
left = Map.take(left, keys)
right = Map.take(right, keys)
message = "Values for #{unquote(stringify_list(keys_or_comparison))} not equal!"
{left_diff, right_diff, equal?} = Comparisons.compare_maps(left, right)
{left_diff, right_diff, equal?, message}
else
comparison = keys_or_comparison
{left_diff, right_diff, equal?} =
case comparison.(left, right) do
{_, _, equal?} = result when is_boolean(equal?) -> result
true_or_false when is_boolean(true_or_false) -> {left, right, true_or_false}
end
{left_diff, right_diff, equal?, "Comparison failed!"}
end
if equal? do
true
else
raise ExUnit.AssertionError,
args: [unquote(left), unquote(right)],
left: left_diff,
right: right_diff,
expr: unquote(assertion),
message: message
end
end
end
@doc """
Asserts that all maps, structs or keyword lists in `list` have the same
`value` for `key`.
iex> assert_all_have_value([%{key: :value}, %{key: :value, other: :key}], :key, :value)
true
iex> assert_all_have_value([[key: :value], [key: :value, other: :key]], :key, :value)
true
iex> assert_all_have_value([[key: :value], %{key: :value, other: :key}], :key, :value)
true
"""
@spec assert_all_have_value(list(map | struct | Keyword.t()), any, any) :: true | no_return
defmacro assert_all_have_value(list, key, value) do
assertion =
assertion(
quote do
assert_all_have_value(unquote(list), unquote(key), unquote(value))
end
)
quote do
key = unquote(key)
value = unquote(value)
list =
Enum.map(unquote(list), fn
map when is_map(map) -> Map.take(map, [key])
list -> [{key, Keyword.get(list, key, :key_not_present)}]
end)
diff =
Enum.reject(list, fn
map when is_map(map) -> Map.equal?(map, %{key => value})
list -> Keyword.equal?(list, [{key, value}])
end)
if diff == [] do
true
else
raise ExUnit.AssertionError,
args: [unquote(list), unquote(key), unquote(value)],
left: %{key => value},
right: diff,
expr: unquote(assertion),
message: "Values for `#{inspect(key)}` not equal in all elements!"
end
end
end
@doc """
Asserts that the file at `path` is changed to match `comparison` after
executing the given `expression`.
If the file matches `comparison` before executing `expr`, this assertion will
fail. The file does not have to exist before executing `expr` in order for
this assertion to pass.
iex> path = Path.expand("../tmp/file.txt", __DIR__)
iex> result = assert_changes_file(path, "hi") do
iex> File.mkdir_p!(Path.dirname(path))
iex> File.write(path, "hi")
iex> end
iex> File.rm_rf!(Path.dirname(path))
iex> result
true
"""
@spec assert_changes_file(Path.t(), String.t() | Regex.t(), Macro.expr()) :: true | no_return
defmacro assert_changes_file(path, comparison, [do: expr] = expression) do
assertion =
assertion(
quote do
assert_changes_file(unquote(path), unquote(comparison), unquote(expression))
end
)
quote do
path = unquote(path)
comparison = unquote(comparison)
args = [unquote(path), unquote(comparison)]
{match_before?, start_file} =
case File.read(path) do
{:ok, start_file} -> {start_file =~ comparison, start_file}
_ -> {false, nil}
end
if match_before? do
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
left: start_file,
right: unquote(comparison),
message: "File #{inspect(path)} matched `#{inspect(comparison)}` before executing expr!"
else
unquote(expr)
end_file =
case File.read(path) do
{:ok, end_file} ->
end_file
_ ->
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
message: "File #{inspect(path)} does not exist after executing expr!"
end
if end_file =~ comparison do
true
else
raise ExUnit.AssertionError,
args: args,
left: end_file,
right: comparison,
expr: unquote(assertion),
message: "File did not change to match comparison after expr!"
end
end
end
end
@doc """
Asserts that the file at `path` is created after executing the given
`expression`.
iex> path = Path.expand("../tmp/file.txt", __DIR__)
iex> File.mkdir_p!(Path.dirname(path))
iex> result = assert_creates_file path do
iex> File.write(path, "hi")
iex> end
iex> File.rm_rf!(Path.dirname(path))
iex> result
true
"""
@spec assert_creates_file(Path.t(), Macro.expr()) :: true | no_return
defmacro assert_creates_file(path, [do: expr] = expression) do
assertion =
assertion(
quote do
assert_creates_file(unquote(path), unquote(expression))
end
)
quote do
path = unquote(path)
args = [unquote(path)]
if File.exists?(path) do
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
message: "File #{inspect(path)} existed before executing expr!"
else
unquote(expr)
if File.exists?(path) do
true
else
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
message: "File #{inspect(path)} does not exist after executing expr!"
end
end
end
end
@doc """
Asserts that the file at `path` is deleted after executing the given
`expression`.
iex> path = Path.expand("../tmp/file.txt", __DIR__)
iex> File.mkdir_p!(Path.dirname(path))
iex> File.write(path, "hi")
iex> assert_deletes_file path do
iex> File.rm_rf!(Path.dirname(path))
iex> end
true
"""
@spec assert_deletes_file(Path.t(), Macro.expr()) :: true | no_return
defmacro assert_deletes_file(path, [do: expr] = expression) do
assertion =
assertion(
quote do
assert_deletes_file(unquote(path), unquote(expression))
end
)
quote do
path = unquote(path)
args = [unquote(path)]
if !File.exists?(path) do
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
message: "File #{inspect(path)} did not exist before executing expr!"
else
unquote(expr)
if !File.exists?(path) do
true
else
raise ExUnit.AssertionError,
args: args,
expr: unquote(assertion),
message: "File #{inspect(path)} exists after executing expr!"
end
end
end
end
@doc """
Tests that a message matching the given `pattern`, and only that message, is
received before the given `timeout`, specified in milliseconds.
The optional second argument is a timeout for the `receive` to wait for the
expected message, and defaults to 100ms.
## Examples
iex> send(self(), :hello)
iex> assert_receive_only(:hello)
true
iex> send(self(), [:hello])
iex> assert_receive_only([_])
true
iex> a = :hello
iex> send(self(), :hello)
iex> assert_receive_only(^a)
true
iex> send(self(), :hello)
iex> assert_receive_only(a when is_atom(a))
iex> a
:hello
iex> send(self(), %{key: :value})
iex> assert_receive_only(%{key: value} when is_atom(value))
iex> value
:value
If a second message arrives after the assertion has already matched a message
to the given pattern, that second message is ignored and the assertion
returns `true`, even if it arrives before the given timeout.
This assertion only tests that the message matching the given pattern was the
first message in the process inbox, and that nothing else was sent between
that message and the call to `assert_receive_only/2`.
iex> Process.send_after(self(), :hello, 20)
iex> Process.send_after(self(), :hello_again, 50)
iex> assert_receive_only(:hello, 100)
true
"""
@spec assert_receive_only(Macro.expr(), non_neg_integer) :: any | no_return
defmacro assert_receive_only(pattern, timeout \\ 100) do
binary = Macro.to_string(pattern)
caller = __CALLER__
assertion =
assertion(
quote do
assert_receive_only(unquote(pattern), unquote(timeout))
end
)
expanded_pattern = expand_pattern(pattern, caller)
vars = collect_vars_from_pattern(expanded_pattern)
{timeout, pattern, failure_message} =
if function_exported?(ExUnit.Assertions, :__timeout__, 4) do
assert_receive_data(:old, pattern, expanded_pattern, timeout, caller, vars, binary)
else
assert_receive_data(:new, pattern, expanded_pattern, timeout, caller, vars, binary)
end
bind_variables =
quote do
{received, unquote(vars)}
end
quote do
timeout = unquote(timeout)
unquote(bind_variables) =
receive do
unquote(pattern) ->
result = unquote(bind_variables)
receive do
thing ->
raise ExUnit.AssertionError,
expr: unquote(assertion),
message: "`#{inspect(thing)}` was also in the mailbox"
after
0 ->
result
end
random_thing ->
raise ExUnit.AssertionError,
expr: unquote(assertion),
message: "Received unexpected message: `#{inspect(random_thing)}`"
after
timeout -> flunk(unquote(failure_message))
end
true
end
end
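# ExUnit's private __timeout__ helpers changed arity across Elixir versions;
# detect which variant is available and build the timeout and failure message
# accordingly.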
defp assert_receive_data(:old, pattern, _, timeout, caller, vars, binary) do
pins = collect_pins_from_pattern(pattern, Macro.Env.vars(caller))
{pattern, pattern_finder} = patterns(pattern, vars)
timeout =
if is_integer(timeout) do
timeout
else
quote do: ExUnit.Assertions.__timeout__(unquote(timeout))
end
failure_message =
quote do
ExUnit.Assertions.__timeout__(
unquote(binary),
unquote(pins),
unquote(pattern_finder),
timeout
)
end
{timeout, pattern, failure_message}
end
defp assert_receive_data(:new, pattern, expanded_pattern, timeout, caller, vars, _) do
code = escape_quoted(:assert_receive_only, pattern)
pins = collect_pins_from_pattern(expanded_pattern, Macro.Env.vars(caller))
{pattern, pattern_finder} = patterns(expanded_pattern, vars)
timeout =
if function_exported?(ExUnit.Assertions, :__timeout__, 2) do
quote do
ExUnit.Assertions.__timeout__(unquote(timeout), :assert_receive_timeout)
end
else
quote do
ExUnit.Assertions.__timeout__(unquote(timeout))
end
end
failure_message =
quote do
ExUnit.Assertions.__timeout__(
unquote(Macro.escape(expanded_pattern)),
unquote(code),
unquote(pins),
unquote(pattern_finder),
timeout
)
end
{timeout, pattern, failure_message}
end
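# Builds two forms of the pattern: one that binds the received message inside
# the receive clause, and a finder fun used to scan mailbox contents when
# producing the timeout failure message.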
defp patterns(pattern, vars) do
pattern =
case pattern do
{:when, meta, [left, right]} ->
{:when, meta, [quote(do: unquote(left) = received), right]}
left ->
quote(do: unquote(left) = received)
end
quoted_pattern =
quote do
case message do
unquote(pattern) ->
_ = unquote(vars)
true
_ ->
false
end
end
pattern_finder =
quote do
fn message ->
unquote(suppress_warning(quoted_pattern))
end
end
{pattern, pattern_finder}
end
@doc """
Asserts that some condition succeeds within a given timeout (in milliseconds)
and sleeps for a given time between checks of the given condition (in
milliseconds).
This is helpful for testing that asynchronous operations have succeeded within
a certain timeframe. This method of testing asynchronous operations is less
reliable than other methods, but it can often be more useful at an integration
level.
iex> Process.send_after(self(), :hello, 50)
iex> assert_async do
iex> assert_received :hello
iex> end
true
iex> Process.send_after(self(), :hello, 50)
iex> assert_async(timeout: 75, sleep_time: 40) do
iex> assert_received :hello
iex> end
true
iex> Process.send_after(self(), :hello, 50)
iex> try do
iex> assert_async(timeout: 4, sleep_time: 2) do
iex> assert_received :hello
iex> end
iex> rescue
iex> _ -> :failed
iex> end
:failed
"""
@spec assert_async(Keyword.t(), Macro.expr()) :: true | no_return
defmacro assert_async(opts \\ [], [do: expr] = expression) do
sleep_time = Keyword.get(opts, :sleep_time, 10)
timeout = Keyword.get(opts, :timeout, 100)
assertion =
assertion(
quote do
assert_async(unquote(opts), unquote(expression))
end
)
condition =
quote do
fn -> unquote(expr) end
end
quote do
assert_async(unquote(condition), unquote(assertion), unquote(timeout), unquote(sleep_time))
end
end
@doc false
def assert_async(condition, expr, timeout, sleep_time) do
start_time = NaiveDateTime.utc_now()
end_time = NaiveDateTime.add(start_time, timeout, :millisecond)
assert_async(condition, end_time, expr, timeout, sleep_time)
end
@doc false
def assert_async(condition, end_time, expr, timeout, sleep_time) do
result =
try do
condition.()
rescue
_ in [ExUnit.AssertionError] -> false
end
if result == false do
if NaiveDateTime.compare(NaiveDateTime.utc_now(), end_time) == :lt do
Process.sleep(sleep_time)
assert_async(condition, end_time, expr, timeout, sleep_time)
else
raise ExUnit.AssertionError,
args: [timeout],
expr: expr,
message: "Given condition did not return true before timeout: #{timeout}"
end
else
true
end
end
defp assertion(quoted), do: Macro.escape(quoted, prune_metadata: true)
defp stringify_list(list) do
quote do
unquote(list)
|> Enum.map(fn
elem when is_atom(elem) -> ":#{elem}"
elem when is_binary(elem) -> "\"#{elem}\""
elem -> "#{inspect(elem)}"
end)
|> Enum.join(", ")
end
end
defp expand_pattern({:when, meta, [left, right]}, caller) do
left = expand_pattern_except_vars(left, Macro.Env.to_match(caller))
right = expand_pattern_except_vars(right, %{caller | context: :guard})
{:when, meta, [left, right]}
end
defp expand_pattern(expr, caller) do
expand_pattern_except_vars(expr, Macro.Env.to_match(caller))
end
defp expand_pattern_except_vars(expr, caller) do
Macro.prewalk(expr, fn
{var, _, context} = node when is_atom(var) and is_atom(context) -> node
other -> Macro.expand(other, caller)
end)
end
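# Walks a pattern AST and collects every variable it binds, skipping pinned
# (^) values, module attributes, and the `_` wildcard.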
defp collect_vars_from_pattern(expr) do
Macro.prewalk(expr, [], fn
{:"::", _, [left, _]}, acc ->
{[left], acc}
{skip, _, [_]}, acc when skip in [:^, :@] ->
{:ok, acc}
{:_, _, context}, acc when is_atom(context) ->
{:ok, acc}
{name, meta, context}, acc when is_atom(name) and is_atom(context) ->
{:ok, [{name, [generated: true] ++ meta, context} | acc]}
node, acc ->
{node, acc}
end)
|> elem(1)
end
defp collect_pins_from_pattern(expr, vars) do
{_, pins} =
Macro.prewalk(expr, [], fn
{:^, _, [{name, _, nil} = var]}, acc ->
if {name, nil} in vars do
{:ok, [{name, var} | acc]}
else
{:ok, acc}
end
form, acc ->
{form, acc}
end)
Enum.uniq_by(pins, &elem(&1, 0))
end
defp suppress_warning({name, meta, [expr, [do: clauses]]}) do
clauses =
Enum.map(clauses, fn {:->, meta, args} ->
{:->, [generated: true] ++ meta, args}
end)
{name, meta, [expr, [do: clauses]]}
end
defp extract_args({root, meta, [_ | _] = args} = expr, env) do
arity = length(args)
reserved? =
is_atom(root) and (Macro.special_form?(root, arity) or Macro.operator?(root, arity))
all_quoted_literals? = Enum.all?(args, &Macro.quoted_literal?/1)
case Macro.expand_once(expr, env) do
^expr when not reserved? and not all_quoted_literals? ->
vars = for i <- 1..arity, do: Macro.var(:"arg#{i}", __MODULE__)
quoted =
quote do
{unquote_splicing(vars)} = {unquote_splicing(args)}
unquote({root, meta, vars})
end
{vars, quoted}
other ->
{ExUnit.AssertionError.no_value(), other}
end
end
defp extract_args(expr, _env) do
{ExUnit.AssertionError.no_value(), expr}
end
defp escape_quoted(kind, expr) do
Macro.escape({kind, [], [expr]}, prune_metadata: true)
end
end
|
lib/assertions.ex
| 0.930789
| 0.808483
|
assertions.ex
|
starcoder
|
defmodule Tomato.ProgressBar do
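@moduledoc """
Renders a single-line terminal progress bar: `render/2` takes the current and
total counts, redraws the bar in place using ANSI escapes, and appends a
newline once the work completes.
"""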
@format [
bar: "=",
blank: " ",
left: "|",
right: "|",
percent: true,
suffix: false,
bar_color: [],
blank_color: [],
width: :auto
]
@min_bar_width 1
@max_bar_width 100
@fallback 80
def render(current, total) do
percent = (current / total * 100) |> round
suffix = [
formatted_percent(@format[:percent], percent),
formatted_suffix(@format[:suffix], current, total),
newline_if_complete(current, total)
]
write(
@format,
{@format[:bar], @format[:bar_color], percent},
{@format[:blank], @format[:blank_color]},
suffix
)
end
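# Usage sketch (illustrative):
#
#     Enum.each(1..100, fn i ->
#       Tomato.ProgressBar.render(i, 100)
#       Process.sleep(10)
#     end)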
defp formatted_percent(false, _), do: ""
defp formatted_percent(true, number) do
number
|> Integer.to_string()
|> String.pad_leading(4)
|> Kernel.<>("%")
end
defp formatted_suffix(:count, total, total), do: " (#{total})"
defp formatted_suffix(:count, current, total), do: " (#{current}/#{total})"
defp formatted_suffix(false, _, _), do: ""
defp newline_if_complete(total, total), do: "\n"
defp newline_if_complete(_, _), do: ""
def write(format, {bar, bar_color}, suffix) do
write(format, {bar, bar_color, 100}, {"", []}, suffix)
end
def write(format, {bar, bar_color, bar_percent}, {blank, blank_color}, suffix) do
{bar_width, blank_width} = bar_and_blank_widths(format, suffix, bar_percent)
full_bar = [
bar |> repeat(bar_width) |> color(bar_color),
blank |> repeat(blank_width) |> color(blank_color)
]
IO.write(chardata(format, full_bar, suffix))
end
defp bar_and_blank_widths(format, suffix, bar_percent) do
full_bar_width = full_bar_width(format, suffix)
bar_width = (bar_percent / 100 * full_bar_width) |> round
blank_width = full_bar_width - bar_width
{bar_width, blank_width}
end
defp chardata(format, bar, suffix) do
[
ansi_prefix(),
format[:left],
bar,
format[:right],
suffix
]
end
defp full_bar_width(format, suffix) do
other_text = chardata(format, "", suffix) |> IO.chardata_to_string()
determine(format[:width], other_text)
end
defp repeat("", _), do: ""
defp repeat(bar, width) do
bar
|> String.graphemes()
|> Stream.cycle()
|> Enum.take(width)
|> Enum.join()
end
def ansi_prefix do
[
ansi_clear_line(),
"\r"
]
|> Enum.join()
end
def strip_invisibles(string) do
string |> String.replace(~r/\e\[\d*[a-zA-Z]|[\r\n]/, "")
end
def color(content, []), do: content
def color(content, ansi_codes) do
[ansi_codes, content, IO.ANSI.reset()]
end
defp ansi_clear_line do
"\e[2K"
end
def determine(terminal_width_config, other_text) do
available_width = terminal_width(terminal_width_config)
other_width = other_text |> strip_invisibles() |> String.length()
remaining_width = available_width - other_width
clamp(remaining_width, @min_bar_width, @max_bar_width)
end
defp terminal_width(config) do
case config do
:auto -> detect_terminal_width()
fixed_value -> fixed_value
end
end
defp clamp(number, min_value, max_value) do
number |> min(max_value) |> max(min_value)
end
def detect_terminal_width do
case :io.columns() do
{:ok, count} -> count
_ -> @fallback
end
end
end
|
lib/progressbar.ex
| 0.505859
| 0.454714
|
progressbar.ex
|
starcoder
|
defmodule Blogit.Supervisor do
@moduledoc """
This module represents the root Supervisor of Blogit.
It uses a `one_for_all` strategy to supervise its children.
The children are:
* `Blogit.Server` worker used as the core process of `Blogit`. If it fails
all the top-level processes of the application must be restarted, thus
the `one_for_all` strategy.
* `Blogit.Components.Supervisor` supervisor which supervises
the components of Blogit that can be queried. If this process fails it will
be good to restart all the top-level processes, because the `Blogit.Server`
process is used by the components to manage their data.
* A Task.Supervisor used to supervise all the Tasks in Blogit.
* If the application is using the `Blogit.RepositoryProviders.Memory`
provider, a worker representing the in-memory repository is started
and supervised too.
`Blogit.Supervisor` is started in `Blogit.start/2` using
`Blogit.Supervisor.start_link/1` and the result of this call is what the
`start` function of the `Blogit` application returns.
"""
use Supervisor
@doc """
Accepts a module implementing `Blogit.RepositoryProvider` and starts the
supervisor defined by this module.
The `Blogit.RepositoryProvider` module is passed to the `Supervisor.init/1`
callback implemented by this module and is used to create and start the
`Blogit.Server` worker.
This function is called by `Blogit.start/2` and its result is returned by it.
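A usage sketch with the in-memory provider:
Blogit.Supervisor.start_link(Blogit.RepositoryProviders.Memory)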
"""
@spec start_link(module) :: Supervisor.on_start()
def start_link(repository_provider) do
Supervisor.start_link(__MODULE__, repository_provider, name: __MODULE__)
end
def init(repository_provider) do
children = [
supervisor(Blogit.Components.Supervisor, []),
supervisor(Task.Supervisor, [[name: :tasks_supervisor]]),
worker(Blogit.Server, [repository_provider])
]
children =
case repository_provider do
Blogit.RepositoryProviders.Memory ->
[worker(repository_provider, []) | children]
_ ->
children
end
opts = [strategy: :one_for_all]
supervise(children, opts)
end
end
|
lib/blogit/supervisor.ex
| 0.627609
| 0.551876
|
supervisor.ex
|
starcoder
|
if Code.ensure_loaded?(Plug.Conn) do
defmodule Joken.Plug do
import Joken
alias Joken.Token
require Logger
@moduledoc """
A Plug for signing and verifying authentication tokens.
## Usage
There are two possible scenarios:
1. Same configuration for all routes
2. Per route configuration
In the first scenario just add this plug before the dispatch plug.
defmodule MyRouter do
use Plug.Router
plug Joken.Plug, verify: &MyRouter.verify_function/0
plug :match
plug :dispatch
post "/user" do
# will only execute here if token is present and valid
end
match _ do
# will only execute here if token is present and valid
end
def verify_function() do
%Joken.Token{}
|> Joken.with_signer(hs256("secret"))
|> Joken.with_sub(1234567890)
end
end
In the second scenario, you will need at least plug ~> 0.14 in your deps.
Then you must plug this AFTER :match and BEFORE :dispatch.
defmodule MyRouter do
use Plug.Router
# route options
@skip_token_verification %{joken_skip: true}
plug :match
plug Joken.Plug, verify: &MyRouter.verify_function/0
plug :dispatch
post "/user" do
# will only execute here if token is present and valid
end
# see options section below
match _, private: @skip_token_verification do
# will NOT try to validate a token
end
def verify_function() do
%Joken.Token{}
|> Joken.with_signer(hs256("secret"))
|> Joken.with_sub(1234567890)
end
end
## Options
This plug accepts the following options in its initialization:
- `verify` (required): a function used to verify the token. The function must at least specify algorithm used and your secret using the `with_signer` function (see above). Must return a Token.
- `on_error` (optional): a function that will be called with `conn` and
`message`. Must return a tuple containing the conn and a binary representing
the 401 response. If it's a map, it will be turned into json, otherwise, it
will be returned as is.
When using this with per route options you must pass a private map of
options to the route. The keys that Joken will look for in that map are:
- `joken_skip`: skips token validation
- `joken_verify`: Same as `verify` above. Overrides
`verify` if it was defined on the Plug
- `joken_on_error`: Same as `on_error` above. Overrides
`on_error` if it was defined on the Plug
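For example, an `on_error` hook that renders a JSON body (a sketch; a map in
the returned tuple is encoded to JSON automatically):
def on_error(conn, message) do
{conn, %{error: message}}
end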
"""
import Plug.Conn
@lint {Credo.Check.Design.AliasUsage, false}
@doc false
def init(opts) do
verify = get_verify(opts)
on_error = Keyword.get(opts, :on_error, &Joken.Plug.default_on_error/2)
token_function = Keyword.get(opts, :token, &Joken.Plug.default_token_function/1)
{verify, on_error, token_function}
end
@doc false
def call(conn, {verify, on_error, token_function}) do
conn = if Map.has_key?(conn.private, :joken_verify) do
conn
else
set_joken_verify(conn, verify)
end
conn = if Map.has_key?(conn.private, :joken_on_error) do
conn
else
put_private(conn, :joken_on_error, on_error)
end
conn = if Map.has_key?(conn.private, :joken_token_function) do
conn
else
put_private(conn, :joken_token_function, token_function)
end
if Map.get(conn.private, :joken_skip, false) do
conn
else
parse_auth(conn, conn.private[:joken_token_function].(conn))
end
end
defp get_verify(options) do
case Keyword.take(options, [:verify, :on_verifying]) do
[verify: verify] -> verify
[verify: verify, on_verifying: _] ->
warn_on_verifying()
verify
[on_verifying: verify] ->
warn_on_verifying()
verify
[] ->
warn_supply_verify_function()
nil
end
end
defp warn_on_verifying do
Logger.warn "on_verifying is deprecated for the Joken plug and will be removed in a future version. Please use verify instead."
end
defp warn_supply_verify_function do
Logger.warn "You need to supply a verify function to the Joken token."
end
defp set_joken_verify(conn, verify) do
case conn.private do
%{joken_on_verifying: deprecated_verify} ->
warn_on_verifying()
put_private(conn, :joken_verify, deprecated_verify)
_ ->
put_private(conn, :joken_verify, verify)
end
end
defp parse_auth(conn, nil) do
send_401(conn, "Unauthorized")
end
defp parse_auth(conn, ""), do: parse_auth(conn, nil)
defp parse_auth(conn, incoming_token) do
payload_fun = Map.get(conn.private, :joken_verify)
verified_token = payload_fun.()
|> with_compact_token(incoming_token)
|> verify
evaluate(conn, verified_token)
end
defp evaluate(conn, %Token{error: nil} = token) do
assign(conn, :joken_claims, get_claims(token))
end
defp evaluate(conn, %Token{error: message}) do
send_401(conn, message)
end
defp send_401(conn, message) do
on_error = conn.private[:joken_on_error]
{conn, message} = case on_error.(conn, message) do
{conn, map} when is_map(map) ->
create_json_response(conn, map)
response ->
response
end
conn
|> send_resp(401, message)
|> halt
end
defp create_json_response(conn, map) do
conn = put_resp_content_type(conn, "application/json")
json = Poison.encode!(map)
{conn, json}
end
@doc false
def default_on_error(conn, message) do
{conn, message}
end
@doc false
def default_token_function(conn) do
get_req_header(conn, "authorization") |> token_from_header
end
defp token_from_header(["Bearer " <> incoming_token]), do: incoming_token
defp token_from_header(_), do: nil
end
end
|
lib/joken/plug.ex
| 0.628065
| 0.42937
|
plug.ex
|
starcoder
|
defmodule Nebulex.Caching.Decorators do
@moduledoc ~S"""
Function decorators which provide a way of annotating functions to be cached
or evicted. By means of these decorators, it is possible the implementation
of cache usage patterns like **Read-through**, **Write-through**,
**Cache-as-SoR**, etc.
## Shared Options
All of the caching macros below accept the following options:
* `:cache` - Defines what cache to use (required). Raises `ArgumentError`
if the option is not present.
* `:key` - Defines the cache access key (optional). If this option
is not present, a default key is generated by hashing a two-element
tuple of the calling module and the function's name
(e.g.: `:erlang.phash2({module, function_name})`).
* `:opts` - Defines the cache options that will be passed as argument
to the invoked cache function (optional).
* `:match` - Defines a function that takes one argument and will be used to decide
if the cache should be updated or not (optional). If this option is not present,
the value will always be updated. Does not have any effect upon eviction
since values are always evicted before executing the function logic.
## Example
Suppose we are using `Ecto` and we want to define some caching functions in
the context `MyApp.Accounts`.
defmodule MyApp.Accounts do
use Nebulex.Caching.Decorators
import Ecto.Query
alias MyApp.Accounts.User
alias MyApp.Cache
alias MyApp.Repo
@decorate cache(cache: Cache, key: {User, id}, opts: [ttl: 3600])
def get_user!(id) do
Repo.get!(User, id)
end
@decorate cache(cache: Cache, key: {User, clauses})
def get_user_by!(clauses) do
Repo.get_by!(User, clauses)
end
@decorate cache(cache: Cache)
def users_by_segment(segment \\\\ "standard") do
query = from(q in User, where: q.segment == ^segment)
Repo.all(query)
end
@decorate cache(cache: Cache, key: {User, :latest}, match: &(not is_nil(&1)))
def get_newest_user() do
Repo.get_newest(User)
end
@decorate update(cache: Cache, key: {User, user.id})
def update_user!(%User{} = user, attrs) do
user
|> User.changeset(attrs)
|> Repo.update!()
end
@decorate evict(cache: Cache, keys: [{User, user.id}, {User, [username: user.username]}])
def delete_user(%User{} = user) do
Repo.delete(user)
end
end
"""
use Decorator.Define, cache: 1, evict: 1, update: 1
@doc """
Provides a way of annotating functions to be cached (cacheable aspect).
The returned value by the code block is cached if it doesn't exist already
in cache, otherwise, it is returned directly from cache and the code block
is not executed.
## Options
See the "Shared options" section at the module documentation.
## Examples
defmodule MyApp.Example do
use Nebulex.Caching.Decorators
alias MyApp.Cache
@decorate cache(cache: Cache, key: name)
def get_by_name(name, age) do
# your logic (maybe the loader to retrieve the value from the SoR)
end
@decorate cache(cache: Cache, key: age, opts: [ttl: 3600])
def get_by_age(age) do
# your logic (maybe the loader to retrieve the value from the SoR)
end
@decorate cache(cache: Cache)
def all(query) do
# your logic (maybe the loader to retrieve the value from the SoR)
end
@decorate cache(cache: Cache, key: {User, :latest}, match: &(not is_nil(&1)))
def get_newest_user() do
Repo.get_newest(User)
end
end
The **Read-through** pattern is supported by this decorator. The loader to
retrieve the value from the system-of-record (SoR) is your function's logic
and the rest is provided by the macro under-the-hood.
"""
def cache(attrs, block, context) do
caching_action(:cache, attrs, block, context)
end
@doc """
Provides a way of annotating functions to be evicted (eviction aspect).
Before the function executes, the given key or keys (depending on the `:key`
and `:keys` options) are deleted from the cache; as noted in the "Shared
Options" section, eviction always happens before the function logic runs.
## Options
* `:keys` - Defines the set of keys meant to be evicted from the cache
before the function executes.
* `:all_entries` - Defines whether all entries must be removed before the
function executes. Defaults to `false`.
See the "Shared options" section at the module documentation.
## Examples
defmodule MyApp.Example do
use Nebulex.Caching.Decorators
alias MyApp.Cache
@decorate evict(cache: Cache, key: name)
def evict(name) do
# your logic (maybe write/delete data to the SoR)
end
@decorate evict(cache: Cache, keys: [name, id])
def evict_many(name) do
# your logic (maybe write/delete data to the SoR)
end
@decorate evict(cache: Cache, all_entries: true)
def evict_all(name) do
# your logic (maybe write/delete data to the SoR)
end
end
The **Write-through** pattern is supported by this decorator. Your function
provides the logic to write data to the system-of-record (SoR) and the rest
is provided by the decorator under-the-hood. But in contrast with `update`
decorator, when the data is written to the SoR, the key for that value is
deleted from cache instead of updated.
"""
def evict(attrs, block, context) do
caching_action(:evict, attrs, block, context)
end
@doc """
Provides a way of annotating functions to be evicted; but updating the cached
key instead of deleting it.
The content of the cache is updated without interfering with the function
execution. That is, the method would always be executed and the result
cached.
The difference between `cache/3` and `update/3` is that `cache/3` will skip
running the function if the key exists in the cache, whereas `update/3` will
actually run the function and then put the result in the cache.
## Options
See the "Shared options" section at the module documentation.
## Examples
defmodule MyApp.Example do
use Nebulex.Caching.Decorators
alias MyApp.Cache
@decorate update(cache: Cache, key: name)
def update(name) do
# your logic (maybe write data to the SoR)
end
@decorate update(cache: Cache, opts: [ttl: 3600])
def update_with_ttl(name) do
# your logic (maybe write data to the SoR)
end
@decorate update(cache: Cache, match: &match_function/1)
def update_when_not_nil() do
# your logic (maybe write data to the SoR)
end
defp match_function(value) do
# your condition to skip updating the cache
end
end
The **Write-through** pattern is supported by this decorator. Your function
provides the logic to write data to the system-of-record (SoR) and the rest
is provided by the decorator under-the-hood.
"""
def update(attrs, block, context) do
caching_action(:update, attrs, block, context)
end
## Private Functions
defp caching_action(action, attrs, block, context) do
cache = attrs[:cache] || raise ArgumentError, "expected cache: to be given as argument"
key_var =
Keyword.get(
attrs,
:key,
quote(do: :erlang.phash2({unquote(context.module), unquote(context.name)}))
)
keys_var = Keyword.get(attrs, :keys, [])
match_var = Keyword.get(attrs, :match, quote(do: fn _ -> true end))
opts_var =
attrs
|> Keyword.get(:opts, [])
|> Keyword.put(:return, :value)
action_logic = action_logic(action, block, attrs)
quote do
cache = unquote(cache)
key = unquote(key_var)
keys = unquote(keys_var)
opts = unquote(opts_var)
match = unquote(match_var)
unquote(action_logic)
end
end
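# Builds the quoted logic per decorator: :cache only runs the block on a
# cache miss, :evict deletes the key(s) (or flushes everything) before
# running the block, and :update always runs the block and caches the result
# whenever the match function returns true.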
defp action_logic(:cache, block, _attrs) do
quote do
if value = cache.get(key, opts) do
value
else
value = unquote(block)
with true <- apply(match, [value]),
value <- cache.set(key, value, opts) do
value
else
false -> value
end
end
end
end
defp action_logic(:evict, block, attrs) do
all_entries? = Keyword.get(attrs, :all_entries, false)
quote do
:ok =
if unquote(all_entries?) do
cache.flush()
else
Enum.each([key | keys], fn k ->
if k, do: cache.delete(k)
end)
end
unquote(block)
end
end
defp action_logic(:update, block, _attrs) do
quote do
value = unquote(block)
with true <- apply(match, [value]),
value <- cache.set(key, value, opts) do
value
else
false -> value
end
end
end
end
|
lib/nebulex/caching/decorators.ex
| 0.899984
| 0.635449
|
decorators.ex
|
starcoder
|
defmodule Tensorflow.MemoryLogStep do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
handle: String.t()
}
defstruct [:step_id, :handle]
field(:step_id, 1, type: :int64)
field(:handle, 2, type: :string)
end
defmodule Tensorflow.MemoryLogTensorAllocation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
kernel_name: String.t(),
tensor: Tensorflow.TensorDescription.t() | nil
}
defstruct [:step_id, :kernel_name, :tensor]
field(:step_id, 1, type: :int64)
field(:kernel_name, 2, type: :string)
field(:tensor, 3, type: Tensorflow.TensorDescription)
end
defmodule Tensorflow.MemoryLogTensorDeallocation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
allocation_id: integer,
allocator_name: String.t()
}
defstruct [:allocation_id, :allocator_name]
field(:allocation_id, 1, type: :int64)
field(:allocator_name, 2, type: :string)
end
defmodule Tensorflow.MemoryLogTensorOutput do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
kernel_name: String.t(),
index: integer,
tensor: Tensorflow.TensorDescription.t() | nil
}
defstruct [:step_id, :kernel_name, :index, :tensor]
field(:step_id, 1, type: :int64)
field(:kernel_name, 2, type: :string)
field(:index, 3, type: :int32)
field(:tensor, 4, type: Tensorflow.TensorDescription)
end
defmodule Tensorflow.MemoryLogRawAllocation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
operation: String.t(),
num_bytes: integer,
ptr: non_neg_integer,
allocation_id: integer,
allocator_name: String.t()
}
defstruct [
:step_id,
:operation,
:num_bytes,
:ptr,
:allocation_id,
:allocator_name
]
field(:step_id, 1, type: :int64)
field(:operation, 2, type: :string)
field(:num_bytes, 3, type: :int64)
field(:ptr, 4, type: :uint64)
field(:allocation_id, 5, type: :int64)
field(:allocator_name, 6, type: :string)
end
defmodule Tensorflow.MemoryLogRawDeallocation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer,
operation: String.t(),
allocation_id: integer,
allocator_name: String.t(),
deferred: boolean
}
defstruct [:step_id, :operation, :allocation_id, :allocator_name, :deferred]
field(:step_id, 1, type: :int64)
field(:operation, 2, type: :string)
field(:allocation_id, 3, type: :int64)
field(:allocator_name, 4, type: :string)
field(:deferred, 5, type: :bool)
end
|
lib/tensorflow/core/framework/log_memory.pb.ex
| 0.728265
| 0.547343
|
log_memory.pb.ex
|
starcoder
|
defmodule Lonely.Result.List do
@moduledoc """
Functions to operate on result lists.
"""
alias Lonely.Result
@type t :: Result.t
@doc """
Combines a list of results into a result with a list of values. If there is
any error, the first is returned.
iex> import Lonely.Result.List
...> combine([])
{:ok, []}
iex> import Lonely.Result.List
...> combine([{:ok, 1}, {:ok, 2}, {:ok, 3}])
{:ok, [1, 2, 3]}
iex> import Lonely.Result.List
...> combine([{:ok, 1}, {:error, 2}, {:ok, 3}])
{:error, 2}
iex> import Lonely.Result.List
...> combine([{:ok, 1}, {:error, 2}, {:error, 3}])
{:error, 2}
"""
@spec combine([t]) :: t
def combine(xs) do
xs
|> Enum.reduce_while({:ok, []}, &combine_reducer/2)
|> Result.map(&Enum.reverse/1)
end
defp combine_reducer(a = {:ok, _}, acc), do:
{:cont, cons(a, acc)}
defp combine_reducer(error, _), do:
{:halt, error}
@doc """
Cons cell.
iex> import Lonely.Result.List
...> cons({:ok, 1}, {:ok, []})
{:ok, [1]}
iex> import Lonely.Result.List
...> cons({:error, :boom}, {:ok, []})
{:error, :boom}
iex> import Lonely.Result.List
...> cons({:ok, 1}, {:error, :boom})
{:error, :boom}
"""
def cons({:ok, x}, {:ok, xs}) when is_list(xs), do:
{:ok, [x | xs]}
def cons({:ok, _}, e = {:error, _}), do: e
def cons(e = {:error, _}, _), do: e
@doc """
Splits a result list into a list of results.
iex> import Lonely.Result.List
...> split({:ok, []})
[]
iex> import Lonely.Result.List
...> split({:ok, [1]})
[{:ok, 1}]
iex> import Lonely.Result.List
...> split({:ok, [1, 2]})
[{:ok, 1}, {:ok, 2}]
iex> import Lonely.Result.List
...> split({:error, :boom})
{:error, :boom}
"""
@spec split(t) :: [t]
def split({:ok, []}), do: []
def split({:ok, xs}) when is_list(xs), do:
Enum.map(xs, &({:ok, &1}))
def split(e = {:error, _}), do: e
end
|
lib/lonely/result/list.ex
| 0.738009
| 0.41567
|
list.ex
|
starcoder
|
defmodule ExDebugger.Meta do
@moduledoc """
Debugging the debugger.
In order to facilitate development of `ExDebugger`, various `inspect`
statements have been placed strategically which can be switched on/off
by means of the settings under `#{Documentation.debug_options_path()}`.
```elixir
config :ex_debugger, :meta_debug,
all: %{show_module_tokens: false, show_tokenizer: false, show_ast_before: false, show_ast_after: false},
"Elixir.Support.EdgeCases.CondStatement.Minimal": {true, true, true, true},
```
"""
  # The `struct` defined here validates the configuration read below and
  # provides a set of convenience functions that keep the calling code free
  # of 'ugly' conditional statements.
@opts ExDebugger.Helpers.Formatter.opts()
if Application.get_env(:ex_debugger, :debug_options_file) do
@external_resource Application.get_env(:ex_debugger, :debug_options_file)
@debug Config.Reader.read!(Application.get_env(:ex_debugger, :debug_options_file))
else
    # `Config.Reader.read!/1` returns a keyword list, so the fallback must be one too
    @debug []
end
@accepted_keys MapSet.new([
:show_module_tokens,
:show_tokenizer,
:show_ast_before,
:show_ast_after
])
@default %{
show_module_tokens: false,
show_tokenizer: false,
show_ast_before: false,
show_ast_after: false
}
defstruct all: @default,
caller: @default,
caller_module: nil
@doc false
  def new(caller_module) do
    meta_debug =
      @debug
      |> Keyword.get(:ex_debugger, [])
      |> Keyword.get(:meta_debug, [])

    all =
      meta_debug
      |> Keyword.get(:all, @default)
      |> validate_meta_debug

    caller =
      meta_debug
      |> Keyword.get(caller_module, @default)
      |> validate_meta_debug
struct!(__MODULE__, %{
all: all,
caller: caller,
caller_module: caller_module
})
end
@doc false
def debug(input, meta = %__MODULE__{}, def_name, key) do
if Map.get(meta.all, key) || Map.get(meta.caller, key) do
IO.inspect(input, [{:label, format_label(meta.caller_module, def_name, key)} | @opts])
end
input
end
  # @doc is discarded on private functions (and warns), so plain comments are used below.
  defp format_label(caller_module, def_name, key) do
    "#{caller_module}/#{def_name}/#{format_key(key)}"
  end

  defp format_key(:show_module_tokens), do: "caller_module"
  defp format_key(:show_tokenizer), do: "tokenizer"
  defp format_key(:show_ast_before), do: "def_do_block_ast"
  defp format_key(:show_ast_after), do: "updated_def_do_block_ast"
defp validate_meta_debug(input) do
input
|> case do
m when is_map(m) ->
if MapSet.new(current_keys = Map.keys(m)) == @accepted_keys do
m
else
raise "#{@external_resource} section: :meta_debug contains incorrect configuration. Accepted Keys: #{
inspect(@accepted_keys, @opts)
}. Instead: #{inspect(current_keys, @opts)}"
end
{a, b, c, d} ->
%{show_module_tokens: a, show_tokenizer: b, show_ast_before: c, show_ast_after: d}
incorrect_format ->
raise "#{@external_resource} section: :meta_debug contains incorrect configuration. Either specify a map with keys: #{
inspect(@accepted_keys, @opts)
}. Either specify a tuple; for example: {false, true, false, false}. Instead: #{
inspect(incorrect_format, @opts)
}"
end
end
end
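A minimal usage sketch: `new/1` merges the `:all` settings with the caller module's own entry, and `debug/4` prints its input only when the given flag is enabled, returning the input unchanged so it can sit inside a pipeline (`tokens` is a placeholder term):

```elixir
meta = ExDebugger.Meta.new(Support.EdgeCases.CondStatement.Minimal)

# Inspects `tokens` only when :show_tokenizer is on for :all or this module.
ExDebugger.Meta.debug(tokens, meta, :my_def, :show_tokenizer)
```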
|
lib/ex_debugger/meta.ex
| 0.660501
| 0.691562
|
meta.ex
|
starcoder
|
defmodule ShEx.Schema do
@moduledoc """
A ShEx schema is a collection of ShEx shape expressions that prescribes conditions that RDF data graphs must meet in order to be considered "conformant".
Usually a `ShEx.Schema` is not created by hand, but read from a ShExC or ShExJ
representation via `ShEx.ShExC.decode/2` or `ShEx.ShExJ.decode/2`.
"""
defstruct [
# [shapeExpr+]?
:shapes,
# shapeExpr?
:start,
# [IRI+]?
:imports,
# [SemAct+]?
:start_acts
]
alias ShEx.{ShapeMap, ShapeExpression}
@parallel_default Application.get_env(:shex, :parallel)
@flow_opts_defaults Application.get_env(:shex, :flow_opts)
@flow_opts MapSet.new(~w[max_demand min_demand stages window buffer_keep buffer_size]a)
@doc !"""
Creates a `ShEx.Schema`.
"""
def new(shapes, start \\ nil, imports \\ nil, start_acts \\ nil) do
%ShEx.Schema{
shapes: shapes |> List.wrap() |> Map.new(fn shape -> {shape.id, shape} end),
start: start,
imports: imports,
start_acts: start_acts
}
|> validate()
end
defp validate(schema) do
with :ok <- check_refs(schema, labeled_triple_expressions([schema])) do
{:ok, schema}
end
end
@doc """
Validates that a `RDF.Data` structure conforms to a `ShEx.Schema` according to a `ShEx.ShapeMap`.
"""
def validate(schema, data, shape_map, opts \\ [])
def validate(schema, data, %ShapeMap{type: :query} = shape_map, opts) do
with {:ok, fixed_shape_map} <- ShapeMap.to_fixed(shape_map, data) do
validate(schema, data, fixed_shape_map, opts)
end
end
def validate(schema, data, %ShapeMap{type: :fixed} = shape_map, opts) do
start = start_shape_expr(schema)
state = %{
ref_stack: [],
labeled_triple_expressions: labeled_triple_expressions([schema])
}
if par_opts = parallelization_options(shape_map, data, opts) do
shape_map
|> ShapeMap.associations()
|> Flow.from_enumerable(par_opts)
|> Flow.map(fn association ->
schema
|> shape_expr(association.shape, start)
|> ShapeExpression.satisfies(data, schema, association, state)
end)
|> Enum.reduce(%ShapeMap{type: :result}, fn association, shape_map ->
ShapeMap.add(shape_map, association)
end)
else
shape_map
|> ShapeMap.associations()
|> Enum.reduce(%ShapeMap{type: :result}, fn association, result_shape_map ->
shape = shape_expr(schema, association.shape, start)
ShapeMap.add(
result_shape_map,
ShapeExpression.satisfies(shape, data, schema, association, state)
)
end)
end
end
defp parallelization_options(shape_map, data, opts) do
if use_parallelization?(Keyword.get(opts, :parallel, @parallel_default), shape_map) do
if opts |> Keyword.keys() |> MapSet.new() |> MapSet.disjoint?(@flow_opts) do
flow_opts_defaults(shape_map, data, opts)
else
opts
end
end
end
defp use_parallelization?(nil, shape_map) do
shape_map.type == :query or Enum.count(shape_map) > 10
end
defp use_parallelization?(parallel, _), do: parallel
  defp flow_opts_defaults(_shape_map, _data, _opts) do
@flow_opts_defaults || [max_demand: 3]
end
defp check_refs(schema, labeled_triple_expressions) do
ShEx.Operator.check(schema, fn
{:shape_expression_label, id} ->
if schema.shapes[id] do
:ok
else
{:error, "couldn't resolve shape expression reference #{inspect(id)}"}
end
{:triple_expression_label, id} ->
if labeled_triple_expressions[id] do
:ok
else
{:error, "couldn't resolve triple expression reference #{inspect(id)}"}
end
operator ->
if ShEx.TripleExpression.impl_for(operator) && Map.has_key?(operator, :id) do
if is_nil(schema.shapes[operator.id]) do
:ok
else
{:error,
"#{inspect(operator.id)} can't be a shape expression label and a triple expression label"}
end
else
:ok
end
end)
end
defp labeled_triple_expressions(operators) do
Enum.reduce(operators, %{}, fn operator, acc ->
case ShEx.Operator.triple_expression_label_and_operands(operator) do
{nil, []} ->
acc
{triple_expr_label, []} ->
acc
|> Map.put(triple_expr_label, operator)
{nil, triple_expressions} ->
acc
|> Map.merge(labeled_triple_expressions(triple_expressions))
{triple_expr_label, triple_expressions} ->
acc
|> Map.put(triple_expr_label, operator)
|> Map.merge(labeled_triple_expressions(triple_expressions))
end
end)
end
@doc false
def shape_expr_with_id(schema, shape_label) do
Map.get(schema.shapes, shape_label)
end
defp start_shape_expr(schema) do
if RDF.resource?(schema.start) do
shape_expr_with_id(schema, schema.start)
else
schema.start
end
end
defp shape_expr(_, :start, start_expr), do: start_expr
defp shape_expr(schema, shape_label, _), do: shape_expr_with_id(schema, shape_label)
defimpl ShEx.Operator do
def children(schema) do
Map.values(schema.shapes)
end
def triple_expression_label_and_operands(schema),
do: {nil, children(schema)}
end
end
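A hedged sketch of the round trip described in the moduledoc; `shexc_source`, `rdf_data`, and `shape_map` are placeholders, and only `ShEx.ShExC.decode` and `validate/4` appear in this excerpt:

```elixir
with {:ok, schema} <- ShEx.ShExC.decode(shexc_source),
     %ShEx.ShapeMap{type: :result} = result <-
       ShEx.Schema.validate(schema, rdf_data, shape_map, parallel: false) do
  # Each association records whether its node conformed to its shape.
  ShEx.ShapeMap.associations(result)
end
```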
|
lib/shex/schema.ex
| 0.762866
| 0.603377
|
schema.ex
|
starcoder
|
defmodule ExWire.Framing.Secrets do
@moduledoc """
Secrets are used to both encrypt and authenticate incoming
and outgoing peer to peer messages.
"""
alias ExthCrypto.{AES, MAC}
alias ExthCrypto.ECIES.ECDH
alias ExthCrypto.Hash.Keccak
alias ExWire.Handshake
@type t :: %__MODULE__{
egress_mac: MAC.mac_inst(),
ingress_mac: MAC.mac_inst(),
mac_encoder: ExthCrypto.Cipher.cipher(),
mac_secret: ExthCrypto.Key.symmetric_key(),
encoder_stream: ExthCrypto.Cipher.stream(),
decoder_stream: ExthCrypto.Cipher.stream(),
token: binary()
}
defstruct [
:egress_mac,
:ingress_mac,
:mac_encoder,
:mac_secret,
:encoder_stream,
:decoder_stream,
:token
]
@spec new(
MAC.mac_inst(),
MAC.mac_inst(),
ExthCrypto.Key.symmetric_key(),
ExthCrypto.Key.symmetric_key(),
binary()
) :: t
def new(egress_mac, ingress_mac, mac_secret, symmetric_key, token) do
# initialize AES stream with empty init_vector
encoder_stream = AES.stream_init(:ctr, symmetric_key, <<0::size(128)>>)
decoder_stream = AES.stream_init(:ctr, symmetric_key, <<0::size(128)>>)
mac_encoder = {AES, AES.block_size(), :ecb}
%__MODULE__{
egress_mac: egress_mac,
ingress_mac: ingress_mac,
mac_encoder: mac_encoder,
mac_secret: mac_secret,
encoder_stream: encoder_stream,
decoder_stream: decoder_stream,
token: token
}
end
@doc """
After a handshake has been completed (i.e. auth and ack have been exchanged),
we're ready to derive the secrets to be used to encrypt frames. This function
performs the required computation. The token created as part of these secrets
can be used to resume a connection with a minimal handshake.
From RLPx documentation (https://github.com/ethereum/devp2p/blob/master/rlpx.md),
```
ephemeral-shared-secret = ecdh.agree(ephemeral-privkey, remote-ephemeral-pubk)
shared-secret = sha3(ephemeral-shared-secret || sha3(nonce || initiator-nonce))
token = sha3(shared-secret)
aes-secret = sha3(ephemeral-shared-secret || shared-secret)
# destroy shared-secret
mac-secret = sha3(ephemeral-shared-secret || aes-secret)
# destroy ephemeral-shared-secret
Initiator:
egress-mac = sha3.update(mac-secret ^ recipient-nonce || auth-sent-init)
# destroy nonce
ingress-mac = sha3.update(mac-secret ^ initiator-nonce || auth-recvd-ack)
# destroy remote-nonce
Recipient:
egress-mac = sha3.update(mac-secret ^ initiator-nonce || auth-sent-ack)
# destroy nonce
ingress-mac = sha3.update(mac-secret ^ recipient-nonce || auth-recvd-init)
# destroy remote-nonce
```
"""
def derive_secrets(handshake = %Handshake{}) do
{_public, private_key} = handshake.random_key_pair
ephemeral_shared_secret =
ECDH.generate_shared_secret(private_key, handshake.remote_random_pub)
shared_secret =
Keccak.kec(
ephemeral_shared_secret <> Keccak.kec(handshake.resp_nonce <> handshake.init_nonce)
)
token = Keccak.kec(shared_secret)
aes_secret = Keccak.kec(ephemeral_shared_secret <> shared_secret)
mac_secret = Keccak.kec(ephemeral_shared_secret <> aes_secret)
{egress_mac, ingress_mac} = derive_ingress_egress(handshake, mac_secret)
new(egress_mac, ingress_mac, mac_secret, aes_secret, token)
end
def derive_ingress_egress(handshake = %Handshake{initiator: true}, mac_secret) do
egress_mac =
MAC.init(:kec)
|> MAC.update(ExthCrypto.Math.xor(mac_secret, handshake.resp_nonce))
|> MAC.update(handshake.encoded_auth_msg)
ingress_mac =
MAC.init(:kec)
|> MAC.update(ExthCrypto.Math.xor(mac_secret, handshake.init_nonce))
|> MAC.update(handshake.encoded_ack_resp)
{egress_mac, ingress_mac}
end
def derive_ingress_egress(handshake = %Handshake{initiator: false}, mac_secret) do
egress_mac =
MAC.init(:kec)
|> MAC.update(ExthCrypto.Math.xor(mac_secret, handshake.init_nonce))
|> MAC.update(handshake.encoded_ack_resp)
ingress_mac =
MAC.init(:kec)
|> MAC.update(ExthCrypto.Math.xor(mac_secret, handshake.resp_nonce))
|> MAC.update(handshake.encoded_auth_msg)
{egress_mac, ingress_mac}
end
end
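The two `derive_ingress_egress/2` clauses mirror each other: the initiator's egress stream is the recipient's ingress stream. A property sketch, assuming `init_hs` and `resp_hs` are `%ExWire.Handshake{}` structs captured from the same auth/ack exchange (with opposite `initiator` flags):

```elixir
init_secrets = ExWire.Framing.Secrets.derive_secrets(init_hs)
resp_secrets = ExWire.Framing.Secrets.derive_secrets(resp_hs)

# Both peers derive the same resumption token, since it depends only on the
# shared ECDH secret and the two nonces.
true = init_secrets.token == resp_secrets.token
```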
|
apps/ex_wire/lib/ex_wire/framing/secrets.ex
| 0.881436
| 0.412767
|
secrets.ex
|
starcoder
|
defmodule ExInsights do
@moduledoc """
  Exposes functions for POSTing events & metrics to Azure Application Insights.
For more information on initialization and usage consult the [README.md](readme.html)
"""
alias ExInsights.Data.Payload
@typedoc """
Measurement name. Will be used extensively in the app insights UI
"""
@type name :: String.t | atom
@typedoc ~S"""
A map of `[name -> string]` to add metadata to a tracking request
"""
@type properties :: %{optional(name) => String.t}
@typedoc ~S"""
A map of `[name -> string]` to add measurement data to a tracking request
"""
@type measurements :: %{optional(name) => number}
@typedoc ~S"""
Defines the level of severity for the event.
"""
@type severity_level :: :verbose | :info | :warning | :error | :critical
@typedoc ~S"""
Represents the exception's stack trace.
"""
@type stack_trace :: [stack_trace_entry]
@type stack_trace_entry ::
{module, atom, arity_or_args, location} |
{(... -> any), arity_or_args, location}
@typep arity_or_args :: non_neg_integer | list
@typep location :: keyword
@doc ~S"""
Log a user action or other occurrence.
### Parameters:
```
name: name of the event (string)
properties (optional): a map of [string -> string] pairs for adding extra properties to this event
    measurements (optional): a map of [string -> number] values associated with this event that can be aggregated/summed/etc. on the UI
```
"""
@spec track_event(name :: name, properties :: properties, measurements :: measurements) :: :ok
def track_event(name, properties \\ %{}, measurements \\ %{})
when is_binary(name)
do
Payload.create_event_payload(name, properties, measurements)
|> track()
end
@doc ~S"""
Log a trace message.
### Parameters:
```
message: A string to identify this event in the portal.
severity_level: The level of severity for the event.
properties: map[string, string] - additional data used to filter events and metrics in the portal. Defaults to empty.
```
"""
@spec track_trace(String.t, severity_level :: severity_level, properties :: properties) :: :ok
def track_trace(message, severity_level \\ :info, properties \\ %{}) do
Payload.create_trace_payload(message, severity_level, properties)
|> track()
end
@doc ~S"""
Log an exception you have caught.
### Parameters:
```
exception: An Error from a catch clause, or the string error message.
stack_trace: An erlang stacktrace.
properties: map[string, string] - additional data used to filter events and metrics in the portal. Defaults to empty.
measurements: map[string, number] - metrics associated with this event, displayed in Metrics Explorer on the portal. Defaults to empty.
```
"""
@spec track_exception(String.t, stack_trace :: stack_trace, String.t | nil, properties :: properties, measurements :: measurements) :: :ok
def track_exception(exception, stack_trace, handle_at \\ nil, properties \\ %{}, measurements \\ %{}) do
Payload.create_exception_payload(exception, stack_trace, handle_at, properties, measurements)
|> track()
end
@doc ~S"""
Log a numeric value that is not associated with a specific event.
Typically used to send regular reports of performance indicators.
### Parameters:
```
name: name of the metric
value: the value of the metric (number)
properties (optional): a map of [string -> string] pairs for adding extra properties to this event
```
"""
@spec track_metric(name :: name, number, properties :: properties) :: :ok
def track_metric(name, value, properties \\ %{})
when is_binary(name)
do
Payload.create_metric_payload(name, value, properties)
|> track()
end
@doc ~S"""
Log a dependency, for example requests to an external service or SQL calls.
### Parameters:
```
name: String that identifies the dependency.
command_name: String of the name of the command made against the dependency (eg. full URL with querystring or SQL command text).
elapsed_time_ms: Number for elapsed time in milliseconds of the command made against the dependency.
success: Boolean which indicates success.
dependency_type_name: String which denotes dependency type. Defaults to nil.
target: String of the target host of the dependency.
properties (optional): map[string, string] - additional data used to filter events and metrics in the portal. Defaults to empty.
```
"""
@spec track_dependency(name :: name, String.t, number, boolean, String.t, String.t | nil, properties :: properties) :: :ok
def track_dependency(name, command_name, elapsed_time_ms, success, dependency_type_name \\ "", target \\ nil, properties \\ %{}) do
Payload.create_dependency_payload(name, command_name, elapsed_time_ms, success, dependency_type_name, target, properties)
|> track()
end
@spec track(map) :: :ok
defp track(payload) do
ExInsights.Aggregation.Worker.track(payload)
:ok
end
end
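Putting the trackers together (a sketch; names and values are illustrative, and every call simply enqueues a payload and returns `:ok`):

```elixir
:ok = ExInsights.track_event("video_played", %{"codec" => "h264"}, %{duration_ms: 1250})
:ok = ExInsights.track_metric("queue_depth", 17)
:ok = ExInsights.track_dependency("users-api", "GET /users?active=true", 84, true, "HTTP")

try do
  raise "boom"
rescue
  e -> ExInsights.track_exception(e, __STACKTRACE__)
end
```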
|
lib/ex_insights.ex
| 0.916119
| 0.897291
|
ex_insights.ex
|
starcoder
|
defmodule Haex.Data.Parser do
@moduledoc """
Parses an AST received from `Haex.data/1` macro, and generates a
`Haex.Data.t()` struct which can be used to generate modules to represent
that data type
"""
alias Haex.Data
alias Haex.Data.DataConstructor
alias Haex.Data.TypeConstructor
@spec parse(Macro.t()) :: Data.t()
def parse({:"::", _meta, [type_ast, data_asts]}) do
type_constructor = parse_type_constructor(type_ast)
data_constructors = parse_data_constructors(data_asts)
%Data{
type_constructor: type_constructor,
data_constructors: data_constructors
}
end
@spec parse_type_constructor(Macro.t()) :: TypeConstructor.t()
defp parse_type_constructor({:__aliases__, _, type_name}) do
%TypeConstructor{
name: type_name,
params: []
}
end
defp parse_type_constructor({{:., _, [{:__aliases__, _, type_name}, :t]}, _, type_params_ast}) do
type_params = parse_type_params(type_params_ast)
%TypeConstructor{
name: type_name,
params: type_params
}
end
@spec parse_type_params(Macro.t()) :: [atom()]
defp parse_type_params([]), do: []
defp parse_type_params(no_parens: true), do: []
defp parse_type_params(type_params_ast) do
Enum.map(type_params_ast, fn {type_param_name, _meta, _ctx} -> type_param_name end)
end
@spec parse_data_constructors(Macro.t()) :: [DataConstructor.t()]
defp parse_data_constructors({:|, _meta, _asts} = ast) do
ast |> or_ast_to_list() |> parse_data_constructors()
end
defp parse_data_constructors(data_asts) when is_list(data_asts) do
Enum.map(data_asts, &parse_data_constructor/1)
end
defp parse_data_constructors(data_ast) when not is_list(data_ast) do
[parse_data_constructor(data_ast)]
end
@spec or_ast_to_list(Macro.t()) :: [Macro.t()]
defp or_ast_to_list({:|, _meta, [h_ast, t_ast]}), do: [h_ast | or_ast_to_list(t_ast)]
defp or_ast_to_list(ast), do: [ast]
@spec parse_data_constructor(Macro.t()) :: DataConstructor.t()
defp parse_data_constructor({:__aliases__, _, data_name}) do
%DataConstructor{
name: data_name,
params: [],
record?: false
}
end
defp parse_data_constructor({{:., _, [{:__aliases__, _, data_name}, :t]}, _, data_params_ast}) do
{data_params, is_record} = parse_data_params(data_params_ast)
%DataConstructor{
name: data_name,
params: data_params,
record?: is_record
}
end
@spec parse_data_params(Macro.t()) :: {[Data.param()], is_record :: boolean()}
defp parse_data_params([data_params_ast]) when is_list(data_params_ast) do
if Keyword.keyword?(data_params_ast) do
params =
Enum.map(data_params_ast, fn {param_name, data_param_ast} ->
{param_name, parse_data_param(data_param_ast)}
end)
{params, true}
else
raise "expected a keyword list got: #{data_params_ast |> inspect}"
end
end
defp parse_data_params(data_params_ast) do
params = Enum.map(data_params_ast, &parse_data_param/1)
{params, false}
end
@spec parse_data_param(Macro.t()) :: Data.param()
defp parse_data_param({name, _, args}) when not is_list(args) do
{:variable, name}
end
defp parse_data_param(external_type_ast) do
{:external_type, external_type_ast}
end
end
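The parser's contract is easiest to see on a concrete quoted declaration in the `Type.t(params) :: Ctor | Ctor` shape, following the clauses above (a sketch):

```elixir
ast = quote do: Maybe.t(a) :: Just.t(a) | Nothing

%Haex.Data{
  type_constructor: %Haex.Data.TypeConstructor{name: [:Maybe], params: [:a]},
  data_constructors: [
    %Haex.Data.DataConstructor{name: [:Just], params: [variable: :a], record?: false},
    %Haex.Data.DataConstructor{name: [:Nothing], params: [], record?: false}
  ]
} = Haex.Data.Parser.parse(ast)
```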
|
lib/haex/data/parser.ex
| 0.846784
| 0.618924
|
parser.ex
|
starcoder
|
defmodule Opus.Instrumentation do
@moduledoc false
defmacro instrument(event, fun) do
handling = __MODULE__.definstrument(fun)
quote do
@doc false
def instrument(unquote(event), _, metrics), do: unquote(handling)
end
end
defmacro instrument(event, opts, fun) do
handling = __MODULE__.definstrument(fun)
quote do
@doc false
def instrument(unquote(event), unquote(opts), metrics), do: unquote(handling)
end
end
def definstrument(fun) do
quote do
case unquote(fun) do
f when is_function(f, 0) -> f.()
f when is_function(f, 1) -> f.(metrics)
end
end
end
def default_callback do
quote do
@doc false
def instrument(_, _, _), do: :ok
end
end
def run_instrumented({_module, _type, _name, %{instrument?: false}}, _input, fun)
when is_function(fun, 0),
do: fun.()
def run_instrumented({_module, _type, name, _opts} = stage, input, fun)
when is_function(fun, 0) do
start = :erlang.monotonic_time()
run_instrumenters(:before_stage, stage, %{stage: name, input: input})
{status, new_input} = ret = fun.()
time = :erlang.monotonic_time() - start
run_instrumenters(:stage_completed, stage, %{
stage: name,
input: input,
result: format_result(ret),
time: time
})
{status, %{time: time, input: new_input}}
end
def run_instrumenters(event, {module, _type, _name, _opts} = stage, metrics) do
case Application.get_env(:opus, :instrumentation, []) do
instrumenter when is_atom(instrumenter) ->
do_run_instrumenters([module | [instrumenter]], event, stage, metrics)
instrumenters when is_list(instrumenters) ->
do_run_instrumenters([module | instrumenters], event, stage, metrics)
_ ->
do_run_instrumenters([module], event, stage, metrics)
end
end
defp do_run_instrumenters(instrumenters, event, {module, _type, name, _opts}, metrics) do
for instrumenter <- instrumenters,
is_atom(instrumenter),
function_exported?(instrumenter, :instrument, 3) do
case event do
e when e in [:pipeline_started, :pipeline_completed] ->
instrumenter.instrument(event, %{pipeline: module}, metrics)
e ->
instrumenter.instrument(e, %{stage: %{pipeline: module, name: name}}, metrics)
end
end
end
defp format_result({:cont, value}), do: {:ok, value}
defp format_result({:halt, value}), do: {:error, value}
end
|
lib/opus/instrumentation.ex
| 0.619471
| 0.434821
|
instrumentation.ex
|
starcoder
|
defmodule Logz.Nginx do
@moduledoc """
  Parses nginx access log lines into maps, intended for mapping over a Stream of log lines.
"""
require Logger
defp parse_month(str) do
case str do
"Jan" -> 1
"Feb" -> 2
"Mar" -> 3
"Apr" -> 4
"May" -> 5
"Jun" -> 6
"Jul" -> 7
"Aug" -> 8
"Sep" -> 9
"Oct" -> 10
"Nov" -> 11
"Dec" -> 12
end
end
defp offset(sign, hours, minutes) do
off = String.to_integer(hours) * 3600 + String.to_integer(minutes) * 60
case sign do
"+" -> off
"-" -> -off
end
end
def parse_date!(str) do
case Regex.scan(~r{(\d+)/(\w+)/(\d+):(\d+):(\d+):(\d+) (\+|-)(\d\d)(\d\d)}, str) do
[[_, day, month, year, hour, minute, second, off_sign, off_hour, off_min]] ->
{:ok, date} =
NaiveDateTime.new(
String.to_integer(year),
parse_month(month),
String.to_integer(day),
String.to_integer(hour),
String.to_integer(minute),
String.to_integer(second)
)
tstamp =
NaiveDateTime.add(date, offset(off_sign, off_hour, off_min), :second)
|> NaiveDateTime.diff(~N[1970-01-01 00:00:00], :second)
tstamp
matched ->
throw({:error, matched})
end
end
def parse_request(request) do
case Regex.scan(~r{([a-zA-Z]+) ([^\s]+) [^\"]+}, request) do
[[_, method, uri]] ->
{method, uri}
_ ->
{nil, nil}
end
end
def parse(line) do
# 192.168.127.123 - - [07/Jun/2020:06:40:03 +0000] "GET /blog HTTP/1.1" 301 185 "-" "UA"
addr = ~S{([^\s]*)}
tstamp = ~S{\[(.*)\]}
request = ~S{"(.*)"}
status = ~S{([\d]+)}
size = ~S{([\d]+)}
user_agent = ~s{"(.*)"}
case Regex.scan(
~r/#{addr} - - #{tstamp} #{request} #{status} #{size} ".*" #{user_agent}/,
line
) do
[[_, addr, tstamp, request, status, size, user_agent]] ->
{method, uri} = parse_request(request)
{:ok,
%{
addr: addr,
tstamp: parse_date!(tstamp),
request_method: method,
uri: uri,
request: request,
status: status,
size: size,
user_agent: user_agent
}}
matched ->
# IO.inspect(line)
{:error, {line, matched}}
end
end
def parse!(line) do
{:ok, data} = parse(line)
data
end
end
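Running the parser over the sample line quoted in `parse/1` (a sketch; `tstamp` comes back as a Unix timestamp and is left unasserted here):

```elixir
line =
  ~S(192.168.127.123 - - [07/Jun/2020:06:40:03 +0000] "GET /blog HTTP/1.1" 301 185 "-" "UA")

{:ok, entry} = Logz.Nginx.parse(line)

%{
  addr: "192.168.127.123",
  request_method: "GET",
  uri: "/blog",
  status: "301",
  size: "185",
  user_agent: "UA"
} = entry
```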
|
lib/logz/nginx.ex
| 0.584745
| 0.456349
|
nginx.ex
|
starcoder
|
defmodule SvgBuilder.Transform do
alias SvgBuilder.Element
@moduledoc """
Apply transforms to SVG elements.
These add or append to the "transform" attribute
on an element.
"""
@doc """
Apply a translation to an element.
"""
@spec translate(Element.t(), number, number) :: Element.t()
def translate(element, tx, ty) do
add_transform(element, "translate(#{tx},#{ty})")
end
@doc """
Apply a matrix transform to an element.
Given `{a, b, c, d, e, f}` the transformation is defined by the matrix:
| a c e |
| b d f |
| 0 0 1 |
https://www.w3.org/TR/SVG11/coords.html#TransformMatrixDefined
"""
@spec matrix(Element.t(), {number, number, number, number, number, number}) :: Element.t()
def matrix(element, {a, b, c, d, e, f}) do
add_transform(element, "matrix(#{a},#{b},#{c},#{d},#{e},#{f})")
end
@doc """
Apply a scale transformation to the element.
"""
@spec scale(Element.t(), number, number) :: Element.t()
def scale(element, sx, sy) do
add_transform(element, "scale(#{sx},#{sy})")
end
@doc """
Apply a rotation to the element.
Angle is in degrees.
"""
@spec rotate(Element.t(), number) :: Element.t()
def rotate(element, angle) do
add_transform(element, "rotate(#{angle})")
end
@doc """
Apply a rotation to an element around a defined center point.
Angle is in degrees.
"""
@spec rotate(Element.t(), number, number, number) :: Element.t()
def rotate(element, angle, cx, cy) do
add_transform(element, "rotate(#{angle}, #{cx}, #{cy})")
end
@doc """
Apply a skew on the X axis to the element.
"""
@spec skew_x(Element.t(), number) :: Element.t()
def skew_x(element, angle) do
add_transform(element, "skewX(#{angle})")
end
@doc """
Apply a skew on the Y axis to the element.
"""
@spec skew_y(Element.t(), number) :: Element.t()
def skew_y(element, angle) do
add_transform(element, "skewY(#{angle})")
end
defp add_transform({type, attrs, children}, transform) do
t =
[Map.get(attrs, :transform, ""), transform]
|> Enum.join(" ")
|> String.trim()
{type, Map.put(attrs, :transform, t), children}
end
end
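Because each helper appends to the `transform` attribute, calls compose left to right (a sketch on a bare element tuple):

```elixir
{:rect, attrs, []} =
  {:rect, %{width: 10, height: 4}, []}
  |> SvgBuilder.Transform.translate(5, 5)
  |> SvgBuilder.Transform.rotate(45)

"translate(5,5) rotate(45)" = attrs.transform
```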
|
lib/transform.ex
| 0.896841
| 0.633552
|
transform.ex
|
starcoder
|
defmodule Nba.Stats do
@moduledoc """
Provides a function for each stats.nba.com endpoint.
## Examples
See what endpoints you can hit:
Nba.Stats.endpoints()
      #=> [:assist_tracker, :box_score, :box_score_summary, ...]
Pass in the atom `:help` as a parameter to an endpoint function
to get a list of the available query parameters for the endpoint.
Nba.Stats.player_profile(:help)
#=> [:LeagueID, :PerMode, :PlayerID]
If you need example values for a query param, use `Nba.Stats.values_for/1`.
Nba.Stats.values_for("PerMode")
#=> ["Totals", "PerGame", "MinutesPer", "Per36"]
Boilerplate default values should be filled in for the most part, but as the API is
always changing, the app may not fill in all the values correctly. Pay attention to
the error message to see what was missing from the API call.
Nba.Stats.player_profile()
#=> {:error, "PlayerID is required"}
Now that you know what query params you can pass, we can make a call to the endpoint
by passing in a list of tuples to the endpoint function. Alternatively, you have the
option of passing in a map.
Nba.Stats.player_profile(PlayerID: 1628366, PerMode: "Totals")
#=> {:ok, %{"CareerHighs" => ...}}
Nba.Stats.player_profile(%{"PlayerID" => 1628366, "PerMode" => "Totals"})
#=> {:ok, %{"CareerHighs" => ...}}
Nba.Stats.player_profile(PlayerID: "Go Bruins")
#=> {:error, "The value 'Go Bruins' is not valid for PlayerID.; PlayerID is required"}
Note: The functions with a `!` raise an exception if the API call results in an error.
Nba.Stats.player_profile!(PlayerID: "Go Bruins")
#=> ** (RuntimeError) The value 'Go Bruins' is not valid for PlayerID.; PlayerID is required
"""
alias Nba.Parser
alias Nba.Http
defp http, do: Application.get_env(:nba, :http, Nba.Http)
Parser.Stats.endpoints()
|> Enum.each(fn endpoint ->
name = endpoint["name"]
def unquote(:"#{name}")(params \\ %{})
@spec unquote(:"#{name}")(:help) :: list(String.t())
def unquote(:"#{name}")(:help) do
Parser.Stats.endpoints_by_name()
|> Map.get(unquote(name))
|> Map.get("parameters")
|> Enum.sort()
|> Enum.map(&String.to_atom/1)
end
@spec unquote(:"#{name}")(map) :: {:ok | :error, map | String.t}
def unquote(:"#{name}")(params) when is_map(params) do
endpoint = Parser.Stats.endpoints_by_name()[unquote(name)]
valid_keys = Map.get(endpoint, "parameters")
url = Map.get(endpoint, "url")
query_string = build_query_string(params, valid_keys)
(url <> query_string)
|> http().get(Parser.headers())
|> Parser.Stats.transform_api_response()
end
@spec unquote(:"#{name}")(list(tuple)) :: {:ok | :error, map | String.t}
def unquote(:"#{name}")(params) when is_list(params) do
apply(__MODULE__, :"#{unquote(name)}", [Enum.into(params, %{})])
end
@spec unquote(:"#{name}!")(map) :: map
def unquote(:"#{name}!")(params \\ %{}) do
case apply(__MODULE__, :"#{unquote(name)}", [params]) do
{:ok, result} -> result
{:error, error} -> raise %RuntimeError{message: error}
_ -> raise %RuntimeError{message: "Error calling API"}
end
end
end)
@spec endpoints() :: list(atom)
@doc "Returns a list of atoms, one for each endpoint function in the Stats module"
def endpoints() do
Parser.Stats.endpoints()
|> Enum.map(&Map.get(&1, "name"))
|> Enum.map(&String.to_atom/1)
end
@spec values_for(String.t | atom) :: list(String.t)
@doc "Returns a list of valid query param keys for an endpoint"
def values_for(param_name) do
param = Parser.Stats.params_by_name()["#{param_name}"]
if param, do: param["values"], else: []
end
@doc false
def build_query_string(params, valid_keys) do
default_values_for(valid_keys)
|> Map.merge(atom_key_to_string_key(params))
|> Http.query_string_from_map()
end
@spec default_values_for(list(String.t)) :: map
defp default_values_for(parameter_keys) do
parameter_keys
|> Enum.reduce(%{}, fn key, acc ->
default_value = Parser.Stats.params_by_name() |> Map.get(key) |> Map.get("default")
Map.put(acc, key, default_value)
end)
end
@spec atom_key_to_string_key(map) :: map
defp atom_key_to_string_key(map) do
Map.new(map, fn {k, v} ->
if is_atom(k), do: {Atom.to_string(k), v}, else: {k, v}
end)
end
end
|
lib/nba/stats.ex
| 0.803945
| 0.480235
|
stats.ex
|
starcoder
|
defmodule ExUnitFixtures.Imp.Preprocessing do
@moduledoc false
# Provides functions that pre-process fixtures at compile time.
# Most of the functions provide some sort of transformation or validation
# process that we need to do on fixtures at compile time.
alias ExUnitFixtures.FixtureDef
  @type fixture_dict :: %{optional(atom) => FixtureDef.t}
@doc """
Checks there are no fixtures named `fixture_name` already in `fixtures`.
Raises an error if any clashes are found.
"""
def check_clashes(fixture_name, fixtures) do
if Enum.find(fixtures, fn f -> f.name == fixture_name end) != nil do
raise "There is already a fixture named #{fixture_name} in this module."
end
end
@doc """
Pre-processes the defined & imported fixtures in a module.
This will take the list of fixtures that have been defined, resolve their
dependencies and produce a map of those dependencies merged with any imported
dependencies.
The map will use fully qualified fixture names for keys.
"""
  @spec preprocess_fixtures([FixtureDef.t],
                            [atom] | fixture_dict) :: fixture_dict
def preprocess_fixtures(local_fixtures,
imported_modules) when is_list(imported_modules) do
imported_fixtures = fixtures_from_modules(imported_modules)
preprocess_fixtures(local_fixtures, imported_fixtures)
end
def preprocess_fixtures(local_fixtures, imported_fixtures) do
resolved_locals =
for f <- resolve_dependencies(local_fixtures, imported_fixtures),
into: %{},
do: {f.qualified_name, f}
local_fixtures
|> hide_fixtures(imported_fixtures)
|> Dict.merge(resolved_locals)
end
@doc """
Resolves dependencies for fixtures in a module.
Replaces the unqualified dep_names in a FixtureDef with qualified names.
"""
@spec resolve_dependencies([FixtureDef.t], fixture_dict) :: [FixtureDef.t]
def resolve_dependencies(local_fixtures, imported_fixtures) do
visible_fixtures =
for {_, f} <- imported_fixtures,
!f.hidden,
into: %{},
do: {f.name, f}
all_fixtures = Map.merge(
visible_fixtures,
(for f <- local_fixtures, into: %{}, do: {f.name, f})
)
for fixture <- local_fixtures do
resolved_deps = for dep <- fixture.dep_names do
resolve_dependency(dep, fixture, all_fixtures, visible_fixtures)
end
%{fixture | qualified_dep_names: resolved_deps}
end
end
  @spec resolve_dependency(atom, FixtureDef.t,
                           fixture_dict, fixture_dict) :: atom
defp resolve_dependency(:context, _, _, _) do
# Special case the ExUnit.Case context
:context
end
defp resolve_dependency(dep_name, fixture,
all_fixtures, visible_fixtures) do
resolved_dep = if dep_name == fixture.name do
visible_fixtures[dep_name]
else
all_fixtures[dep_name]
end
unless resolved_dep do
ExUnitFixtures.Imp.report_missing_dep(dep_name,
all_fixtures |> Map.values)
end
validate_dep(fixture, resolved_dep)
resolved_dep.qualified_name
end
@spec hide_fixtures([FixtureDef.t], fixture_dict) :: fixture_dict
defp hide_fixtures(local_fixtures, imported_fixtures) do
# Hides any fixtures in imported_fixtures that have been shadowed by
# local_fixtures.
names_to_hide = for f <- local_fixtures, into: MapSet.new, do: f.name
for {name, f} <- imported_fixtures, into: %{} do
if Set.member?(names_to_hide, f.name) do
{name, %{f | hidden: true}}
else
{name, f}
end
end
end
  @spec fixtures_from_modules([atom]) :: fixture_dict
defp fixtures_from_modules(modules) do
imported_fixtures = for module <- modules, do: module.fixtures
Enum.reduce imported_fixtures, %{}, &Dict.merge/2
end
@spec validate_dep(FixtureDef.t, FixtureDef.t) :: :ok | no_return
defp validate_dep(fixture, resolved_dependency)
defp validate_dep(%{scope: :module, name: fixture_name},
%{scope: :test, name: dep_name}) do
raise """
Mis-matched scopes:
#{fixture_name} is scoped to the test module
#{dep_name} is scoped to the test.
But #{fixture_name} depends on #{dep_name}
"""
end
defp validate_dep(_fixture, _resolved_dep), do: :ok
end
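A small sketch of the clash check; anything exposing a `.name` field works here, and a duplicate name raises:

```elixir
fixtures = [%{name: :db}, %{name: :http_client}]

# No fixture named :session yet, so this passes through quietly.
ExUnitFixtures.Imp.Preprocessing.check_clashes(:session, fixtures)

# check_clashes(:db, fixtures) would raise:
#   "There is already a fixture named db in this module."
```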
|
lib/ex_unit_fixtures/imp/preprocessing.ex
| 0.812123
| 0.47993
|
preprocessing.ex
|
starcoder
|
defmodule Vapor do
@moduledoc """
Vapor provides mechanisms for handling runtime configuration in your system.
"""
alias Vapor.{
Store,
Watch
}
@type key :: String.t() | list()
@type type :: :string | :int | :float | :bool
@type value :: String.t() | integer | float | boolean
@doc """
  Fetches a value from the config under the key provided. Accepts a list forming a path of keys.
You need to specify a type to convert the value into through the `as:` element.
The accepted types are `:string`, `:int`, `:float` and `:bool`
## Example
VaporExample.Config.get("config_key", as: :string)
VaporExample.Config.get(["nested", "config_key"], as: :string)
"""
@callback get(key :: key, type :: [as: type]) ::
{:ok, value} | {:error, Vapor.ConversionError} | {:error, Vapor.NotFoundError}
@doc """
Similar to `c:get/2` but raise if an error happens
## Example
VaporExample.Config.get!("config_key", as: :string)
VaporExample.Config.get!(["nested", "config_key"], as: :string)
"""
@callback get!(key :: key, type :: [as: type]) :: value | none
@doc """
Set the value under the key in the store.
## Example
VaporExample.Config.set("key", "value")
VaporExample.Config.set(["nested", "key"], "value")
"""
@callback set(key :: key, value :: value) :: {:ok, value}
  @callback handle_change() :: :ok
defmacro __using__(_opts) do
quote do
@behaviour Vapor
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor,
restart: :permanent,
shutdown: 500
}
end
def set(key, value) when is_binary(key) do
set([key], value)
end
def set(key, value) when is_list(key) do
GenServer.call(__MODULE__, {:set, key, value})
handle_change()
end
def get(key, as: type) when is_binary(key) do
get([key], as: type)
end
def get(key, as: type) when is_list(key) do
case :ets.lookup(__MODULE__, key) do
[] ->
{:error, Vapor.NotFoundError}
[{^key, value}] ->
Vapor.Converter.apply(value, type)
end
end
def get!(key, as: type) when is_binary(key) do
get!([key], as: type)
end
def get!(key, as: type) when is_list(key) do
case get(key, as: type) do
{:ok, val} ->
val
{:error, error} ->
raise error, {key, type}
end
end
end
end
@doc """
Starts a configuration store and any watches.
"""
def start_link(module, plans, opts) do
if opts[:name] do
name = Keyword.fetch!(opts, :name)
Supervisor.start_link(__MODULE__, {module, plans}, name: :"#{name}_sup")
else
raise Vapor.ConfigurationError, "must supply a `:name` argument"
end
end
@doc """
Stops the configuration store and any watches.
"""
def stop(name) do
Supervisor.stop(:"#{name}_sup")
end
def init({module, plans}) do
children = [
{Watch.Supervisor, [name: Watch.Supervisor.sup_name(module)]},
{Store, {module, plans}}
]
Supervisor.init(children, strategy: :one_for_one)
end
end
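A hedged sketch of a config module: `use Vapor` injects `get/2`, `get!/2`, and `set/2`, while `handle_change/0` must be supplied; the `plans` argument (the configuration sources) comes from parts of the library not shown in this excerpt:

```elixir
defmodule MyApp.Config do
  use Vapor

  # Invoked after every successful set/2.
  def handle_change, do: :ok
end

# Under a supervisor (plans elided):
#   Vapor.start_link(MyApp.Config, plans, name: MyApp.Config)
#   MyApp.Config.set(["http", "port"], "4000")
#   MyApp.Config.get!(["http", "port"], as: :int) #=> 4000
```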
|
lib/vapor.ex
| 0.881742
| 0.426202
|
vapor.ex
|
starcoder
|
defmodule Sitemapper do
@moduledoc """
Sitemapper is an Elixir library for generating [XML Sitemaps](https://www.sitemaps.org).
It's designed to generate large sitemaps while maintaining a low
memory profile. It can persist sitemaps to Amazon S3, disk or any
other adapter you wish to write.
"""
alias Sitemapper.{File, IndexGenerator, SitemapGenerator, SitemapReference}
@doc """
Receives a `Stream` of `Sitemapper.URL` and returns a `Stream` of
`{filename, body}` tuples, representing the individual sitemap XML
files, followed by an index XML file.
Accepts the following `Keyword` options in `opts`:
* `sitemap_url` - The base URL where the generated sitemap
files will live. e.g. `http://example.org`, if your sitemap lives at
`http://example.org/sitemap.xml` (required)
* `gzip` - Sets whether the files are gzipped (default: `true`)
* `name` - An optional suffix for the sitemap filename. e.g. If you
set to `news`, will produce `sitemap-news.xml.gz` and
`sitemap-news-00001.xml.gz` filenames. (default: `nil`)
* `index_lastmod` - An optional Date/DateTime/NaiveDateTime for the lastmod
element in the index. (default: `Date.utc_today()`)
"""
@spec generate(stream :: Enumerable.t(), opts :: keyword) :: Stream.t()
def generate(enum, opts) do
sitemap_url = Keyword.fetch!(opts, :sitemap_url)
gzip_enabled = Keyword.get(opts, :gzip, true)
name = Keyword.get(opts, :name)
index_lastmod = Keyword.get(opts, :index_lastmod, Date.utc_today())
enum
|> Stream.concat([:end])
|> Stream.transform(nil, &reduce_url_to_sitemap/2)
|> Stream.transform(1, &reduce_file_to_name_and_body(&1, &2, name, gzip_enabled))
|> Stream.concat([:end])
|> Stream.transform(nil, &reduce_to_index(&1, &2, sitemap_url, name, gzip_enabled, index_lastmod))
|> Stream.map(&maybe_gzip_body(&1, gzip_enabled))
end
@doc """
Receives a `Stream` of `{filename, body}` tuples, and persists
those to the `Sitemapper.Store`.
Will raise if persistence fails.
Accepts the following `Keyword` options in `opts`:
* `store` - The module of the desired `Sitemapper.Store`,
such as `Sitemapper.S3Store`. (required)
* `store_config` - A `Keyword` list with options for the
`Sitemapper.Store`. (optional, but usually required)
"""
@spec persist(Enumerable.t(), keyword) :: Stream.t()
def persist(enum, opts) do
store = Keyword.fetch!(opts, :store)
store_config = Keyword.get(opts, :store_config, [])
enum
|> Stream.each(fn {filename, body} ->
:ok = store.write(filename, body, store_config)
end)
end
@doc """
Receives a `Stream` of `{filename, body}` tuples, takes the last
one (the index file), and pings Google and Bing with its URL.
"""
@spec ping(Enumerable.t(), keyword) :: Stream.t()
def ping(enum, opts) do
sitemap_url = Keyword.fetch!(opts, :sitemap_url)
enum
|> Stream.take(-1)
|> Stream.map(fn {filename, _body} ->
index_url =
URI.parse(sitemap_url)
|> join_uri_and_filename(filename)
|> URI.to_string()
Sitemapper.Pinger.ping(index_url)
end)
end
defp reduce_url_to_sitemap(:end, nil) do
{[], nil}
end
defp reduce_url_to_sitemap(:end, progress) do
done = SitemapGenerator.finalize(progress)
{[done], nil}
end
defp reduce_url_to_sitemap(url, nil) do
reduce_url_to_sitemap(url, SitemapGenerator.new())
end
defp reduce_url_to_sitemap(url, progress) do
case SitemapGenerator.add_url(progress, url) do
{:error, reason} when reason in [:over_length, :over_count] ->
done = SitemapGenerator.finalize(progress)
next = SitemapGenerator.new() |> SitemapGenerator.add_url(url)
{[done], next}
new_progress ->
{[], new_progress}
end
end
defp reduce_file_to_name_and_body(%File{body: body}, counter, name, gzip_enabled) do
{[{filename(name, gzip_enabled, counter), body}], counter + 1}
end
defp maybe_gzip_body({filename, body}, true) do
{filename, :zlib.gzip(body)}
end
defp maybe_gzip_body({filename, body}, false) do
{filename, body}
end
defp reduce_to_index(:end, nil, _sitemap_url, _name, _gzip_enabled, _lastmod) do
{[], nil}
end
defp reduce_to_index(:end, index_file, _sitemap_url, name, gzip_enabled, _lastmod) do
done_file = IndexGenerator.finalize(index_file)
{filename, body} = index_file_to_data_and_name(done_file, name, gzip_enabled)
{[{filename, body}], nil}
end
defp reduce_to_index({filename, body}, nil, sitemap_url, name, gzip_enabled, lastmod) do
reduce_to_index({filename, body}, IndexGenerator.new(), sitemap_url, name, gzip_enabled, lastmod)
end
defp reduce_to_index({filename, body}, index_file, sitemap_url, _name, _gzip_enabled, lastmod) do
reference = filename_to_sitemap_reference(filename, sitemap_url, lastmod)
case IndexGenerator.add_sitemap(index_file, reference) do
{:error, reason} when reason in [:over_length, :over_count] ->
raise "Generated more than 50,000 sitemap indexes"
new_file ->
{[{filename, body}], new_file}
end
end
defp index_file_to_data_and_name(%File{body: body}, name, gzip_enabled) do
{filename(name, gzip_enabled), body}
end
defp filename_to_sitemap_reference(filename, sitemap_url, lastmod) do
loc =
URI.parse(sitemap_url)
|> join_uri_and_filename(filename)
|> URI.to_string()
%SitemapReference{loc: loc, lastmod: lastmod}
end
defp join_uri_and_filename(%URI{path: nil} = uri, filename) do
URI.merge(uri, filename)
end
defp join_uri_and_filename(%URI{path: path} = uri, filename) do
path = Path.join(path, filename)
URI.merge(uri, path)
end
defp filename(name, gzip, count \\ nil) do
prefix = ["sitemap", name] |> Enum.reject(&is_nil/1) |> Enum.join("-")
suffix =
case count do
nil ->
""
c ->
str = Integer.to_string(c)
"-" <> String.pad_leading(str, 5, "0")
end
extension =
case gzip do
true -> ".xml.gz"
false -> ".xml"
end
prefix <> suffix <> extension
end
end
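The three public functions compose into one lazy pipeline. A hedged sketch: the `%Sitemapper.URL{}` fields shown are assumptions based on standard sitemap attributes, and `MyApp.Repo.stream_urls/0` is a placeholder source:

```elixir
config = [
  sitemap_url: "https://example.org",
  store: Sitemapper.S3Store,
  store_config: [bucket: "sitemaps"]
]

MyApp.Repo.stream_urls()
|> Stream.map(fn loc -> %Sitemapper.URL{loc: loc, lastmod: Date.utc_today()} end)
|> Sitemapper.generate(config)
|> Sitemapper.persist(config)
|> Sitemapper.ping(config)
|> Stream.run()
```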
|
lib/sitemapper.ex
| 0.852214
| 0.520801
|
sitemapper.ex
|
starcoder
|
defmodule Ecto.Associations.Assoc do
@moduledoc """
This module provides the assoc selector merger and utilities around it.
"""
alias Ecto.Query.QueryExpr
alias Ecto.Query.Util
@doc """
Transforms a result set based on the assoc selector, loading the associations
onto their parent model. See `Ecto.Query.select/3`.
"""
@spec run([Ecto.Model.t], Ecto.Query.t) :: [Ecto.Model.t]
def run([], _query), do: []
def run(results, query) do
case query.select do
%QueryExpr{expr: {:assoc, _, [parent, fields]}} ->
merge(results, parent, fields, query)
_ ->
results
end
end
@doc """
Decomposes an `assoc(var, fields)` or `var` into `{var, fields}`.
"""
@spec decompose_assoc(Macro.t) :: {Macro.t, [Macro.t]}
def decompose_assoc({:&, _, [_]} = var), do: {var, []}
def decompose_assoc({:assoc, _, [var, fields]}), do: {var, fields}
defp merge(rows, var, fields, query) do
# Pre-create rose tree of reflections and accumulator dicts in the same
# structure as the fields tree
refls = create_refls(var, fields, query)
{_, _, acc} = create_acc(fields)
acc = {HashSet.new, [], acc}
# Populate tree of dicts of associated entities from the result set
{_keys, parents, children} = Enum.reduce(rows, acc, &merge_to_dict(&1, {nil, refls}, &2))
# Load associated entities onto their parents
parents = for parent <- parents, do: build_struct({0, parent}, children, refls) |> elem(1)
Enum.reverse(parents)
end
  defp merge_to_dict({struct, sub_structs}, {refl, sub_refls}, {keys, dict, sub_dicts}) do
    # We recurse down the tree of the row result, the reflections and the
    # dict accumulators
    pk_value =
      if struct do
        pk_field = struct.__struct__.__schema__(:primary_key)
        Map.get(struct, pk_field)
      end

    # The set makes sure that we don't add duplicated associated entities.
    # Rebinding inside `if` does not leak, so thread the accumulators out explicitly.
    {keys, dict} =
      if struct && not Set.member?(keys, pk_value) do
        keys = Set.put(keys, pk_value)

        dict =
          if refl do
            # Add associated model to dict with association key, we use it to
            # put the model on the right parent later.
            # Also store position so we can sort.
            assoc_key = Map.get(struct, refl.assoc_key)
            item = {Dict.size(dict), struct}
            Dict.update(dict, assoc_key, [item], &[item | &1])
          else
            # If no reflection we are at the top-most parent
            [struct | dict]
          end

        {keys, dict}
      else
        {keys, dict}
      end
# Recurse down
zipped = List.zip([sub_structs, sub_refls, sub_dicts])
sub_dicts = for {recs, refls, dicts} <- zipped do
merge_to_dict(recs, refls, dicts)
end
{keys, dict, sub_dicts}
end
defp build_struct({pos, parent}, children, refls) do
zipped = List.zip([children, refls])
# Load all associated children onto the parent
new_parent =
Enum.reduce(zipped, parent, fn {child, refl}, parent ->
{refl, refls} = refl
{_, children, sub_children} = child
# Get the children associated to the parent
struct_key = Map.get(parent, refl.key)
      built_children =
        if struct_key do
          my_children = Dict.get(children, struct_key) || []
          # Recurse down and build the children
          for child <- my_children, do: build_struct(child, sub_children, refls)
        else
          []
        end
# Fix ordering that was shuffled by HashDict
sorted_children = built_children
|> Enum.sort(&compare/2)
|> Enum.map(&elem(&1, 1))
set_loaded(parent, refl, sorted_children)
end)
{pos, new_parent}
end
defp create_refls(var, fields, query) do
Enum.map(fields, fn {field, nested} ->
{inner_var, fields} = decompose_assoc(nested)
model = Util.find_source(query.sources, var) |> Util.model
refl = model.__schema__(:association, field)
{refl, create_refls(inner_var, fields, query)}
end)
end
defp create_acc(fields) do
acc = Enum.map(fields, fn {_field, nested} ->
{_, fields} = decompose_assoc(nested)
create_acc(fields)
end)
{HashSet.new, HashDict.new, acc}
end
defp compare({pos1, _}, {pos2, _}), do: pos1 < pos2
  defp set_loaded(struct, refl, loaded) do
    # has_many keeps the whole list; singular associations take the first entry
    loaded =
      if refl.__struct__ == Ecto.Reflections.HasMany,
        do: loaded,
        else: List.first(loaded)

    Ecto.Associations.load(struct, refl.field, loaded)
  end
end
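`decompose_assoc/1` is the one piece worth seeing in isolation; it normalizes both selector spellings (a sketch on quoted fragments):

```elixir
var = quote do: &0
{^var, []} = Ecto.Associations.Assoc.decompose_assoc(var)

assoc_ast = quote do: assoc(&0, comments: &1)
{^var, [comments: _]} = Ecto.Associations.Assoc.decompose_assoc(assoc_ast)
```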
|
lib/ecto/associations/assoc.ex
| 0.775052
| 0.600159
|
assoc.ex
|
starcoder
|
defmodule Game do
@moduledoc """
Intcode Arcade Cabinet
"""
defstruct tiles: %{}, score: 0, output: [], window: nil
def print({map, score}) do
pts = Map.keys(map)
{min_x, max_x} = Enum.map(pts, fn {x, _y} -> x end) |> Enum.min_max()
{min_y, max_y} = Enum.map(pts, fn {_x, y} -> y end) |> Enum.min_max()
for y <- min_y..max_y do
for x <- min_x..max_x do
IO.write(print_char(Map.get(map, {x,y}, :empty)))
end
IO.write("\n")
end
IO.puts "SCORE: #{score}"
:ok
end
defp print_char(:empty), do: " "
defp print_char(:wall), do: "="
defp print_char(:block), do: "#"
defp print_char(:paddle), do: "_"
defp print_char(:ball), do: "*"
def render({map, score}) do
ExNcurses.clear()
pts = Map.keys(map)
{min_x, max_x} = Enum.map(pts, fn {x, _y} -> x end) |> Enum.min_max()
{min_y, max_y} = Enum.map(pts, fn {_x, y} -> y end) |> Enum.min_max()
for y <- min_y..max_y do
for x <- min_x..max_x do
ExNcurses.mvaddstr(y, x, print_char(Map.get(map, {x,y}, :empty)))
end
end
ExNcurses.mvaddstr(max_y + 5, 0, "Score: #{score}")
ExNcurses.refresh()
end
@doc """
Run the arcade game
"""
  def play(str, quarters \\ 1) do
ExNcurses.initscr()
win = ExNcurses.newwin(100, 100, 1, 0)
{:ok, _pid} = Agent.start_link(fn -> %Game{window: win} end, name: __MODULE__)
code = Intcode.load(str) |> Intcode.poke(0, quarters)
Intcode.run(code, [], &input/0, &output/1)
result = Agent.get(__MODULE__, fn state -> {state.tiles, state.score} end)
Agent.stop(__MODULE__)
ExNcurses.endwin()
result
end
def input do
state = Agent.get(__MODULE__, fn state -> {state.tiles, state.score} end)
render(state)
{bx, _by} = find_item(:ball)
{px, _py} = find_item(:paddle)
bx - px
end
def find_item(item) do
Agent.get(__MODULE__, fn state -> state.tiles end)
|> Enum.find(fn {_k, v} -> v == item end)
|> elem(0)
end
def output(cmd) do
output(cmd, Agent.get(__MODULE__, fn state -> state.output end))
end
def output(cmd, curr_output) when length(curr_output) < 2 do
Agent.update(__MODULE__, fn state -> %Game{state | output: [cmd | curr_output]} end)
end
def output(cmd, curr_output) do
[x, y, type] = [cmd | curr_output] |> Enum.reverse()
output(x, y, type)
end
def output(-1, 0, score) do
Agent.update(__MODULE__, fn state -> %Game{state | score: score, output: []} end)
end
def output(x, y, type) do
kind = tile(type)
tiles = Agent.get(__MODULE__, fn state -> state.tiles end)
tiles = Map.put(tiles, {x, y}, kind)
Agent.update(__MODULE__, fn state -> %Game{state | tiles: tiles, output: []} end)
end
defp tile(0), do: :empty
defp tile(1), do: :wall
defp tile(2), do: :block
defp tile(3), do: :paddle
defp tile(4), do: :ball
end
|
apps/day13/lib/game.ex
| 0.605566
| 0.504089
|
game.ex
|
starcoder
|
defmodule Datix.Date do
@moduledoc """
A `Date` parser using `Calendar.strftime` format string.
"""
@doc """
Parses a date string according to the given `format`.
See the `Calendar.strftime` documentation for how to specify a format-string.
## Options
* `:calendar` - the calendar to build the `Date`, defaults to `Calendar.ISO`
* `:preferred_date` - a string for the preferred format to show dates,
it can't contain the `%x` format and defaults to `"%Y-%m-%d"`
if the option is not received
* `:month_names` - a list of the month names, if the option is not received
it defaults to a list of month names in English
* `:abbreviated_month_names` - a list of abbreviated month names, if the
option is not received it defaults to a list of abbreviated month names in
English
* `:day_of_week_names` - a list of day names, if the option is not received
it defaults to a list of day names in English
* `:abbreviated_day_of_week_names` - a list of abbreviated day names, if the
option is not received it defaults to a list of abbreviated day names in
English
  Missing values will be set to their minimum.
## Examples
```elixir
iex> Datix.Date.parse("2022-05-11", "%x")
{:ok, ~D[2022-05-11]}
iex> Datix.Date.parse("2021/01/10", "%Y/%m/%d")
{:ok, ~D[2021-01-10]}
iex> Datix.Date.parse("2021/01/10", "%x", preferred_date: "%Y/%m/%d")
{:ok, ~D[2021-01-10]}
iex> Datix.Date.parse("18", "%y")
{:ok, ~D[0018-01-01]}
iex> Datix.Date.parse("", "")
{:ok, ~D[0000-01-01]}
iex> Datix.Date.parse("1736/13/03", "%Y/%m/%d", calendar: Coptic)
{:ok, ~D[1736-13-03 Cldr.Calendar.Coptic]}
iex> Datix.Date.parse("Mi, 1.4.2020", "%a, %-d.%-m.%Y",
...> abbreviated_day_of_week_names: ~w(Mo Di Mi Do Fr Sa So))
{:ok, ~D[2020-04-01]}
iex> Datix.Date.parse("Fr, 1.4.2020", "%a, %-d.%-m.%Y",
...> abbreviated_day_of_week_names: ~w(Mo Di Mi Do Fr Sa So))
{:error, :invalid_date}
```
"""
@spec parse(String.t(), String.t(), list()) ::
{:ok, Date.t()}
| {:error, :invalid_date}
| {:error, :invalid_input}
| {:error, {:parse_error, expected: String.t(), got: String.t()}}
| {:error, {:conflict, [expected: term(), got: term(), modifier: String.t()]}}
| {:error, {:invalid_string, [modifier: String.t()]}}
| {:error, {:invalid_integer, [modifier: String.t()]}}
| {:error, {:invalid_modifier, [modifier: String.t()]}}
def parse(date_str, format_str, opts \\ []) do
with {:ok, data} <- Datix.strptime(date_str, format_str, opts) do
new(data, opts)
end
end
@doc """
Parses a date string according to the given `format`, erroring out for
invalid arguments.
"""
@spec parse!(String.t(), String.t(), list()) :: Date.t()
def parse!(date_str, format_str, opts \\ []) do
date_str
|> Datix.strptime!(format_str, opts)
|> new(opts)
|> case do
{:ok, date} ->
date
{:error, reason} ->
raise ArgumentError, "cannot build date, reason: #{inspect(reason)}"
end
end
@doc false
def new(%{year: year, month: month, day: day} = data, opts) do
with {:ok, date} <- Date.new(year, month, day, Datix.calendar(opts)) do
validate(date, data)
end
end
def new(%{year_2_digit: year, month: _month, day: _day} = data, opts) do
data
|> Map.put(:year, year)
|> Map.delete(:year_2_digit)
|> new(opts)
end
def new(data, opts), do: data |> Datix.assume(Date) |> new(opts)
defp validate(date, data) when is_map(data) do
validate(
date,
data
|> Map.drop([
:am_pm,
:day,
:hour,
:hour_12,
:microsecond,
:minute,
:month,
:second,
:year,
:zone_abbr,
:zone_offset
])
|> Enum.to_list()
)
end
defp validate(date, []), do: {:ok, date}
defp validate(date, [{:day_of_week, day_of_week} | rest]) do
case Date.day_of_week(date) do
^day_of_week -> validate(date, rest)
_day_of_week -> {:error, :invalid_date}
end
end
  defp validate(date, [{:day_of_year, day_of_year} | rest]) do
    case Date.day_of_year(date) do
      ^day_of_year -> validate(date, rest)
      _day_of_year -> {:error, :invalid_date}
    end
  end
defp validate(date, [{:quarter, quarter} | rest]) do
case Date.quarter_of_year(date) do
^quarter -> validate(date, rest)
_quarter -> {:error, :invalid_date}
end
end
end
|
lib/datix/date.ex
| 0.925255
| 0.906073
|
date.ex
|
starcoder
|
defmodule CCSP.Chapter2.DnaSearch do
@moduledoc """
Corresponds to CCSP in Python, Section 2.1, titled "DNA Search"
"""
@type nucleotide :: non_neg_integer
# should only ever have exactly 3 elements
# note that we do not use a tuple like CCSPiP as lists are better suited
@type codon :: list(nucleotide)
@type gene :: list(codon)
@spec grapheme_to_nucleotide(String.t()) :: nucleotide
defp grapheme_to_nucleotide(nucleotide) do
nucleotide = String.upcase(nucleotide)
cond do
nucleotide == "A" -> 0
nucleotide == "C" -> 1
nucleotide == "G" -> 2
nucleotide == "T" -> 3
end
end
  @spec string_to_nucleotides(String.t()) :: list(nucleotide)
  def string_to_nucleotides(str) do
    # Enum.map preserves order; the previous reduce-with-prepend silently
    # reversed the nucleotide sequence.
    str
    |> String.graphemes()
    |> Enum.map(&grapheme_to_nucleotide/1)
  end
@spec string_to_gene(String.t()) :: gene
def string_to_gene(str) do
str
|> string_to_nucleotides()
|> Enum.chunk_every(3, 1, :discard)
end
@spec linear_contains?(String.t(), String.t()) :: boolean
def linear_contains?(gene, key_codon) do
gene = string_to_gene(gene)
codon = string_to_nucleotides(key_codon)
Enum.any?(gene, &(&1 == codon))
end
@spec binary_search(gene, codon, non_neg_integer, non_neg_integer) :: boolean
defp binary_search(gene, key_codon, low, high) when low <= high do
mid = div(low + high, 2)
gene_codon = Enum.at(gene, mid)
cond do
gene_codon < key_codon -> binary_search(gene, key_codon, mid + 1, high)
gene_codon > key_codon -> binary_search(gene, key_codon, low, mid - 1)
gene_codon == key_codon -> true
end
end
defp binary_search(_, _, low, high) when low > high do
false
end
@spec binary_contains?(String.t(), String.t()) :: boolean
def binary_contains?(gene, key_codon) do
# must be sorted for binary search to work properly
sorted_gene = Enum.sort(string_to_gene(gene))
codon = string_to_nucleotides(key_codon)
binary_search(sorted_gene, codon, 0, length(sorted_gene) - 1)
end
end
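Both search strategies over the same gene (a sketch; the gene string is illustrative, and `binary_contains?/2` sorts internally before searching):

```elixir
gene = "ACGTGGCTCTCTAACGTACGTACGTACG"

true = CCSP.Chapter2.DnaSearch.linear_contains?(gene, "ACG")
true = CCSP.Chapter2.DnaSearch.binary_contains?(gene, "ACG")
false = CCSP.Chapter2.DnaSearch.binary_contains?(gene, "GAT")
```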
|
lib/ccsp/chapter2/dna_search.ex
| 0.772316
| 0.638018
|
dna_search.ex
|
starcoder
|
defmodule Extractly do
alias Extractly.DoNotEdit
import Extractly.Helpers
@moduledoc """
Provide easy access to information inside the templates rendered by `mix xtra`
"""
@doc """
Emits a comment including a message not to edit the created file, as it will be recreated from this template.
It is a convenience to include this into your templates as follows
<%= xtra.do_not_edit_warning %>
or I18n'ed
<%= xtra.do_not_edit_warning, lang: :fr %>
If you are not generating html or markdown the comment can be parametrized
<%= xtra.do_not_edit_warning, comment_start: "-- ", comment_end: "" %>
If you want to include the name of the source template use `template: template` option, so
a call may be as complex as:
<%= xtra.do_not_edit_warning, comment_start: "-- ", comment_end: "", template: template, lang: :it %>
"""
def do_not_edit_warning(opts \\ []), do: DoNotEdit.warning(opts)
@doc ~S"""
Returns docstring of a function
Ex:
iex(1)> {:ok, lines} = Extractly.functiondoc("Extractly.moduledoc/2") |> hd()
...(1)> lines |> String.split("\n") |> Enum.take(3)
[" Returns docstring of a module", "", " E.g. verbatim"]
We can also pass a list of functions to get their docs concatenated
iex(2)> [{:ok, moduledoc}, {:error, message}] = Extractly.functiondoc(["Extractly.moduledoc/2", "Extactly.functiondoc/2"])
...(2)> moduledoc |> String.split("\n") |> Enum.take(4)
[ " Returns docstring of a module",
" E.g. verbatim",
"",
" Extractly.moduledoc(\"Extractly\")"]
...(2)> message
"Function doc for function Extactly.functiondoc/2 not found"
If all the functions are in the same module the following form can be used
iex(3)> [{:ok, out}, _] = Extractly.functiondoc(["moduledoc/2", "functiondoc/2"], module: "Extractly")
...(3)> String.split(out, "\n") |> hd()
" Returns docstring of a module"
However, it is often convenient to add a markdown headline before each functiondoc, especially in these cases;
it can be done by passing the `headline: level` option
iex(4)> [{:ok, moduledoc}, {:ok, functiondoc}] = Extractly.functiondoc(["moduledoc/2", "functiondoc/2"], module: "Extractly", headline: 2)
...(4)> moduledoc |> String.split("\n") |> Enum.take(3)
[ "## Extractly.moduledoc/2",
"",
" Returns docstring of a module"]
...(4)> functiondoc |> String.split("\n") |> Enum.take(3)
[ "## Extractly.functiondoc/2",
"",
" Returns docstring of a function"]
Oftentimes we are interested in **all** public functiondocs...
iex(5)> [{:ok, out}|_] = Extractly.functiondoc(:all, module: "Extractly", headline: 2)
...(5)> String.split(out, "\n") |> Enum.take(3)
[ "## Extractly.do_not_edit_warning/1",
"",
" Emits a comment including a message not to edit the created file, as it will be recreated from this template."]
We can specify a language to wrap indented code blocks into ` ```elixir\n...\n``` `
Here is an example
iex(6)> [ok: doc] = Extractly.functiondoc("Extractly.functiondoc/2", wrap_code_blocks: "elixir")
...(6)> doc |> String.split("\n") |> Enum.take(10)
[ " Returns docstring of a function",
" Ex:",
"",
"```elixir",
" iex(1)> {:ok, lines} = Extractly.functiondoc(\"Extractly.moduledoc/2\") |> hd()",
" ...(1)> lines |> String.split(\"\\n\") |> Enum.take(3)",
" [\" Returns docstring of a module\", \"\", \" E.g. verbatim\"]",
"```",
"",
" We can also pass a list of functions to get their docs concatenated"]
"""
def functiondoc(name, opts \\ [])
def functiondoc(:all, opts) do
case Keyword.get(opts, :module) do
nil -> [{:error, "No module given for `functiondoc(:all, ...)`"}]
module_name -> _all_functiondocs(module_name, opts)
end
end
def functiondoc(names, opts) when is_list(names) do
prefix =
case Keyword.get(opts, :module) do
nil -> ""
module_name -> "#{module_name}."
end
names
|> Enum.flat_map(&functiondoc("#{prefix}#{&1}", opts))
|> Enum.map(fn {status, result} -> {status, _postprocess(result, opts)} end)
end
def functiondoc(name, opts) when is_binary(name) do
headline = fdoc_headline(name, opts)
case _functiondoc(name) do
nil -> [{:error, "Function doc for function #{name} not found"}]
doc -> [{:ok, headline <> (doc |> _postprocess(opts))}]
end
end
@doc """
Returns docstring of a macro
"""
def macrodoc(name, opts \\ []) do
{module, macro_name, arity} = _parse_entity_name(name)
case Code.ensure_loaded(module) do
{:module, _} ->
{:ok, _get_entity_doc(module, macro_name, arity, :macro) |> _postprocess(opts)}
_ ->
{:error, "macro not found #{name}"}
end
end
@doc ~S"""
Returns docstring of a module
E.g. verbatim
iex(7)> {:ok, doc} = Extractly.moduledoc("Extractly")
...(7)> doc
" Provide easy access to information inside the templates rendered by `mix xtra`\n"
We can use the same options as with `functiondoc`
iex(8)> {:ok, doc} = Extractly.moduledoc("Extractly", headline: 2)
...(8)> doc |> String.split("\n") |> Enum.take(3)
[
"## Extractly", "", " Provide easy access to information inside the templates rendered by `mix xtra`"
]
If we also want to use `functiondoc :all, module: "Extractly"` **after** the call of `moduledoc` we can
include `:all` in the call of `moduledoc`, which will include function and macro docstrings as well
iex(9)> [{:ok, moduledoc} | _] =
...(9)> moduledoc("Extractly", headline: 3, include: :all)
...(9)> moduledoc
"### Extractly\n\n Provide easy access to information inside the templates rendered by `mix xtra`\n"
iex(10)> [_, {:ok, first_functiondoc} | _] =
...(10)> moduledoc("Extractly", headline: 3, include: :all)
...(10)> first_functiondoc |> String.split("\n") |> Enum.take(5)
[
"### Extractly.do_not_edit_warning/1",
"",
" Emits a comment including a message not to edit the created file, as it will be recreated from this template.",
"",
" It is a convenience to include this into your templates as follows"
]
"""
def moduledoc(name, opts \\ []) do
module = String.replace(name, ~r{\A(?:Elixir\.)?}, "Elixir.") |> String.to_atom()
headline = fdoc_headline(name, opts)
moduledoc_ =
case Code.ensure_loaded(module) do
{:module, _} ->
_get_moduledoc(module) |> _postprocess(opts) |> _check_nil_moduledoc(name, headline)
_ ->
{:error, "module not found #{module}"}
end
case Keyword.get(opts, :include) do
:all ->
more_docs = functiondoc(:all, Keyword.put(opts, :module, name))
[moduledoc_ | more_docs]
nil ->
moduledoc_
x ->
[
moduledoc_,
{:error,
"Illegal value #{x} for include: keyword in moduledoc for module #{name}, legal values are nil and :all"}
]
end
end
@doc ~S"""
Extract Table Of Contents from a markdown document
The files used for the following doctest can be found [here](https://github.com/RobertDober/extractly/tree/master/test/fixtures)
iex(11)> lines = [
...(11)> "## Usage",
...(11)> "### API",
...(11)> "#### EarmarkParser.as_ast/2",
...(11)> "### Support",
...(11)> ]
...(11)> toc(lines, gh_links: true)
{:ok, [
"- [Usage](#usage)",
" - [API](#api)",
" - [EarmarkParser.as_ast/2](#earmarkparseras_ast2)",
" - [Support](#support)",
]}
But if you do not want links
iex(12)> lines = [
...(12)> "## Usage",
...(12)> "### API",
...(12)> "#### EarmarkParser.as_ast/2",
...(12)> "### Support",
...(12)> ]
...(12)> toc(lines)
{:ok, [
"- Usage",
" - API",
" - EarmarkParser.as_ast/2",
" - Support",
]}
In case of bad options an error tuple is returned (no utf8-encoded
input should ever result in an error tuple)
iex(13)> lines = [] # options are checked even if input is empty
...(13)> toc(lines, no_such_option: "x")
{:error, "Unsupported option no_such_option"}
A more detailed description can be found in `Extractly.Toc`'s docstrings
"""
def toc(markdown_doc, options \\ []) do
case markdown_doc |> Extractly.Tools.lines_from_source() |> Extractly.Toc.render(options) do
{:error, message} -> {:error, message}
data -> {:ok, data}
end
end
defp _check_nil_moduledoc(moduledoc_or_nil, name, headline)
defp _check_nil_moduledoc(nil, name, _hl),
do: {:error, "module #{name} does not have a moduledoc"}
defp _check_nil_moduledoc(doc, _name, headline), do: {:ok, headline <> doc}
@doc ~S"""
Returns the output of a mix task
Ex:
iex(14)> Extractly.task("cmd", ~W[echo 42])
"42\n"
iex(15)> try do
...(15)> Extractly.task("xxx")
...(15)> rescue
...(15)> e in RuntimeError -> e.message |> String.split("\n") |> hd()
...(15)> end
"The following output was produced wih error code 1"
"""
def task(task, args \\ [])
def task(task, args) do
case System.cmd("mix", [task | args]) do
{output, 0} ->
output
{output, error} ->
raise "The following output was produced wih error code #{error}\n#{output}"
end
end
@doc """
A convenience method to access this libraries version
"""
def version do
:application.ensure_started(:extractly)
with {:ok, version} = :application.get_key(:extractly, :vsn), do: to_string(version)
end
defp _all_functiondocs(module_name, opts) do
module = "Elixir.#{module_name}" |> String.to_atom()
case Code.ensure_loaded(module) do
{:module, _} -> _get_functiondocs(module, opts)
_ -> [{:error, "cannot load module `#{module}'"}]
end
end
defp _extract_functiondoc(function_info)
defp _extract_functiondoc({_, _, _, doc_map, _}) when is_map(doc_map) do
case doc_map do
%{"en" => docstring} -> docstring
_ -> nil
end
end
defp _extract_functiondoc(_) do
nil
end
defp _extract_functiondoc_with_headline(
{{_, function_name, function_arity}, _, _, _, _} = function_info,
opts
) do
module_name = Keyword.get(opts, :module)
full_name = "#{module_name}.#{function_name}/#{function_arity}"
case _extract_functiondoc(function_info) do
nil -> {:error, "functiondoc for #{full_name} not found"}
doc -> {:ok, fdoc_headline(full_name, opts) <> (doc |> _postprocess(opts))}
end
end
defp _functiondoc(name) do
{module, function_name, arity} = _parse_entity_name(name)
case Code.ensure_loaded(module) do
{:module, _} -> _get_entity_doc(module, function_name, arity, :function)
_ -> nil
end
end
defp _get_entity_doc(module, name, arity, entity_type) do
if function_exported?(module, :__info__, 1) do
{:docs_v1, _, :elixir, _, _, _, docs} = Code.fetch_docs(module)
Enum.find_value(docs, &_find_entity_doc(&1, name, arity, entity_type))
end
end
defp _get_functiondocs(module, opts) do
if function_exported?(module, :__info__, 1) do
{:docs_v1, _, :elixir, _, _, _, docs} = Code.fetch_docs(module)
docs
|> Enum.map(&_extract_functiondoc_with_headline(&1, opts))
|> Enum.filter(fn {status, _} -> status == :ok end)
else
[{:error, "cannot access #{module.__info__ / 1}"}]
end
end
defp _get_moduledoc(module) do
if function_exported?(module, :__info__, 1) do
case Code.fetch_docs(module) do
{:docs_v1, _, :elixir, _, %{"en" => module_doc}, _, _} -> module_doc
_ -> nil
end
end
end
defp _find_entity_doc(doctuple, function_name, arity, entity_type) do
case doctuple do
{{^entity_type, ^function_name, ^arity}, _anno, _sign, %{"en" => doc}, _metadata} -> doc
_ -> nil
end
end
defp _parse_entity_name(name) do
names = String.split(name, ".")
[func | modules] = Enum.reverse(names)
module = ["Elixir" | Enum.reverse(modules)] |> Enum.join(".") |> String.to_atom()
[function_name, arity] = String.split(func, "/")
function_name = String.to_atom(function_name)
{arity, _} = Integer.parse(arity)
{module, function_name, arity}
end
defp _postprocess(input, opts)
defp _postprocess(nil, _opts), do: nil
defp _postprocess(input, opts) do
wrap? = Keyword.get(opts, :wrap_code_blocks)
input_ = Extractly.Directives.process(input, !!wrap?)
case wrap? do
nil -> input_
lang -> wrap_code_blocks(input_, lang)
end
end
end
|
lib/extractly.ex
| 0.804675
| 0.601477
|
extractly.ex
|
starcoder
|
defmodule Movielist.Reports do
@moduledoc """
The Reports context.
"""
import Ecto.Query, warn: false
alias Movielist.Repo
alias Movielist.Admin
alias Movielist.Admin.Movie
alias Movielist.Admin.Rating
def increment(num) do
num + 1
end
def calculate_percent_of_ratings(total, ratings_count) do
  Float.round(total / max(ratings_count, 1), 2)
end
def calculate_rating_total(ratings) do
Enum.reduce(ratings, 0, fn (rating, total) -> total + rating.score end)
end
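# A sketch of how the two helpers above combine to give an average score
# (hypothetical data; a rating only needs a :score field here):
#
#   ratings = [%{score: 70}, %{score: 80}]
#   total = calculate_rating_total(ratings)               # => 150
#   calculate_percent_of_ratings(total, length(ratings))  # => 75.0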
@doc """
Returns map with count of movies for genre id and their average pre-rating
"""
def movie_stats_for_genre(genre_id) do
from(m in Movie, where: m.genre_id == ^genre_id, select: %{movie_count: count(m), average_pre_rating: avg(m.pre_rating)})
|> Repo.one!
end
@doc """
Returns map with count of rated movies for genre id and their average score
"""
def rating_stats_for_genre(genre_id) do
from(r in Rating, join: m in assoc(r, :movie), where: m.genre_id == ^genre_id, select: %{rating_count: count(r), average_score: avg(r.score)})
|> Repo.one!
end
@doc """
Base query for ratings by year
"""
def list_ratings_for_year_base_query(year) do
Admin.list_ratings_base_query()
|> where([r], fragment("EXTRACT(year FROM ?)", r.date_scored) == ^year)
end
@doc """
Returns list of ratings by year
"""
def list_ratings_for_year(year, :date) do
list_ratings_for_year_base_query(year)
|> order_by(asc: :date_scored, asc: :id, desc: :score)
|> Repo.all
end
def list_ratings_for_year(year, :score) do
list_ratings_for_year_base_query(year)
|> order_by(desc: :score, asc: :date_scored, asc: :id)
|> Repo.all
end
@doc """
Gets the month number and number of ratings (movies watched) in that month for the given year
"""
def calculate_ratings_per_month(ratings, is_current_year) do
end_month = case is_current_year do
true -> Common.ModelHelpers.Date.today.month
false -> 12
end
month_range = 1..end_month
initial_month_map = month_range |> Enum.map(fn (i) -> {i, 0} end) |> Map.new
month_map = ratings
|> Enum.reduce(initial_month_map, fn (rating, month_map) -> Map.update!(month_map, rating.date_scored.month, &increment/1) end)
month_range
|> Enum.map(fn (month_number) -> %{month_number: month_number, count: month_map[month_number]} end)
end
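# Illustrative call (hypothetical ratings; only the month of :date_scored
# is read, and passing false reports all 12 months):
#
#   ratings = [%{date_scored: ~D[2020-01-05]}, %{date_scored: ~D[2020-03-01]}]
#   calculate_ratings_per_month(ratings, false)
#   # => [%{month_number: 1, count: 1}, %{month_number: 2, count: 0},
#   #     %{month_number: 3, count: 1}, ...and so on through month 12]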
end
|
apps/movielist/lib/movielist/admin/reports.ex
| 0.784526
| 0.447823
|
reports.ex
|
starcoder
|
defmodule Plug.Session do
@moduledoc """
A plug to handle session cookies and session stores.
The session is accessed via functions on `Plug.Conn`. Cookies and
session have to be fetched with `Plug.Conn.fetch_session/1` before the
session can be accessed.
## Session stores
See `Plug.Session.Store` for the specification session stores are required to
implement.
Plug ships with the following session stores:
* `Plug.Session.ETS`
## Options
* `:store` - session store module (required);
* `:key` - session cookie key (required);
* `:domain` - see `Plug.Conn.put_resp_cookie/4`;
* `:max_age` - see `Plug.Conn.put_resp_cookie/4`;
* `:path` - see `Plug.Conn.put_resp_cookie/4`;
* `:secure` - see `Plug.Conn.put_resp_cookie/4`;
Additional options can be given to the session store, see the store's
documentation for the options it accepts.
## Examples
plug Plug.Session, store: :ets, key: "sid", secure: true, table: :session
"""
alias Plug.Conn
@behaviour Plug
@cookie_opts [:domain, :max_age, :path, :secure]
def init(opts) do
store = Keyword.fetch!(opts, :store) |> convert_store
key = Keyword.fetch!(opts, :key)
cookie_opts = Keyword.take(opts, @cookie_opts)
store_opts = Keyword.drop(opts, [:store, :key] ++ @cookie_opts)
store_config = store.init(store_opts)
%{store: store,
store_config: store_config,
key: key,
cookie_opts: cookie_opts}
end
def call(conn, config) do
Conn.assign_private(conn, :plug_session_fetch, fetch_session(config))
end
defp convert_store(store) do
  case Atom.to_string(store) do
    "Elixir." <> _ -> store
    reference -> Module.concat(Plug.Session, String.upcase(reference))
  end
end
defp fetch_session(config) do
  %{store: store, store_config: store_config, key: key} = config

  fn conn ->
    # bindings made inside `if` do not leak out of it, so compute the
    # (possibly renewed) sid and the session in one expression
    {sid, session} =
      if sid = conn.cookies[key] do
        store.get(sid, store_config)
      else
        {nil, nil}
      end

    conn
    |> Conn.assign_private(:plug_session, session || %{})
    |> Conn.assign_private(:plug_session_fetch, &(&1))
    |> Conn.register_before_send(before_send(sid, config))
  end
end
defp before_send(sid, config) do
  %{store: store, store_config: store_config, key: key,
    cookie_opts: cookie_opts} = config

  fn conn ->
    # rebinding `sid` inside a case branch does not leak out of the case,
    # so return the updated sid from the case expression instead
    sid =
      case Map.get(conn.private, :plug_session_info) do
        :write ->
          store.put(sid, conn.private[:plug_session], store_config)
        :drop ->
          if sid, do: store.delete(sid, store_config)
          nil
        :renew ->
          if sid, do: store.delete(sid, store_config)
          store.put(nil, conn.private[:plug_session], store_config)
        nil ->
          sid
      end

    if sid do
      Conn.put_resp_cookie(conn, key, sid, cookie_opts)
    else
      conn
    end
  end
end
end
|
lib/plug/session.ex
| 0.772273
| 0.496948
|
session.ex
|
starcoder
|
defmodule TuringMachine do
@moduledoc """
Turing machine simulator.
"""
alias TuringMachine.Program
@type state :: any
@type value :: any
@type t :: %__MODULE__{
initial_tape: (integer -> value),
tape_hash: %{optional(integer) => value},
position: integer,
state: state,
accept_states: list(state),
}
@doc """
Function for the `"0"`-filled tape, which is the default `initial_tape` for a `TuringMachine`.
Note that `"0"` is a string, not an integer.
It fits to programs by `Program.from_string/1` or `Program.from_file/1`.
"""
@spec zero_tape(integer) :: String.t
def zero_tape(_pos), do: "0"
defstruct [
initial_tape: &__MODULE__.zero_tape/1,
tape_hash: %{},
position: 0,
state: "0",
accept_states: ["A"],
]
@doc """
Make an `initial_tape` function from `list`.
Values outside the list range default to `default`.
Note that `&Enum.at(list, &1, default)` doesn't work for negative positions.
"""
@spec tape_from_list(list(value), value) :: (integer -> value)
def tape_from_list(list, default \\ "0") do
fn
position when position < 0 -> default
position when is_integer(position) -> Enum.at(list, position, default)
end
end
@doc """
Get the value of tape at the given position.
"""
@spec at(t, integer) :: value
def at(machine, position) do
case Map.fetch(machine.tape_hash, position) do
{:ok, val} -> val
:error -> (machine.initial_tape).(position)
end
end
@doc """
Take values of the tape in the given range.
`from` may be greater than, less than, or equal to `to`.
If `from` is greater than `to`, the values are returned in reverse order.
```
machine = %TuringMachine{initial_tape: fn n -> n end}
TuringMachine.slice_tape(machine, 0, 2)
# => [0, 1, 2]
TuringMachine.slice_tape(machine, 2, -2)
# => [2, 1, 0, -1, -2]
TuringMachine.slice_tape(machine, 42, 42)
# => [42]
```
"""
@spec slice_tape(t, integer, integer) :: list(value)
def slice_tape(machine, from, to) do
Enum.map(from..to, &at(machine, &1))
end
@doc """
Maps the tape values with the given function.
Values already materialized in `tape_hash` are mapped eagerly at this time,
while values still served by `initial_tape` are mapped lazily on access.
```
machine = %TuringMachine{initial_tape: fn n -> n end, tape_hash: %{1 => 10}}
new_machine = TuringMachine.map_tape(machine, fn n -> n * 2 end)
new_machine.tape_hash
# => %{1 => 20}
TuringMachine.slice_tape(new_machine, 0, 2)
# => [0, 20, 4]
```
"""
@spec map_tape(t, (value -> value)) :: t
def map_tape(machine, f) do
new_initial_tape = fn pos -> f.(machine.initial_tape.(pos)) end
new_tape_hash = Enum.into(machine.tape_hash, %{}, fn {pos, val} -> {pos, f.(val)} end)
Map.merge(machine, %{initial_tape: new_initial_tape, tape_hash: new_tape_hash})
end
@doc """
Evaluate `initial_tape` function and store the results in `tape_hash`.
Useful to avoid duplicate evaluations when the machine is intended to run
multiple programs.
You can pass list or range of positions:
```
machine = %TuringMachine{initial_tape: fn n -> n * 2 end}
TuringMachine.eval_tape(machine, [1, 3, 5]).tape_hash
# => %{1 => 2, 3 => 6, 5 => 10}
TuringMachine.eval_tape(machine, -1..2).tape_hash
# => %{-1 => -2, 0 => 0, 1 => 2, 2 => 4}
```
"""
@spec eval_tape(t, list(integer) | Range.t) :: t
def eval_tape(machine, positions) do
evaluated = Enum.into(positions, %{}, fn pos ->
{pos, at(machine, pos)}
end)
update_in(machine.tape_hash, &Map.merge(&1, evaluated))
end
@doc """
Process 1 step for the `machine` with the `program`.
Raises when no command is found for the state.
"""
@spec step(t, Program.t) :: t | none
def step(%{state: state, accept_states: accept_states, position: position} = machine, program) do
if state in accept_states do
machine
else
value = at(machine, position)
case Enum.find(program, &match?({^state, ^value, _, _, _}, &1)) do
nil ->
raise "No command matches for: #{inspect({state, value})}"
{_, _, next_value, next_direction, next_state} ->
Map.merge(machine, %{
tape_hash: Map.put(machine.tape_hash, position, next_value),
position: position + Program.direction_to_diff(next_direction),
state: next_state
})
end
end
end
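# Illustrative one-command program in the 5-tuple shape matched above,
# {state, value, next_value, direction, next_state}. The direction value
# is whatever Program.direction_to_diff/1 accepts ("R" is a guess here;
# the Program module is outside this excerpt):
#
#   program = [{"0", "0", "1", "R", "A"}]
#   machine = TuringMachine.step(%TuringMachine{}, program)
#   machine.state # => "A"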
@doc """
Steps `n` times.
"""
@spec step_times(t, Program.t, non_neg_integer) :: t | none
def step_times(machine, _program, 0), do: machine
def step_times(machine, program, times) do
if machine.state in machine.accept_states do
machine
else
step_times(step(machine, program), program, times - 1)
end
end
@doc """
Run the program until the machine state becomes one of its `accept_states`.
This may go into infinite loop.
"""
@spec run(t, Program.t) :: t | none
def run(machine, program) do
if machine.state in machine.accept_states do
machine
else
run(step(machine, program), program)
end
end
end
|
lib/turing_machine.ex
| 0.843975
| 0.90355
|
turing_machine.ex
|
starcoder
|
% Basic unit test structure for Elixir.
%
% ## Example
%
% A basic setup for ExUnit is shown below:
%
% % File: assertion_test.exs
%
% % 1) If you wish to configure ExUnit, see the list of options below.
% ExUnit.configure
%
% % 2) Next we create a new TestCase and add ExUnit::Case to it
% module AssertionTest
% mixin ExUnit::Case
%
% % 3) A test is a method whose name ends with _test
% def always_pass_test
% % 4) You can get most of your tests done with pattern matching
% true = true
% end
% end
%
% To run the test above, all you need to do is use the bin/exunit script that ships with Elixir.
% Assuming you named your file assertion_test.exs, you can run it as:
%
% bin/exunit assertion_test.exs
%
% ## Assertions
%
% Most ExUnit assertions can be done with pattern matching. However, there are
% a few extra assertions in ExUnit::Assertions to aid testing.
%
% ## Callbacks
%
% ExUnit provides `setup` and `teardown` callbacks before and after running each test.
%
% For instance, imagine you have to connect to a database before each test and
% disconnect from it after each test is executed, regardless of whether it failed or not.
% You can do it as follows:
%
% module QueryTest
% mixin ExUnit::Case
%
% def setup(_)
% @('connection, Database.connection)
% end
%
% def query_test
% "2" = @connection.query("SELECT 1 + 1")
% end
%
% def teardown(_)
% @connection.disconnect
% end
% end
%
% It is important to notice two things:
%
% 1) Both `setup` and `teardown` methods receive an atom with the name of the test
% being executed. This allows you to specialize the behavior for one specific test.
%
% 2) The `setup` method needs to return a data type of the same kind as the
% test case (self). For instance, the following is wrong:
%
% def setup
% MyModule.something
% end
%
% However the following works:
%
% def setup
% MyModule.something
% self
% end
%
% ## Options
%
% ExUnit supports the following options given to configure:
%
% * `'formatter` - The formatter that will print results
% * `'max_cases` - Maximum number of cases to run in parallel
%
module ExUnit
def start
ExUnit::Server.start
end
def configure(options)
ExUnit::Server.merge_options(options)
end
def run
cases = ExUnit::Server.cases
options = ExUnit::Server.options
formatter = options['formatter] || #ExUnit::Formatter()
max_cases = options['max_cases] || 4
#ExUnit::Runner(formatter, cases, max_cases).start
end
end
|
lib/ex_unit.ex
| 0.745769
| 0.582966
|
ex_unit.ex
|
starcoder
|