| code (string, 114–1.05M chars) | path (string, 3–312 chars) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, 3–168 chars) | kind (string, 1 class) |
|---|---|---|---|---|---|
defmodule Exchange.Utils do
@moduledoc """
Auxiliary functions for Exchange APP
"""
@doc """
Fetches the completed trades stored by an `Exchange.TimeSeries` adapter given a ticker and a trader id
## Parameters
- ticker: Market where the fetch should be made
- trader_id: The id that a given trade must match
"""
@spec fetch_completed_trades(ticker :: atom, trader_id :: String.t()) :: list
def fetch_completed_trades(ticker, trader_id) do
time_series().completed_trades_by_id(ticker, trader_id)
end
@doc """
Fetches all completed trades stored by an `Exchange.TimeSeries` adapter given a ticker
## Parameters
- ticker: Market where the fetch should be made
"""
@spec fetch_all_completed_trades(ticker :: atom) :: list
def fetch_all_completed_trades(ticker) do
time_series().completed_trades(ticker)
end
@doc """
Fetches the completed trade stored by an `Exchange.TimeSeries` adapter given a ticker and a trade id.
## Parameters
- ticker: Market where the fetch should be made
- trade_id: Id of the requested trade
"""
@spec fetch_completed_trade_by_trade_id(ticker :: atom, trade_id :: String.t()) ::
Exchange.Trade
def fetch_completed_trade_by_trade_id(ticker, trade_id) do
time_series().get_completed_trade_by_trade_id(ticker, trade_id)
end
@doc """
Fetches the active orders stored by an `Exchange.TimeSeries` adapter given a ticker
## Parameters
- ticker: Market where the fetch should be made
"""
@spec fetch_live_orders(ticker :: atom) :: list
def fetch_live_orders(ticker) do
time_series().get_live_orders(ticker)
end
@doc """
Prints an `Exchange.OrderBook`
"""
@spec print_order_book(order_book :: Exchange.OrderBook.order_book()) :: :ok
def print_order_book(order_book) do
IO.puts("----------------------------")
IO.puts(" Price Level | ID | Size ")
IO.puts("----------------------------")
order_book.buy
|> Map.keys()
|> Enum.sort()
|> Enum.reverse()
|> Enum.each(fn price_point ->
IO.puts(price_point)
Map.get(order_book.buy, price_point)
|> Enum.each(fn order ->
IO.puts(" #{order.order_id}, #{order.size}")
end)
end)
IO.puts("----------------------------")
IO.puts(" Sell side | ID | Size ")
IO.puts("----------------------------")
order_book.sell
|> Map.keys()
|> Enum.sort()
|> Enum.each(fn price_point ->
IO.puts(price_point)
Map.get(order_book.sell, price_point)
|> Enum.each(fn order ->
IO.puts(" #{order.order_id}, #{order.size}")
end)
end)
end
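# For a quick look at the output, one option is to combine the sample helpers
# defined below:
#
#     :AUXLND |> Exchange.Utils.sample_order_book() |> Exchange.Utils.print_order_book()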
@doc """
Returns an empty `Exchange.OrderBook`
"""
@spec empty_order_book :: Exchange.OrderBook.order_book()
def empty_order_book do
%Exchange.OrderBook{
name: :AUXLND,
currency: :GBP,
buy: %{},
sell: %{},
order_ids: Map.new(),
completed_trades: [],
ask_min: 99_999,
bid_max: 1,
max_price: 100_000,
min_price: 0
}
end
@doc """
Creates a sample limit order from a map with `:size`, `:price` and `:side`
"""
@spec sample_order(map) :: Exchange.Order.order()
def sample_order(%{size: z, price: p, side: s}) do
%Exchange.Order{
type: :limit,
order_id: "9",
trader_id: "alchemist9",
side: s,
initial_size: z,
size: z,
price: p,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond),
modified_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
}
end
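# For illustration: sample_order(%{size: 1000, price: 4000, side: :buy})
# returns a limit order with the hardcoded order_id "9" and trader_id "alchemist9".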
@doc """
Creates a sample expiring limit order from a map with `:size`, `:price`, `:side`, `:id` and `:exp_time`
"""
@spec sample_expiring_order(%{
price: number,
side: atom,
size: number,
exp_time: number,
id: String.t()
}) ::
Exchange.Order.order()
def sample_expiring_order(%{size: z, price: p, side: s, id: id, exp_time: t}) do
%Exchange.Order{
type: :limit,
order_id: id,
trader_id: "test_user_1",
side: s,
initial_size: z,
size: z,
price: p,
exp_time: t
}
end
@doc """
This function places sample buy orders and sell orders in the correct market using the ticker.
## Arguments
- ticker: Market where the orders should be placed
"""
@spec sample_matching_engine_init(ticker :: atom) :: :ok
def sample_matching_engine_init(ticker) do
buy_book =
[
%Exchange.Order{
type: :limit,
order_id: "4",
trader_id: "alchemist1",
side: :buy,
initial_size: 250,
size: 250,
price: 4000
},
%Exchange.Order{
type: :limit,
order_id: "6",
trader_id: "alchemist2",
side: :buy,
initial_size: 500,
size: 500,
price: 4000
},
%Exchange.Order{
type: :limit,
order_id: "2",
trader_id: "alchemist3",
side: :buy,
initial_size: 750,
size: 750,
price: 3970
},
%Exchange.Order{
type: :limit,
order_id: "7",
trader_id: "alchemist4",
side: :buy,
initial_size: 150,
size: 150,
price: 3960
}
]
|> Enum.map(
&%{
&1
| ticker: ticker,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
}
)
sell_book =
[
%Exchange.Order{
type: :limit,
order_id: "1",
trader_id: "alchemist5",
side: :sell,
initial_size: 750,
size: 750,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "5",
trader_id: "alchemist6",
side: :sell,
initial_size: 500,
size: 500,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "8",
trader_id: "alchemist7",
side: :sell,
initial_size: 750,
size: 750,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "3",
trader_id: "alchemist8",
side: :sell,
initial_size: 250,
size: 250,
price: 4020
}
]
|> Enum.map(
&%{
&1
| ticker: ticker,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
}
)
(buy_book ++ sell_book)
|> Enum.each(fn order ->
Exchange.MatchingEngine.place_order(ticker, order)
end)
end
@doc """
Creates an `Exchange.OrderBook` with sample buy and sell orders
## Arguments
- ticker: Market where the order book belongs
"""
@spec sample_order_book(ticker :: atom) :: Exchange.OrderBook.order_book()
def sample_order_book(ticker) do
buy_book =
[
%Exchange.Order{
type: :limit,
order_id: "4",
trader_id: "alchemist1",
side: :buy,
initial_size: 250,
size: 250,
ticker: ticker,
price: 4000
},
%Exchange.Order{
type: :limit,
order_id: "6",
trader_id: "alchemist2",
side: :buy,
initial_size: 500,
size: 500,
ticker: ticker,
price: 4000
},
%Exchange.Order{
type: :limit,
order_id: "2",
trader_id: "alchemist3",
side: :buy,
initial_size: 750,
size: 750,
ticker: ticker,
price: 3970
},
%Exchange.Order{
type: :limit,
order_id: "7",
trader_id: "alchemist4",
side: :buy,
initial_size: 150,
size: 150,
ticker: ticker,
price: 3960
}
]
|> Enum.map(&%{&1 | acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)})
sell_book =
[
%Exchange.Order{
type: :limit,
order_id: "1",
trader_id: "alchemist5",
side: :sell,
initial_size: 750,
size: 750,
ticker: ticker,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "5",
trader_id: "alchemist6",
side: :sell,
initial_size: 500,
size: 500,
ticker: ticker,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "8",
trader_id: "alchemist7",
side: :sell,
initial_size: 750,
size: 750,
ticker: ticker,
price: 4010
},
%Exchange.Order{
type: :limit,
order_id: "3",
trader_id: "alchemist8",
side: :sell,
initial_size: 250,
size: 250,
ticker: ticker,
price: 4020
}
]
|> Enum.map(&%{&1 | acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)})
order_book = %Exchange.OrderBook{
name: ticker,
currency: :GBP,
buy: %{},
sell: %{},
order_ids: Map.new(),
completed_trades: [],
ask_min: 99_999,
bid_max: 1001,
max_price: 100_000,
min_price: 1000
}
(buy_book ++ sell_book)
|> Enum.reduce(order_book, fn order, order_book ->
Exchange.OrderBook.price_time_match(order_book, order)
end)
end
@doc """
Creates a random order for a given ticker
## Arguments
- ticker: Market where the order should be placed
"""
@spec random_order(ticker :: atom) :: Exchange.Order.order()
def random_order(ticker) do
trader_id = "alchemist" <> Integer.to_string(Enum.random(0..9))
side = Enum.random([:buy, :sell])
type = Enum.random([:market, :limit, :marketable_limit])
price = 0..10 |> Enum.map(fn x -> 2000 + x * 200 end) |> Enum.random()
size = 0..10 |> Enum.map(fn x -> 1000 + x * 500 end) |> Enum.random()
order_id = UUID.uuid1()
%Exchange.Order{
order_id: order_id,
trader_id: trader_id,
side: side,
price: price,
initial_size: size,
size: size,
type: type,
exp_time: DateTime.utc_now() |> DateTime.to_unix(:millisecond),
ticker: ticker,
acknowledged_at: DateTime.utc_now() |> DateTime.to_unix(:nanosecond)
}
end
@doc """
Function that generates n random orders given a specific ticker
## Arguments
- ticker: Market where the order should be placed
- n: Number of orders to be generated
"""
@spec generate_random_orders(n :: number, ticker :: atom) :: [Exchange.Order.order()]
def generate_random_orders(n, ticker)
when is_integer(n) and n > 0 do
Enum.reduce(1..n, [], fn _n, acc ->
[random_order(ticker) | acc]
end)
end
@doc """
Retrieves the module of an adapter of `Exchange.TimeSeries`
"""
@spec time_series :: any
def time_series do
Application.get_env(:exchange, :time_series_adapter, Exchange.Adapters.InMemoryTimeSeries)
end
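# The adapter can be overridden in your application config, e.g. in
# config/config.exs (falls back to the in-memory adapter when unset):
#
#     config :exchange, time_series_adapter: Exchange.Adapters.InMemoryTimeSeries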
end
| lib/exchange/utils.ex | 0.891811 | 0.601359 | utils.ex | starcoder |
defmodule Elastic.Scroller do
alias Elastic.Index
alias Elastic.Scroll
use GenServer
@moduledoc ~S"""
Provides an API for working with [Elastic Search's Scroll API](https://www.elastic.co/guide/en/elasticsearch/reference/2.4/search-request-scroll.html)
## Example
```elixir
{:ok, pid} = Elastic.Scroller.start_link(%{index: "answer"})
# get the first "page" of results
Elastic.Scroller.results(pid)
# Request the second page
Elastic.Scroller.next_page(pid)
# get the second "page" of results
Elastic.Scroller.results(pid)
```
Then you can choose to kill the search context yourself... keeping in mind
of course that Elastic Search will do this automatically after the
keepalive (default of 1 minute) expires for the scroll.
```elixir
Elastic.Scroller.clear(pid)
```
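A minimal sketch of draining every page, assuming `handle_hits/1` is a
function you define yourself; iteration stops once a page comes back empty:
```elixir
defp drain(pid) do
  case Elastic.Scroller.results(pid) do
    [] ->
      Elastic.Scroller.clear(pid)
    hits ->
      handle_hits(hits)
      case Elastic.Scroller.next_page(pid) do
        {:ok, _scroll_id} -> drain(pid)
        error -> error
      end
  end
end
```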
"""
@doc ~S"""
Starts an Elastic.Scroller server.
For usage information refer to the documentation at the top of this module.
"""
@spec start_link(%{
required(:index) => String.t,
optional(:body) => map(),
optional(:size) => pos_integer(),
optional(:keepalive) => String.t,
})
:: {:ok, pid()}
def start_link(opts = %{index: index}) do
opts = opts
|> Map.put_new(:body, %{})
|> Map.put_new(:size, 100)
|> Map.put_new(:keepalive, "1m")
|> Map.put(:index, index)
GenServer.start_link(__MODULE__, opts)
end
@spec init(%{
required(:index) => String.t,
required(:body) => map(),
required(:size) => pos_integer(),
required(:keepalive) => String.t,
})
:: {:ok, map()} | {:stop, String.t}
def init(state = %{index: index, body: body, size: size, keepalive: keepalive}) do
scroll = Scroll.start(%{
index: index,
body: body,
size: size,
keepalive: keepalive
})
case scroll do
{:ok, 200, %{"_scroll_id" => id, "hits" => %{"hits" => hits}}} ->
{:ok, Map.merge(state, %{scroll_id: id, hits: hits})}
{:error, _status, error} ->
{:stop, inspect(error)}
end
end
@doc ~S"""
Returns the results of the current scroll location.
```elixir
Elastic.Scroller.results(pid)
```
"""
@spec results(pid())
:: [map()]
def results(pid) do
GenServer.call(pid, :results)
end
@doc ~S"""
Fetches the next page of results and returns a scroll ID.
To retrieve the results that come from this request, make a call to `Elastic.Scroller.results/1`.
```elixir
Elastic.Scroller.next_page(pid)
```
"""
@spec next_page(pid())
:: {:ok, String.t}
| {:error, :search_context_not_found, map()}
| {:error, String.t}
def next_page(pid) do
GenServer.call(pid, :next_page)
end
@doc false
def scroll_id(pid) do
GenServer.call(pid, :scroll_id)
end
@doc false
def index(pid) do
GenServer.call(pid, :index)
end
@doc false
def body(pid) do
GenServer.call(pid, :body)
end
@doc false
def keepalive(pid) do
GenServer.call(pid, :keepalive)
end
@doc false
def size(pid) do
GenServer.call(pid, :size)
end
@doc false
def clear(pid) do
GenServer.call(pid, :clear)
end
def handle_call(:results, _from, state = %{hits: hits}) do
{:reply, hits, state}
end
def handle_call(:next_page, _from, state = %{index: index, body: body, keepalive: keepalive, scroll_id: scroll_id}) do
scroll = %{
index: index,
body: body,
scroll_id: scroll_id,
keepalive: keepalive
}
case scroll |> Scroll.next do
{:ok, 200, %{
"_scroll_id" => id,
"hits" => %{"hits" => hits}}} ->
state = state |> Map.merge(%{scroll_id: id, hits: hits})
{:reply, {:ok, id}, state}
{:error, 404, error} ->
{:reply, {:error, :search_context_not_found, error}, state}
{:error, _, error} ->
{:reply, {:error, inspect(error)}, state}
end
end
def handle_call(:scroll_id, _from, state = %{scroll_id: scroll_id}) do
{:reply, scroll_id, state}
end
def handle_call(:size, _from, state = %{size: size}) do
{:reply, size, state}
end
def handle_call(:index, _from, state = %{index: index}) do
{:reply, Index.name(index), state}
end
def handle_call(:body, _from, state = %{body: body}) do
{:reply, body, state}
end
def handle_call(:keepalive, _from, state = %{keepalive: keepalive}) do
{:reply, keepalive, state}
end
def handle_call(:clear, _from, state = %{scroll_id: scroll_id}) do
response = Scroll.clear([scroll_id])
{:reply, response, state}
end
end
| lib/elastic/scroller.ex | 0.8628 | 0.749912 | scroller.ex | starcoder |
defmodule NebulexExt.Adapters.Replicated do
@moduledoc """
Adapter module for replicated cache.
This adapter depends on a local cache adapter, it adds a thin layer
on top of it in order to replicate requests across a group of nodes,
where the local cache is assumed to be running already.
PG2 is used by the adapter to manage the cluster nodes. When the replicated
cache is started in a node, it creates a PG2 group and joins it (the cache
supervisor PID is joined to the group).
When a write function is invoked, the adapter executes it in all the PG2 group members
(i.e. the data is replicated to all the nodes).
When a read function is invoked, the adapter executes it locally (using the local adapter).
## Features
* Support for Replicated Cache
* Support for transactions via Erlang global name registration facility
## Options
These options should be set in the config file and require
recompilation in order to take effect.
* `:adapter` - The adapter name, in this case, `NebulexExt.Adapters.Replicated`.
* `:local` - The Local Cache module. The value to this option should be
`Nebulex.Adapters.Local`, unless you want to provide a custom local
cache adapter.
* `:rpc_timeout` - Timeout used on the rpc calls
## Example
`Nebulex.Cache` is the wrapper around the Cache. We can define the
local and replicated cache as follows:
defmodule MyApp.LocalCache do
use Nebulex.Cache, otp_app: :my_app, adapter: Nebulex.Adapters.Local
end
defmodule MyApp.ReplicatedCache do
use Nebulex.Cache, otp_app: :my_app, adapter: NebulexExt.Adapters.Replicated
end
Where the configuration for the Cache must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, MyApp.LocalCache,
n_shards: 2,
gc_interval: 3600
config :my_app, MyApp.ReplicatedCache,
local: MyApp.LocalCache
For more information about the usage, check `Nebulex.Cache`.
## Extended API
This adapter provides some additional functions to the `Nebulex.Cache` API.
### `nodes/0`
Returns the nodes that belong to the caller Cache.
MyCache.nodes()
### `new_generation/1`
Creates a new generation in all nodes that belong to the caller Cache.
MyCache.new_generation()
## Limitations
This adapter has some limitations for two functions: `get_and_update/4` and
`update/5`. Both take an anonymous function as a parameter, and an anonymous
function is compiled into the module where it was created, which means it
doesn't necessarily exist on remote nodes. To ensure these functions work as
expected, you must provide functions from modules that exist on all nodes
of the group.
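For example, a capture of a named function is safe to replicate (a sketch,
where `MyApp.Counters.bump/1` is a hypothetical function defined in your own
application and therefore present on every node):
    MyApp.ReplicatedCache.update(:visits, 0, &MyApp.Counters.bump/1)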
"""
# Inherit default transaction implementation
use NebulexExt.Adapter.Transaction
# Provide Cache Implementation
@behaviour Nebulex.Adapter
## Adapter Impl
@impl true
defmacro __before_compile__(env) do
otp_app = Module.get_attribute(env.module, :otp_app)
config = Module.get_attribute(env.module, :config)
rpc_timeout = Keyword.get(config, :rpc_timeout, 1000)
unless local = Keyword.get(config, :local) do
raise ArgumentError,
"missing :local configuration in " <>
"config #{inspect otp_app}, #{inspect env.module}"
end
quote do
alias Nebulex.Adapters.Local.Generation
def __local__, do: unquote(local)
def __rpc_timeout__, do: unquote(rpc_timeout)
def nodes do
pg2_namespace()
|> :pg2.get_members()
|> Enum.map(&node(&1))
|> :lists.usort
end
def new_generation(opts \\ []) do
{res, _} = :rpc.multicall(nodes(), Generation, :new, [unquote(local), opts])
res
end
def init(config) do
:ok = :pg2.create(pg2_namespace())
unless self() in :pg2.get_members(pg2_namespace()) do
:ok = :pg2.join(pg2_namespace(), self())
end
{:ok, config}
end
defp pg2_namespace, do: {:nebulex, __MODULE__}
end
end
## Adapter Impl
## Local operations
@impl true
def init(_cache, _opts), do: {:ok, []}
@impl true
def get(cache, key, opts), do: cache.__local__.get(key, opts)
@impl true
def has_key?(cache, key), do: cache.__local__.has_key?(key)
@impl true
def keys(cache), do: cache.__local__.keys()
@impl true
def size(cache), do: cache.__local__.size()
@impl true
def reduce(cache, acc_in, fun, opts), do: cache.__local__.reduce(acc_in, fun, opts)
@impl true
def to_map(cache, opts), do: cache.__local__.to_map(opts)
@impl true
def pop(cache, key, opts), do: cache.__local__.pop(key, opts)
## Cluster operations
@impl true
def set(cache, key, value, opts) do
cache
|> exec_and_broadcast(:set, [key, value, opts])
end
@impl true
def update(cache, key, initial, fun, opts) do
cache
|> exec_and_broadcast(:update, [key, initial, fun, opts])
end
@impl true
def update_counter(cache, key, incr, opts) do
cache
|> exec_and_broadcast(:update_counter, [key, incr, opts])
end
@impl true
def get_and_update(cache, key, fun, opts) when is_function(fun, 1) do
cache
|> exec_and_broadcast(:get_and_update, [key, fun, opts])
end
@impl true
def delete(cache, key, opts), do: multicall(cache, :delete, [key, opts])
@impl true
def flush(cache), do: multicall(cache, :flush, [])
## Private Functions
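# Runs the operation on the local cache inside a transaction over the key,
# then broadcasts the locally computed value to the other nodes with a plain
# `:set` (include_local: false), so every replica converges on the same value.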
defp exec_and_broadcast(cache, operation, args = [key | _]) do
transaction(cache, [keys: [key], nodes: cache.nodes], fn ->
result = apply(cache.__local__, operation, args)
value =
case operation do
:get_and_update -> elem(result, 1)
_ -> result
end
set_args = [key, value, [{:on_conflict, :override}]]
_ = multicall(cache, :set, set_args, [include_local: false, use_transaction: false])
result
end)
end
defp multicall(cache, fun, args, opts \\ []) do
case Keyword.get(opts, :use_transaction, true) do
true -> multicall_transaction(cache, fun, args, opts)
false -> do_multicall(cache, fun, args, opts)
end
end
defp multicall_transaction(cache, fun, args, opts) do
trans_opts = [
keys: trans_keys(args),
nodes: multicall_nodes(cache, opts)
]
transaction(cache, trans_opts, fn ->
do_multicall(cache, fun, args, opts)
end)
end
defp trans_keys(_args = []), do: []
defp trans_keys(_args = [key | _]), do: [key]
defp do_multicall(cache, fun, args, opts) do
cache
|> multicall_nodes(opts)
|> :rpc.multicall(cache.__local__, fun, args, cache.__rpc_timeout__)
|> parse_multicall_result
end
defp multicall_nodes(cache, opts) do
case Keyword.get(opts, :include_local, true) do
true -> cache.nodes
false -> List.delete(cache.nodes, node())
end
end
defp parse_multicall_result({[], []}) do
nil
end
defp parse_multicall_result({results, []}) do
case badrpc_reasons(results) do
[{:EXIT, {remote_ex, _}} | _] -> raise remote_ex
[reason | _] -> {:error, {:badrpc, reason}}
[] -> hd(results)
end
end
defp parse_multicall_result({_, bad_nodes}) do
raise "bad nodes on multicall: #{bad_nodes}"
end
defp badrpc_reasons(results), do: for {:badrpc, reason} <- results, do: reason
end
| lib/nebulex_ext/adapters/replicated.ex | 0.841874 | 0.620047 | replicated.ex | starcoder |
defmodule Vault.Engine.KVV2 do
@moduledoc """
Get and put secrets using the v2 KV (versioned) secrets engine
See: [Vault Docs](https://www.vaultproject.io/api/secret/kv/kv-v2.html) for details.
"""
@behaviour Vault.Engine.Adapter
@type vault :: Vault.t()
@type path :: String.t()
@type version :: integer
@type token :: String.t()
@type value :: map()
@type errors :: list()
@type options :: list()
@doc """
Get a secret from vault. Optionally supply a version, otherwise gets latest
value.
## Examples
Fetch a value at a specific version, with the `:version` option.
```
{:ok, %{"foo" => "bar"}} = Vault.Engine.KVV2.read(vault, "secret/to/get, [version: 1])
{:ok, %{"bar" => "baz"}} = Vault.Engine.KVV2.read(vault, "secret/to/get, [version: 2])
```
Because of the nature of soft deletes, fetching soft-deleted secrets will return
an error.
```
{:error, ["Key not found"]} = Vault.Engine.KVV2.read(vault, "soft/deleted/secret", [version: 1])
```
However, if you wish to see the metadata or additional values, setting `full_response` to `true`
can return a soft-deleted key as a success.
```
{:ok, %{
"auth" => nil,
"data" => %{
"data" => nil,
"metadata" => %{
"created_time" => "2018-11-21T19:49:49.339727561Z",
"deletion_time" => "2018-11-21T19:49:49.353904424Z",
"destroyed" => false,
"version" => 1
}
},
"lease_duration" => 0,
"lease_id" => "",
"renewable" => false,
"request_id" => "e289ff31-609f-44fa-7161-55c63fda3d43",
"warnings" => nil,
"wrap_info" => nil
}
} = Vault.Engine.KVV2.read(vault, "soft/deleted/secret", [version: 1, full_response: true])
```
Options:
- `version: integer` - the version you want to return.
- `full_response: boolean` - get the whole response back on success, not just the data field
"""
@impl true
@spec read(vault, path, options) :: {:ok, value} | {:error, errors}
def read(vault, path, options \\ []) do
path = v2_data_path(path) <> with_version(options)
full_response = Keyword.get(options, :full_response, false)
# normalize nested response.
case Vault.Engine.Generic.read(vault, path, options) do
{:ok, %{} = data} when full_response == true ->
{:ok, data}
{:ok, %{"data" => nil}} ->
{:error, ["Key not found"]}
{:ok, %{"data" => data}} when full_response == false ->
{:ok, data}
otherwise ->
otherwise
end
end
@doc """
Put a secret in vault, on a given path.
## Examples
Write a new version:
```
{:ok, %{}} = Vault.Engine.Generic.write(vault, "path/to/write", %{ foo: "bar" })
```
Check and set - see [Vault Docs](https://www.vaultproject.io/api/secret/kv/kv-v2.html#create-update-secret)
for details
```
# write only if the value doesn't exist
{:ok, response } = Vault.Engine.Generic.write(vault, "path/to/write", %{ foo: "bar" }, [cas: 0])
# write only if the cas matches the current secret version
{:ok, response } = Vault.Engine.Generic.write(vault, "path/to/write", %{ foo: "bar" }, [cas: 1])
```
Get the full response body from vault:
```
{:ok, %{
"data" => %{
"created_time" => "2018-03-22T02:36:43.986212308Z",
"deletion_time" => "",
"destroyed" => false,
"version" => 1
},
}
} = Vault.Engine.Generic.write(vault, "path/to/write", %{ foo: "bar" }, [full_response: true])
```
### Options
- `cas: integer` set a check-and-set value
- `full_response: boolean` - get the whole response back on success, not just the data field
"""
@impl true
@spec write(vault, path, value, options) :: {:ok, map()} | {:error, errors}
def write(vault, path, value, options \\ []) do
value =
if cas = Keyword.get(options, :cas, false),
do: %{data: value, options: %{cas: cas}},
else: %{data: value}
Vault.Engine.Generic.write(vault, v2_data_path(path), value, options)
end
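# For illustration: write(vault, "secret/foo", %{foo: "bar"}, cas: 1) wraps the
# payload as %{data: %{foo: "bar"}, options: %{cas: 1}} and posts it to
# "secret/data/foo".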
@doc """
This endpoint returns a list of key names at the specified location. Folders are
suffixed with /. The input must be a folder; list on a file will not return a value.
## Examples
```
{:ok, %{
"keys"=> ["foo", "foo/"]
}
} = Vault.Engine.KVV2.list(vault, "path/to/list/")
```
With the full Response:
```
{:ok, %{
"data" => %{
"keys"=> ["foo", "foo/"]
},
}
} = Vault.Engine.KVV2.list(vault, "path/to/list/", [full_response: true])
```
"""
@impl true
@spec list(vault, path, options) :: {:ok, map()} | {:error, errors}
def list(vault, path, options \\ []) do
Vault.Engine.Generic.list(vault, v2_metadata_path(path), options)
end
@doc """
Soft or Hard Delete a versioned secret. Requires a list of versions to be removed. This request
produces an empty body, so an empty map is returned.
## Examples
Soft delete a version of a secret
```
{:ok, %{
"data" => nil,
"metadata" => %{
"created_time" => "2018-11-21T19:49:49.339727561Z",
"deletion_time" => "2018-11-21T19:49:49.353904424Z",
"destroyed" => false,
"version" => 5
}
}
} = Vault.Engine.KVV2.delete(vault, "path/to/delete", versions: [5], full_response: true)
```
Hard delete a secret
```
{:ok, %{
"data" => nil,
"metadata" => %{
"created_time" => "2018-11-21T19:49:49.339727561Z",
"deletion_time" => "2018-11-21T19:49:49.353904424Z",
"destroyed" => true,
"version" => 5
}
}
} = Vault.Engine.KVV2.delete(vault, "path/to/delete", versions: [5], destroy: true, full_response: true)
```
"""
@impl true
@spec delete(vault, path, options) :: {:ok, map()} | {:error, errors}
def delete(vault, path, options \\ []) do
{destroy, options} = Keyword.pop(options, :destroy)
{versions, options} = Keyword.pop(options, :versions)
path = if destroy, do: v2_destroy_path(path), else: v2_delete_path(path)
case versions do
value when is_list(value) ->
options = Keyword.merge([method: :post, body: %{versions: versions}], options)
Vault.Engine.Generic.delete(vault, path, options)
_otherwise ->
{:error, ["A list of versions is required"]}
end
end
defp v2_path(path, prefix) do
String.split(path, "/", parts: 2) |> Enum.join("/" <> prefix <> "/")
end
defp v2_data_path(path), do: v2_path(path, "data")
defp v2_metadata_path(path), do: v2_path(path, "metadata")
defp v2_delete_path(path), do: v2_path(path, "delete")
defp v2_destroy_path(path), do: v2_path(path, "destroy")
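# For illustration, the private path helpers rewrite paths like so:
#   v2_data_path("secret/foo/bar")  => "secret/data/foo/bar"
#   v2_metadata_path("secret/foo")  => "secret/metadata/foo"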
defp with_version([]), do: ""
defp with_version(options) do
case Keyword.get(options, :version) do
nil -> ""
version -> "?version=#{version}"
end
end
end
| lib/vault/engine/kv/v2.ex | 0.907094 | 0.784236 | v2.ex | starcoder |
defmodule LoadResource.Scope do
@moduledoc """
This module defines a scope that can be used in validating a resource.
A simple example: we have books and citations. When loading a citation, we want to validate that it belongs to a valid book -- that is, to add `citation.book_id = ${valid_book_id}` to our SQL query.
Scopes contain
* `column`: the column on the resource to check
And one of
* `scope_key`: an atom representing a value stored on `conn.assigns`
* `value`: a remote (e.g. NOT anonymous) function/1 that accepts `conn` and returns a value
The value returned by `value` / retrieved from conn.assigns can be either:
* a primitive (atom, string, number, or boolean), in which case it is used in the SQL query
* a map or struct containing an `:id` key, in which case the id value is used
Any other value will result in a `LoadResource.Scope.UnprocessableValueError` being raised.
"""
@enforce_keys [:column]
defstruct [:column, :value, :scope_key]
alias LoadResource.Scope
@doc """
A convenience method for creating scopes for earlier loaded resources.
`Scope.from_atom(:book)` is equivalent (though not identical) to writing:
```
%Scope{
column: :book_id,
value: fn conn -> conn.assigns[:book] end
}
```
In pseudo-SQL, that scope turns into `where book_id = {conn.assigns[:book].id}`.
(This assumes `conn.assigns[:book]` is an appropriate value, as it will be if it's a something previously loaded by `LoadResource.Plug`.)
"""
def from_atom(scope_key) when is_atom(scope_key) do
%Scope{
column: :"#{scope_key}_id",
# it would be nice to just pass in an anonymous function, but that doesn't actually work --
# when Elixir tries to serialize the value as part of plug setup, it chokes on the anonymous
# function
scope_key: scope_key
}
end
@doc """
Run a scope on a given `conn` object and return the value for use by `LoadResource.QueryBuilder`.
If needed, this method will transform the result of the `value` function into an appropriate value (for instance, from a map containing an `:id` key to the appropriate value).
Given this scope and an `identify_source_book_id/1` function that returns either `"foo"` or `%{id: "foo"}`:
```
scope = %Scope{column: :source_book_id, value: &identify_source_book_id/1}
```
`Scope.evaluate(scope, conn)` will return `"foo"`.
"""
def evaluate(%Scope{value: value}, conn) when is_function(value, 1) do
process_scope_value(value.(conn))
end
def evaluate(%Scope{scope_key: scope_key}, conn) when is_atom(scope_key) do
process_scope_value(conn.assigns[scope_key])
end
defp process_scope_value(value) when is_atom(value), do: value
defp process_scope_value(value) when is_bitstring(value), do: value
defp process_scope_value(value) when is_number(value), do: value
defp process_scope_value(value) when is_boolean(value), do: value
defp process_scope_value(%{id: id}), do: id
defp process_scope_value(value) do
raise Scope.UnprocessableValueError, value
end
end
| lib/load_resource/scope.ex | 0.876456 | 0.930521 | scope.ex | starcoder |
defmodule Etherscan.API.Logs do
@moduledoc """
Module to wrap Etherscan event log endpoints.
[Etherscan API Documentation](https://etherscan.io/apis#logs)
"""
use Etherscan.API
use Etherscan.Constants
alias Etherscan.Log
@operators ["and", "or"]
@get_logs_default_params %{
address: nil,
fromBlock: 0,
toBlock: "latest",
topic0: nil,
topic0_1_opr: nil,
topic1: nil,
topic1_2_opr: nil,
topic2: nil,
topic2_3_opr: nil,
topic3: nil
}
@doc """
Returns a list of valid topic operators for `get_logs/1`.
## Example
iex> Etherscan.API.Logs.operators()
#{@operators |> inspect()}
"""
@spec operators :: list(String.t())
def operators, do: @operators
@doc """
An alternative API to the native eth_getLogs.
See `operators/0` for all valid topic operators.
`params[fromBlock|toBlock]` can be a block number or the string `"latest"`.
Either the `address` or `topic(x)` params are required.
For API performance and security considerations, **only the first 1000 results
are returned.**
## Example
iex> params = %{
address: "#{@test_topic_address}", # Ethereum blockchain address
fromBlock: 0, # Start block number
toBlock: "latest", # End block number
topic0: "#{@test_topic_0}", # The first topic filter
topic0_1_opr: "and", # The topic operator between topic0 and topic1
topic1: "", # The second topic filter
topic1_2_opr: "and", # The topic operator between topic1 and topic2
topic2: "", # The third topic filter
topic2_3_opr: "and", # The topic operator between topic2 and topic3
topic3: "", # The fourth topic filter
}
iex> Etherscan.get_logs(params)
{:ok, [%Etherscan.Log{}]}
"""
@spec get_logs(params :: map(), network :: String.t()) :: {:ok, list(Log.t())} | {:error, atom()}
def get_logs(params, network \\ :default)
def get_logs(%{address: address}, _network) when not is_address(address), do: @error_invalid_address
def get_logs(%{fromBlock: from_block}, _network)
when not (is_integer(from_block) or from_block == "latest"),
do: @error_invalid_from_block
def get_logs(%{toBlock: to_block}, _network) when not (is_integer(to_block) or to_block == "latest"),
do: @error_invalid_to_block
def get_logs(%{topic0_1_opr: operator}, _network) when operator not in @operators,
do: @error_invalid_topic0_1_opr
def get_logs(%{topic1_2_opr: operator}, _network) when operator not in @operators,
do: @error_invalid_topic1_2_opr
def get_logs(%{topic2_3_opr: operator}, _network) when operator not in @operators,
do: @error_invalid_topic2_3_opr
def get_logs(params, network) when is_map(params) do
params = merge_params(params, @get_logs_default_params)
"logs"
|> get("getLogs", params, network)
|> parse(as: %{"result" => [%Log{}]})
|> wrap(:ok)
end
def get_logs(_, _), do: @error_invalid_params
end
| lib/etherscan/api/logs.ex | 0.855081 | 0.52409 | logs.ex | starcoder |
defprotocol Flop.Schema do
@moduledoc """
This protocol allows you to set query options in your Ecto schemas.
## Usage
Derive `Flop.Schema` in your Ecto schema and set the filterable and sortable
fields.
defmodule Flop.Pet do
use Ecto.Schema
@derive {
Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age]
}
schema "pets" do
field :name, :string
field :age, :integer
field :species, :string
end
end
After that, you can pass the module as the `:for` option to `Flop.validate/2`.
iex> Flop.validate(%Flop{order_by: [:name]}, for: Flop.Pet)
{:ok,
%Flop{
filters: [],
limit: nil,
offset: nil,
order_by: [:name],
order_directions: nil,
page: nil,
page_size: nil
}}
iex> {:error, %Flop.Meta{} = meta} = Flop.validate(
...> %Flop{order_by: [:species]}, for: Flop.Pet
...> )
iex> meta.params
%{"order_by" => [:species], "filters" => []}
iex> meta.errors
[
order_by: [
{"has an invalid entry",
[validation: :subset, enum: [:name, :age, :owner_name, :owner_age]]}
]
]
## Default and maximum limits
To define a default or maximum limit, you can set the `default_limit` and
`max_limit` option when deriving `Flop.Schema`. The maximum limit will be
validated and the default limit applied by `Flop.validate/1`.
@derive {
Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age],
max_limit: 100,
default_limit: 50
}
## Default sort order
To define a default sort order, you can set the `default_order_by` and
`default_order_directions` options when deriving `Flop.Schema`. The default
values are applied by `Flop.validate/1`. If no order directions are set,
`:asc` is assumed for all fields.
@derive {
Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age],
default_order_by: [:name, :age],
default_order_directions: [:asc, :desc]
}
## Restricting pagination types
By default, `page`/`page_size`, `offset`/`limit` and cursor-based pagination
(`first`/`after` and `last`/`before`) are enabled. If you want to restrict the
pagination type for a schema, you can do that by setting the
`pagination_types` option.
@derive {
Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age],
pagination_types: [:first, :last]
}
See also `t:Flop.option/0` and `t:Flop.pagination_type/0`. Setting the value
to `nil` allows all pagination types.
## Compound fields
Sometimes you might need to apply a search term to multiple fields at once,
e.g. you might want to search in both the family name and given name field.
You can do that with Flop by defining a compound field.
@derive {
Flop.Schema,
filterable: [:full_name],
sortable: [:full_name],
compound_fields: [full_name: [:family_name, :given_name]]
}
This allows you to use the field name `:full_name` as any other field in the
filter and order parameters.
### Filtering
params = %{
filters: [%{
field: :full_name,
op: :==,
value: "margo"
}]
}
This would translate to:
WHERE family_name = 'margo' OR given_name = 'margo'
Partial matches and splitting of the search term can be achieved with one of
the ilike operators.
params = %{
filters: [%{
field: :full_name,
op: :ilike_and,
value: "<NAME>"
}]
}
This would translate to:
WHERE (family_name ilike '%margo%' OR given_name ilike '%margo%')
AND (family_name ilike '%martindale%' OR given_name ilike '%martindale%')
### Filter operator rules
- `:=~`, `:like`, `:like_and`, `:like_or`, `:ilike`, `:ilike_and`,
`:ilike_or` - The filter value is split at whitespace characters as usual.
The filter matches for a value if it matches for any of the fields.
- `:empty` - Matches if all fields of the compound field are `nil`.
- `:not_empty` - Matches if any field of the compound field is not `nil`.
- `:==`, `:!=`, `:<=`, `:<`, `:>=`, `:>`, `:in`, `:contains` - The filter
value is normalized by splitting the string at whitespaces and joining it
with a space. The values of all fields of the compound field are split by
whitespace character and joined with a space, and the resulting values are
joined with a space again. **This will be added in a future version. These
filter operators are ignored for compound fields at the moment.**
### Sorting
params = %{
order_by: [:full_name],
order_directions: [:desc]
}
This would translate to:
ORDER BY family_name DESC, given_name DESC
Note that compound fields cannot be used as pagination cursors.
## Join fields
If you need to filter or order across tables, you can define join fields.
As an example, let's define these schemas:
schema "owners" do
field :name, :string
field :email, :string
has_many :pets, Pet
end
schema "pets" do
field :name, :string
field :species, :string
belongs_to :owner, Owner
end
And now we want to find all owners that have pets of the species
`"E. africanus"`. To do this, first we need to define a join field on the
`Owner` schema.
@derive {
Flop.Schema,
filterable: [:pet_species],
sortable: [:pet_species],
join_fields: [pet_species: [binding: :pets, field: :species]]
}
In this case, `:pet_species` would be the alias of the field that you can
refer to in the filter and order parameters. The `:binding` option refers to
the named binding you set with the `:as` option in the join statement of your
query. `:field` is the field name on that binding.
In order to retrieve the pagination cursor value for a join field, Flop needs
to know how to get the field value from the struct that is returned from the
database. `Flop.Schema.get_field/2` is used for that. By default, Flop assumes
that the binding name matches the name of the field for the association in
your Ecto schema (the one you set with `has_one`, `has_many` or `belongs_to`).
In the example above, Flop would try to access the field in the struct under
the path `[:pets, :species]`.
If you have joins across multiple tables, or if you can't give the binding
the same name as the association field, you can specify the path explicitly.
@derive {
Flop.Schema,
filterable: [:pet_species],
sortable: [:pet_species],
join_fields: [
pet_species: [
binding: :pets,
field: :species,
path: [:pets, :species]
]
}
After setting up the join fields, you can write a query like this:
params = %{
filters: [%{field: :pet_species, op: :==, value: "E. africanus"}]
}
Owner
|> join(:left, [o], p in assoc(o, :pets), as: :pets)
|> preload([pets: p], [pets: p])
|> Flop.validate_and_run!(params, for: Owner)
If your query returns data in a different format, you don't need to set the
`:path` option. Instead, you can pass a custom cursor value function in the
options. See `Flop.Cursor.get_cursors/2` and `t:Flop.option/0`.
Note that Flop doesn't create the join clauses for you. The named bindings
already have to be present in the query you pass to the Flop functions.
"""
@fallback_to_any true
@doc """
Returns the field type in a schema.
- `{:normal, atom}` - An ordinary field on the schema. The second tuple
element is the field name.
- `{:compound, [atom]}` - A combination of fields defined with the
`compound_fields` option. The list of atoms refers to the list of fields
that are included.
- `{:join, map}` - A field from a named binding as defined with the
`join_fields` option. The map has keys for the `:binding`, `:field` and
`:path`.
## Examples
iex> field_type(%Flop.Pet{}, :age)
{:normal, :age}
iex> field_type(%Flop.Pet{}, :full_name)
{:compound, [:family_name, :given_name]}
iex> field_type(%Flop.Pet{}, :owner_name)
{:join, %{binding: :owner, field: :name, path: [:owner, :name]}}
"""
@doc since: "0.11.0"
@spec field_type(any, atom) ::
{:normal, atom} | {:compound, [atom]} | {:join, map}
def field_type(data, field)
@doc """
Returns the filterable fields of a schema.
iex> Flop.Schema.filterable(%Flop.Pet{})
[
:age,
:full_name,
:name,
:owner_age,
:owner_name,
:owner_tags,
:pet_and_owner_name,
:species,
:tags
]
"""
@spec filterable(any) :: [atom]
def filterable(data)
@doc false
@spec apply_order_by(any, Ecto.Query.t(), tuple | keyword) :: Ecto.Query.t()
def apply_order_by(data, q, expr)
@doc false
@spec cursor_dynamic(any, keyword, map) :: any
def cursor_dynamic(data, order, cursor_map)
@doc """
Gets the field value from a struct.
Resolves join fields and compound fields according to the config.
# join_fields: [owner_name: {:owner, :name}]
iex> pet = %Flop.Pet{name: "George", owner: %Flop.Owner{name: "Carl"}}
iex> Flop.Schema.get_field(pet, :name)
"George"
iex> Flop.Schema.get_field(pet, :owner_name)
"Carl"
# compound_fields: [full_name: [:family_name, :given_name]]
iex> pet = %Flop.Pet{given_name: "George", family_name: "Gooney"}
iex> Flop.Schema.get_field(pet, :full_name)
"<NAME>"
For join fields, this function relies on the binding name in the schema config
matching the field name for the association in the struct.
"""
@doc since: "0.13.0"
@spec get_field(any, atom) :: any
def get_field(data, field)
@doc """
Returns the allowed pagination types of a schema.
iex> Flop.Schema.pagination_types(%Flop.Fruit{})
[:first, :last, :offset]
"""
@doc since: "0.9.0"
@spec pagination_types(any) :: [Flop.pagination_type()] | nil
def pagination_types(data)
@doc """
Returns the sortable fields of a schema.
iex> Flop.Schema.sortable(%Flop.Pet{})
[:name, :age, :owner_name, :owner_age]
"""
@spec sortable(any) :: [atom]
def sortable(data)
@doc """
Returns the default limit of a schema.
iex> Flop.Schema.default_limit(%Flop.Fruit{})
50
"""
@doc since: "0.3.0"
@spec default_limit(any) :: pos_integer | nil
def default_limit(data)
@doc """
Returns the default order of a schema.
iex> Flop.Schema.default_order(%Flop.Fruit{})
%{order_by: [:name], order_directions: [:asc]}
"""
@doc since: "0.7.0"
@spec default_order(any) :: %{
order_by: [atom] | nil,
order_directions: [Flop.order_direction()] | nil
}
def default_order(data)
@doc """
Returns the maximum limit of a schema.
iex> Flop.Schema.max_limit(%Flop.Pet{})
1000
"""
@doc since: "0.2.0"
@spec max_limit(any) :: pos_integer | nil
def max_limit(data)
end
defimpl Flop.Schema, for: Any do
@instructions """
Flop.Schema protocol must always be explicitly implemented.
To do this, you have to derive Flop.Schema in your Ecto schema module. You
have to set both the filterable and the sortable option.
@derive {
Flop.Schema,
filterable: [:name, :species],
sortable: [:name, :age, :species]
}
schema "pets" do
field :name, :string
field :age, :integer
field :species, :string
end
"""
# credo:disable-for-next-line
defmacro __deriving__(module, _struct, options) do
filterable_fields = Keyword.get(options, :filterable)
sortable_fields = Keyword.get(options, :sortable)
if is_nil(filterable_fields) || is_nil(sortable_fields),
do: raise(ArgumentError, @instructions)
default_limit = Keyword.get(options, :default_limit)
max_limit = Keyword.get(options, :max_limit)
pagination_types = Keyword.get(options, :pagination_types)
default_order = %{
order_by: Keyword.get(options, :default_order_by),
order_directions: Keyword.get(options, :default_order_directions)
}
compound_fields = Keyword.get(options, :compound_fields, [])
join_fields =
options
|> Keyword.get(:join_fields, [])
|> Enum.map(&normalize_join_opts/1)
field_type_func = build_field_type_func(compound_fields, join_fields)
order_by_func = build_order_by_func(compound_fields, join_fields)
get_field_func = build_get_field_func(compound_fields, join_fields)
cursor_dynamic_func_compound =
build_cursor_dynamic_func_compound(compound_fields)
cursor_dynamic_func_join = build_cursor_dynamic_func_join(join_fields)
cursor_dynamic_func_normal = build_cursor_dynamic_func_normal()
quote do
defimpl Flop.Schema, for: unquote(module) do
import Ecto.Query
require Logger
def default_limit(_) do
unquote(default_limit)
end
def default_order(_) do
unquote(Macro.escape(default_order))
end
unquote(field_type_func)
unquote(order_by_func)
unquote(get_field_func)
def filterable(_) do
unquote(filterable_fields)
end
def max_limit(_) do
unquote(max_limit)
end
def pagination_types(_) do
unquote(pagination_types)
end
def sortable(_) do
unquote(sortable_fields)
end
def cursor_dynamic(_, [], _), do: true
unquote(cursor_dynamic_func_compound)
unquote(cursor_dynamic_func_join)
unquote(cursor_dynamic_func_normal)
end
end
end
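# For illustration, both accepted forms normalize to the same map:
#   normalize_join_opts({:owner_name, {:owner, :name}})
#   normalize_join_opts({:owner_name, [binding: :owner, field: :name]})
#   # => {:owner_name, %{binding: :owner, field: :name, path: [:owner, :name]}}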
def normalize_join_opts({name, opts}) do
opts =
case opts do
{binding, field} ->
%{binding: binding, field: field, path: [binding, field]}
opts when is_list(opts) ->
binding = Keyword.fetch!(opts, :binding)
field = Keyword.fetch!(opts, :field)
%{
binding: binding,
field: field,
path: opts[:path] || [binding, field]
}
end
{name, opts}
end
def build_field_type_func(compound_fields, join_fields) do
compound_field_funcs =
for {name, fields} <- compound_fields do
quote do
def field_type(_, unquote(name)) do
{:compound, unquote(fields)}
end
end
end
join_field_funcs =
for {name, opts} <- join_fields do
quote do
def field_type(_, unquote(name)) do
{:join, unquote(Macro.escape(opts))}
end
end
end
default_funcs =
quote do
def field_type(_, name) do
{:normal, name}
end
end
[compound_field_funcs, join_field_funcs, default_funcs]
end
def build_cursor_dynamic_func_compound(compound_fields) do
for {compound_field, _fields} <- compound_fields do
quote do
def cursor_dynamic(_, [{_, unquote(compound_field)}], _) do
Logger.warn(
"Flop: Cursor pagination is not supported for compound fields. Ignored."
)
true
end
def cursor_dynamic(
struct,
[{_, unquote(compound_field)} | tail],
cursor
) do
Logger.warn(
"Flop: Cursor pagination is not supported for compound fields. Ignored."
)
cursor_dynamic(struct, tail, cursor)
end
end
end
end
# credo:disable-for-next-line
def build_cursor_dynamic_func_join(join_fields) do
for {join_field, %{binding: binding, field: field}} <- join_fields do
bindings = Code.string_to_quoted!("[#{binding}: r]")
quote do
def cursor_dynamic(_, [{direction, unquote(join_field)}], cursor)
when direction in [:asc, :asc_nulls_first, :asc_nulls_last] do
field_cursor = cursor[unquote(join_field)]
if is_nil(field_cursor) do
true
else
dynamic(
unquote(bindings),
field(r, unquote(field)) > ^field_cursor
)
end
end
def cursor_dynamic(_, [{direction, unquote(join_field)}], cursor)
when direction in [:desc, :desc_nulls_first, :desc_nulls_last] do
field_cursor = cursor[unquote(join_field)]
if is_nil(field_cursor) do
true
else
dynamic(
unquote(bindings),
field(r, unquote(field)) < ^field_cursor
)
end
end
def cursor_dynamic(
struct,
[{direction, unquote(join_field)} | [{_, _} | _] = tail],
cursor
) do
field_cursor = cursor[unquote(join_field)]
if is_nil(field_cursor) do
cursor_dynamic(struct, tail, cursor)
else
case direction do
dir when dir in [:asc, :asc_nulls_first, :asc_nulls_last] ->
dynamic(
unquote(bindings),
field(r, unquote(field)) >= ^field_cursor and
(field(r, unquote(field)) > ^field_cursor or
^cursor_dynamic(struct, tail, cursor))
)
dir when dir in [:desc, :desc_nulls_first, :desc_nulls_last] ->
dynamic(
unquote(bindings),
field(r, unquote(field)) <= ^field_cursor and
(field(r, unquote(field)) < ^field_cursor or
^cursor_dynamic(struct, tail, cursor))
)
end
end
end
end
end
end
# credo:disable-for-next-line
def build_cursor_dynamic_func_normal do
quote do
def cursor_dynamic(_, [{direction, field}], cursor) do
field_cursor = cursor[field]
if is_nil(field_cursor) do
true
else
case direction do
dir when dir in [:asc, :asc_nulls_first, :asc_nulls_last] ->
dynamic([r], field(r, ^field) > ^cursor[field])
dir when dir in [:desc, :desc_nulls_first, :desc_nulls_last] ->
dynamic([r], field(r, ^field) < ^cursor[field])
end
end
end
def cursor_dynamic(
struct,
[{direction, field} | [{_, _} | _] = tail],
cursor
) do
field_cursor = cursor[field]
if is_nil(field_cursor) do
Flop.Schema.cursor_dynamic(struct, tail, cursor)
else
case direction do
dir when dir in [:asc, :asc_nulls_first, :asc_nulls_last] ->
dynamic(
[r],
field(r, ^field) >= ^field_cursor and
(field(r, ^field) > ^field_cursor or
^Flop.Schema.cursor_dynamic(struct, tail, cursor))
)
dir when dir in [:desc, :desc_nulls_first, :desc_nulls_last] ->
dynamic(
[r],
field(r, ^field) <= ^field_cursor and
(field(r, ^field) < ^field_cursor or
^Flop.Schema.cursor_dynamic(struct, tail, cursor))
)
end
end
end
end
end
def build_order_by_func(compound_fields, join_fields) do
compound_field_funcs =
for {name, fields} <- compound_fields do
quote do
def apply_order_by(struct, q, {direction, unquote(name)}) do
Enum.reduce(unquote(fields), q, fn field, acc_q ->
Flop.Schema.apply_order_by(struct, acc_q, {direction, field})
end)
end
end
end
join_field_funcs =
for {join_field, %{binding: binding, field: field}} <- join_fields do
bindings = Code.string_to_quoted!("[#{binding}: r]")
quote do
def apply_order_by(_struct, q, {direction, unquote(join_field)}) do
order_by(
q,
unquote(bindings),
[{^direction, field(r, unquote(field))}]
)
end
end
end
normal_field_func =
quote do
def apply_order_by(_struct, q, direction) do
order_by(q, ^direction)
end
end
[compound_field_funcs, join_field_funcs, normal_field_func]
end
def build_get_field_func(compound_fields, join_fields) do
compound_field_funcs =
for {name, fields} <- compound_fields do
quote do
def get_field(struct, unquote(name)) do
Enum.map_join(
unquote(fields),
" ",
&Flop.Schema.get_field(struct, &1)
)
end
end
end
join_field_funcs =
for {name, %{path: path}} <- join_fields do
quote do
def get_field(struct, unquote(name)) do
Enum.reduce(unquote(path), struct, fn field, acc ->
case acc do
%{} -> Map.get(acc, field)
_ -> nil
end
end)
# assoc = Map.get(struct, unquote(assoc_field)) || %{}
# Map.get(assoc, unquote(field))
end
end
end
fallback_func =
quote do
def get_field(struct, field), do: Map.get(struct, field)
end
[compound_field_funcs, join_field_funcs, fallback_func]
end
function_names = [
:default_limit,
:default_order,
:filterable,
:max_limit,
:pagination_types,
:sortable
]
for function_name <- function_names do
def unquote(function_name)(struct) do
raise Protocol.UndefinedError,
protocol: @protocol,
value: struct,
description: @instructions
end
end
def field_type(struct, _) do
raise Protocol.UndefinedError,
protocol: @protocol,
value: struct,
description: @instructions
end
def apply_order_by(struct, _, _) do
raise Protocol.UndefinedError,
protocol: @protocol,
value: struct,
description: @instructions
end
def cursor_dynamic(struct, _, _) do
raise Protocol.UndefinedError,
protocol: @protocol,
value: struct,
description: @instructions
end
# add default implementation for maps, so that cursor value functions can use
# it without checking protocol implementation
def get_field(%{} = map, field), do: Map.get(map, field)
def get_field(thing, _) do
raise Protocol.UndefinedError,
protocol: @protocol,
value: thing,
description: @instructions
end
end
| lib/flop/schema.ex | 0.826607 | 0.703069 | schema.ex | starcoder |
defmodule Geohash do
@moduledoc ~S"""
Geohash encode/decode and helper functions
## Usage
- Encode coordinates with `Geohash.encode(lat, lon, precision \\ 11)`
```
Geohash.encode(42.6, -5.6, 5)
# "ezs42"
```
- Decode coordinates with `Geohash.decode(geohash)`
```
Geohash.decode("ezs42")
# {42.605, -5.603}
```
- Find neighbors with `Geohash.neighbors(geohash)`
```
Geohash.neighbors("abx1")
# %{"n" => "abx4",
# "s" => "abx0",
# "e" => "abx3",
# "w" => "abwc",
# "ne" => "abx6",
# "se" => "abx2",
# "nw" => "abwf",
# "sw" => "abwb"}
```
- Find adjacent with `Geohash.adjacent(geohash, direction)`
```
Geohash.adjacent("abx1", "n")
# "abx4"
```
"""
import Geohash.Helpers
@geobase32 List.to_tuple('0123456789bcdefghjkmnpqrstuvwxyz')
@geobase32_index prepare_indexed('0123456789bcdefghjkmnpqrstuvwxyz')
@doc ~S"""
Encodes given coordinates to a geohash of length `precision`
## Examples
```
iex> Geohash.encode(42.6, -5.6, 5)
"ezs42"
```
"""
def encode(lat, lon, precision \\ 11) do
bits = encode_to_bits(lat, lon, precision * 5)
to_geobase32(bits)
end
@doc ~S"""
Encodes given coordinates to a bitstring of length `bits_length`
## Examples
```
iex> Geohash.encode_to_bits(42.6, -5.6, 25)
<<0b0110111111110000010000010::25>>
```
"""
def encode_to_bits(lat, lon, bits_length) do
starting_position = bits_length - 1
# odd bits
lat_bits = lat_to_bits(lat, starting_position - 1)
# even bits
lon_bits = lon_to_bits(lon, starting_position)
geo_bits = lat_bits + lon_bits
<<geo_bits::size(bits_length)>>
end
def to_geobase32(bits) do
chars = for <<c::5 <- bits>>, do: elem(@geobase32, c)
chars |> to_string
end
defp lon_to_bits(lon, position) do
geo_to_bits(lon, position, {-180.0, 180.0})
end
defp lat_to_bits(lat, position) do
geo_to_bits(lat, position, {-90.0, 90.0})
end
defp geo_to_bits(_, position, _) when position < 0 do
0
end
# Encodes a given lat or lon into bits, using 2^position to set each bit
# instead of building a bitstring.
# It steps by 2 so the bits land directly on odd or even positions.
defp geo_to_bits(n, position, {gmin, gmax}) do
mid = (gmin + gmax) / 2
if n >= mid do
round(:math.pow(2, position)) + geo_to_bits(n, position - 2, {mid, gmax})
else
geo_to_bits(n, position - 2, {gmin, mid})
end
end
# --------------------------
@doc ~S"""
Decodes given geohash to a coordinate pair
## Examples
```
iex> {_lat, _lng} = Geohash.decode("ezs42")
{42.605, -5.603}
```
"""
def decode(geohash) do
geohash
|> decode_to_bits
|> bits_to_coordinates_pair
end
@doc ~S"""
Calculates bounds for a given geohash
## Examples
```
iex> Geohash.bounds("u4pruydqqv")
%{
min_lon: 10.407432317733765,
min_lat: 57.649109959602356,
max_lon: 10.407443046569824,
max_lat: 57.649115324020386
}
```
"""
def bounds(geohash) do
geohash
|> decode_to_bits
|> bits_to_bounds
end
@doc ~S"""
Decodes given geohash to a bitstring
## Examples
```
iex> Geohash.decode_to_bits("ezs42")
<<0b0110111111110000010000010::25>>
```
"""
def decode_to_bits(geohash) do
geohash
|> to_charlist
|> Enum.map(&from_geobase32/1)
|> Enum.reduce(<<>>, fn c, acc -> <<acc::bitstring, c::bitstring>> end)
end
def bits_to_coordinates_pair(bits) do
bitslist = for <<bit::1 <- bits>>, do: bit
lat =
bitslist
|> min_max_lat
|> rounded_middle
lon =
bitslist
|> min_max_lon
|> rounded_middle
{lat, lon}
end
defp bits_to_bounds(bits) do
bitslist = for <<bit::1 <- bits>>, do: bit
{min_lat, max_lat} = min_max_lat(bitslist)
{min_lon, max_lon} = min_max_lon(bitslist)
%{min_lon: min_lon, min_lat: min_lat, max_lon: max_lon, max_lat: max_lat}
end
defp min_max_lat(bitlist) do
bitlist
|> filter_odd
|> bits_to_coordinate({-90.0, 90.0})
end
defp min_max_lon(bitlist) do
bitlist
|> filter_even
|> bits_to_coordinate({-180.0, 180.0})
end
@neighbor %{
"n" => {'p0r21436x8zb9dcf5h7kjnmqesgutwvy', 'bc01fg45238967deuvhjyznpkmstqrwx'},
"s" => {'14365h7k9dcfesgujnmqp0r2twvyx8zb', '238967debc01fg45kmstqrwxuvhjyznp'},
"e" => {'bc01fg45238967deuvhjyznpkmstqrwx', 'p0r21436x8zb9dcf5h7kjnmqesgutwvy'},
"w" => {'238967debc01fg45kmstqrwxuvhjyznp', '14365h7k9dcfesgujnmqp0r2twvyx8zb'}
} |> prepare_directions
@border %{
"n" => {'prxz', 'bcfguvyz'},
"s" => {'028b', '0145hjnp'},
"e" => {'bcfguvyz', 'prxz'},
"w" => {'0145hjnp', '028b'}
} |> prepare_directions
defp border_case(direction, type, tail) do
@border[direction]
|> elem(type)
|> Map.get(tail)
end
@doc ~S"""
Calculate `adjacent/2` geohash in ordinal direction `["n","s","e","w"]`.
Deals with boundary cases when adjacent is not of the same prefix.
## Examples
```
iex> Geohash.adjacent("abx1","n")
"abx4"
```
"""
def adjacent(geohash, direction) when direction in ["n", "s", "w", "e"] do
prefix_len = byte_size(geohash) - 1
# parent will be a string of the prefix, last_ch will be an int of last char
<<parent::binary-size(prefix_len), last_ch::size(8)>> = geohash
type = rem(prefix_len + 1, 2)
# check for edge-cases which don't share common prefix
parent =
if border_case(direction, type, last_ch) && prefix_len > 0 do
adjacent(parent, direction)
else
parent
end
# append letter for direction to parent
# look up index of last char use as position in base32
pos =
@neighbor[direction]
|> elem(type)
|> Map.get(last_ch)
q = [elem(@geobase32, pos)]
parent <> to_string(q)
end
@doc ~S"""
Calculate adjacent hashes for the 8 touching `neighbors/1`
## Examples
```
iex> Geohash.neighbors("abx1")
%{"n" => "abx4",
"s" => "abx0",
"e" => "abx3",
"w" => "abwc",
"ne" => "abx6",
"se" => "abx2",
"nw" => "abwf",
"sw" => "abwb"}
```
"""
def neighbors(geohash) do
n = adjacent(geohash, "n")
s = adjacent(geohash, "s")
%{
"n" => n,
"s" => s,
"e" => adjacent(geohash, "e"),
"w" => adjacent(geohash, "w"),
"ne" => adjacent(n, "e"),
"se" => adjacent(s, "e"),
"nw" => adjacent(n, "w"),
"sw" => adjacent(s, "w")
}
end
def filter_even(bitlists) do
{acc, _even?} =
Enum.reduce(bitlists, {<<>>, true},
fn _bit, {acc, false = _even?} -> {acc, true}
bit, {acc, true = _even?} -> {<<acc::bitstring, bit::1>>, false}
end)
acc
end
def filter_odd(bitlists) do
{acc, _even?} =
Enum.reduce(bitlists, {<<>>, true},
fn bit, {acc, false = _even?} -> {<<acc::bitstring, bit::1>>, true}
_bit, {acc, true = _even?} -> {acc, false}
end)
acc
end
defp middle({min, max}) do
middle(min, max)
end
defp middle(min, max) do
(min + max) / 2
end
defp rounded_middle(min_max) do
min_max
|> middle
|> round_coordinate(min_max)
end
defp bits_to_coordinate(<<>>, min_max) do
min_max
end
defp bits_to_coordinate(bits, {min, max}) do
<<bit::1, rest::bitstring>> = bits
mid = middle(min, max)
{start, finish} =
case bit do
1 -> {mid, max}
0 -> {min, mid}
end
bits_to_coordinate(rest, {start, finish})
end
# Rounding criteria taken from:
# https://github.com/chrisveness/latlon-geohash/blob/decb13b09a7f1e219a2ca86ff8432fb9e2774fc7/latlon-geohash.js#L117
# See demo of that implementation here:
# http://www.movable-type.co.uk/scripts/geohash.html
defp round_coordinate(coord, {min, max}) do
Float.round(coord, round(Float.floor(2 - :math.log10(max - min))))
end
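# For illustration (assuming prepare_indexed/1 maps each char to its position
# in the alphabet): from_geobase32(?e) => <<13::5>>, since "e" sits at index 13.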
defp from_geobase32(char) do
%{^char => i} = @geobase32_index
<<i::5>>
end
end
|
lib/geohash.ex
| 0.908341
| 0.882225
|
geohash.ex
|
starcoder
|
defmodule TelemetryMetricsRiemann do
@moduledoc """
`Telemetry.Metrics` reporter for riemann-compatible metric servers.
To use it, start the reporter with the `start_link/1` function, providing it a list of
`Telemetry.Metrics` metric definitions:
import Telemetry.Metrics
TelemetryMetricsRiemann.start_link(
metrics: [
counter("http.request.count"),
sum("http.request.payload_size"),
last_value("vm.memory.total")
]
)
> Note that in a real project the reporter should be started under a supervisor, e.g. the main
> supervisor of your application.
By default the reporter formats and delegates the metrics to a riemann client which will send the metrics
to a configured riemann server.
Note that the reporter doesn't aggregate metrics in-process - it sends metric updates to riemann
whenever a relevant Telemetry event is emitted.
## Translation between Telemetry.Metrics and riemann
In this section we walk through how the Telemetry.Metrics metric definitions are mapped to riemann metrics
and their types at runtime.
Telemetry.Metrics names are translated as follows:
* if the event name was provided as a string, e.g. "http.request.count",
it is sent to riemann server as-is (using the `service` field)
* if the event name was provided as a list of atoms, e.g. [:http, :request, :count],
it is first converted to a string by joining the segments with dots.
In this example, the riemann `service` name would be "http.request.count" as well
If the metric has tags, they are sent to riemann as `tags`.
If the metric has tag values, they are converted to riemann `attributes` fields.
Also, the following attributes, if present, are used to fill the riemann protocol's default fields and are consequently removed from the attributes:
* `host`, a hostname
* `state`, any string which represents a state "ok", "critical", "online"
* `ttl`, a floating-point-time, in seconds, that this event is considered valid for
* `time`, the time of the event, in unix epoch time
* `time_micros`, the time of the event, in microseconds
All metric values from the Telemetry.Metrics types are converted to the riemann `metric` field; there are no special
conversion rules. The riemann server, depending on how it is configured, plays an important role in converting/aggregating each
metric sent by TelemetryMetricsRiemann. This reporter acts only as a bridge to the [riemann protocol](http://riemann.io/concepts.html).
The following table shows how `Telemetry.Metrics` metrics map riemann metrics:
| Telemetry.Metrics | riemann |
|-------------------|--------|
| `last_value` | `metric` field, always set to an absolute value |
| `counter` | `metric` field, always increased by 1 |
| `sum` | `metric` field, increased and decreased by the provided value |
| `summary` | `metric` field recording individual measurement |
| `histogram` | `metric` field recording individual measurement |
### Counter
Telemetry.Metrics counter is simply represented as a riemann `metric`.
Each event the metric is based on increments the counter by 1.
Example, given the metric definition:
counter("http.request.count")
and the event
:telemetry.execute([:http, :request], %{duration: 120})
the following riemann event would be sent to riemann server
[service: "http.request.count", metric: 1]
### Last value
Last value metric is represented as a riemann `metric` value,
whose values are always set to the value of the measurement from the most recent event.
Example, given the metric definition:
last_value("vm.memory.total")
and the event
:telemetry.execute([:vm, :memory], %{total: 1024})
the following riemann event would be sent to riemann server
[service: "vm.memory.total", metric: 1024]
### Sum
Sum metric is also represented as a riemann `metric` value - the difference is that it always changes relatively and is never set to an absolute value.
Example, given the metric definition:
sum("http.request.payload_size")
and the event
:telemetry.execute([:http, :request], %{payload_size: 1076})
the following riemann event would be sent to riemann server
[service: "http.request.payload_size", metric: +1076]
When the measurement is negative, the riemann metric is decreased accordingly.
### Summary
Summary metric is also represented as a riemann `metric` value - the difference is that each event produces a riemann event recording the individual measurement.
Example, given the metric definition:
summary("http.request.duration")
and the event
:telemetry.execute([:http, :request], %{duration: 120})
the following riemann event would be sent to riemann server
[service: "http.request.duration", metric: 120]
### Distribution
There is no distribution metric type in riemann equivalent to Telemetry.Metrics distribution.
However, a distribution metric is also represented as a riemann `metric` value.
Example, given the metric definition:
distribution("http.request.duration", buckets: [0])
and the event
:telemetry.execute([:http, :request], %{duration: 120})
the following riemann event would be sent to riemann server
[service: "http.request.duration", metric: 120]
Since histograms are configured on the riemann server side (for example using [riemann folds](https://riemann.io/api/riemann.folds.html)),
the `:buckets` option has no effect when used with this reporter.
## Prefixing metric names
Sometimes it's convenient to prefix all metric names with particular value, to group them by the name of the service,
the host, or something else. You can use `:prefix` option to provide a prefix which will be
prepended to all metrics published by the reporter.
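For example (mirroring the `start_link/1` options documented below):

    TelemetryMetricsRiemann.start_link(
      metrics: [counter("http.request.count")],
      prefix: "my-service",
      client: TelemetryMetricsRiemann.Riemannx
    )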
"""
use GenServer
alias Telemetry.Metrics
alias TelemetryMetricsRiemann.EventHandler
@type option ::
{:client, module()}
| {:metrics, [Metrics.t()]}
| {:prefix, String.t()}
@type options :: [option]
@doc """
Reporter's child spec.
This function allows you to start the reporter under a supervisor like this:
children = [
{TelemetryMetricsRiemann, options}
]
See `start_link/1` for a list of available options.
"""
@spec child_spec(options) :: Supervisor.child_spec()
def child_spec(options) do
%{id: __MODULE__, start: {__MODULE__, :start_link, [options]}}
end
@doc """
Starts a reporter and links it to the calling process.
The available options are:
* `:metrics` - a list of Telemetry.Metrics metric definitions which will be published by the
reporter
* `:client` - the module that implements riemann client interface: `TelemetryMetricsRiemann.Riemannx` or
`TelemetryMetricsRiemann.Katja`
* `:prefix` - a prefix prepended to the name of each metric published by the reporter. Defaults
to `nil`.
* `:host` - the hostname reported with each riemann event. Defaults to the local hostname
as returned by `:inet.gethostname/0`.
You can read more about all the options in the `TelemetryMetricsRiemann` module documentation.
## Example
import Telemetry.Metrics
TelemetryMetricsRiemann.start_link(
metrics: [
counter("http.request.count"),
sum("http.request.payload_size"),
last_value("vm.memory.total")
],
prefix: "my-service",
client: TelemetryMetricsRiemann.Riemannx
)
"""
@spec start_link(options) :: GenServer.on_start()
def start_link(options) do
GenServer.start_link(__MODULE__, options)
end
@doc false
@spec client_error(pid(), reason :: term) :: :ok
def client_error(reporter, reason) do
GenServer.cast(reporter, {:client_error, reason})
end
@impl true
def init(options) do
host = options |> Keyword.get(:host) |> maybe_get_hostname()
metrics = Keyword.fetch!(options, :metrics)
client = Keyword.get(options, :client)
prefix = Keyword.get(options, :prefix)
Process.flag(:trap_exit, true)
handler_ids = EventHandler.attach(metrics, self(), client, prefix, host)
{:ok, %{handler_ids: handler_ids, client: client}}
end
@impl true
def handle_cast({:client_error, _reason} = msg, state) do
{:stop, msg, state}
end
@impl true
def handle_info({:EXIT, _pid, reason}, state) do
{:stop, reason, state}
end
@impl true
def terminate(_reason, state) do
EventHandler.detach(state.handler_ids)
:ok
end
defp maybe_get_hostname(nil) do
:inet.gethostname() |> elem(1) |> List.to_string()
end
defp maybe_get_hostname(host) when is_list(host) do
List.to_string(host)
end
defp maybe_get_hostname(host) when is_binary(host) do
host
end
defp maybe_get_hostname(host) do
raise ArgumentError, message: "Invalid host: #{inspect(host)}"
end
end
|
lib/telemetry_metrics_riemann.ex
| 0.949972
| 0.656878
|
telemetry_metrics_riemann.ex
|
starcoder
|
defmodule Checkov do
@moduledoc """
Checkov aims to emulate the data driven testing functionality of the [Spock Framework](http://spockframework.org/)
A where block can be used in a data_test to exercise the assertions of the test multiple times.
```
defmodule MyModuleTest do
use ExUnit.Case
import Checkov
data_test "\#{a} + \#{b} == \#{result}" do
assert a + b == result
where [
[:a, :b, :result],
[1, 2, 3],
[4, 5, 9],
[1.2, 3.4, 4.6],
]
end
end
```
This will create and run three tests.
```
MyModuleTest
* test 4 + 5 == 9 (0.00ms)
* test 1 + 2 == 3 (0.00ms)
* test 1.2 + 3.4 == 4.6 (0.00ms)
Finished in 0.03 seconds
3 tests, 0 failures
```
Checkov also supports an alternative syntax; the example below will create and run the same three tests.
```
defmodule MyModuleTest do
use ExUnit.Case
import Checkov
data_test "\#{a} + \#{b} == \#{result}" do
assert a + b == result
where a: [1, 4, 1.2],
b: [2, 5, 3.4],
result: [3, 9, 4.6]
end
end
```
Data tests also accept an optional second parameter where you can receive the context from a setup block.
Any variable created in the where block is available to be used in the name of the test!
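For example (a minimal sketch; the setup block and values are illustrative):
```
defmodule MyModuleTest do
  use ExUnit.Case
  import Checkov

  setup do
    [offset: 10]
  end

  data_test "\#{a} plus offset equals \#{result}", %{offset: offset} do
    assert a + offset == result

    where a: [1, 2],
          result: [11, 12]
  end
end
```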
"""
defmodule InvalidBindingsException do
defexception [:message]
end
defmacro __using__(_opts) do
quote do
import Checkov
use ExUnit.Case
end
end
defmacro data_test(name, context \\ quote(do: %{}), do: do_block) do
{test_block, where} = extract_where_function(do_block)
if not Checkov.Binding.valid?(where) do
raise InvalidBindingsException,
message: "All bindings in where function must be the same length"
end
Checkov.Binding.get_bindings(where)
|> Enum.map(fn binding -> {Checkov.TestName.unroll_name(name, binding), binding} end)
|> Enum.reduce([], fn {name, binding}, acc -> [{name, fix_name(name, acc), binding} | acc] end)
|> Enum.map(fn {_original_name, name, binding} ->
create_test(name, binding, test_block, context)
end)
end
defp extract_where_function(body) do
Macro.prewalk(body, {}, fn exp, acc ->
case match?({:where, _, _}, exp) do
true -> {nil, exp}
false -> {exp, acc}
end
end)
end
defp fix_name(name, test_defs) do
count =
Enum.count(test_defs, fn {original_name, _fixed_name, _binding} -> original_name == name end)
case count == 0 do
true -> name
false -> name <> " [#{count + 1}]"
end
end
defp create_test(name, binding, test_block, context) do
quoted_variables =
Enum.map(binding, fn {variable_name, variable_value} ->
{:__block__, [],
[
{:=, [], [{:var!, [context: Elixir, import: Kernel], [{variable_name, [], Elixir}]}, variable_value]},
{:=, [], [{:_, [], Elixir}, {:var!, [context: Elixir, import: Kernel], [{variable_name, [], Elixir}]}]}
]}
end)
quote do
test unquote(name), unquote(context) do
unquote_splicing(quoted_variables)
unquote(test_block)
end
end
end
end
|
lib/checkov.ex
| 0.70912
| 0.982406
|
checkov.ex
|
starcoder
|
defmodule LearnKit.NaiveBayes.Gaussian do
@moduledoc """
Module implementing the Gaussian Naive Bayes algorithm.
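
A typical workflow chains `new/1`, `fit/1` and `predict/2` (a minimal sketch; the data set is illustrative):

    classifier =
      LearnKit.NaiveBayes.Gaussian.new([{:a1, [[1, 2], [2, 3]]}, {:b1, [[-1, -2]]}])
      |> LearnKit.NaiveBayes.Gaussian.fit()

    {:ok, {_label, _probability}} = LearnKit.NaiveBayes.Gaussian.predict(classifier, [1, 2])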
"""
defstruct data_set: [], fit_data: []
alias LearnKit.NaiveBayes.Gaussian
use Gaussian.Normalize
use Gaussian.Fit
use Gaussian.Classify
use Gaussian.Score
@type label :: atom
@type feature :: [integer]
@type prediction :: {label, number}
@type predictions :: [prediction]
@type point :: {label, feature}
@type features :: [feature]
@type data_set :: [{label, features}]
@type fit_feature :: %{mean: float, standard_deviation: float, variance: float}
@type fit_features :: [fit_feature]
@type fit_data :: [{label, fit_features}]
@doc """
Creates classifier with empty data_set
## Examples
iex> classifier = LearnKit.NaiveBayes.Gaussian.new
%LearnKit.NaiveBayes.Gaussian{data_set: [], fit_data: []}
"""
@spec new() :: %Gaussian{data_set: []}
def new, do: Gaussian.new([])
@doc """
Creates classifier with data_set
## Parameters
- data_set: Keyword list with labels and features in tuples
## Examples
iex> classifier = LearnKit.NaiveBayes.Gaussian.new([{:a1, [[1, 2], [2, 3]]}, {:b1, [[-1, -2]]}])
%LearnKit.NaiveBayes.Gaussian{data_set: [a1: [[1, 2], [2, 3]], b1: [[-1, -2]]], fit_data: []}
"""
@spec new(data_set) :: %Gaussian{data_set: data_set}
def new(data_set), do: %Gaussian{data_set: data_set}
@doc """
Add train data to classifier
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
- train data: tuple with label and feature
## Examples
iex> classifier = classifier |> LearnKit.NaiveBayes.Gaussian.add_train_data({:a1, [-1, -1]})
%LearnKit.NaiveBayes.Gaussian{data_set: [a1: [[-1, -1]]], fit_data: []}
"""
@spec add_train_data(%Gaussian{data_set: data_set}, point) :: %Gaussian{data_set: data_set}
def add_train_data(%Gaussian{data_set: data_set}, {key, value}) do
features = if Keyword.has_key?(data_set, key), do: Keyword.get(data_set, key), else: []
data_set = Keyword.put(data_set, key, [value | features])
%Gaussian{data_set: data_set}
end
@doc """
Normalize train data
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
- type: none/minimax/z_normalization, default is none, optional
## Examples
iex> classifier = classifier |> LearnKit.NaiveBayes.Gaussian.normalize_train_data("minimax")
%LearnKit.NaiveBayes.Gaussian{
data_set: [a1: [[0.6666666666666666, 0.8], [1.0, 1.0]], b1: [[0.0, 0.0]]],
fit_data: []
}
"""
@spec normalize_train_data(%Gaussian{data_set: data_set}, String.t()) :: %Gaussian{data_set: data_set, fit_data: fit_data}
def normalize_train_data(%Gaussian{data_set: data_set}, type \\ "none") when is_binary(type) do
%Gaussian{data_set: normalize_data(data_set, type), fit_data: []}
end
@doc """
Fit train data
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
## Examples
iex> classifier = classifier |> LearnKit.NaiveBayes.Gaussian.fit
%LearnKit.NaiveBayes.Gaussian{
data_set: [a1: [[-1, -1]]],
fit_data: [
a1: [
%{mean: -1.0, standard_deviation: 0.0, variance: 0.0},
%{mean: -1.0, standard_deviation: 0.0, variance: 0.0}
]
]
}
"""
@spec fit(%Gaussian{data_set: data_set}) :: %Gaussian{data_set: data_set, fit_data: fit_data}
def fit(%Gaussian{data_set: data_set}) do
%Gaussian{data_set: data_set, fit_data: fit_data(data_set)}
end
@doc """
Return probability estimates for the feature
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
## Examples
iex> classifier |> LearnKit.NaiveBayes.Gaussian.predict_proba([1, 2])
{:ok, [a1: 0.0359, a2: 0.0039]}
"""
@spec predict_proba(%Gaussian{fit_data: fit_data}, feature) :: {:ok, predictions}
def predict_proba(%Gaussian{fit_data: fit_data}, feature) do
result = classify_data(fit_data, feature)
{:ok, result}
end
@doc """
Return exact prediction for the feature
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
## Examples
iex> classifier |> LearnKit.NaiveBayes.Gaussian.predict([1, 2])
{:ok, {:a1, 0.334545454}}
"""
@spec predict(%Gaussian{fit_data: fit_data}, feature) :: {:ok, prediction}
def predict(%Gaussian{fit_data: fit_data}, feature) do
result = fit_data |> classify_data(feature) |> Enum.sort_by(&(elem(&1, 1))) |> Enum.at(-1)
{:ok, result}
end
@doc """
Returns the mean accuracy on the given test data and labels
## Parameters
- classifier: %LearnKit.NaiveBayes.Gaussian{}
## Examples
iex> classifier |> LearnKit.NaiveBayes.Gaussian.score
{:ok, 0.857143}
"""
@spec score(%Gaussian{data_set: data_set, fit_data: fit_data}) :: {:ok, number}
def score(%Gaussian{data_set: data_set, fit_data: fit_data}) do
result = calc_score(fit_data, data_set)
{:ok, result}
end
end
|
lib/learn_kit/naive_bayes/gaussian.ex
| 0.932821
| 0.734358
|
gaussian.ex
|
starcoder
|
defmodule BSV.Hash do
@moduledoc """
A collection of one-way hashing functions used frequently throughout Bitcoin.
All hashing functions accept the `:encoding` option which can be either
`:base64` or `:hex`.
"""
import BSV.Util, only: [encode: 2]
@doc """
Computes the RIPEMD hash of a given input, outputting 160 bits.
## Examples
iex> BSV.Hash.ripemd160("hello world")
<<152, 198, 21, 120, 76, 203, 95, 229, 147, 111, 188, 12, 190, 157, 253, 180, 8, 217, 47, 15>>
iex> BSV.Hash.ripemd160("hello world", encoding: :hex)
"98c615784ccb5fe5936fbc0cbe9dfdb408d92f0f"
"""
@spec ripemd160(binary(), keyword()) :: binary()
def ripemd160(data, opts \\ []) when is_binary(data),
do: hash(data, :ripemd160, opts)
@doc """
Computes the SHA-1 hash of a given input, outputting 160 bits.
## Examples
iex> BSV.Hash.sha1("hello world")
<<42, 174, 108, 53, 201, 79, 207, 180, 21, 219, 233, 95, 64, 139, 156, 233, 30, 232, 70, 237>>
iex> BSV.Hash.sha1("hello world", encoding: :hex)
"2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"
"""
@spec sha1(binary(), keyword()) :: binary()
def sha1(data, opts \\ []) when is_binary(data),
do: hash(data, :sha, opts)
@doc """
Computes the HMAC of the given input using a secret key and the SHA-1
algorithm.
## Examples
iex> BSV.Hash.sha1_hmac("hello world", "test")
<<90, 9, 227, 4, 243, 198, 13, 99, 63, 241, 103, 53, 236, 147, 30, 17, 22, 255, 33, 209>>
iex> BSV.Hash.sha1_hmac("hello world", "test", encoding: :hex)
"5a09e304f3c60d633ff16735ec931e1116ff21d1"
"""
@spec sha1_hmac(binary(), binary(), keyword()) :: binary()
def sha1_hmac(data, key, opts \\ [])
when is_binary(data) and is_binary(key),
do: hmac(data, key, :sha, opts)
@doc """
Computes the SHA-2 hash of a given input, outputting 256 bits.
## Examples
iex> BSV.Hash.sha256("hello world")
<<185, 77, 39, 185, 147, 77, 62, 8, 165, 46, 82, 215, 218, 125, 171, 250, 196, 132, 239, 227, 122, 83, 128, 238, 144, 136, 247, 172, 226, 239, 205, 233>>
iex> BSV.Hash.sha256("hello world", encoding: :hex)
"b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
"""
@spec sha256(binary(), keyword()) :: binary()
def sha256(data, opts \\ []) when is_binary(data),
do: hash(data, :sha256, opts)
@doc """
Computes the HMAC of the given input using a secret key and the SHA-256
algorithm.
## Examples
iex> BSV.Hash.sha256_hmac("hello world", "test")
<<209, 89, 110, 13, 66, 128, 242, 189, 45, 49, 28, 224, 129, 159, 35, 189, 224, 220, 131, 77, 130, 84, 185, 41, 36, 8, 141, 233, 76, 56, 217, 34>>
iex> BSV.Hash.sha256_hmac("hello world", "test", encoding: :hex)
"d1596e0d4280f2bd2d311ce0819f23bde0dc834d8254b92924088de94c38d922"
"""
@spec sha256_hmac(binary(), binary(), keyword()) :: binary()
def sha256_hmac(data, key, opts \\ [])
when is_binary(data) and is_binary(key),
do: hmac(data, key, :sha256, opts)
@doc """
Computes a RIPEMD hash of a SHA-256 hash, outputting 160 bits. This is
commonly used inside Bitcoin, particularly for Bitcoin addresses.
## Examples
iex> BSV.Hash.sha256_ripemd160("hello world")
<<215, 213, 238, 120, 36, 255, 147, 249, 76, 48, 85, 175, 147, 130, 200, 108, 104, 181, 202, 146>>
iex> BSV.Hash.sha256_ripemd160("hello world", encoding: :hex)
"d7d5ee7824ff93f94c3055af9382c86c68b5ca92"
"""
@spec sha256_ripemd160(binary(), keyword()) :: binary()
def sha256_ripemd160(data, opts \\ []) when is_binary(data),
do: sha256(data) |> ripemd160(opts)
@doc """
Computes a double SHA256 hash. This hash function is commonly used inside
Bitcoin, particularly for the hash of a block and the hash of a transaction.
## Examples
iex> BSV.Hash.sha256_sha256("hello world")
<<188, 98, 212, 184, 13, 158, 54, 218, 41, 193, 108, 93, 77, 159, 17, 115, 31, 54, 5, 44, 114, 64, 26, 118, 194, 60, 15, 181, 169, 183, 68, 35>>
iex> BSV.Hash.sha256_sha256("hello world", encoding: :hex)
"bc62d4b80d9e36da29c16c5d4d9f11731f36052c72401a76c23c0fb5a9b74423"
"""
@spec sha256_sha256(binary(), keyword()) :: binary()
def sha256_sha256(data, opts \\ []) when is_binary(data),
do: sha256(data) |> sha256(opts)
@doc """
Computes the SHA-2 hash of a given input, outputting 512 bits.
## Examples
iex> BSV.Hash.sha512("hello world", encoding: :hex)
"309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f"
iex> BSV.Hash.sha512("hello world", encoding: :base64)
"MJ7MSJwS1utMxA9QyQLytNDtd+5RGnx6m808qG1M2G+YndNbxf9JlnDaNCVbRbDP2DDoH2Bdz33FVC6TrpzXbw=="
"""
@spec sha512(binary(), keyword()) :: binary()
def sha512(data, opts \\ []) when is_binary(data),
do: hash(data, :sha512, opts)
@doc """
Computes the HMAC of the given input using a secret key and the SHA-512
algorithm.
## Examples
iex> BSV.Hash.sha512_hmac("hello world", "test", encoding: :hex)
"2536d175df94a4638110701d8a0e2cbe56e35f2dcfd167819148cd0f2c8780cb3d3df52b4aea8f929004dd07235ae802f4b5d160a2b8b82e8c2f066289de85a3"
iex> BSV.Hash.sha512_hmac("hello world", "test", encoding: :base64)
"JTbRdd+UpGOBEHAdig4svlbjXy3P0WeBkUjNDyyHgMs9PfUrSuqPkpAE3QcjWugC9LXRYKK4uC6MLwZiid6Fow=="
"""
@spec sha512_hmac(binary(), binary(), keyword()) :: binary()
def sha512_hmac(data, key, opts \\ [])
when is_binary(data) and is_binary(key),
do: hmac(data, key, :sha512, opts)
# Computes the hash of the given binary using the specified algorithm
defp hash(data, alg, opts) do
encoding = Keyword.get(opts, :encoding)
:crypto.hash(alg, data)
|> encode(encoding)
end
# Computes the hmac of the given binary with the key, using the specified
# algorithm
defp hmac(data, key, alg, opts) do
encoding = Keyword.get(opts, :encoding)
:crypto.mac(:hmac, alg, key, data)
|> encode(encoding)
end
end
|
lib/bsv/hash.ex
| 0.91579
| 0.545286
|
hash.ex
|
starcoder
|
defmodule Plymio.Vekil.Forom.Form do
@moduledoc ~S"""
The module implements the `Plymio.Vekil.Forom` protocol and produces *quoted forms*.
See `Plymio.Vekil.Forom` for the definitions of the protocol functions.
See `Plymio.Vekil` for an explanation of the test environment.
The default `:produce_default` is an empty list.
The default `:realise_default` is *the unset value* (`Plymio.Fontais.the_unset_value/0`).
## Module State
See `Plymio.Vekil.Forom` for the common fields.
The module's state is held in a `struct` with the following field(s):
| Field | Aliases | Purpose |
| :--- | :--- | :--- |
| `:forom` | | *holds the quoted form* |
"""
require Plymio.Fontais.Guard
require Plymio.Fontais.Option
require Plymio.Fontais.Vekil.ProxyForomDict, as: PROXYFOROMDICT
use Plymio.Fontais.Attribute
use Plymio.Vekil.Attribute
@type t :: %__MODULE__{}
@type opts :: Plymio.Fontais.opts()
@type error :: Plymio.Fontais.error()
@type kv :: Plymio.Fontais.kv()
@type product :: Plymio.Vekil.product()
import Plymio.Fontais.Error,
only: [
new_error_result: 1
],
warn: false
import Plymio.Fontais.Option,
only: [
opts_create_aliases_dict: 1,
opts_canonical_keys: 2
]
@plymio_vekil_forom_form_kvs_aliases [
# struct
@plymio_vekil_field_alias_forom,
@plymio_vekil_field_alias_produce_default,
@plymio_vekil_field_alias_realise_default,
@plymio_fontais_field_alias_protocol_name,
@plymio_fontais_field_alias_protocol_impl,
# virtual
@plymio_vekil_field_alias_seen,
@plymio_vekil_field_alias_vekil,
@plymio_vekil_field_alias_proxy
]
@plymio_vekil_forom_form_dict_aliases @plymio_vekil_forom_form_kvs_aliases
|> opts_create_aliases_dict
@doc false
def update_canonical_opts(opts, dict \\ @plymio_vekil_forom_form_dict_aliases) do
opts |> opts_canonical_keys(dict)
end
@plymio_vekil_defstruct [
{@plymio_vekil_field_forom, @plymio_fontais_the_unset_value},
{@plymio_vekil_field_produce_default, []},
{@plymio_vekil_field_realise_default, @plymio_fontais_the_unset_value},
{@plymio_fontais_field_protocol_name, Plymio.Vekil.Forom},
{@plymio_fontais_field_protocol_impl, __MODULE__}
]
defstruct @plymio_vekil_defstruct
@doc_new ~S"""
`new/1` takes an optional *opts* and creates a new *forom* returning `{:ok, forom}`.
## Examples
iex> {:ok, forom} = new()
...> match?(%FOROMFORM{}, forom)
true
`Plymio.Vekil.Utility.forom?/1` returns `true` if the value implements `Plymio.Vekil.Forom`
iex> {:ok, forom} = new()
...> forom |> Plymio.Vekil.Utility.forom?
true
The form is passed using the `:forom` key:
iex> {:ok, forom} = new(forom: quote(do: x = x + 1))
...> forom |> Plymio.Vekil.Utility.forom?
true
iex> {:ok, forom} = new(
...> forom: quote(do: x = x + 1), proxy: :x_add_1)
...> forom |> Plymio.Vekil.Utility.forom?
true
Same example but here the realise function is used to access the
*form* in the `:forom` field:
iex> {:ok, forom} = new(
...> forom: quote(do: x = x + 1), proxy: :x_add_1)
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 7])
{8, ["x = x + 1"]}
The *form* is validated:
iex> {:error, error} = new(forom: %{a: 1})
...> error |> Exception.message
"form invalid, got: %{a: 1}"
"""
@doc_update ~S"""
`update/2` implements `Plymio.Vekil.Forom.update/2`.
## Examples
iex> {:ok, forom} = new(
...> forom: quote(do: x = x + 1), proxy: :x_add_1)
...> {:ok, forom} = forom |> update(forom: quote(do: x = x * x))
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 7])
{49, ["x = x * x"]}
"""
@doc_normalise ~S"""
`normalise/1` creates a new *forom* from its argument unless the argument is already one.
## Examples
iex> {:ok, forom} = quote(do: x = x + 1) |> normalise
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 3])
{4, ["x = x + 1"]}
iex> {:ok, forom} = normalise(
...> forom: quote(do: x = x + 1), proxy: :add_1)
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 3])
{4, ["x = x + 1"]}
Multiple *forms* can be stored:
iex> {:ok, forom} = [
...> quote(do: x = x + 1),
...> quote(do: x = x * x),
...> quote(do: x = x - 1)
...> ] |> normalise
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
An invalid *form* returns an error result:
iex> {:error, error} = %{a: 1} |> normalise
...> error |> Exception.message
"form invalid, got: %{a: 1}"
An existing *forom* (of any implementation) is returned unchanged:
iex> {:ok, forom} = quote(do: x = x + 1) |> normalise
...> {:ok, forom} = forom |> normalise
...> {:ok, {form, _}} = forom |> FOROMPROT.realise
...> form |> harnais_helper_test_forms!(binding: [x: 8])
{9, ["x = x + 1"]}
"""
@doc_produce ~S"""
`produce/2` takes a *forom* and an optional *opts*, calls `update/2`
with the *vekil* and the *opts* if any, and returns `{:ok, {product, forom}}`.
The `product` will be `Keyword` with one or more `:forom` keys where the values are the *forms*.
## Examples
iex> {:ok, forom} = quote(do: x = x + 1) |> normalise
...> {:ok, {product, %FOROMFORM{}}} = forom |> FOROMPROT.produce
...> [:forom] = product |> Keyword.keys |> Enum.uniq
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 41])
{42, ["x = x + 1"]}
iex> {:ok, forom} = [
...> quote(do: x = x + 1),
...> quote(do: x = x * x),
...> quote(do: x = x - 1)
...> ] |> normalise
...> {:ok, {product, _}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["[x = x + 1, x = x * x, x = x - 1]"]}
If *opts* are given, `update/2` is called before producing the *forom*:
iex> {:ok, forom} = new()
...> {:ok, forom} = forom |> update(forom: quote(do: x = x + 1))
...> {:ok, {product, %FOROMFORM{}}} = forom |> FOROMPROT.produce
...> [:forom] = product |> Keyword.keys |> Enum.uniq
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 41])
{42, ["x = x + 1"]}
An empty *forom* does not produce any `:forom` keys:
iex> {:ok, forom} = new()
...> {:ok, {product, _}} = forom |> FOROMPROT.produce
...> product |> Keyword.get_values(:forom)
...> |> harnais_helper_test_forms!(binding: [x: 41])
{nil, []}
"""
@doc_realise ~S"""
`realise/2` takes a *forom* and an optional *opts*, calls
`produce/2` and then gets (`Keyword.get_values/2`) the `:forom` key
values from the *product*.
The forms are then normalised
(`Plymio.Fontais.Form.forms_normalise/1`) and `{:ok, {forms, forom}}` returned.
## Examples
iex> {:ok, forom} = quote(do: x = x + 1) |> normalise
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 41])
{42, ["x = x + 1"]}
iex> {:ok, forom} = [
...> quote(do: x = x + 1),
...> quote(do: x = x * x),
...> quote(do: x = x - 1)
...> ] |> normalise
...> {:ok, {forms, %FOROMFORM{}}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 3])
{15, ["x = x + 1", "x = x * x", "x = x - 1"]}
If *opts* are given, `update/2` is called before realising the *forom*:
iex> {:ok, forom} = new()
...> {:ok, {forms, %FOROMFORM{}}} = forom
...> |> FOROMPROT.realise(forom: quote(do: x = x + 1))
...> forms |> harnais_helper_test_forms!(binding: [x: 41])
{42, ["x = x + 1"]}
An empty *forom* does not produce any `:forom` keys so the `:realise_default` is returned:
iex> {:ok, forom} = new()
...> {:ok, {value, _forom}} = forom |> FOROMPROT.realise
...> value |> Plymio.Fontais.Guard.is_value_unset
true
"""
@vekil [
Plymio.Vekil.Codi.Dict.__vekil__(),
# overrides to the defaults
%{
doc_false: quote(do: @doc(false)),
state_def_new_doc: quote(do: @doc(unquote(@doc_new))),
state_def_update_doc: quote(do: @doc(unquote(@doc_update))),
vekil_forom_def_normalise_doc: quote(do: @doc(unquote(@doc_normalise))),
vekil_forom_def_produce_doc: quote(do: @doc(unquote(@doc_produce))),
vekil_forom_def_realise_doc: quote(do: @doc(unquote(@doc_realise)))
}
]
|> PROXYFOROMDICT.create_proxy_forom_dict!()
@vekil
|> Enum.sort_by(fn {k, _v} -> k end)
@vekil_proxies [
:state_base_package,
:state_defp_update_field_header,
:state_vekil_defp_update_field_vekil_ignore,
:state_vekil_proxy_defp_update_field_proxy_ignore,
:state_vekil_defp_update_field_seen_ignore,
:vekil_forom_form_defp_update_field_forom_validate_form,
:state_vekil_defp_update_field_produce_default_passthru,
:state_vekil_defp_update_field_realise_default_passthru,
:state_defp_update_field_unknown,
:vekil_defp_validate_vekil,
:vekil_forom_form_def_produce,
:vekil_forom_form_def_realise,
:vekil_forom_form_defp_realise_product,
:vekil_forom_def_normalise,
:vekil_forom_form_defp_forom_value_normalise
]
@codi_opts [
{@plymio_fontais_key_dict, @vekil}
]
@vekil_proxies
|> PROXYFOROMDICT.reify_proxies(@codi_opts)
end
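# The protocol implementation below simply delegates every protocol function to
# the function of the same name defined in the module above.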
defimpl Plymio.Vekil.Forom, for: Plymio.Vekil.Forom.Form do
@funs :functions
|> @protocol.__info__
|> Keyword.drop([:__protocol__, :impl_for, :impl_for!])
for {fun, arity} <- @funs do
defdelegate unquote(fun)(unquote_splicing(Macro.generate_arguments(arity, nil))), to: @for
end
end
defimpl Inspect, for: Plymio.Vekil.Forom.Form do
use Plymio.Vekil.Attribute
import Plymio.Fontais.Guard,
only: [
is_value_unset_or_nil: 1
]
def inspect(
%Plymio.Vekil.Forom.Form{@plymio_vekil_field_forom => forom},
_opts
) do
forom_telltale =
forom
|> case do
x when is_value_unset_or_nil(x) -> "-F"
x when is_list(x) -> "F=L#{length(x)}"
x when is_atom(x) -> "F=#{to_string(x)}"
_x -> "+F"
end
forom_telltale =
[
forom_telltale
]
|> List.flatten()
|> Enum.reject(&is_nil/1)
|> Enum.join("; ")
"FOROMForm(#{forom_telltale})"
end
end
|
lib/vekil/concrete/forom/form.ex
| 0.855911
| 0.582758
|
form.ex
|
starcoder
|
defmodule SvgBuilder.Units do
@moduledoc """
Units for SVG documents.
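
For example (illustrative):

    SvgBuilder.Units.len({10, :px})
    #=> "10px"

    SvgBuilder.Units.angle({45, :deg})
    #=> "45deg"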
"""
@type len_t() :: number | {number, atom} | {number, number} | binary | nil
@type length_list_t() :: len_t | [len_t]
@type angle_t() :: number | binary | {number, :deg | :rad | :grad}
@spec angle(angle_t) :: binary
def angle(angle) when is_number(angle) do
"#{angle}"
end
def angle(angle) when is_binary(angle) do
regexp = ~r/^(([\+\-]?[0-9]*\.[0-9]+)|([\+\-]?[0-9]+))(deg|grad|rad)?$/
if String.match?(angle, regexp) do
angle
else
raise ArgumentError, "invalid angle for SvgBuilder attribute/property"
end
end
def angle({angle, unit}) when unit in [:deg, :rad, :grad] do
"#{angle}#{unit}"
end
@spec len(len_t) :: binary | nil
def len(nil) do
nil
end
def len(length) when is_number(length) do
"#{length}"
end
def len(length) when is_binary(length) do
regexp = ~r/^(([\+\-]?[0-9]*\.[0-9]+)|([\+\-]?[0-9]+))(em|ex|px|in|cm|mm|pt|pc|%)?$/
if String.match?(length, regexp) do
length
else
raise ArgumentError, "invalid length for SvgBuilder attribute/property"
end
end
def len({length, unit}) when unit in [:em, :ex, :px, :in, :cm, :mm, :pt, :pc] do
"#{length}#{unit}"
end
def len({x, y}) when is_number(y) do
"#{x} #{y}"
end
@spec length_list(length_list_t) :: binary | nil
def length_list(l) when is_list(l) do
do_length_list(l, "")
end
def length_list(l) do
len(l)
end
defp do_length_list([n], acc) do
String.trim("#{acc} #{len(n)}")
end
defp do_length_list([n | rest], acc) do
do_length_list(rest, "#{acc} #{len(n)}")
end
@spec number(number | binary) :: binary
def number(n) when is_number(n) do
"#{n}"
end
def number(n) when is_binary(n) do
regexp = ~r/^(([\+\-]?[0-9]*\.[0-9]+)|([\+\-]?[0-9]+))$/
if String.match?(n, regexp) do
n
else
raise ArgumentError, "invalid number for SvgBuilder attribute/property"
end
end
@spec number_list(number | binary | [number | binary]) :: binary
def number_list(n) when is_number(n) do
number(n)
end
def number_list(n) when is_list(n) do
do_number_list(n, "")
end
defp do_number_list([n], acc) do
String.trim("#{acc} #{number(n)}")
end
defp do_number_list([n | rest], acc) do
do_number_list(rest, "#{acc} #{number(n)}")
end
end
|
lib/units.ex
| 0.708414
| 0.462291
|
units.ex
|
starcoder
|
defmodule ListDict do
@moduledoc """
A Dict implementation that works on lists of two-item tuples.
For more information about the functions and their APIs, please
consult the `Dict` module.
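
For example:

    dict = ListDict.new |> ListDict.put(:a, 1)
    ListDict.get(dict, :a)
    #=> 1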
"""
@doc """
Returns a new `ListDict`, i.e. an empty list.
"""
def new, do: []
@doc """
Creates a new `ListDict` from the given pairs.
"""
def new(pairs) do
Enum.map pairs, fn({ k, v }) -> { k, v } end
end
@doc """
Creates a new `ListDict` from the given pairs
via the given transformation function.
"""
def new(list, transform) when is_function(transform) do
Enum.map list, transform
end
@doc """
Returns all keys in the dict.
"""
def keys(dict) do
lc { key, _ } inlist dict, do: key
end
@doc """
Returns all values in the dict.
"""
def values(dict) do
lc { _, value } inlist dict, do: value
end
@doc """
Returns the dict size.
"""
def size(dict) do
length(dict)
end
@doc """
Returns true if the dict has the given key.
"""
def has_key?(dict, key) do
:lists.keymember(key, 1, dict)
end
@doc """
Returns the value under key from the given
dict or default if no key is set.
"""
def get(dict, key, default // nil) do
case :lists.keyfind(key, 1, dict) do
{ ^key, value } -> value
false -> default
end
end
@doc false
def get!(dict, key) do
IO.write "[WARNING] Dict.get! and ListDict.get! are deprecated, please use Dict.fetch! and ListDict.fetch! instead\n#{Exception.format_stacktrace}"
case :lists.keyfind(key, 1, dict) do
{ ^key, value } -> value
false -> raise(KeyError, key: key)
end
end
@doc """
Returns the value under key from the given
dict in a tagged tuple, otherwise `:error`.
"""
def fetch(dict, key) do
case :lists.keyfind(key, 1, dict) do
{ ^key, value } -> { :ok, value }
false -> :error
end
end
@doc """
Returns the value under the given key
or raises `KeyError` if the key does not exist.
"""
def fetch!(dict, key) do
case :lists.keyfind(key, 1, dict) do
{ ^key, value } -> value
false -> raise(KeyError, key: key)
end
end
@doc """
Returns the value under the given key
from the dict as well as the dict without that key.
"""
def pop(dict, key, default // nil) do
{ get(dict, key, default), delete(dict, key) }
end
@doc """
Puts the given key-value pair in the dict.
"""
def put(dict, key, val) do
[{key, val}|delete(dict, key)]
end
@doc """
Puts the given key-value pair in the dict
if no entry exists yet.
"""
def put_new(dict, key, val) do
case :lists.keyfind(key, 1, dict) do
{ ^key, _ } -> dict
false -> [{key,val}|dict]
end
end
@doc """
Deletes the entry under the given key from the dict.
"""
def delete(dict, key) do
lc { k, _ } = tuple inlist dict, key != k, do: tuple
end
@doc """
Merges the given Enumerable into the dict.
"""
def merge(dict, enum, callback // fn(_k, _v1, v2) -> v2 end)
def merge(dict1, dict2, fun) do
Enum.reduce dict2, dict1, fn { k, v2 }, acc ->
update(acc, k, v2, fn(v1) -> fun.(k, v1, v2) end)
end
end
@doc """
Splits a dict into two dicts,
one containing entries with key in the keys list,
and another containing entries with key not in keys.
Returns a 2-tuple of the new dicts.
"""
def split(dict, keys) do
acc = { new(), new() }
Enum.reduce dict, acc, fn({ k, v }, { take, drop }) ->
if :lists.member(k, keys) do
{ [{k,v}|take], drop }
else
{ take, [{k,v}|drop] }
end
end
end
@doc """
Returns a new dict with only the entries
whose key is in `keys`.
"""
def take(dict, keys) do
lc { k, _ } = tuple inlist dict, :lists.member(k, keys), do: tuple
end
@doc """
Returns a new dict with only the entries
whose key is not in `keys`.
"""
def drop(dict, keys) do
lc { k, _ } = tuple inlist dict, not :lists.member(k, keys), do: tuple
end
@doc """
Updates the key in the dict according to the given function.
"""
def update([{key, value}|dict], key, fun) do
[{key, fun.(value)}|delete(dict, key)]
end
def update([{_, _} = e|dict], key, fun) do
[e|update(dict, key, fun)]
end
def update([], key, _fun) do
raise(KeyError, key: key)
end
@doc """
Updates the key in the dict according to the given function
or uses the given initial value if no entry exists.
"""
def update([{key, value}|dict], key, _initial, fun) do
[{key, fun.(value)}|delete(dict, key)]
end
def update([{_, _} = e|dict], key, initial, fun) do
[e|update(dict, key, initial, fun)]
end
def update([], key, initial, _fun) do
[{key, initial}]
end
@doc """
Returns an empty `ListDict`.
"""
def empty(_dict), do: []
@doc """
Check if the ListDict is equal to another ListDict.
"""
def equal?(dict, other) do
:lists.keysort(1, dict) == :lists.keysort(1, other)
end
@doc """
Converts the dict to a list.
"""
def to_list(dict), do: dict
end
|
lib/elixir/lib/list_dict.ex
| 0.833155
| 0.717556
|
list_dict.ex
|
starcoder
|
defmodule K8s.Client.Runner.Watch do
@moduledoc """
`K8s.Client` runner that will watch a resource or resources and stream results back to a process.
"""
alias K8s.Client.Runner.Base
alias K8s.Client.Runner.Watch.Stream
alias K8s.Conn
alias K8s.Operation
alias K8s.Operation.Error
@resource_version_json_path ~w(metadata resourceVersion)
@doc """
Watch a resource or list of resources. Provide the `stream_to` option or results will be streamed to `self()`.
Note: Current resource version will be looked up automatically.
## Examples
```elixir
{:ok, conn} = K8s.Conn.from_file("test/support/kube-config.yaml")
operation = K8s.Client.list("v1", "Namespace")
{:ok, reference} = Watch.run(conn, operation, stream_to: self())
```
```elixir
{:ok, conn} = K8s.Conn.from_file("test/support/kube-config.yaml")
operation = K8s.Client.get("v1", "Namespace", [name: "test"])
{:ok, reference} = Watch.run(conn, operation, stream_to: self())
```
"""
@spec run(Conn.t(), Operation.t(), keyword()) :: Base.result_t()
def run(%Conn{} = conn, %Operation{method: :get} = operation, http_opts) do
case get_resource_version(conn, operation) do
{:ok, rv} -> run(conn, operation, rv, http_opts)
err -> err
end
end
def run(_conn, op, _http_opts) do
msg = "Only HTTP GET operations (list, get) are supported. #{inspect(op)}"
{:error, %Error{message: msg}}
end
@doc """
Watch a resource or list of resources from a specific resource version. Provide the `stream_to` option or results will be streamed to `self()`.
## Examples
```elixir
{:ok, conn} = K8s.Conn.from_file("test/support/kube-config.yaml")
operation = K8s.Client.list("v1", "Namespace")
resource_version = 3003
{:ok, reference} = Watch.run(conn, operation, resource_version, stream_to: self())
```
```elixir
{:ok, conn} = K8s.Conn.from_file("test/support/kube-config.yaml")
operation = K8s.Client.get("v1", "Namespace", [name: "test"])
resource_version = 3003
{:ok, reference} = Watch.run(conn, operation, resource_version, stream_to: self())
```
"""
@spec run(Conn.t(), Operation.t(), binary, keyword()) :: Base.result_t()
def run(%Conn{} = conn, %Operation{method: :get, verb: verb} = operation, rv, http_opts)
when verb in [:list, :list_all_namespaces] do
opts_w_watch_params = add_watch_params_to_opts(http_opts, rv)
Base.run(conn, operation, opts_w_watch_params)
end
def run(%Conn{} = conn, %Operation{method: :get, verb: :get} = operation, rv, http_opts) do
{list_op, field_selector_params} = get_to_list(operation)
params = Keyword.get(http_opts, :params, [])
updated_params = Keyword.merge(params, field_selector_params)
http_opts = Keyword.put(http_opts, :params, updated_params)
run(conn, list_op, rv, http_opts)
end
@doc """
Watches resources and returns an Elixir Stream of events emitted by Kubernetes.
### Example
{:ok, conn} = K8s.Conn.from_file("test/support/kube-config.yaml")
op = K8s.Client.list("v1", "Namespace")
K8s.Client.Runner.Watch.stream(conn, op) |> Stream.map(&IO.inspect/1) |> Stream.run()
"""
@spec stream(Conn.t(), Operation.t(), keyword()) :: Enumerable.t()
defdelegate stream(conn, operation, http_opts \\ []), to: Stream, as: :resource
@spec get_resource_version(Conn.t(), Operation.t()) :: {:ok, binary} | Base.error_t()
def get_resource_version(%Conn{} = conn, %Operation{} = operation) do
with {:ok, payload} <- Base.run(conn, operation) do
rv = parse_resource_version(payload)
{:ok, rv}
end
end
@spec add_watch_params_to_opts(keyword, binary) :: keyword
defp add_watch_params_to_opts(http_opts, rv) do
params = Keyword.get(http_opts, :params, [])
watch_params = [resourceVersion: rv, watch: true]
updated_params = Keyword.merge(params, watch_params)
Keyword.put(http_opts, :params, updated_params)
end
@spec parse_resource_version(any) :: binary
defp parse_resource_version(%{} = payload),
do: get_in(payload, @resource_version_json_path) || "0"
defp parse_resource_version(_), do: "0"
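# Watching a single resource is implemented by turning the GET operation into a
# LIST operation narrowed with a fieldSelector on the resource's name.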
@spec get_to_list(Operation.t()) :: {Operation.t(), keyword}
defp get_to_list(get_op) do
{name, other_path_params} = Keyword.pop(get_op.path_params, :name)
list_op = %{get_op | verb: :list, path_params: other_path_params}
field_selector_params = [fieldSelector: "metadata.name=#{name}"]
{list_op, field_selector_params}
end
end
|
lib/k8s/client/runner/watch.ex
| 0.88856
| 0.48182
|
watch.ex
|
starcoder
|
defmodule JaResource.Create do
@moduledoc """
Defines a behaviour for creating a resource and the function to execute it.
It relies on (and uses):
* JaResource.Repo
* JaResource.Model
* JaResource.Attributes
When used JaResource.Create defines the following overrideable callbacks:
* handle_create/2
* handle_invalid_create/2
* render_create/2
* JaResource.Attributes.permitted_attributes/3
* JaResource.Repo.repo/1
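A minimal controller sketch (module names and the `use MyApp.Web` line are illustrative):

    defmodule MyApp.V1.PostController do
      use MyApp.Web, :controller
      use JaResource.Create

      def model, do: MyApp.Post
    end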
"""
@doc """
Returns an unpersisted changeset or persisted model of the newly created object.
Default implementation returns the results of calling
`Model.changeset(%Model{}, attrs)` where Model is the model defined by the
`JaResource.Model.model/0` callback.
The attributes argument is the result of the `permitted_attributes` function.
`handle_create/2` can return an %Ecto.Changeset, an Ecto.Schema struct,
a list of errors (`{:error, [email: "is not valid"]}`) or a conn with
any response/body.
Example custom implementation:
def handle_create(_conn, attributes) do
Post.changeset(%Post{}, attributes, :create_and_publish)
end
"""
@callback handle_create(Plug.Conn.t, JaResource.attributes) :: Plug.Conn.t | Ecto.Changeset.t | JaResource.record | {:ok, JaResource.record} | {:error, JaResource.validation_errors}
@doc """
Returns a `Plug.Conn` in response to errors during create.
Default implementation sets the status to `:unprocessable_entity` and renders
the error messages provided.
"""
@callback handle_invalid_create(Plug.Conn.t, Ecto.Changeset.t) :: Plug.Conn.t
@doc """
Returns a `Plug.Conn` in response to successful create.
Default implementation sets the status to `:created` and renders the view.
"""
@callback render_create(Plug.Conn.t, JaResource.record) :: Plug.Conn.t
defmacro __using__(_) do
quote do
@behaviour JaResource.Create
use JaResource.Repo
use JaResource.Attributes
import Plug.Conn
def handle_create(_conn, attributes) do
__MODULE__.model.changeset(__MODULE__.model.__struct__, attributes)
end
def handle_invalid_create(conn, errors) do
conn
|> put_status(:unprocessable_entity)
|> Phoenix.Controller.render(:errors, data: errors)
end
def render_create(conn, model) do
conn
|> put_status(:created)
|> Phoenix.Controller.render(:show, data: model)
end
defoverridable [handle_create: 2, handle_invalid_create: 2, render_create: 2]
end
end
@doc """
Creates a resource given a module using Create and a connection.
Create.call(ArticleController, conn)
Dispatched by JaResource.Plug when phoenix action is create.
"""
def call(controller, conn) do
merged = JaResource.Attributes.from_params(conn.params)
attributes = controller.permitted_attributes(conn, merged, :create)
conn
|> controller.handle_create(attributes)
|> JaResource.Create.insert(controller)
|> JaResource.Create.respond(conn, controller)
end
@doc false
def insert(%Ecto.Changeset{} = changeset, controller) do
controller.repo().insert(changeset)
end
if Code.ensure_loaded?(Ecto.Multi) do
def insert(%Ecto.Multi{} = multi, controller) do
controller.repo().transaction(multi)
end
end
def insert(other, _controller), do: other
@doc false
def respond(%Plug.Conn{} = conn, _old_conn, _), do: conn
def respond({:error, errors}, conn, controller), do: controller.handle_invalid_create(conn, errors)
def respond({:error, _name, errors, _changes}, conn, controller), do: controller.handle_invalid_create(conn, errors)
def respond({:ok, model}, conn, controller), do: controller.render_create(conn, model)
def respond(model, conn, controller), do: controller.render_create(conn, model)
end
|
lib/ja_resource/create.ex
| 0.863622
| 0.433502
|
create.ex
|
starcoder
|
defmodule Printer.Gcode do
@moduledoc """
Collection of functions for building G-code commands.
More on each G-code command can be found in the Marlin documentation linked below.
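
For example (illustrative):

    Printer.Gcode.g0(%{"X" => 10, "Y" => 5})
    #=> "G0 X10 Y5"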
"""
@doc """
Builds the command for a [linear move](https://marlinfw.org/docs/gcode/G000-G001.html).
"""
def g0(axes) do
params =
["X", "Y", "Z"]
|> Enum.map(fn key -> {key, axes[key]} end)
|> Enum.reject(fn {_key, value} -> is_nil(value) end)
|> Enum.map(fn {axis, value} -> "#{axis}#{value}" end)
|> Enum.join(" ")
"G0 #{params}"
end
@doc """
Builds the command for a [linear move](https://marlinfw.org/docs/gcode/G000-G001.html).
"""
def g1(axes) do
params =
["E", "X", "Y", "Z"]
|> Enum.map(fn key -> {key, axes[key]} end)
|> Enum.reject(fn {_key, value} -> is_nil(value) end)
|> Enum.map(fn {axis, value} -> "#{axis}#{value}" end)
|> Enum.join(" ")
"G1 #{params}"
end
@doc """
Builds a [home](https://marlinfw.org/docs/gcode/G028.html) command.
"""
def g28(axes \\ []) do
params =
axes
|> Enum.sort()
|> Enum.join(" ")
"G28 #{params}"
end
@doc """
Returns the relative positioning (`G91`) command.
"""
def g91 do
"G91"
end
@doc """
Builds a [set hotend temperature](https://marlinfw.org/docs/gcode/M104.html) command.
"""
def m104(temperature), do: "M104 S#{temperature}"
@doc """
Returns the [report temperature](https://marlinfw.org/docs/gcode/M105.html) command.
"""
def m105, do: "M105"
@doc """
Builds a [wait for hotend](https://marlinfw.org/docs/gcode/M109.html) command.
"""
def m109(temperature), do: "M109 S#{temperature}"
@doc """
Returns the [e-stop](https://marlinfw.org/docs/gcode/M112.html) command.
"""
def m112, do: "M112"
@doc """
Builds a [set bed temperature](https://marlinfw.org/docs/gcode/M140.html) command.
"""
def m140(temperature), do: "M140 S#{temperature}"
@doc """
Builds a [temperature auto report](https://marlinfw.org/docs/gcode/M155.html) command.
`interval` - the seconds between each temperature report.
"""
def m155(interval), do: "M155 S#{interval}"
@doc """
Builds a [wait for bed temperature](https://marlinfw.org/docs/gcode/M190.html) command.
"""
def m190(temperature), do: "M190 S#{temperature}"
end
|
printer/lib/printer/gcode.ex
| 0.840062
| 0.600071
|
gcode.ex
|
starcoder
|
defmodule Protobuf.Encoder do
import Protobuf.WireTypes
import Bitwise, only: [bsr: 2, band: 2, bsl: 2, bor: 2]
alias Protobuf.{MessageProps, FieldProps}
@spec encode(struct, keyword) :: iodata
def encode(%{__struct__: mod} = struct, opts \\ []) do
Protobuf.Validator.validate!(struct)
res = encode(struct, mod.__message_props__(), [])
res = Enum.reverse(res)
case Keyword.fetch(opts, :iolist) do
{:ok, true} -> res
_ -> IO.iodata_to_binary(res)
end
end
@spec encode(struct, MessageProps.t(), iodata) :: iodata
def encode(struct, props, acc0) do
syntax = props.syntax
oneofs = oneof_actual_vals(props, struct)
Enum.reduce(props.ordered_tags, acc0, fn tag, acc ->
prop = props.field_props[tag]
val = Map.get(struct, prop.name_atom)
val = if prop.oneof, do: oneofs[prop.name_atom], else: val
cond do
syntax == :proto2 && (val == nil || val == [] || val == %{}) -> acc
syntax == :proto3 && empty_val?(val) -> acc
true -> [encode_field(class_field(prop), val, prop) | acc]
end
end)
end
@spec encode_field(atom, any, FieldProps.t()) :: iodata
def encode_field(:normal, val, %{type: type, fnum: fnum} = prop) do
repeated_or_not(val, prop.repeated?, fn v ->
if is_atom(v) && !is_boolean(v) do
v = apply(prop.enum_type, :value, [v])
[encode_fnum(fnum, type), encode_type(type, v)]
else
[encode_fnum(fnum, type), encode_type(type, v)]
end
end)
end
def encode_field(:embedded, val, %{type: type, fnum: fnum} = prop) do
repeated = prop.repeated? || prop.map?
repeated_or_not(val, repeated, fn v ->
v = if prop.map?, do: struct(prop.type, %{key: elem(v, 0), value: elem(v, 1)}), else: v
encoded = encode(v, iolist: true)
byte_size = IO.iodata_length(encoded)
[encode_fnum(fnum, type), [encode_varint(byte_size), encoded]]
end)
end
def encode_field(:packed, val, %{type: type, fnum: fnum}) do
encoded = Enum.map(val, fn v -> encode_type(type, v) end)
byte_size = IO.iodata_length(encoded)
[encode_fnum(fnum, :bytes), [encode_varint(byte_size), encoded]]
end
@spec class_field(map) :: atom
def class_field(%{wire_type: wire_delimited(), embedded?: true}) do
:embedded
end
def class_field(%{repeated?: true, packed?: true}) do
:packed
end
def class_field(_) do
:normal
end
@spec encode_fnum(integer, atom) :: iodata
def encode_fnum(fnum, type) do
fnum
|> bsl(3)
|> bor(wire_type(type))
|> encode_varint
end
@spec encode_type(atom, any) :: iodata
def encode_type(:int32, n), do: encode_varint(n)
def encode_type(:int64, n), do: encode_varint(n)
def encode_type(:uint32, n), do: encode_varint(n)
def encode_type(:uint64, n), do: encode_varint(n)
def encode_type(:sint32, n), do: n |> encode_zigzag |> encode_varint
def encode_type(:sint64, n), do: n |> encode_zigzag |> encode_varint
def encode_type(:bool, n) when n == true, do: encode_varint(1)
def encode_type(:bool, n) when n == false, do: encode_varint(0)
def encode_type(:enum, n), do: encode_type(:int32, n)
def encode_type(:fixed64, n), do: <<n::64-little>>
def encode_type(:sfixed64, n), do: <<n::64-signed-little>>
def encode_type(:double, n), do: <<n::64-float-little>>
def encode_type(:bytes, n) do
bin = IO.iodata_to_binary(n)
len = bin |> byte_size |> encode_varint
<<len::binary, bin::binary>>
end
def encode_type(:string, n), do: encode_type(:bytes, n)
def encode_type(:fixed32, n), do: <<n::32-little>>
def encode_type(:sfixed32, n), do: <<n::32-signed-little>>
def encode_type(:float, n), do: <<n::32-float-little>>
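# ZigZag-encodes a signed integer so that values with a small magnitude stay
# small when varint encoded: 0 => 0, -1 => 1, 1 => 2, -2 => 3, ...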
@spec encode_zigzag(integer) :: integer
def encode_zigzag(val) when val >= 0, do: val * 2
def encode_zigzag(val) when val < 0, do: val * -2 - 1
@spec encode_varint(integer) :: iodata
# Negative numbers are re-interpreted as their unsigned 64-bit representation
# before encoding.
def encode_varint(n) when n < 0 do
<<n::64-unsigned-native>> = <<n::64-signed-native>>
encode_varint(n)
end
# A value that fits in 7 bits becomes a single byte with the continuation bit clear.
def encode_varint(n) when n <= 127, do: <<n>>
# Otherwise emit the low 7 bits with the continuation bit set and recurse on the rest.
def encode_varint(n) when n > 127,
do: <<1::1, band(n, 127)::7, encode_varint(bsr(n, 7))::binary>>
@spec wire_type(atom) :: integer
def wire_type(:int32), do: wire_varint()
def wire_type(:int64), do: wire_varint()
def wire_type(:uint32), do: wire_varint()
def wire_type(:uint64), do: wire_varint()
def wire_type(:sint32), do: wire_varint()
def wire_type(:sint64), do: wire_varint()
def wire_type(:bool), do: wire_varint()
def wire_type(:enum), do: wire_varint()
def wire_type(:fixed64), do: wire_64bits()
def wire_type(:sfixed64), do: wire_64bits()
def wire_type(:double), do: wire_64bits()
def wire_type(:string), do: wire_delimited()
def wire_type(:bytes), do: wire_delimited()
def wire_type(:fixed32), do: wire_32bits()
def wire_type(:sfixed32), do: wire_32bits()
def wire_type(:float), do: wire_32bits()
def wire_type(mod) when is_atom(mod), do: wire_delimited()
defp repeated_or_not(val, repeated, func) do
if repeated do
Enum.map(val, func)
else
func.(val)
end
end
defp empty_val?(v) do
!v || v == 0 || v == "" || v == [] || v == %{}
end
defp oneof_actual_vals(props, struct) do
Enum.reduce(props.oneof, %{}, fn {field, _}, acc ->
case Map.get(struct, field) do
{f, val} -> Map.put(acc, f, val)
nil -> acc
end
end)
end
end
|
lib/protobuf/encoder.ex
| 0.761937
| 0.431584
|
encoder.ex
|
starcoder
|
defmodule TypeCheck.Builtin.FixedTuple do
defstruct [:element_types]
use TypeCheck
@type! t :: %__MODULE__{element_types: list(TypeCheck.Type.t())}
@type! problem_tuple ::
{t(), :not_a_tuple, %{}, any()}
| {t(), :different_size, %{expected_size: integer()}, tuple()}
| {t(), :element_error,
%{problem: lazy(TypeCheck.TypeError.Formatter.problem_tuple()), index: integer()},
tuple()}
defimpl TypeCheck.Protocols.ToCheck do
def to_check(s = %{element_types: types_list}, param) do
element_checks_ast = build_element_checks_ast(types_list, param, s)
expected_size = length(types_list)
quote generated: true, location: :keep do
case unquote(param) do
x when not is_tuple(x) ->
{:error, {unquote(Macro.escape(s)), :not_a_tuple, %{}, x}}
x when tuple_size(x) != unquote(expected_size) ->
{:error,
{unquote(Macro.escape(s)), :different_size, %{expected_size: unquote(expected_size)},
x}}
_ ->
unquote(element_checks_ast)
end
end
end
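# Builds a `with` chain that checks each tuple element in turn, accumulating any
# bindings and rebuilding the (possibly altered) tuple element by element.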
defp build_element_checks_ast(types_list, param, s) do
element_checks =
types_list
|> Enum.with_index()
|> Enum.flat_map(fn {element_type, index} ->
impl =
TypeCheck.Protocols.ToCheck.to_check(
element_type,
quote generated: true, location: :keep do
elem(unquote(param), unquote(index))
end
)
quote generated: true, location: :keep do
[
{{:ok, element_bindings, altered_element}, _index} <- {unquote(impl), unquote(index)},
bindings = element_bindings ++ bindings,
altered_param = Tuple.append(altered_param, altered_element)
]
end
end)
quote generated: true, location: :keep do
bindings = []
altered_param = {}
with unquote_splicing(element_checks) do
{:ok, bindings, altered_param}
else
{{:error, error}, index} ->
{:error,
{unquote(Macro.escape(s)), :element_error, %{problem: error, index: index},
unquote(param)}}
end
end
end
end
defimpl TypeCheck.Protocols.Inspect do
def inspect(s, opts) do
element_types =
case s.element_types do
%TypeCheck.Builtin.FixedList{element_types: element_types} ->
element_types
%TypeCheck.Builtin.List{element_type: element_type} ->
[element_type]
other ->
other
end
element_types
|> List.to_tuple()
|> Elixir.Inspect.inspect(%Inspect.Opts{
opts
| inspect_fun: &TypeCheck.Protocols.Inspect.inspect/2
})
end
end
if Code.ensure_loaded?(StreamData) do
defimpl TypeCheck.Protocols.ToStreamData do
def to_gen(s) do
s.element_types
|> Enum.map(&TypeCheck.Protocols.ToStreamData.to_gen/1)
|> List.to_tuple()
|> StreamData.tuple()
end
end
end
end
|
lib/type_check/builtin/fixed_tuple.ex
| 0.715921
| 0.47725
|
fixed_tuple.ex
|
starcoder
|
defmodule MarsExplorer do
# Get values from prompt
def get_x() do
x = IO.gets "What's the size of X of the area? > "
val = String.trim(x)
String.to_integer(val)
end
def get_y() do
y = IO.gets "What's the value of Y of the area? > "
val = String.trim(y)
String.to_integer(val)
end
def get_position() do
pos = IO.gets "Set the initial position of the rover (ex. 34E) > "
String.upcase(String.trim(pos))
end
def get_commands() do
cmd = IO.gets "Set the instructions of the rover. M to move and L/R to rotate (ex. MMLRMLLMMR) > "
String.upcase(String.trim(cmd))
end
def exec([_x, _y], []), do: :ok # No more rovers to process
def exec([x, y], [rover_commands | new_rover]) do # Add rovers and send commands
IO.inspect exec_for(rover_commands, [x, y]) # Print values from exec_for()
exec([x, y], new_rover)
end
def exec_for([initial_position, commands], [_x, _y]), do: start_new_position(initial_position, commands) # Add rover and send commands
# Get current position
def start_new_position(current_position, ""), do: current_position
# Send commands to rover (attribute new positions) and launch other rover after
def start_new_position(current_position, <<one_command::binary-size(1), new_rover::binary>>) do
new_position(current_position, one_command) |> start_new_position(new_rover)
end
# Check which instruction will be sent: move or rotate - left/right
def new_position(<<position::binary-size(2), compass::binary-size(1)>>, "L"), do: rotate_left(position, compass)
def new_position(<<position::binary-size(2), compass::binary-size(1)>>, "R"), do: rotate_right(position, compass)
def new_position(<<_position::binary-size(2), compass::binary-size(1)>> = current_position, "M"),
do: move(current_position, compass)
# Rotate rover based on compass
def rotate_left(position, "N"), do: position <> "W"
def rotate_left(position, "E"), do: position <> "N"
def rotate_left(position, "W"), do: position <> "S"
def rotate_left(position, "S"), do: position <> "E"
def rotate_right(position, "N"), do: position <> "E"
def rotate_right(position, "E"), do: position <> "S"
def rotate_right(position, "W"), do: position <> "N"
def rotate_right(position, "S"), do: position <> "W"
  # Move rover based on position and orientation (assumes single-digit x/y coordinates)
def move(<<x_position::binary-size(1), y_position::binary-size(1), compass::binary-size(1)>>, "N"), do: x_position <> inc(y_position) <> compass
def move(<<x_position::binary-size(1), y_position::binary-size(1), compass::binary-size(1)>>, "E"), do: inc(x_position) <> y_position <> compass
def move(<<x_position::binary-size(1), y_position::binary-size(1), compass::binary-size(1)>>, "W"), do: dec(x_position) <> y_position <> compass
def move(<<x_position::binary-size(1), y_position::binary-size(1), compass::binary-size(1)>>, "S"), do: x_position <> dec(y_position) <> compass
# Increase or decrease x or y value
defp inc(number), do: Integer.to_string(String.to_integer(number) + 1)
defp dec(number), do: Integer.to_string(String.to_integer(number) - 1)
end
x = MarsExplorer.get_x()
y = MarsExplorer.get_y()
rover_position = MarsExplorer.get_position()
rover_commands = MarsExplorer.get_commands()
# Here I could send another question like "Do you want to add more rovers? Y/N"
# If the answer is positive, I would ask the questions again and get another rover position and commands,
# then return the results, maybe using Enum.each(rover, fn {key, value} -> value end).
# For now, I am just asking the questions again.
rover_position_2 = MarsExplorer.get_position()
rover_commands_2 = MarsExplorer.get_commands()
IO.inspect MarsExplorer.exec([x, y], [[rover_position, rover_commands], [rover_position_2, rover_commands_2]])
|
src/marsExplorer.ex
| 0.64232
| 0.551091
|
marsExplorer.ex
|
starcoder
|
defmodule Membrane.WebRTC.Server.Message do
@moduledoc """
Struct defining messages exchanged between peers and rooms.
## Fields
- `:data` - Main part of the message. Value under that field MUST BE encodable by
`Jason.Encoder`.
- `:event` - Topic of the message.
- `:from` - Peer ID of a sender.
  - `:from_metadata` - Metadata of the sender, taken from the parsed request.
  - `:to` - Recipients of the message: a list of peer IDs or `"all"`. If set to `"all"`,
  the message will be broadcasted: all peers in the room (except for the peer specified
  under `from` field) will receive this message.
## Messages in Server API
Messages used by `Membrane.WebRTC.Server.Room` and `Membrane.WebRTC.Server.Peer` modules:
- `t:authenticated_message/0`
- `t:error_message/0`
- `t:joined_message/0`
- `t:left_message/0`
Note that these are NOT the only types of Messages, that can be used in applications.
Custom types can be defined with `t:t/1`.
"""
@derive Jason.Encoder
alias Membrane.WebRTC.Server.Peer
@enforce_keys [:event]
defstruct @enforce_keys ++ [:data, :from, :from_metadata, :to]
@type t :: t(Jason.Encoder.t() | nil)
@typedoc """
Type prepared for defining custom `Membrane.WebRTC.Server.Message`.
`d` MUST BE encodable by `Jason.Encoder`.
"""
@type t(d) :: %__MODULE__{
data: d,
event: String.t(),
from: Peer.peer_id() | nil,
          from_metadata: map() | nil,
to: [Peer.peer_id()] | String.t() | nil
}
@typedoc """
  Sent to the client after the peer successfully initializes and joins the room.
`:event` is set to `"authenticated"`.
## Data fields
- `:peer_id` - Identifier of the peer, that has joined the room.
"""
@type authenticated_message ::
%__MODULE__{
data: %{
peer_id: Peer.peer_id(),
},
event: String.t(),
from: nil,
            from_metadata: map() | nil,
to: [Peer.peer_id()]
}
@typedoc """
Error message.
`:event` is set to `"error"`.
## Data fields
- `:description`- Topic of error message.
- `:details` - Details of error.
## Descriptions used in server API
- `"Invalid message"`
Sent to client after JSON decoding error.
- `"Could not join room"`
  Sent to the client after `c:Membrane.WebRTC.Server.Room.on_join/2` returns `{:error, error}`.
  - `"Room closed"`
  Sent to the client after the room's process shuts down.
"""
@type error_message :: %__MODULE__{
data: %{
description: String.t(),
details: Jason.Encoder.t()
},
event: String.t(),
from: nil,
          from_metadata: map() | nil,
to: [Peer.peer_id()]
}
@typedoc """
Broadcasted by a room when a peer joins the room.
`:event` is set to `"joined"`.
`:to` is set to `"all"`.
## Data fields
- `:peer_id` - Identifier of the peer, that has joined the room.
"""
@type joined_message :: %__MODULE__{
data: %{peer_id: Peer.peer_id()},
event: String.t(),
from: nil,
          from_metadata: map() | nil,
to: String.t()
}
@typedoc """
Broadcasted by a room when a peer leaves the room.
`:event` is set to "left".
## Data fields:
- `:peer_id` - Identifier of the peer, that has left the room.
"""
@type left_message :: %__MODULE__{
data: %{peer_id: Peer.peer_id()},
event: String.t(),
from: nil,
          from_metadata: map() | nil,
to: String.t()
}
end
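# Hedged usage sketch (added; module name illustrative): constructing and
# JSON-encoding a custom message with the struct above. The peer ID is a
# placeholder.
defmodule MessageDemo do
  alias Membrane.WebRTC.Server.Message

  def ping(from_peer_id) do
    %Message{event: "ping", data: %{sent_at: DateTime.utc_now()}, from: from_peer_id, to: "all"}
    |> Jason.encode!()
  end
end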
|
lib/webrtc_server/message.ex
| 0.88232
| 0.465084
|
message.ex
|
starcoder
|
defmodule Roman.Validators.Numeral do
@moduledoc false
@valid_numerals ~w(M D C L X V I)
@type ok_numeral_or_error :: {:ok, Roman.numeral()} | Roman.error()
  @spec validate(Roman.numeral(), keyword) :: ok_numeral_or_error
def validate(numeral, opts \\ [strict: true]) when is_binary(numeral) do
with {:ok, numeral} <- only_valid_numerals(numeral),
{:strict, true, numeral} <- {:strict, opts[:strict], numeral},
{:ok, numeral} <- only_one_v_l_d(numeral),
{:ok, numeral} <- max_3_consecutive_repetitions(numeral) do
{:ok, numeral}
else
{:strict, _, numeral} -> {:ok, numeral}
{:error, _} = error -> error
end
end
@spec only_valid_numerals(Roman.numeral()) :: ok_numeral_or_error
defp only_valid_numerals(numeral) do
numeral
|> to_letters
|> Enum.reject(&Enum.member?(@valid_numerals, &1))
|> case do
[] ->
{:ok, numeral}
invalid_letters ->
pretty_numerals = Enum.join(@valid_numerals, ", ")
{:error,
{:invalid_letter,
"numeral contains invalid letter(s), " <>
"valid letters are #{pretty_numerals} but encountered " <>
Enum.join(invalid_letters, ", ")}}
end
end
@spec to_letters(Roman.numeral()) :: [Roman.numeral()]
defp to_letters(numeral), do: String.split(numeral, "", trim: true)
@spec only_one_v_l_d(Roman.numeral()) :: ok_numeral_or_error
defp only_one_v_l_d(numeral) do
numeral
|> to_letters
|> Enum.reduce(%{}, &update_letter_count/2)
|> Stream.filter(fn {_, v} -> v > 1 end)
|> Stream.map(fn {k, _} -> k end)
|> Enum.to_list()
|> case do
[] ->
{:ok, numeral}
keys ->
{:error,
{:repeated_vld,
"letters V, L, and D can appear only once, " <>
"but found several instances of #{Enum.join(keys, ", ")}"}}
end
end
  @spec update_letter_count(String.t(), map) :: map
defp update_letter_count(letter, count_map) when letter in ~w(V L D) do
count = Map.get(count_map, letter, 0) + 1
    # Cap the stored count; any value above 1 is already reported as an error
    # by only_one_v_l_d/1.
    if count <= 3 do
Map.put(count_map, letter, count)
else
count_map
end
end
defp update_letter_count(_, acc), do: acc
@spec max_3_consecutive_repetitions(Roman.numeral()) :: ok_numeral_or_error
defp max_3_consecutive_repetitions(numeral) do
numeral
|> to_letters
|> Stream.unfold(fn
[h | _] = letters ->
{same, rest} = Enum.split_while(letters, &(&1 == h))
{{h, Enum.count(same)}, rest}
[] ->
nil
end)
|> Stream.filter(fn {_, count} -> count > 3 end)
|> Stream.map(fn {l, _} -> l end)
|> Enum.to_list()
|> case do
[] ->
{:ok, numeral}
letters ->
{:error,
{:identical_letter_seq_too_long,
"a given letter cannot appear more than 3 times in a row: " <>
"encountered invalid sequences for #{Enum.join(letters, ", ")}"}}
end
end
end
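# Hedged usage sketch (added; module name illustrative): the validator in
# strict and lenient modes.
defmodule NumeralValidatorDemo do
  alias Roman.Validators.Numeral

  def examples do
    {:ok, "XIV"} = Numeral.validate("XIV")
    {:error, {:invalid_letter, _msg}} = Numeral.validate("XAV")
    {:error, {:repeated_vld, _msg}} = Numeral.validate("VV")
    # With strict: false only the letter check runs, so "VV" passes:
    {:ok, "VV"} = Numeral.validate("VV", strict: false)
    :ok
  end
end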
|
lib/roman/validators/numeral.ex
| 0.756987
| 0.495606
|
numeral.ex
|
starcoder
|
defmodule Hierbautberlin.GeoData.GeoItem do
use Ecto.Schema
import Ecto.Query, warn: false
import Ecto.Changeset
alias Hierbautberlin.Repo
alias Hierbautberlin.GeoData.{GeoItem, GeoPosition, GeoMapItem, Source}
@states [
"intended",
"in_preparation",
"in_planning",
"under_construction",
"active",
"finished",
nil
]
schema "geo_items" do
field :external_id, :string
field :title, :string
field :subtitle, :string
field :description, :string
field :url, :string
field :state, :string
field :date_start, :utc_datetime
field :date_end, :utc_datetime
field :date_updated, :utc_datetime
field :geo_point, Geo.PostGIS.Geometry
field :geometry, Geo.PostGIS.Geometry
field :participation_open, :boolean, default: false
field :additional_link, :string
field :additional_link_name, :string
field :hidden, :boolean, default: false
belongs_to :source, Source
timestamps(type: :utc_datetime)
end
def changeset(geo_item, attrs) do
geo_item
|> cast(attrs, [
:external_id,
:title,
:subtitle,
:description,
:url,
:state,
:date_start,
:date_end,
:date_updated,
:geo_point,
:geometry,
:source_id,
:participation_open,
:additional_link,
:additional_link_name,
:inserted_at,
:updated_at,
:hidden
])
|> validate_inclusion(:state, @states)
|> validate_required([:source_id, :external_id, :title])
|> unique_constraint([:source_id, :external_id])
end
  # Returns the item's date closest to now (despite the name, this is not
  # necessarily the latest date).
  def newest_date(geo_item) do
[
geo_item.date_start,
geo_item.date_end,
geo_item.date_updated
]
|> Enum.filter(&(!is_nil(&1)))
|> Enum.sort_by(&abs(Timex.diff(&1, Timex.now(), :days)))
|> List.first()
end
def get_near(lat, lng, count) do
geom = %Geo.Point{
coordinates: {lng, lat},
properties: %{},
srid: 4326
}
query =
from item in GeoItem,
limit: ^count,
where: item.hidden == false,
where:
fragment(
"(geometry is not null and ST_DWithin(geometry, ?, 0.05 )) or (geo_point is not null and ST_DWithin(geo_point, ?, 0.05 ))",
^geom,
^geom
),
order_by:
fragment(
"ST_Distance(COALESCE(geometry, geo_point), ?)",
^geom
)
query
|> Repo.all()
|> Repo.preload(:source)
|> Enum.map(fn item ->
%GeoMapItem{
type: :geo_item,
id: item.id,
title: item.title,
subtitle: item.subtitle,
description: item.description,
positions: [
%GeoPosition{
type: :geo_item,
id: item.id,
geopoint: item.geo_point,
geometry: item.geometry
}
],
newest_date: newest_date(item),
source: item.source,
url: item.url,
participation_open: item.participation_open,
item: item
}
end)
end
end
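# Hedged usage sketch (added; module name illustrative): building a changeset
# for a new item. The source_id is a placeholder for an existing Source row.
defmodule GeoItemDemo do
  alias Hierbautberlin.GeoData.GeoItem

  def new_changeset do
    GeoItem.changeset(%GeoItem{}, %{
      source_id: 1,
      external_id: "ext-0001",
      title: "Neubau Musterstrasse",
      state: "in_planning"
    })
  end
end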
|
lib/hierbautberlin/geo_data/geo_item.ex
| 0.51879
| 0.405596
|
geo_item.ex
|
starcoder
|
defmodule Gettext.Interpolation do
@moduledoc false
@type interpolatable :: [String.t() | atom]
@doc """
Extracts interpolations from a given string.
This function extracts all interpolations in the form `%{interpolation}`
contained inside `str`, converts them to atoms and then returns a list of
string and interpolation keys.
## Examples
iex> msgid = "Hello %{name}, you have %{count} unread messages"
iex> Gettext.Interpolation.to_interpolatable(msgid)
["Hello ", :name, ", you have ", :count, " unread messages"]
iex> Gettext.Interpolation.to_interpolatable("Empties %{} stay empty")
["Empties %{} stay empty"]
"""
@spec to_interpolatable(String.t()) :: interpolatable
def to_interpolatable(string) do
start_pattern = :binary.compile_pattern("%{")
end_pattern = :binary.compile_pattern("}")
string
|> to_interpolatable(_current = "", _acc = [], start_pattern, end_pattern)
|> Enum.reverse()
end
defp to_interpolatable(string, current, acc, start_pattern, end_pattern) do
case :binary.split(string, start_pattern) do
# If we have one element, no %{ was found so this is the final part of the
# string.
[rest] ->
prepend_if_not_empty(current <> rest, acc)
# If we found a %{ but it's followed by an immediate }, then we just
# append %{} to the current string and keep going.
[before, "}" <> rest] ->
new_current = current <> before <> "%{}"
to_interpolatable(rest, new_current, acc, start_pattern, end_pattern)
# Otherwise, we found the start of a binding.
[before, binding_and_rest] ->
case :binary.split(binding_and_rest, end_pattern) do
# If we don't find the end of this binding, it means we're at a string
# like "foo %{ no end". In this case we consider no bindings to be
# there.
[_] ->
[current <> string | acc]
# This is the case where we found a binding, so we put it in the acc
# and keep going.
[binding, rest] ->
new_acc = [String.to_atom(binding) | prepend_if_not_empty(before, acc)]
to_interpolatable(rest, "", new_acc, start_pattern, end_pattern)
end
end
end
defp prepend_if_not_empty("", list), do: list
defp prepend_if_not_empty(string, list), do: [string | list]
@doc """
Interpolate an interpolatable with the given bindings.
This function takes an interpolatable list (like the ones returned by
`to_interpolatable/1`) and some bindings and returns an `{:ok,
interpolated_string}` tuple if interpolation is successful. If it encounters
an atom in `interpolatable` that is missing from `bindings`, it returns
`{:missing_bindings, incomplete_string, missing_bindings}` where
`incomplete_string` is the string with only the present bindings interpolated
and `missing_bindings` is a list of atoms representing bindings that are in
`interpolatable` but not in `bindings`.
## Examples
iex> msgid = "Hello %{name}, you have %{count} unread messages"
iex> interpolatable = Gettext.Interpolation.to_interpolatable(msgid)
iex> good_bindings = %{name: "José", count: 3}
iex> Gettext.Interpolation.interpolate(interpolatable, good_bindings)
{:ok, "Hello José, you have 3 unread messages"}
iex> Gettext.Interpolation.interpolate(interpolatable, %{name: "José"})
{:missing_bindings, "Hello José, you have %{count} unread messages", [:count]}
"""
@spec interpolate(interpolatable, map) ::
{:ok, String.t()} | {:missing_bindings, String.t(), [atom]}
def interpolate(interpolatable, bindings)
when is_list(interpolatable) and is_map(bindings) do
interpolate(interpolatable, bindings, [], [])
end
defp interpolate([string | segments], bindings, strings, missing) when is_binary(string) do
interpolate(segments, bindings, [string | strings], missing)
end
defp interpolate([atom | segments], bindings, strings, missing) when is_atom(atom) do
case bindings do
%{^atom => value} ->
interpolate(segments, bindings, [to_string(value) | strings], missing)
%{} ->
strings = ["%{" <> Atom.to_string(atom) <> "}" | strings]
interpolate(segments, bindings, strings, [atom | missing])
end
end
defp interpolate([], _bindings, strings, []) do
{:ok, IO.iodata_to_binary(Enum.reverse(strings))}
end
defp interpolate([], _bindings, strings, missing) do
missing = missing |> Enum.reverse() |> Enum.uniq()
{:missing_bindings, IO.iodata_to_binary(Enum.reverse(strings)), missing}
end
@doc """
Returns all the interpolation keys contained in the given string or list of
segments.
This function returns a list of all the interpolation keys (patterns in the
form `%{interpolation}`) contained in its argument.
If the argument is a segment list, that is, a list of strings and atoms where
atoms represent interpolation keys, then only the atoms in the list are
returned.
## Examples
iex> Gettext.Interpolation.keys("Hey %{name}, I'm %{other_name}")
[:name, :other_name]
iex> Gettext.Interpolation.keys(["Hello ", :name, "!"])
[:name]
iex> Gettext.Interpolation.keys(["Hello ", :name, "! Goodbye", :name])
[:name]
"""
@spec keys(String.t() | interpolatable) :: [atom]
def keys(string_or_interpolatable)
def keys(string) when is_binary(string), do: string |> to_interpolatable() |> keys()
def keys(interpolatable) when is_list(interpolatable),
do: interpolatable |> Enum.filter(&is_atom/1) |> Enum.uniq()
end
|
lib/gettext/interpolation.ex
| 0.889577
| 0.49109
|
interpolation.ex
|
starcoder
|
defmodule SimpleSchema.Schema do
@moduledoc """
A module to convert a simple schema to JSON Schema
Basic:
```
iex> schema = %{name: :string,
...> value: {:integer, optional: true},
...> array: [:string],
...> map: {%{x: :integer, y: :integer}, optional: true},
...> param: {:any, optional: true}}
iex> SimpleSchema.Schema.to_json_schema(schema)
%{
"type" => "object",
"required" => ["array", "name"],
"additionalProperties" => false,
"properties" => %{
"name" => %{"type" => "string"},
"value" => %{"type" => "integer"},
"array" => %{
"type" => "array",
"items" => %{"type" => "string"},
},
"map" => %{
"type" => "object",
"required" => ["x", "y"],
"additionalProperties" => false,
"properties" => %{
"x" => %{"type" => "integer"},
"y" => %{"type" => "integer"},
},
},
"param" => %{
"type" => ["array", "boolean", "integer", "null", "number", "object", "string"],
},
},
}
```
With restrictions:
```
iex> schema = %{name: {:string, min_length: 8},
...> value: {:integer, optional: true, nullable: true, maximum: 10},
...> array: {[{:string, enum: ["aaa", "bbb"]}], min_items: 1}}
iex> SimpleSchema.Schema.to_json_schema(schema)
%{
"type" => "object",
"required" => ["array", "name"],
"additionalProperties" => false,
"properties" => %{
"name" => %{
"type" => "string",
"minLength" => 8,
},
"value" => %{
"type" => ["integer", "null"],
"maximum" => 10,
},
"array" => %{
"type" => "array",
"minItems" => 1,
"items" => %{
"type" => "string",
"enum" => [
"aaa",
"bbb",
],
}
},
},
}
```
"""
@type opts :: Keyword.t()
@type boolean_type :: :boolean | {:boolean, opts}
@type integer_type :: :integer | {:integer, opts}
@type number_type :: :number | {:number, opts}
@type null_type :: :null | {:null, opts}
@type string_type :: :string | {:string, opts}
@type map_type ::
%{required(atom) => simple_schema} | {%{required(atom) => simple_schema}, opts}
@type array_type :: nonempty_list(simple_schema) | {nonempty_list(simple_schema), opts}
@type any_type :: :any | {:any, opts}
@type module_type :: module | {module, opts}
@type simple_schema ::
boolean_type
| integer_type
| number_type
| null_type
| string_type
| map_type
| array_type
| any_type
| module_type
@primitive_types [:boolean, :integer, :number, :null, :string, :any]
defp raise_if_unexpected_opts(opts) do
{_, opts} = Keyword.pop(opts, :field)
{_, opts} = Keyword.pop(opts, :meta)
do_raise_if_unexpected_opts(opts)
end
  defp do_raise_if_unexpected_opts([]), do: :ok
  # Raise with a clear message on any option the schema type does not understand.
  defp do_raise_if_unexpected_opts(opts), do: raise("unexpected opts: #{inspect(opts)}")
defp to_types(type, nullable)
defp to_types(type, true), do: [type, "null"]
defp to_types(type, false), do: type
defp add_if_not_undefined(xs, _key, :undefined), do: xs
defp add_if_not_undefined(xs, key, value), do: [{key, value} | xs]
defp add_enum_if_not_undefined(xs, :undefined), do: xs
defp add_enum_if_not_undefined(xs, [_ | _] = enum), do: [{"enum", enum} | xs]
defp add_format_if_not_undefined(xs, :undefined), do: xs
defp add_format_if_not_undefined(xs, :datetime), do: [{"format", "date-time"} | xs]
defp add_format_if_not_undefined(xs, :email), do: [{"format", "email"} | xs]
defp pop_optional({type, opts}) do
{optional, opts} = Keyword.pop(opts, :optional, false)
{optional, {type, opts}}
end
defp pop_optional(type) do
{false, type}
end
@undefined_default :simple_schema_default_undefined
defp pop_default({type, opts}) do
{optional, opts} = Keyword.pop(opts, :default, @undefined_default)
{optional, {type, opts}}
end
defp pop_default(type) do
{@undefined_default, type}
end
defp get_field(key, {_, opts}) do
case Keyword.fetch(opts, :field) do
:error -> Atom.to_string(key)
{:ok, value} -> value
end
end
defp get_field(key, _) do
Atom.to_string(key)
end
def simple_schema_implemented?(schema) when is_atom(schema) do
Code.ensure_loaded(schema)
schema? = function_exported?(schema, :schema, 1)
from_json? = function_exported?(schema, :from_json, 3)
to_json? = function_exported?(schema, :to_json, 3)
schema? and from_json? and to_json?
end
def simple_schema_implemented?(_) do
false
end
def to_json_schema(schema, global_opts \\ [])
def to_json_schema(:boolean, global_opts), do: to_json_schema({:boolean, []}, global_opts)
def to_json_schema({:boolean, opts}, _global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
raise_if_unexpected_opts(opts)
types = to_types("boolean", nullable)
%{"type" => types}
end
def to_json_schema(:integer, global_opts), do: to_json_schema({:integer, []}, global_opts)
def to_json_schema({:integer, opts}, _global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
{maximum, opts} = Keyword.pop(opts, :maximum, :undefined)
{minimum, opts} = Keyword.pop(opts, :minimum, :undefined)
{enum, opts} = Keyword.pop(opts, :enum, :undefined)
raise_if_unexpected_opts(opts)
types = to_types("integer", nullable)
xs = [{"type", types}]
xs = add_if_not_undefined(xs, "maximum", maximum)
xs = add_if_not_undefined(xs, "minimum", minimum)
xs = add_enum_if_not_undefined(xs, enum)
Enum.into(xs, %{})
end
def to_json_schema(:number, global_opts), do: to_json_schema({:number, []}, global_opts)
def to_json_schema({:number, opts}, _global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
{maximum, opts} = Keyword.pop(opts, :maximum, :undefined)
{minimum, opts} = Keyword.pop(opts, :minimum, :undefined)
raise_if_unexpected_opts(opts)
types = to_types("number", nullable)
xs = [{"type", types}]
xs = add_if_not_undefined(xs, "maximum", maximum)
xs = add_if_not_undefined(xs, "minimum", minimum)
Enum.into(xs, %{})
end
def to_json_schema(:null, global_opts), do: to_json_schema({:null, []}, global_opts)
def to_json_schema({:null, []}, _global_opts) do
%{"type" => "null"}
end
def to_json_schema(:string, global_opts), do: to_json_schema({:string, []}, global_opts)
def to_json_schema({:string, opts}, _global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
{max_length, opts} = Keyword.pop(opts, :max_length, :undefined)
{min_length, opts} = Keyword.pop(opts, :min_length, :undefined)
{enum, opts} = Keyword.pop(opts, :enum, :undefined)
{format, opts} = Keyword.pop(opts, :format, :undefined)
raise_if_unexpected_opts(opts)
types = to_types("string", nullable)
xs = [{"type", types}]
xs = add_if_not_undefined(xs, "maxLength", max_length)
xs = add_if_not_undefined(xs, "minLength", min_length)
xs = add_enum_if_not_undefined(xs, enum)
xs = add_format_if_not_undefined(xs, format)
Enum.into(xs, %{})
end
def to_json_schema(:any, global_opts), do: to_json_schema({:any, []}, global_opts)
def to_json_schema({:any, opts}, _global_opts) do
raise_if_unexpected_opts(opts)
# permit any types
%{"type" => ["array", "boolean", "integer", "null", "number", "object", "string"]}
end
def to_json_schema(%{} = schema, global_opts), do: to_json_schema({schema, []}, global_opts)
def to_json_schema({%{} = schema, opts}, global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
{tolerance, opts} = Keyword.pop(opts, :tolerant, false)
raise_if_unexpected_opts(opts)
properties =
for {key, type} <- schema, into: %{} do
{_optional, type} = pop_optional(type)
{_default, type} = pop_default(type)
key = get_field(key, type)
{key, to_json_schema(type, global_opts)}
end
required =
schema
|> Enum.filter(fn {_key, type} ->
{optional, _type} = pop_optional(type)
{default, _type} = pop_default(type)
not optional && default == @undefined_default
end)
|> Enum.map(fn {key, type} ->
get_field(key, type)
end)
|> Enum.sort()
types = to_types("object", nullable)
xs = [
{"type", types},
{"additionalProperties", tolerance},
{"properties", properties}
]
xs =
case required do
[] -> xs
[_ | _] -> [{"required", required} | xs]
end
Enum.into(xs, %{})
end
def to_json_schema([_type] = array, global_opts), do: to_json_schema({array, []}, global_opts)
def to_json_schema({[type], opts}, global_opts) do
{nullable, opts} = Keyword.pop(opts, :nullable, false)
{max_items, opts} = Keyword.pop(opts, :max_items, :undefined)
{min_items, opts} = Keyword.pop(opts, :min_items, :undefined)
{unique_items, opts} = Keyword.pop(opts, :unique_items, :undefined)
raise_if_unexpected_opts(opts)
types = to_types("array", nullable)
xs = [{"type", types}, {"items", to_json_schema(type, global_opts)}]
xs = add_if_not_undefined(xs, "maxItems", max_items)
xs = add_if_not_undefined(xs, "minItems", min_items)
xs = add_if_not_undefined(xs, "uniqueItems", unique_items)
Enum.into(xs, %{})
end
def to_json_schema({schema, opts}, global_opts) when is_atom(schema) do
if not simple_schema_implemented?(schema) do
      raise "#{inspect(schema)} does not implement the SimpleSchema behaviour (schema/1, from_json/3 and to_json/3)"
end
case Keyword.fetch(global_opts, :struct_converter) do
{:ok, struct_converter} ->
struct_converter.(schema, opts)
:error ->
{schema2, opts2} =
case schema.schema(opts) do
{schema2, opts2} -> {schema2, opts2}
schema2 -> {schema2, []}
end
to_json_schema({schema2, opts2}, global_opts)
end
end
def to_json_schema(schema, global_opts) when is_atom(schema), do: to_json_schema({schema, []}, global_opts)
defp split_opts({schema, opts}), do: {schema, opts}
defp split_opts(schema), do: {schema, []}
defp reduce_results(results, opts \\ []) do
result =
results
|> Enum.reduce({:ok, []}, fn
{:ok, value}, {:ok, values} -> {:ok, [value | values]}
{:error, error}, {:ok, _} -> {:error, [error]}
{:ok, _}, {:error, errors} -> {:error, errors}
{:error, error}, {:error, errors} -> {:error, [error | errors]}
end)
reverse = Keyword.get(opts, :reverse, false)
if reverse do
case result do
{:ok, values} -> {:ok, Enum.reverse(values)}
{:error, errors} -> {:error, Enum.reverse(errors)}
end
else
result
end
end
@doc """
Convert validated JSON to a simple schema value.
  If validation has passed, every JSON key (except under the `:any` type) corresponds to a known atom,
  so each key can be converted with `String.to_existing_atom/1`.
iex> schema = %{foo: %{bar: :integer}}
iex> SimpleSchema.Schema.from_json(schema, %{"foo" => %{"bar" => 10}})
{:ok, %{foo: %{bar: 10}}}
  However, `:any` can contain arbitrary keys, so values of `:any` are not converted.
iex> schema = %{foo: :any}
iex> SimpleSchema.Schema.from_json(schema, %{"foo" => %{"bar" => 10}})
{:ok, %{foo: %{"bar" => 10}}}
"""
def from_json(schema, value) do
{schema, opts} = split_opts(schema)
do_from_json(schema, value, opts)
end
defp do_from_json(%{} = schema, map, opts) do
lookup_field =
schema
|> Enum.map(fn {atom_key, schema} ->
{_, opts} = split_opts(schema)
field = Keyword.get(opts, :field, Atom.to_string(atom_key))
{field, atom_key}
end)
|> Enum.into(%{})
# drop unknown lookup_field keys from map if :tolerant opts is `true`
map =
if Keyword.get(opts, :tolerant, false) do
map |> Map.take(lookup_field |> Map.keys())
else
map
end
# Use default value if :default opts is specified
default_value =
schema
|> Enum.reduce([], fn {key, schema}, results ->
{_, opts} = split_opts(schema)
case Keyword.fetch(opts, :default) do
{:ok, default} -> [{key, default} | results]
:error -> results
end
end)
|> Enum.into(%{})
result =
map
|> Enum.map(fn {key, value} ->
case Map.fetch(lookup_field, key) do
:error ->
{:error, {:key_not_found, key, lookup_field}}
{:ok, atom_key} ->
schema = Map.fetch!(schema, atom_key)
{_default, schema} = pop_default(schema)
case from_json(schema, value) do
{:ok, result} -> {:ok, {atom_key, result}}
{:error, reason} -> {:error, reason}
end
end
end)
|> reduce_results()
case result do
{:ok, results} -> {:ok, Enum.into(results, default_value)}
{:error, errors} -> {:error, errors}
end
end
defp do_from_json([element_schema], array, _opts) do
array
|> Enum.map(fn value ->
from_json(element_schema, value)
end)
|> reduce_results(reverse: true)
end
defp do_from_json(schema, value, _opts) when is_atom(schema) and schema in @primitive_types do
{:ok, value}
end
defp do_from_json(schema, value, opts) when is_atom(schema) do
if not simple_schema_implemented?(schema) do
raise "#{schema} is not implemented SimpleSchema behaviour."
end
schema.from_json(schema.schema(opts), value, opts)
end
@doc """
Convert a simple schema value to JSON value.
"""
def to_json(schema, value) do
{schema, opts} = split_opts(schema)
do_to_json(schema, value, opts)
end
defp do_to_json(%{} = schema, map, _opts) do
result =
schema
|> Enum.map(fn {key, type} ->
case Map.fetch(map, key) do
:error ->
# get default value if :default opts is specified
{default, type} = pop_default(type)
if default == @undefined_default do
{:error, {:key_not_found, key}}
else
case to_json(type, default) do
{:error, reason} ->
{:error, reason}
{:ok, json} ->
key = get_field(key, type)
{:ok, {key, json}}
end
end
{:ok, value} ->
{_default, type} = pop_default(type)
case to_json(type, value) do
{:error, reason} ->
{:error, reason}
{:ok, json} ->
key = get_field(key, type)
{:ok, {key, json}}
end
end
end)
|> reduce_results()
case result do
{:ok, results} -> {:ok, Enum.into(results, %{})}
{:error, errors} -> {:error, errors}
end
end
defp do_to_json([element_schema], array, _opts) do
array
|> Enum.map(fn value ->
to_json(element_schema, value)
end)
|> reduce_results(reverse: true)
end
defp do_to_json(schema, value, _opts) when is_atom(schema) and schema in @primitive_types do
{:ok, value}
end
defp do_to_json(schema, value, opts) when is_atom(schema) do
if not simple_schema_implemented?(schema) do
raise "#{schema} is not implemented SimpleSchema behaviour."
end
schema.to_json(schema.schema(opts), value, opts)
end
end
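# Hedged sketch (added; module name illustrative): a module-based simple
# schema exporting the three functions simple_schema_implemented?/1 checks for
# (schema/1, from_json/3, to_json/3). The full behaviour contract lives
# elsewhere in the library; this follows only what the code above requires.
defmodule DateTimeSchemaDemo do
  def schema(_opts), do: {:string, format: :datetime}

  def from_json(_schema, value, _opts) do
    case DateTime.from_iso8601(value) do
      {:ok, datetime, _utc_offset} -> {:ok, datetime}
      {:error, reason} -> {:error, reason}
    end
  end

  def to_json(_schema, %DateTime{} = datetime, _opts), do: {:ok, DateTime.to_iso8601(datetime)}
end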
|
lib/simple_schema/schema.ex
| 0.802788
| 0.765593
|
schema.ex
|
starcoder
|
defmodule Apoc.Hazmat.AEAD.AESGCM do
@moduledoc """
Implementation of the AES block encryption
standard as per [FIPS PUB 197](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf).
The functions in this module operate in GCM (Galois/Counter Mode) to provide
fast Authenticated Encryption.
See [Recommendation for Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC](https://csrc.nist.gov/publications/detail/sp/800-38d/final).
  Additionally, three key sizes are supported (128, 192 and 256 bits; the AES block size is always 128 bits).
  For those particularly paranoid users, a 256 bit key is recommended as a defense against [Shor's algorithm](https://arxiv.org/abs/quant-ph/9508027).
  Use a 32 byte key for a 256 bit key size. See `encrypt/2`.
"""
  @type aes_key() :: <<_::128>> | <<_::192>> | <<_::256>>
@iv_byte_size 16
defguardp is_key_of_size(key, size) when is_binary(key) and byte_size(key) == size
defguardp is_valid_aad(aad) when aad in ["AES128GCM", "AES192GCM", "AES256GCM"]
@doc """
Encrypt a message using AES under the given key
The key should be a 16, 24 or 32 byte binary string
## Example
```elixir
Apoc.AES.encrypt("a secret message", Apoc.rand_bytes(16))
```
It's important that the key be as uniformly random as possible.
Consequently, avoid the temptation to do this:
```elixir
# Don't do this
k = Apoc.rand_bytes(16) |> Base.encode16
byte_size(k) # => 32
Apoc.AES.encrypt(message, k)
```
  As the byte size of the encoded key in this example is 32 bytes,
  the 256 bit key size will be used. However, this is not a uniformly
  random key among all 32-byte binaries. Specifically, the probability of the key
  containing a byte other than [0-9a-f] is zero.
  To avoid this issue, don't use ASCII (e.g. hex or base 64 encoded strings)
as the key. By all means, encode the key for storage purposes but make sure
your key has been generated with the correct number of bytes.
```elixir
k = Apoc.rand_bytes(32)
Apoc.AES.encrypt(message, k)
Apoc.encode(k) # => base 64 encoded for storage somewhere safe
```
"""
# TODO: Optional additional AD
@spec encrypt(String.t(), aes_key()) :: {:ok, binary()} | {:error, binary()}
def encrypt(msg, key) when is_binary(msg) and is_key_of_size(key, 16) do
do_encrypt(msg, "AES128GCM", key)
end
def encrypt(msg, key) when is_binary(msg) and is_key_of_size(key, 24) do
do_encrypt(msg, "AES192GCM", key)
end
def encrypt(msg, key) when is_binary(msg) and is_key_of_size(key, 32) do
do_encrypt(msg, "AES256GCM", key)
end
def encrypt(x, _) when not is_binary(x) do
{:error, "Message must be a binary"}
end
def encrypt(_, _) do
{:error, "Invalid key size"}
end
def encrypt!(msg, key) do
with {:ok, ct} <- encrypt(msg, key) do
ct
else
{:error, message} ->
raise Apoc.Error, message: message
end
end
@doc """
Decrypt a cipher text that has been encrypted under the given key.
## Example
```elixir
{:ok, plaintext} = Apoc.AES.decrypt(ciphertext, key)
```
"""
@spec decrypt(String.t(), aes_key) :: {:ok, binary} | {:error, String.t()}
def decrypt(payload, key) do
with {:ok, <<aad::binary-9, iv::binary-16, tag::binary-16, ct::binary>>} <-
Apoc.decode(payload) do
do_decrypt(ct, aad, iv, tag, key)
end
end
  defp do_encrypt(msg, aad, key) do
    # NOTE: :crypto.block_encrypt/4 and :crypto.block_decrypt/4 were removed in
    # OTP 24; on newer OTP releases use :crypto.crypto_one_time_aead/6 (encrypt)
    # and /7 (decrypt) instead.
    iv = Apoc.rand_bytes(@iv_byte_size)
try do
with {ct, tag} <- :crypto.block_encrypt(:aes_gcm, key, iv, {aad, msg}) do
{:ok, Apoc.encode(aad <> iv <> tag <> ct)}
end
rescue
err in ArgumentError ->
{:error, err.message}
err ->
{:error, inspect(err)}
end
end
defp do_decrypt(ct, aad, iv, tag, key) when is_valid_aad(aad) do
:aes_gcm
|> :crypto.block_decrypt(key, iv, {aad, ct, tag})
|> case do
plain_text when is_binary(plain_text) ->
{:ok, plain_text}
_ ->
:error
end
end
end
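# Hedged roundtrip sketch (added; module name illustrative): encrypt then
# decrypt under a random 256 bit key, as the moduledoc recommends. Assumes an
# OTP release where :crypto.block_encrypt/4 is still available.
defmodule AesGcmDemo do
  alias Apoc.Hazmat.AEAD.AESGCM

  def roundtrip(message) when is_binary(message) do
    key = Apoc.rand_bytes(32)
    {:ok, ciphertext} = AESGCM.encrypt(message, key)
    {:ok, ^message} = AESGCM.decrypt(ciphertext, key)
    :ok
  end
end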
|
lib/apoc/hazmat/aead/aes-gcm.ex
| 0.891493
| 0.924756
|
aes-gcm.ex
|
starcoder
|
defmodule Spandex.Ecto.Trace do
@moduledoc """
A trace builder that can be given to ecto as a logger. It will try to get
the trace_id and span_id from the caller pid in the case that the particular
query is being run asynchronously (as in the case of parallel preloads).
To configure, set it up as an ecto logger like so:
config :my_app, MyApp.Repo,
loggers: [{Ecto.LogEntry, :log, [:info]}, {Spandex.Ecto.Trace, :trace, []}]
"""
defmodule Error do
defexception [:message]
end
def trace(log_entry) do
unless Spandex.disabled?() do
now = Spandex.Datadog.Utils.now()
_ = setup(log_entry)
query = string_query(log_entry)
num_rows = num_rows(log_entry)
queue_time = get_time(log_entry, :queue_time)
query_time = get_time(log_entry, :query_time)
decoding_time = get_time(log_entry, :decode_time)
start = now - (queue_time + query_time + decoding_time)
Spandex.update_span(
%{
start: start,
completion_time: now,
service: :ecto,
resource: query,
type: :db,
meta: %{"sql.query" => query, "sql.rows" => inspect(num_rows)}
}
)
_ = report_error(log_entry)
if queue_time != 0 do
_ = Spandex.start_span("queue")
_ = Spandex.update_span(%{start: start, completion_time: start + queue_time})
_ = Spandex.finish_span()
end
if query_time != 0 do
_ = Spandex.start_span("run_query")
_ = Spandex.update_span(%{start: start + queue_time, completion_time: start + queue_time + query_time})
_ = Spandex.finish_span()
end
if decoding_time != 0 do
_ = Spandex.start_span("decode")
_ = Spandex.update_span(%{start: start + queue_time + query_time, completion_time: now})
_ = Spandex.finish_span()
end
finish_ecto_trace(log_entry)
end
log_entry
end
defp finish_ecto_trace(%{caller_pid: caller_pid}) do
if caller_pid != self() do
Spandex.finish_trace()
else
Spandex.finish_span()
end
end
defp finish_ecto_trace(_), do: :ok
defp setup(%{caller_pid: caller_pid}) when is_pid(caller_pid) do
if caller_pid == self() do
Logger.metadata(trace_id: Spandex.current_trace_id(), span_id: Spandex.current_span_id())
Spandex.start_span("query")
else
trace = Process.info(caller_pid)[:dictionary][:spandex_trace]
if trace do
trace_id = trace.id
span_id =
trace
|> Map.get(:stack)
|> Enum.at(0, %{})
|> Map.get(:id)
Logger.metadata(trace_id: trace_id, span_id: span_id)
Spandex.continue_trace("query", trace_id, span_id)
else
Spandex.start_trace("query")
end
end
end
defp setup(_) do
:ok
end
defp report_error(%{result: {:ok, _}}), do: :ok
defp report_error(%{result: {:error, error}}) do
Spandex.span_error(%Error{message: inspect(error)})
end
defp string_query(%{query: query}) when is_function(query), do: Macro.unescape_string(query.() || "")
defp string_query(%{query: query}) when is_bitstring(query), do: Macro.unescape_string(query)
defp string_query(_), do: ""
defp num_rows(%{result: {:ok, %{num_rows: num_rows}}}), do: num_rows
defp num_rows(_), do: 0
def get_time(log_entry, key) do
value = Map.get(log_entry, key)
if is_integer(value) do
to_nanoseconds(value)
else
0
end
end
  defp to_nanoseconds(time), do: System.convert_time_unit(time, :native, :nanosecond)
end
|
lib/ecto/trace.ex
| 0.57332
| 0.433082
|
trace.ex
|
starcoder
|
defmodule Day17 do
  @moduledoc """
  Solves the "Conway Cubes" puzzle (Advent of Code, day 17): an n-dimensional
  cellular automaton whose dimensionality is set by `@dimensions`.
  """
@dimensions 4
def part1 do
active_set = load_initial("input.txt")
step6 = 0..5 |> Enum.reduce(active_set, fn _iter, current_set -> step(current_set) end)
IO.puts(Enum.count(step6))
end
def load_initial(filename) do
File.stream!(filename)
|> Stream.map(&String.trim/1)
|> Stream.map(&String.graphemes/1)
|> Stream.with_index()
|> Enum.reduce(MapSet.new(), fn {row, y}, set ->
row
|> Enum.with_index()
|> Enum.reduce(set, fn
{"#", x}, set ->
MapSet.put(
set,
Tuple.duplicate(0, @dimensions - 2) |> Tuple.insert_at(0, x) |> Tuple.insert_at(1, y)
)
{_, _}, set ->
set
end)
end)
end
def universe_ranges(set) do
0..(@dimensions - 1)
|> Enum.map(fn dimension ->
slice = set |> Enum.map(fn ranges -> elem(ranges, dimension) end)
(Enum.min(slice) - 1)..(Enum.max(slice) + 1)
end)
|> List.to_tuple()
end
def world_ranges(set) do
0..(@dimensions - 1)
|> Enum.map(fn dimension ->
slice = set |> Enum.map(fn ranges -> elem(ranges, dimension) end)
Enum.min(slice)..Enum.max(slice)
end)
|> List.to_tuple()
end
def universe_list(ranges) do
case tuple_size(ranges) do
1 ->
elem(ranges, 0) |> Enum.to_list()
_ ->
for current <- universe_list({elem(ranges, 0)}),
rest <- universe_list(Tuple.delete_at(ranges, 0)) do
[current, rest] |> List.flatten()
end
end
end
def universe(set) do
universe_list(universe_ranges(set))
|> Enum.map(&List.to_tuple/1)
|> MapSet.new()
end
def neighborhood_list(loc) do
case tuple_size(loc) do
1 ->
[elem(loc, 0) - 1, elem(loc, 0), elem(loc, 0) + 1]
_ ->
for current <- neighborhood_list({elem(loc, 0)}),
rest <- neighborhood_list(Tuple.delete_at(loc, 0)) do
[current, rest] |> List.flatten()
end
end
end
def neighborhood(loc) do
neighborhood_list(loc)
|> Enum.map(&List.to_tuple/1)
|> Enum.reject(fn neighbor -> neighbor == loc end)
|> MapSet.new()
end
def neighbors(set, loc) do
neighborhood(loc)
|> Enum.map(fn point -> MapSet.member?(set, point) end)
|> Enum.filter(& &1)
|> Enum.count()
end
def step(set) do
set
|> universe()
|> Enum.filter(fn loc ->
case {MapSet.member?(set, loc), neighbors(set, loc)} do
{true, 2} -> true
{true, 3} -> true
{true, _} -> false
{false, 3} -> true
_ -> false
end
end)
|> MapSet.new()
end
  # Debug helper: prints z-slices and assumes 3-element tuples, so it only
  # works when @dimensions is 3.
  def print(set) do
ranges = world_ranges(set)
for z <- elem(ranges, 2) do
IO.puts("z = #{z}")
for y <- elem(ranges, 1) do
for x <- elem(ranges, 0) do
if MapSet.member?(set, {x, y, z}) do
IO.write("#")
else
IO.write(".")
end
end
IO.puts("")
end
end
end
end
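# Hedged sketch (added; module name illustrative): seed a small pattern
# directly, bypassing the input file, and advance it one generation with
# step/1. Tuples must have @dimensions coordinates (4 here).
defmodule Day17Demo do
  def one_step do
    seed = MapSet.new([{0, 0, 0, 0}, {1, 0, 0, 0}, {2, 0, 0, 0}])
    Day17.step(seed)
  end
end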
|
day17/lib/day17.ex
| 0.616359
| 0.494263
|
day17.ex
|
starcoder
|
defmodule Etl.Pipeline do
defmodule Step do
@type t :: %__MODULE__{
child_spec: Supervisor.child_spec(),
opts: keyword(),
dispatcher: {module(), keyword()}
}
defstruct [:child_spec, :opts, :dispatcher]
end
@type t :: %__MODULE__{
context: Etl.Context.t(),
steps: list()
}
defstruct context: nil, steps: []
@partition_dispatcher GenStage.PartitionDispatcher
@broadcast_dispatcher GenStage.BroadcastDispatcher
def new(opts \\ []) do
%__MODULE__{
context: %Etl.Context{
min_demand: Keyword.get(opts, :min_demand, 500),
max_demand: Keyword.get(opts, :max_demand, 1000),
dynamic_supervisor: Keyword.get(opts, :dynamic_supervisor, Etl.DynamicSupervisor)
}
}
end
def add_stage(pipeline, stage, opts) do
Map.update!(pipeline, :steps, fn steps ->
step = %Step{child_spec: to_child_spec(stage, pipeline), opts: opts}
[step | steps]
end)
end
def add_batch(pipeline, opts) do
Map.update!(pipeline, :steps, fn steps ->
batcher = {Etl.Stage.Batcher, opts} |> to_child_spec(pipeline)
step = %Step{child_spec: batcher, opts: opts}
[step | steps]
end)
end
  # Appends `fun` to the head step when it is already a functions stage,
  # otherwise adds a new Etl.Functions.Stage. Requires at least one existing
  # step in the pipeline (the match below fails on an empty pipeline).
  def add_function(%{steps: [head | tail]} = pipeline, fun) do
case head.child_spec do
%{start: {Etl.Functions.Stage, _, [opts]}} ->
opts =
Keyword.update!(opts, :functions, fn funs ->
funs ++ [fun]
end)
new_child_spec = {Etl.Functions.Stage, opts} |> to_child_spec(pipeline)
new_step = %{head | child_spec: new_child_spec}
%{pipeline | steps: [new_step | tail]}
_ ->
stage = {Etl.Functions.Stage, context: pipeline.context, functions: [fun]}
add_stage(pipeline, stage, [])
end
end
def set_partitions(%{steps: [step | rest]} = pipeline, dispatcher_opts) do
step = %{step | dispatcher: {@partition_dispatcher, dispatcher_opts}}
%{pipeline | steps: [step | rest]}
end
def set_broadcast(%{steps: [step | rest]} = pipeline, broadcast_opts) do
step = %{step | dispatcher: {@broadcast_dispatcher, broadcast_opts}}
%{pipeline | steps: [step | rest]}
end
def steps(pipeline) do
Enum.reverse(pipeline.steps)
end
defp to_child_spec(stage, pipeline) do
Etl.Stage.spec(stage, pipeline.context)
|> Supervisor.child_spec([])
end
end
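# Hedged sketch (added; module name illustrative): assembling a pipeline
# description. `source_stage` must be something Etl.Stage.spec/2 (defined
# elsewhere in the library) understands, e.g. a GenStage producer spec;
# add_function/2 needs at least one existing step, which the source provides.
defmodule EtlPipelineDemo do
  def build(source_stage) do
    Etl.Pipeline.new(max_demand: 100)
    |> Etl.Pipeline.add_stage(source_stage, [])
    |> Etl.Pipeline.add_function(&String.upcase/1)
    |> Etl.Pipeline.set_broadcast([])
    |> Etl.Pipeline.steps()
  end
end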
|
lib/etl/pipeline.ex
| 0.718496
| 0.436022
|
pipeline.ex
|
starcoder
|
defmodule Zstream.Unzip do
@moduledoc false
alias Zstream.Entry
alias Zstream.Unzip.Extra
defmodule Error do
defexception [:message]
end
use Bitwise
defmodule LocalHeader do
@moduledoc false
defstruct [
:version_need_to_extract,
:general_purpose_bit_flag,
:compression_method,
:last_modified_file_time,
:last_modified_file_date,
:crc32,
:uncompressed_size,
:compressed_size,
:file_name_length,
:extra_field_length,
:file_name,
:extra_field,
:extras
]
end
defmodule State do
@moduledoc false
defstruct next: :local_file_header,
buffer: "",
local_header: nil,
data_sent: 0,
decoder: nil,
decoder_state: nil
end
defmodule VerifierState do
@moduledoc false
defstruct local_header: nil,
crc32: 0,
uncompressed_size: 0
end
def unzip(stream, _options \\ []) do
Stream.concat([stream, [:eof]])
|> Stream.transform(%State{}, &execute_state_machine/2)
|> Stream.transform(%VerifierState{}, &verify/2)
end
defp verify({:local_header, local_header}, state) do
entry = %Entry{
name: local_header.file_name,
compressed_size: local_header.compressed_size,
size: local_header.uncompressed_size,
mtime: dos_time(local_header.last_modified_file_date, local_header.last_modified_file_time),
extras: local_header.extras
}
{[{:entry, entry}], %{state | local_header: local_header}}
end
defp verify({:data, :eof}, state) do
unless state.crc32 == state.local_header.crc32 do
raise Error, "Invalid crc32, expected: #{state.local_header.crc32}, actual: #{state.crc32}"
end
unless state.uncompressed_size == state.local_header.uncompressed_size do
raise Error,
"Invalid size, expected: #{state.local_header.uncompressed_size}, actual: #{
state.uncompressed_size
}"
end
{[{:data, :eof}], %VerifierState{}}
end
defp verify({:data, data}, state) do
{[{:data, data}],
%{
state
| crc32: :erlang.crc32(state.crc32, data),
uncompressed_size: state.uncompressed_size + IO.iodata_length(data)
}}
end
# Specification is available at
# https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
defp execute_state_machine(:eof, state) do
if state.next == :done do
{[], state}
else
raise Error, "Unexpected end of input"
end
end
defp execute_state_machine(data, state) do
data =
if state.buffer not in ["", []] do
[state.buffer, data]
else
data
end
size = IO.iodata_length(data)
enough_data? =
case state.next do
:local_file_header ->
size >= 30
:next_header ->
size >= 30
:filename_extra_field ->
size >= state.local_header.file_name_length + state.local_header.extra_field_length
:done ->
true
:file_data ->
true
end
if enough_data? do
apply(__MODULE__, state.next, [data, %{state | buffer: ""}])
else
{[], %{state | buffer: data}}
end
end
def local_file_header(data, state) do
data = IO.iodata_to_binary(data)
case parse_local_header(data) do
{:ok, local_header, rest} ->
{decoder, decoder_state} = Zstream.Decoder.init(local_header.compression_method)
if bit_set?(local_header.general_purpose_bit_flag, 3) do
raise Error, "Zip files with data descriptor record are not supported"
end
execute_state_machine(rest, %{
state
| local_header: local_header,
next: :filename_extra_field,
decoder: decoder,
decoder_state: decoder_state
})
:done ->
state = %{state | next: :done}
{[], state}
end
end
def filename_extra_field(data, state) do
data = IO.iodata_to_binary(data)
start = 0
length = state.local_header.file_name_length
file_name = binary_part(data, start, length)
start = start + length
length = state.local_header.extra_field_length
extra_field = binary_part(data, start, length)
start = start + length
rest = binary_part(data, start, byte_size(data) - start)
state = put_in(state.local_header.file_name, file_name)
state = put_in(state.local_header.extra_field, extra_field)
state = %{state | next: :file_data}
state = put_in(state.local_header.extras, Extra.parse(extra_field, []))
zip64_extended_information =
Enum.find(state.local_header.extras, &match?(%Extra.Zip64ExtendedInformation{}, &1))
state =
if zip64_extended_information do
state =
put_in(state.local_header.compressed_size, zip64_extended_information.compressed_size)
put_in(state.local_header.uncompressed_size, zip64_extended_information.size)
else
state
end
{results, new_state} = execute_state_machine(rest, state)
{Stream.concat([{:local_header, state.local_header}], results), new_state}
end
def file_data(data, state) do
size = IO.iodata_length(data)
if size + state.data_sent < state.local_header.compressed_size do
{chunks, state} = decode(data, state)
{chunks, %{state | data_sent: state.data_sent + size}}
else
data = IO.iodata_to_binary(data)
length = state.local_header.compressed_size - state.data_sent
file_chunk = binary_part(data, 0, length)
{chunks, state} = decode_close(file_chunk, state)
start = length
rest = binary_part(data, start, size - start)
state = %{state | data_sent: 0, next: :next_header}
{results, state} = execute_state_machine(rest, state)
{Stream.concat([chunks, [{:data, :eof}], results]), state}
end
end
def next_header(data, state) do
data = IO.iodata_to_binary(data)
case :binary.match(data, <<0x4B50::little-size(16)>>, scope: {0, 28}) do
:nomatch ->
raise Error, "Invalid zip file, could not find any signature header"
{start, 2} ->
<<signature::little-size(32), _::binary>> =
rest = binary_part(data, start, byte_size(data) - start)
case signature do
0x04034B50 ->
execute_state_machine(rest, %{state | next: :local_file_header})
# archive extra data record
0x08064B50 ->
{[], %{state | next: :done}}
# central directory header
0x02014B50 ->
{[], %{state | next: :done}}
end
end
end
def done(_, state) do
{[], state}
end
defp decode(data, state) do
decoder = state.decoder
decoder_state = state.decoder_state
{chunks, decoder_state} = decoder.decode(data, decoder_state)
state = put_in(state.decoder_state, decoder_state)
{chunks, state}
end
defp decode_close(data, state) do
{chunks, state} = decode(data, state)
chunks =
Stream.concat(
chunks,
Stream.resource(
fn -> state.decoder.close(state.decoder_state) end,
fn
empty when empty in [nil, "", []] ->
{:halt, nil}
data ->
{[{:data, data}], nil}
end,
fn _ -> :ok end
)
)
state = put_in(state.decoder, nil)
state = put_in(state.decoder_state, nil)
{chunks, state}
end
# local file header signature
defp parse_local_header(
<<0x04034B50::little-size(32), version_need_to_extract::little-size(16),
general_purpose_bit_flag::little-size(16), compression_method::little-size(16),
last_modified_file_time::little-size(16), last_modified_file_date::little-size(16),
crc32::little-size(32), compressed_size::little-size(32),
uncompressed_size::little-size(32), file_name_length::little-size(16),
extra_field_length::little-size(16), rest::binary>>
) do
{:ok,
%LocalHeader{
version_need_to_extract: version_need_to_extract,
general_purpose_bit_flag: general_purpose_bit_flag,
compression_method: compression_method,
last_modified_file_time: last_modified_file_time,
last_modified_file_date: last_modified_file_date,
crc32: crc32,
compressed_size: compressed_size,
uncompressed_size: uncompressed_size,
file_name_length: file_name_length,
extra_field_length: extra_field_length
}, rest}
end
defp parse_local_header(_), do: raise(Error, "Invalid local header")
defp bit_set?(bits, n) do
(bits &&& 1 <<< n) > 0
end
defp dos_time(date, time) do
<<year::size(7), month::size(4), day::size(5)>> = <<date::size(16)>>
<<hour::size(5), minute::size(6), second::size(5)>> = <<time::size(16)>>
{:ok, datetime} =
NaiveDateTime.new(
1980 + year,
month,
day,
hour,
minute,
min(second * 2, 59)
)
datetime
end
end
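# Hedged usage sketch (added; module name illustrative): stream a zip file
# through unzip/2 and consume the {:entry, _} and {:data, _} tuples it emits.
# The 64 KiB chunk size is illustrative.
defmodule ZstreamUnzipDemo do
  def entry_names(path) do
    path
    |> File.stream!([], 65_536)
    |> Zstream.Unzip.unzip()
    |> Enum.flat_map(fn
      {:entry, entry} -> [entry.name]
      {:data, _chunk_or_eof} -> []
    end)
  end
end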
|
lib/zstream/unzip.ex
| 0.584153
| 0.510802
|
unzip.ex
|
starcoder
|
defmodule WaoBirthday.Commands.Birthday do
use Alchemy.Cogs
alias Alchemy.{Client, Embed}
alias WaoBirthday.Birthday
import WaoBirthday.Utils
require Logger
require Embed
Cogs.def birthday "help" do
Cogs.say """
```
birthday me - Displays your birthday.
    birthday <userid> - Displays this user's birthday.
    birthday me day month - Sets your birthday. Only you can overwrite this.
    birthday <userid> day month - Sets this user's birthday.
```
"""
end
Cogs.def birthday "me" do
case send_embed(message, message.author.id) do
{:error, :not_found, _} -> Cogs.say "Nobody knows about your birthday yet"
{:ok, _} -> {:ok, :boomer}
error -> handle_error(message, error)
end
end
Cogs.def birthday id do
case send_embed(message, id) do
      {:error, :not_found, user} -> Cogs.say "#{maybe_s user.username} birthday is unknown."
{:ok, _} -> {:ok, :boomer}
error -> handle_error(message, error)
end
end
Cogs.def birthday "me", day, month do
result =
Memento.transaction fn -> Birthday.write_birthday(message.author.id, day, month, true) end
case result do
{:ok, birthday} -> Cogs.say "Your Birthday was set to the #{to_string birthday}"
{:error, error} when is_binary(error) -> Cogs.say error
error ->
Logger.error "Could not set birthday: #{inspect error}"
Cogs.say "Could not set your birthday"
end
end
Cogs.def birthday id, day, month do
with {:ok, %{username: username}} <- Client.get_user(id) do
Memento.transaction fn ->
case Memento.Query.read(Birthday, id, lock: :write) do
%{owner: true} ->
            Cogs.say "#{maybe_s username} birthday can't be overwritten, because it was set by #{username}"
_ ->
case Birthday.write_birthday(id, day, month, false) do
{:ok, birthday} ->
                Cogs.say "#{maybe_s username} birthday has been set to #{to_string birthday}"
{:error, error} when is_binary(error) ->
Cogs.say error
_ ->
Cogs.say "Could not set #{maybe_s username} birthday"
end
end
end
else
error ->
handle_error(message, error)
end
end
defp send_embed(message, id) do
case Client.get_user(id) do
{:ok, user} ->
case Birthday.read_birthday(id) do
{:ok, birthday} ->
birthday
|> Birthday.embed(user)
|> Embed.send()
{:error, :not_found} ->
{:error, :not_found, user}
end
{:error, error} when is_binary(error) ->
case Poison.decode(error) do
{:ok, error_json} -> {:error, error_json}
{:error, _} -> {:error, error}
end
{:error, error} ->
{:error, error}
end
end
end
|
lib/wao_birthday/commands/birthday.ex
| 0.603581
| 0.685831
|
birthday.ex
|
starcoder
|
defmodule DemonSpiritGame.Game do
@moduledoc """
Provides a structure to hold all game state, with functions
to manipulate that state.
board: Map. Keys are {x, y} tuples of integers.
Values are maps representing pieces.
cards. Map with the following keys (indented) %Map{
white: List of 2 %Cards{}. Moves that white may use.
black: List of 2 %Cards{}. Moves that black may use.
side: One %Card{} that currently belongs to neither player. Not a list.. should it be?
}
turn: Atom, :white or :black, whose turn is it?
winner: nil, or atom :white or :black. Who has won?
(Winner is :error in the impossible case that two sides win at once)
game_name: String, what is the name of the game.
Example of cards rotating.
- White: [Tiger, Crab] Black: [Monkey, Crane] Side: Mantis
- White plays a move using the Tiger card.
- White: [Mantis, Crab] Black: [Monkey, Crane] Side: Tiger
- Black plays a move using the Crane card.
- White: [Mantis, Crab] Black: [Monkey, Tiger] Side: Crane
"""
defstruct board: nil,
cards: %{
white: [],
black: [],
side: nil
},
turn: :white,
winner: nil,
game_name: nil,
moves: 0
alias DemonSpiritGame.{Game, Card, Move, GameWinCheck}
@doc """
new/1: Create a new game with random cards.
Input: game_name: String. Can be anything.
Output: %Game{}
"""
@spec new(String.t()) :: %Game{}
def new(game_name) do
cards = Card.cards() |> Enum.take_random(5)
_new(cards, game_name)
end
@spec new(String.t(), nonempty_list(%Card{}) | :hardcoded_cards) :: %Game{}
@doc """
new/2: Create a new game with cards specified. Provide a name and
a list of 5 cards. They will be assigned in this order:
[WHITE, WHITE, BLACK, BLACK, SIDE].
Input: game_name: String. Can be anything.
Input: cards: [%Cards{}]. List should be length 5
Output: %Game{}
"""
def new(game_name, cards) when is_list(cards) and length(cards) == 5, do: _new(cards, game_name)
  _ = """
  new/2 new(game_name, :hardcoded_cards): Create a new game with a set of cards
  that will always be the same. Useful for removing the RNG when building a test case.
  (Documented here rather than with @doc: a second @doc on new/2 would be discarded with a compiler warning.)
  Input: game_name: String. Can be anything.
  Input: :hardcoded_cards (Atom)
  Output: %Game{}
  """
def new(game_name, :hardcoded_cards) do
["Wild Pig", "Python", "Crustacean", "Heron", "Drake"]
|> Enum.map(fn name ->
{:ok, card} = Card.by_name(name)
card
end)
|> _new(game_name)
end
_ = """
_new/2 (private): Create a new game with cards specified.
  Used to deduplicate the repeated logic between new/1 and new/2.
"""
defp _new(cards, game_name) when is_list(cards) and length(cards) == 5 do
%Game{
board: initial_board(),
cards: %{
white: cards |> Enum.slice(0, 2),
black: cards |> Enum.slice(2, 2),
side: cards |> Enum.at(4)
},
game_name: game_name
}
end
@doc """
move/2: Move a piece in the game, if possible.
Input: %Game{}, %Move{}
Output: {:ok, %Game{}}
Output (error): {:error, %Game{}} (Game status unchanged)
"""
@spec move(%Game{}, %Move{}) :: {:ok, %Game{}} | {:error, any}
def move(game, move = %Move{}) do
case valid_move?(game, move) do
true ->
game =
game |> _move(move) |> _rotate_card(move) |> change_player() |> GameWinCheck.check()
{:ok, game}
false ->
{:error, game}
end
end
_ = """
_move/2 (private): Move a piece in the game.
If we've called this, then we've already certain the move is valid.
Input: %Game{}, %Move{}
Output: %Game{}
"""
@spec _move(%Game{}, %Move{}) :: %Game{}
defp _move(game, %Move{from: from, to: to}) do
{piece, board} = game.board |> Map.pop(from)
board = board |> Map.put(to, piece)
%Game{game | board: board, moves: game.moves + 1}
end
_ = """
_rotate_card/2 (private): Rotate the cards.
If we've called this, then we've already certain the move is valid.
The card used to play the move swaps with the side card.
Input: %Game{}, %Move{}
Output: %Game{}
"""
defp _rotate_card(game, %Move{card: card}) do
old_side_card = game.cards.side
# Find the index of the currently played card
played_index =
game.cards[game.turn]
|> Enum.find_index(fn x -> x == card end)
# Player cards: Remove played card, add old side card
player_cards =
game.cards[game.turn]
|> List.delete(card)
|> List.insert_at(played_index, old_side_card)
cards =
game.cards
|> Map.put(:side, card)
|> Map.put(game.turn, player_cards)
%Game{game | cards: cards}
end
@doc """
valid_move?/2: Given a game state and a move specified by coordinates, is that move valid?
Input: %Game{}, %Move{}
Output: Boolean, is this move valid?
"""
@spec valid_move?(%Game{}, %Move{}) :: boolean()
def valid_move?(game = %Game{turn: turn}, move = %Move{from: from, to: to, card: card}) do
active_piece?(game, from) && valid_coord?(to) && to not in active_piece_coords(game) &&
card_provides_move?(move, turn) && active_player_has_card?(game, card)
end
@doc """
card_provides_move?/2: Is the move provided valid? That is, is moving a piece
from `from` to `to` actually one of the moves on the `card`?
Input:
move: %Move{}
turn: :white | :black, whose turn is it?
- Needed because black player uses the flipped version of cards.
Output: Boolean
"""
@spec card_provides_move?(%Move{}, :white | :black) :: boolean()
def card_provides_move?(%Move{from: from, to: to, card: card}, turn) do
to in (possible_moves(from, card, turn) |> Enum.map(fn m -> m.to end))
end
@doc """
active_player_has_card?/2: Does the active player have the card specified?
Input: %Game{}, %Card{}
Output: Boolean
"""
@spec active_player_has_card?(%Game{}, %Card{}) :: boolean()
def active_player_has_card?(game = %Game{}, card = %Card{}) do
count = game.cards[game.turn] |> Enum.filter(fn c -> c == card end) |> length
    count >= 1
end
@doc """
active_piece?/2: Given a game state and a coordinate, does a piece exist there
and belong to the currently playing player?
Input:
game: %Game{}
from: {x, y} tuple of piece to pick up and move, example: {2, 2} for the center square
Output:
Boolean: Does the from piece exist, and if so, does it belong to the player whose turn it currently is?
"""
@spec active_piece?(%Game{}, {integer(), integer()}) :: boolean()
def active_piece?(game, from) do
piece = Map.get(game.board, from)
piece != nil && piece.color == game.turn
end
@doc """
all_valid_moves/1: What are all of the valid moves that a player may currently take?
Input: %Game{}
Output: [ %Move{}, ... ]
"""
@spec all_valid_moves(%Game{}) :: list(%Move{})
def all_valid_moves(%Game{winner: winner}) when not is_nil(winner), do: []
def all_valid_moves(game = %Game{turn: turn}) do
active_piece_coords = active_piece_coords(game)
active_piece_coords
|> Enum.flat_map(fn {x, y} ->
game.cards[game.turn]
|> Enum.flat_map(fn card ->
possible_moves({x, y}, card, turn)
end)
end)
|> Enum.filter(&valid_coord?/1)
|> Enum.filter(fn %Move{to: to} -> to not in active_piece_coords end)
end
@doc """
valid_coord/1: Is the coordinate given in bounds of the board?
Input: {x, y} tuple of ints or %Move{}
Output: Boolean
"""
def valid_coord?(%Move{from: from, to: to}), do: valid_coord?(from) && valid_coord?(to)
def valid_coord?({x, y}) when x >= 0 and x <= 4 and y >= 0 and y <= 4, do: true
def valid_coord?(_), do: false
@doc """
active_piece_coords/1: What are all of the coordinates of the pieces of the active player?
All valid moves must begin with one of these as the 'from' piece.
Input: game: %Game{}
Output:
list of {x, y} tuples containing integers: All coordinates of pieces belonging to the player
whose turn it currently is.
iex> DemonSpiritGame.Game.new("name") |> active_piece_coords
[{0, 0}, {1, 0}, {2, 0}, {3, 0}, {4, 0}]
"""
@spec active_piece_coords(%Game{}) :: list({integer(), integer()})
def active_piece_coords(game) do
game.board
|> Map.to_list()
|> Enum.filter(fn {_coord, %{color: color}} -> color == game.turn end)
|> Enum.map(fn {coord, _} -> coord end)
end
@doc """
possible_moves/3
Given a starting coordinate and a card, generate a list of possible moves
for that piece.
Input:
{x, y}: Tuple of two integers representing a starting coordinate
%Card{}: A Card to use to generate moves
turn: :white or :black, whose turn is it?
Card moves are flipped vertically if it's black's turn.
If a card provides a {0, 1} move, white can move "up", but black can move "down".
Output:
List of %Move{}s. Possible moves. Note, some of these may be invalid
and land on other pieces owned by the player. That needs to be filtered
out later.
"""
@spec possible_moves({integer(), integer()}, %Card{}, :white | :black) :: list(%Move{})
def possible_moves(_coord, nil, _turn), do: []
def possible_moves({x, y}, card = %Card{}, turn) do
card_ =
case turn do
:black -> Card.flip(card)
_ -> card
end
# Generate the moves off card_, which might be flipped.
# But Enum.map to card, which is always the original card.
# We don't want to show the caller the flipped version of the cards, ever.
card_.moves
|> Enum.map(fn {dx, dy} ->
%Move{from: {x, y}, to: {x + dx, y + dy}, card: card}
end)
|> Enum.filter(&valid_coord?/1)
end
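# Illustrative example (assumes a card whose only move is {0, 1}):
# possible_moves({2, 2}, card, :white) yields a move to {2, 3}, while the
# same call with :black uses the flipped card and yields a move to {2, 1}.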
@doc """
change_player/1: Simply flips the ":turn" field of the game between :white and :black.
Does not flip if there is a winner. [Unsure if this is correct, it could set winner to nil?]
Input: %Game{}
Output: %Game{} with :turn flipped, if there is not a winner.
"""
@spec change_player(%Game{}) :: %Game{}
def change_player(game = %{winner: winner}) when not is_nil(winner), do: game
def change_player(game = %{turn: :white}), do: %{game | turn: :black}
def change_player(game = %{turn: :black}), do: %{game | turn: :white}
_ = """
initial_board: The initial setup of the pieces.
v {4, 4} (top right)
P P K P P (Black)
. . . . .
. . . . .
. . . . .
P P K P P (White)
^ {0, 0} (bottom left)
Input: none.
Output: Map of the format %{ {0, 0} => %{type: :pawn, color: :white}, {x, y} => %{type: ..., color: ... }, ... }
"""
@spec initial_board() :: map()
defp initial_board do
white_pawn = %{type: :pawn, color: :white}
white_king = %{type: :king, color: :white}
black_pawn = %{type: :pawn, color: :black}
black_king = %{type: :king, color: :black}
%{
{0, 0} => white_pawn,
{1, 0} => white_pawn,
{2, 0} => white_king,
{3, 0} => white_pawn,
{4, 0} => white_pawn,
{0, 4} => black_pawn,
{1, 4} => black_pawn,
{2, 4} => black_king,
{3, 4} => black_pawn,
{4, 4} => black_pawn
}
end
end
| apps/demon_spirit_game/lib/demon_spirit_game/game.ex | 0.888039 | 0.548976 | game.ex | starcoder |
defmodule EctoJob.Migrations do
@moduledoc false
defmodule Helpers do
@moduledoc false
def qualify(name, nil), do: name
def qualify(name, prefix), do: "#{prefix}.#{name}"
end
defmodule Install do
@moduledoc """
Defines migrations for installing shared functions
"""
import Ecto.Migration
@doc """
Creates the `fn_notify_inserted` trigger function.
This function will be called from triggers attached to job queue tables.
## Options
* `:prefix` - the prefix (aka Postgresql schema) to create the functions in.
"""
def up(opts \\ []) do
prefix = Keyword.get(opts, :prefix)
execute("""
CREATE FUNCTION #{Helpers.qualify("fn_notify_inserted", prefix)}()
RETURNS trigger AS $$
DECLARE
BEGIN
PERFORM pg_notify(TG_TABLE_NAME, '');
RETURN NEW;
END;
$$ LANGUAGE plpgsql
""")
end
@doc """
Drops the `fn_notify_inserted` trigger function
## Options
* `:prefix` - the prefix (aka Postgresql schema) containing the function to remove.
"""
def down(opts \\ []) do
prefix = Keyword.get(opts, :prefix)
execute("DROP FUNCTION #{Helpers.qualify("fn_notify_inserted", prefix)}()")
end
end
defmodule CreateJobTable do
@moduledoc """
Defines a migration to create a table to be used as a job queue.
This migration can be run multiple times with different values to create multiple queues.
"""
import Ecto.Migration
@doc """
Adds a job queue table with the given name, and attaches an insert trigger.
## Options
* `:prefix` - the prefix (aka Postgresql schema) to create the table in.
* `:version` - the major version of the EctoJob library used to generate the table
* `:timestamps` - A keyword list of options passed to the `Ecto.Migration.timestamps/1` function.
"""
def up(name, opts \\ []) do
opts = [{:primary_key, false} | opts]
prefix = Keyword.get(opts, :prefix)
timestamp_opts = Keyword.get(opts, :timestamps, [])
version = Keyword.get(opts, :version, 4)
create table(name, opts) do
add(:id, :bigserial, primary_key: true)
add(:state, :string, null: false, default: "AVAILABLE")
add(:expires, :utc_datetime_usec)
add(:schedule, :utc_datetime_usec,
null: false,
default: fragment("timezone('UTC', now())")
)
add(:attempt, :integer, null: false, default: 0)
add(:max_attempts, :integer, null: false, default: 5)
add(:params, :map, null: false)
add(:notify, :string)
if version >= 3 do
add(:priority, :integer, null: false, default: 0)
end
if version >= 4 do
add(:idempotency_key, :string)
add(:retain_for, :bigint, null: false, default: 0)
end
timestamps(timestamp_opts)
end
cond do
version == 2 ->
create(index(name, [:schedule, :id]))
version == 3 ->
create(index(name, [:priority, :schedule, :id]))
version >= 4 ->
create(index(name, [:priority, :schedule, :id]))
create(unique_index(name, :idempotency_key))
true ->
nil
end
execute("""
CREATE TRIGGER tr_notify_inserted_#{name}
AFTER INSERT ON #{Helpers.qualify(name, prefix)}
FOR EACH ROW
EXECUTE PROCEDURE #{Helpers.qualify("fn_notify_inserted", prefix)}();
""")
end
@doc """
Drops the job queue table with the given name, and associated trigger
## Options
* `:prefix` - the prefix (aka Postgresql schema) containing the table to remove.
"""
def down(name, opts \\ []) do
prefix = Keyword.get(opts, :prefix)
execute("DROP TRIGGER tr_notify_inserted_#{name} ON #{Helpers.qualify(name, prefix)}")
execute("DROP TABLE #{Helpers.qualify(name, prefix)}")
end
end
defmodule UpdateJobTable do
@moduledoc """
Defines an update migration to a specific version of EctoJob.
This migration can be run multiple times with different values to update multiple queues.
"""
import Ecto.Migration
@doc """
Upgrade the job queue table for the given EctoJob version and table name.
"""
def up(3, name) do
alter table(name) do
add(:priority, :integer, null: false, default: 0)
end
create(index(name, [:priority, :schedule, :id]))
end
def up(4, name) do
alter table(name) do
add(:idempotency_key, :string)
add(:retain_for, :bigint, null: false, default: 0)
end
create(unique_index(name, :idempotency_key))
end
@doc """
Roll back updates to the job queue table for the given EctoJob version and table name.
"""
def down(3, name) do
drop(index(name, [:priority, :schedule, :id]))
alter table(name) do
remove(:priority)
end
end
def down(4, name) do
drop(unique_index(name, :idempotency_key))
alter table(name) do
remove(:idempotency_key)
remove(:retain_for)
end
end
end
end
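# A minimal usage sketch (the queue name "jobs" is assumed; wrap the calls in
# your own Ecto migration module):
#
#     defmodule MyApp.Repo.Migrations.CreateJobQueue do
#       use Ecto.Migration
#
#       def up do
#         EctoJob.Migrations.Install.up()
#         EctoJob.Migrations.CreateJobTable.up("jobs")
#       end
#
#       def down do
#         EctoJob.Migrations.CreateJobTable.down("jobs")
#         EctoJob.Migrations.Install.down()
#       end
#     end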
| lib/ecto_job/migrations.ex | 0.750553 | 0.469155 | migrations.ex | starcoder |
defmodule Tipalti.API.Payer do
@moduledoc """
Payer functions.
Details are taken from: <https://api.tipalti.com/v5/PayerFunctions.asmx>
"""
import SweetXml, only: [sigil_x: 2]
alias Tipalti.API.SOAP.Client
alias Tipalti.{Invoice, Balance, ClientError, RequestError}
@version "v5"
@url [
sandbox: "https://api.sandbox.tipalti.com/#{@version}/PayerFunctions.asmx",
production: "https://api.tipalti.com/#{@version}/PayerFunctions.asmx"
]
use Tipalti.API,
url: @url,
standard_response: [
ok_code: "OK",
error_paths: [error_code: ~x"./errorCode/text()"s, error_message: ~x"./errorMessage/text()"os]
]
@doc """
Not yet implemented
"""
@spec apply_vendor_credit() :: {:error, :not_yet_implemented}
def apply_vendor_credit(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_extended_payee_status_file() :: {:error, :not_yet_implemented}
def create_extended_payee_status_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_or_update_custom_fields() :: {:error, :not_yet_implemented}
def create_or_update_custom_fields(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_or_update_gl_accounts() :: {:error, :not_yet_implemented}
def create_or_update_gl_accounts(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_or_update_grns() :: {:error, :not_yet_implemented}
def create_or_update_grns(), do: {:error, :not_yet_implemented}
@typedoc """
An invoice approver, used when creating invoices in `create_or_update_invoices/1`.
"""
@type invoice_approver :: %{
required(:email) => String.t(),
required(:name) => String.t(),
optional(:order) => integer()
}
@typedoc """
An invoice line item, used when creating invoices in `create_or_update_invoices/1`.
"""
@type invoice_line_item :: %{
required(:amount) => String.t(),
optional(:banking_message) => String.t(),
optional(:currency) => String.t(),
optional(:custom_fields) => [Tipalti.key_value_pair()],
optional(:description) => String.t(),
optional(:e_wallet_message) => String.t(),
optional(:external_metadata) => String.t(),
optional(:internal_notes) => String.t(),
optional(:line_type) => String.t(),
optional(:quantity) => integer()
}
@typedoc """
An invoice, used when creating invoices in `create_or_update_invoices/1`.
"""
@type invoice :: %{
required(:can_approve) => boolean(),
required(:date) => String.t(),
required(:idap) => Tipalti.idap(),
required(:is_paid_manually) => boolean(),
required(:subject) => String.t(),
optional(:ap_account_number) => String.t(),
optional(:approvers) => [invoice_approver()],
optional(:currency) => String.t(),
optional(:custom_fields) => [Tipalti.key_value_pair()],
optional(:description) => String.t(),
optional(:due_date) => String.t(),
optional(:income_type) => String.t(),
optional(:internal_notes) => String.t(),
optional(:line_items) => [invoice_line_item()],
optional(:number) => String.t(),
optional(:payer_entity_name) => String.t(),
optional(:ref_code) => String.t(),
optional(:status) => String.t()
}
@doc """
Create new invoices or update existing ones.
Returns a list of invoice responses for each invoice,
indicating if it succeeded and what the errors were if it didn't.
See <https://support.tipalti.com/Content/Topics/Development/APIs/PayerApi.htm> for details.
## Parameters
* `invoices[]`: List of maps of invoice params.
* `idap`: Payee id.
* `ref_code`: Unique id for this invoice (leave null for auto-generated id).
* `date`: Invoice value date (estimated date and time the payee receives the funds).
* `due_date`: The date and time the invoice is due to be paid.
* `line_items[]`: List of invoice lines.
* `currency`: Invoice currency.
* `amount`: Invoice line amount.
* `description`: Description of the invoice line.
* `internal_notes`: Notes which are not displayed to the payee.
* `e_wallet_message`: A message to attach to the payment. This message is sent to providers and appears on payee bank statements. If no value is provided, the InvoiceRefCode is used.
* `banking_message`: A message to attach to the payment. This message is sent to providers and appears on payee bank statements. If a value is not provided, the EWalletMessage is used.
* `custom_fields[]`: If custom fields have been defined for the invoice entity, the values of these fields can be set here. The field name must match the defined custom field name.
* `key`: The custom field key.
* `value`: The custom field value.
* `line_type`: ?
* `external_metadata`: ?
* `quantity`: ?
* `description`: Description of the invoice.
* `can_approve`: Indicates whether or not the payee is able to approve the invoice.
* `internal_notes`: Notes, which are not displayed to the payee.
* `custom_fields[]`: If custom fields have been defined for the invoice entity, the values of these fields can be set here. The field name must match the defined custom field name.
* `key`: The custom field key.
* `value`: The custom field value.
* `is_paid_manually`: If `true`, the invoice is marked as paid manually.
* `income_type`: If the Tax Withholding module is enabled and there are multiple income types that can be associated with the payment, then you must enter the IncomeType per payment.
* `status`: ?
* `currency`: Invoice currency.
* `approvers`: ?
* `number`: ?
* `payer_entity_name`: The name of the payer entity linked to the invoice.
* `subject`: The text for the title of the invoice, displays for the payee in the Payee Dashboard or Suppliers Portal.
* `ap_account_number`: ?
## Returns
`{:ok, list}` where list is a list of maps contains the following fields:
* `error_message`: String; if there was an error creating the invoice.
* `ref_code`: String; corresponds to the input invoices.
* `succeeded`: Boolean; Indicates if creating the invoice succeeded.
## Examples
iex> create_or_update_invoices([%{idap: "somepayee", ref_code: "testinvoice1", due_date: "2018-05-01", date: "2018-06-01", subject: "test invoice 1", currency: "USD", line_items: [%{amount: "100.00", description: "test line item"}]}, %{idap: "somepayee", ref_code: "testinvoice2", due_date: "2018-06-01", date: "2018-05-01", subject: "test invoice 2", currency: "USD", line_items: [%{amount: "100.00", description: "test line item"}]}])
{:ok,
[
%{
error_message: "Due date cannot be earlier then invoice date",
ref_code: "testinvoice1",
succeeded: false
},
%{error_message: nil, ref_code: "testinvoice2", succeeded: true}
]}
"""
@spec create_or_update_invoices([invoice()]) ::
{:ok, [%{error_message: String.t() | nil, ref_code: String.t(), succeeded: boolean()}]}
| {:error, RequestError.t()}
def create_or_update_invoices(invoices) do
payload =
RequestBuilder.build(
"CreateOrUpdateInvoices",
[
invoices:
optional_list(invoices, fn invoice ->
{:TipaltiInvoiceItemRequest,
[
Idap: invoice[:idap],
InvoiceRefCode: invoice[:ref_code],
InvoiceDate: invoice[:date],
InvoiceDueDate: invoice[:due_date],
InvoiceLines:
optional_list(invoice[:line_items], fn line_item ->
{:InvoiceLine,
[
Currency: line_item[:currency],
Amount: line_item[:amount],
Description: line_item[:description],
InvoiceInternalNotes: line_item[:internal_notes],
EWalletMessage: line_item[:e_wallet_message],
BankingMessage: line_item[:banking_message],
CustomFields:
optional_list(line_item[:custom_fields], fn custom_field ->
{:KeyValuePair, Key: custom_field[:key], Value: custom_field[:value]}
end),
# TODO: figure out what this is and how to support it
GLAccount: nil,
LineType: line_item[:line_type],
LineExternalMetadata: line_item[:external_metadata],
Quantity: line_item[:quantity]
]}
end),
Description: invoice[:description],
CanApprove: invoice[:can_approve],
InvoiceInternalNotes: invoice[:internal_notes],
CustomFields:
optional_list(invoice[:custom_fields], fn custom_field ->
{:KeyValuePair, Key: custom_field[:key], Value: custom_field[:value]}
end),
IsPaidManually: invoice[:is_paid_manually],
IncomeType: invoice[:income_type],
InvoiceStatus: invoice[:status],
Currency: invoice[:currency],
Approvers:
optional_list(invoice[:approvers], fn approver ->
{:TipaltiInvoiceApprover, Name: approver[:name], Email: approver[:email], Order: approver[:order]}
end),
InvoiceNumber: invoice[:number],
PayerEntityName: invoice[:payer_entity_name],
InvoiceSubject: invoice[:subject],
ApAccountNumber: invoice[:ap_account_number]
]}
end)
],
[:payer_name, :timestamp]
)
client = Application.get_env(:tipalti, :api_client_module, Client)
with {:ok, body} <- client.send(@url, payload) do
response =
ResponseParser.parse_without_errors(
body,
~x"//CreateOrUpdateInvoicesResult",
[
~x"./InvoiceErrors/TipaltiInvoiceItemResult"l,
error_message: ~x"./ErrorMessage/text()"os,
succeeded: ~x"./Succeeded/text()"b,
ref_code: ~x"./InvoiceRefCode/text()"os
]
)
{:ok, response}
end
end
defp optional_list(nil, _), do: nil
defp optional_list([], _), do: nil
defp optional_list(items, fun), do: Enum.map(items, fun)
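# For reference: optional_list(nil, fun) and optional_list([], fun) both
# return nil, while optional_list([1, 2], &(&1 * 2)) returns [2, 4], so an
# absent or empty collection yields a nil element in the request structure.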
@doc """
Not yet implemented
"""
@spec create_or_update_purchase_orders() :: {:error, :not_yet_implemented}
def create_or_update_purchase_orders(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_payee_status_file() :: {:error, :not_yet_implemented}
def create_payee_status_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec create_payment_orders_report() :: {:error, :not_yet_implemented}
def create_payment_orders_report(), do: {:error, :not_yet_implemented}
@doc """
Get balances in your accounts.
Returns account provider, account identifier, currency and amount in balance.
Note: when submitting a payment, the balance may take some time before it is updated.
## Examples
iex> get_balances()
{:ok,
[
%Tipalti.Balance{
account_identifier: "1234",
balance: Money.new("USD", "1000"),
provider: "Tipalti"
}
]}
"""
@spec get_balances() :: {:ok, [Tipalti.Balance.t()]} | {:error, ClientError.t()} | {:error, RequestError.t()}
def get_balances do
with {:ok, balances_maps} <-
run(
"GetBalances",
[],
[:payer_name, :timestamp],
{
~x"//GetBalancesResult",
[
~x"./AccountInfos/TipaltiAccountInfo"l,
provider: ~x"./Provider/text()"os,
account_identifier: ~x"./AccountIdentifier/text()"os,
balance: ~x"./Balance/text()"os,
currency: ~x"./Currency/text()"os
]
},
ok_code: 0,
error_paths: [error_code: ~x"./errorCode/text()"i, error_message: ~x"./errorMessage/text()"os]
) do
{:ok, Balance.from_maps!(balances_maps)}
end
end
@doc """
Not yet implemented
"""
@spec get_custom_fields() :: {:error, :not_yet_implemented}
def get_custom_fields(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec get_dynamic_key() :: {:error, :not_yet_implemented}
def get_dynamic_key(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec get_dynamic_key_of_sub_payer() :: {:error, :not_yet_implemented}
def get_dynamic_key_of_sub_payer(), do: {:error, :not_yet_implemented}
@doc """
Return list of payee invoices.
## Parameters
* `invoice_ref_codes`: list of invoice reference codes
## Examples
iex> get_payee_invoices_list_details(["12345","12346"])
{:ok,
[
%Tipalti.Invoice{
amount_due: Money.new!(:USD, "3.61"),
approval_date: nil,
approvers: [],
can_approve: false,
custom_fields: [],
date: ~D[2018-07-23],
description: "Some invoice",
due_date: ~D[2018-07-27],
idap: "payee1",
internal_notes: "Notes",
is_paid_manually: false,
line_items: [
%Tipalti.Invoice.Line{
amount: Money.new!(:USD, "3.61"),
custom_fields: [],
description: "Charges",
line_type: nil,
quantity: nil
}
],
number: "h6gz1gs2e",
payer_entity_name: "SomePayee",
ref_code: "12345",
status: :pending_payment
},
%Tipalti.Invoice{
amount_due: Money.new!(:USD, "10.47"),
approval_date: nil,
approvers: [],
can_approve: false,
custom_fields: [],
date: ~D[2018-07-18],
description: "Some other invoice",
due_date: ~D[2018-07-20],
idap: "payee2",
internal_notes: "Notes notes notes",
is_paid_manually: false,
line_items: [
%Tipalti.Invoice.Line{
amount: Money.new!(:USD, "10.47"),
custom_fields: [],
description: "Charges",
line_type: nil,
quantity: nil
}
],
number: "h6gz1grv4",
payer_entity_name: "SomePayee",
ref_code: "12346",
status: :pending_payment
}
]}
"""
@spec get_payee_invoices_list_details([Invoice.ref_code()]) ::
{:ok, [Invoice.t()]} | {:error, ClientError.t()} | {:error, RequestError.t()}
def get_payee_invoices_list_details(invoice_ref_codes) do
with {:ok, %{errors: _errors, invoices: invoice_maps}} <-
run(
"GetPayeeInvoicesListDetails",
[invoicesRefCodes: Enum.map(invoice_ref_codes, fn ref_code -> [string: ref_code] end)],
[:payer_name, :timestamp],
{
~x"//GetPayeeInvoicesListDetailsResult",
errors: [
~x"./InvoiceErrors/TipaltiInvoiceItemError"l,
error_message: ~x"./ErrorMessage/text()"s,
error_code: ~x"./ErrorCode/text()"s,
ref_code: ~x"./InvoiceRefCode/text()"s
],
invoices: [
~x"./Invoices/TipaltiInvoiceItemResponse"l,
idap: ~x"./Idap/text()"s,
ref_code: ~x"./InvoiceRefCode/text()"s,
date: ~x"./InvoiceDate/text()"s,
due_date: ~x"./InvoiceDueDate/text()"s,
line_items: [
~x"./InvoiceLines/InvoiceLine"l,
currency: ~x"./Currency/text()"s,
amount: ~x"./Amount/text()"s,
description: ~x"./Description/text()"s,
custom_fields: [
~x"./CustomFields/KeyValuePair"l,
key: ~x"./Key/text()"os,
value: ~x"./Value/text()"os
],
line_type: ~x"./LineType/text()"os,
quantity: ~x"./Quantity/text()"oi
],
description: ~x"./Description/text()"s,
can_approve: ~x"./CanApprove/text()"b,
internal_notes: ~x"./InvoiceInternalNotes/text()"s,
custom_fields: [
~x"./CustomFields/KeyValuePair"l,
key: ~x"./Key/text()"os,
value: ~x"./Value/text()"os
],
is_paid_manually: ~x"./IsPaidManually/text()"b,
status: ~x"./InvoiceStatus/text()"s,
currency: ~x"./Currency/text()"s,
approvers: [
~x"./Approvers/TipaltiInvoiceApprover"l,
name: ~x"./Name/text()"s,
email: ~x"./Email/text()"s,
order: ~x"./Order/text()"oi
],
number: ~x"./InvoiceNumber/text()"s,
approval_date: ~x"./ApprovalDate/text()"s,
payer_entity_name: ~x"./PayerEntityName/text()"s,
amount_due: ~x"./AmountDue/text()"s
]
}
) do
{:ok, Invoice.from_maps!(invoice_maps)}
end
end
@doc """
Not yet implemented
"""
@spec get_payer_fees() :: {:error, :not_yet_implemented}
def get_payer_fees(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec get_processing_request_status() :: {:error, :not_yet_implemented}
def get_processing_request_status(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec get_provider_accounts() :: {:error, :not_yet_implemented}
def get_provider_accounts(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec get_updated_payments() :: {:error, :not_yet_implemented}
def get_updated_payments(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec log_integration_error() :: {:error, :not_yet_implemented}
def log_integration_error(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_multi_currency_payment_file() :: {:error, :not_yet_implemented}
def process_multi_currency_payment_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_multi_currency_payment_file_async() :: {:error, :not_yet_implemented}
def process_multi_currency_payment_file_async(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_payment_file() :: {:error, :not_yet_implemented}
def process_payment_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_payment_file_async() :: {:error, :not_yet_implemented}
def process_payment_file_async(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_payments() :: {:error, :not_yet_implemented}
def process_payments(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_payments_async() :: {:error, :not_yet_implemented}
def process_payments_async(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec process_payments_async_result() :: {:error, :not_yet_implemented}
def process_payments_async_result(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_multi_currency_payment_file() :: {:error, :not_yet_implemented}
def test_multi_currency_payment_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_multi_currency_payment_file_async() :: {:error, :not_yet_implemented}
def test_multi_currency_payment_file_async(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_payment_file() :: {:error, :not_yet_implemented}
def test_payment_file(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_payment_file_async() :: {:error, :not_yet_implemented}
def test_payment_file_async(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_payments() :: {:error, :not_yet_implemented}
def test_payments(), do: {:error, :not_yet_implemented}
@doc """
Not yet implemented
"""
@spec test_payments_async() :: {:error, :not_yet_implemented}
def test_payments_async(), do: {:error, :not_yet_implemented}
end
| lib/tipalti/api/payer.ex | 0.863046 | 0.452717 | payer.ex | starcoder |
defmodule Roll35Core.Types do
@moduledoc """
Core type definitions for Roll35.
"""
@typedoc """
Represents an item.
"""
@type item :: %{
:name => String.t(),
optional(atom()) => term()
}
@typedoc """
Represents the category of an item.
"""
@type category ::
:armor | :weapon | :potion | :ring | :rod | :scroll | :staff | :wand | :wondrous
@categories [:armor, :weapon, :potion, :ring, :rod, :scroll, :staff, :wand, :wondrous]
@typedoc """
Represents the rank (minor/medium/major) of an item.
"""
@type rank :: :minor | :medium | :major
@ranks [:minor, :medium, :major]
@typedoc """
A subset of `rank`, used by rods and staves.
"""
@type limited_rank :: :medium | :major
@limited_ranks [:medium, :major]
@typedoc """
Represents the sub-rank (lesser/greater) of an item.
"""
@type subrank :: :lesser | :greater
@subranks [:lesser, :greater]
@typedoc """
A superset of `subrank`, used by minor slotless items.
"""
@type full_subrank :: :least | subrank
@full_subranks [:least | @subranks]
@typedoc """
Represents the slot of a wondrous item.
"""
@type slot ::
:belt
| :body
| :chest
| :eyes
| :feet
| :hands
| :head
| :headband
| :neck
| :shoulders
| :wrists
| :slotless
@slots [
:belt,
:body,
:chest,
:eyes,
:feet,
:hands,
:head,
:headband,
:neck,
:shoulders,
:wrists,
:slotless
]
@typedoc """
A single item entry.
"""
@type item_entry :: %{weight: non_neg_integer(), value: %{atom() => any}}
@typedoc """
A flat list of items.
This is an internal structure used by many of the data agents.
"""
@type flat_itemlist :: [item_entry, ...]
@typedoc """
A map of ranks to lists of items.
This is an internal structure used by many of the data agents.
"""
@type itemlist :: %{rank => [item_entry, ...]}
@typedoc """
A map of subranks to lists of items.
This is an internal structure used by many of the data agents.
"""
@type subranked_itemlist :: %{full_subrank => [item_entry, ...]}
@typedoc """
A map of ranks and subranks to lists of items.
This is an internal structure used by many of the data agents.
"""
@type ranked_itemlist :: %{rank => subranked_itemlist}
@doc """
Check that a value is a valid category.
"""
defguard is_category(value) when value in @categories
@doc """
Check that a value is a valid rank.
"""
defguard is_rank(value) when value in @ranks
@doc """
Check that a value is a valid limited_rank.
"""
defguard is_limited_rank(value) when value in @limited_ranks
@doc """
Check that a value is a valid subrank.
"""
defguard is_subrank(value) when value in @subranks
@doc """
Check that a value is a valid full_subrank.
"""
defguard is_full_subrank(value) when value in @full_subranks
@doc """
Check that a value is a valid slot.
"""
defguard is_slot(value) when value in @slots
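# These guards are meant for use in callers' function heads, e.g.
# (hypothetical caller, after `import Roll35Core.Types`):
#
#     def roll_for(rank) when is_rank(rank), do: ...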
@doc """
Return a list of valid categories.
"""
@spec categories :: nonempty_list(category)
def categories do
@categories
end
@doc """
Return a list of valid ranks.
"""
@spec ranks :: nonempty_list(rank)
def ranks do
@ranks
end
@doc """
Return a list of valid limited_ranks.
"""
@spec limited_ranks :: nonempty_list(limited_rank)
def limited_ranks do
@limited_ranks
end
@doc """
Return a list of valid subranks.
"""
@spec subranks :: nonempty_list(subrank)
def subranks do
@subranks
end
@doc """
Return a list of valid full_subranks.
"""
@spec full_subranks :: nonempty_list(full_subrank)
def full_subranks do
@full_subranks
end
@doc """
Return a list of valid slots.
"""
@spec slots :: nonempty_list(slot)
def slots do
@slots
end
@doc """
Create a category atom from a string.
"""
@spec category_from_string(String.t()) :: category
def category_from_string(str) when is_binary(str) do
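# Referencing @categories embeds the literal list (and its atoms) in the
# compiled function, so String.to_existing_atom/1 below cannot raise for a
# valid category name. The sibling functions use the same pattern.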
_ = @categories
String.to_existing_atom(str)
end
@doc """
Create a rank atom from a string.
"""
@spec rank_from_string(String.t()) :: rank
def rank_from_string(str) when is_binary(str) do
_ = @ranks
String.to_existing_atom(str)
end
@doc """
Create a limited_rank atom from a string.
"""
@spec limited_rank_from_string(String.t()) :: limited_rank
def limited_rank_from_string(str) when is_binary(str) do
_ = @limited_ranks
String.to_existing_atom(str)
end
@doc """
Create a subrank atom from a string.
"""
@spec subrank_from_string(String.t()) :: subrank
def subrank_from_string(str) when is_binary(str) do
_ = @subranks
String.to_existing_atom(str)
end
@doc """
Create a full_subrank atom from a string.
"""
@spec full_subrank_from_string(String.t()) :: full_subrank
def full_subrank_from_string(str) when is_binary(str) do
_ = @full_subranks
String.to_existing_atom(str)
end
@doc """
Create a slot atom from a string.
"""
@spec slot_from_string(String.t()) :: slot
def slot_from_string(str) when is_binary(str) do
_ = @slots
String.to_existing_atom(str)
end
end
| apps/roll35_core/lib/roll35_core/types.ex | 0.910665 | 0.513851 | types.ex | starcoder |
defmodule ConnectFour.Game do
@moduledoc """
A Connect Four game.
Players are distinguished by game piece color (yellow and red). Moves are
represented by the columns in which they are made.
To create a new game, create an empty `ConnectFour.Game.t()` struct
(`%ConnectFour.Game{}`).
Yellow moves first.
"""
import Bitwise
alias ConnectFour.Game
defstruct(
bitboards: %{yellow: 0, red: 0},
column_heights: %{0 => 0, 1 => 7, 2 => 14, 3 => 21, 4 => 28, 5 => 35, 6 => 42},
moves: [],
plies: 0,
result: nil
)
@typedoc """
The representation of a Connect Four game. The struct contains five fields,
three of which should be considered read-only, and two which should be
considered private. None should ever be modified manually.
The read-only fields are:
- `moves`
- `plies`
- `result`
And the private fields are:
- `bitboards`
- `column_heights`
"""
@type t :: %Game{
bitboards: %{required(:yellow) => bitboard(), required(:red) => bitboard()},
column_heights: column_heights(),
moves: moves(),
plies: non_neg_integer(),
result: result()
}
@typedoc """
One of seven Connect Four game columns, zero-indexed.
"""
@type column :: 0..6
@typedoc """
A list of Connect Four moves, describing a game. Yellow always moves first, so
the first move in the list will always be yellow's.
"""
@type moves :: [column()]
@typedoc """
A Connect Four player (yellow always moves first).
"""
@type player :: :yellow | :red
@typedoc """
A ply is one move completed by one player. Plies are thus the total number of
moves completed in the game so far.
For example, if a game starts and yellow takes their turn (by "dropping" a
game piece into a column) and then red does the same, that game has two plies.
"""
@type plies :: non_neg_integer()
@typedoc """
A Connect Four game result. `nil` means that the game has not yet ended.
`:draw`s occur when all columns are full and no player has connected four.
"""
@type result :: :yellow_wins | :red_wins | :draw | nil
@typep bitboard :: non_neg_integer()
@typep column_height :: non_neg_integer()
@typep column_heights :: %{
required(0) => column_height(),
required(1) => column_height(),
required(2) => column_height(),
required(3) => column_height(),
required(4) => column_height(),
required(5) => column_height(),
required(6) => column_height()
}
@doc """
Submit a move for whichever player's turn it currently is by specifying a column (0
through 6).
## Examples
iex> alias ConnectFour.Game
iex> {:ok, updated_game} = %Game{} |> Game.move(0)
iex> updated_game.moves
[0]
Make multiple moves at once by passing a list of moves.
## Examples
iex> alias ConnectFour.Game
iex> {:ok, updated_game} = %Game{} |> Game.move([0, 1, 0])
iex> updated_game.moves
[0, 1, 0]
"""
@spec move(Game.t(), column() | moves()) :: {:ok, Game.t()} | {:error, String.t()}
def move(game = %Game{}, column) when is_integer(column) do
cond do
!is_nil(game.result) ->
{:error, "Game is over"}
legal_move?(column, game.column_heights) ->
{:ok, make_move(game, column)}
true ->
{:error, "Illegal move"}
end
end
def move(game = %Game{}, moves) when is_list(moves), do: make_many_moves(game, moves)
@doc """
Get a list of all the legal moves for a game. Returns an empty list if the
game is over.
## Examples
iex> alias ConnectFour.Game
iex> %Game{} |> Game.legal_moves()
[0, 1, 2, 3, 4, 5, 6]
"""
@spec legal_moves(Game.t()) :: moves()
def legal_moves(game = %Game{}), do: list_legal_moves(game.column_heights)
@spec make_move(Game.t(), column()) :: Game.t()
defp make_move(game = %Game{}, column) do
{old_column_height, new_column_heights} =
Map.get_and_update!(game.column_heights, column, fn column_height ->
{column_height, column_height + 1}
end)
bitboard_color = color_to_move(game)
old_bitboard = Map.get(game.bitboards, bitboard_color)
new_moves = game.moves ++ [column]
new_bitboard = old_bitboard ^^^ (1 <<< old_column_height)
new_plies = game.plies + 1
updated_game = %{
game
| :moves => new_moves,
:plies => new_plies,
:bitboards => %{game.bitboards | bitboard_color => new_bitboard},
:column_heights => new_column_heights
}
set_result(updated_game)
end
@spec set_result(Game.t()) :: Game.t()
defp set_result(updated_game = %Game{}) do
cond do
connected_four?(updated_game) ->
%{updated_game | result: winning_color(updated_game)}
updated_game.plies == 42 ->
%{updated_game | result: :draw}
true ->
updated_game
end
end
@spec color_to_move(Game.t()) :: player()
defp color_to_move(%Game{plies: plies}) do
case plies &&& 1 do
0 -> :yellow
1 -> :red
end
end
@spec color_last_moved(Game.t()) :: player()
defp color_last_moved(%Game{plies: plies}) do
case plies &&& 1 do
1 -> :yellow
0 -> :red
end
end
@spec make_many_moves(Game.t(), moves()) :: {:ok, Game.t()} | {:error, String.t()}
defp make_many_moves(game = %Game{}, [next_move | remaining_moves]) do
if legal_move?(next_move, game.column_heights) do
updated_game = make_move(game, next_move)
make_many_moves(updated_game, remaining_moves)
else
{:error, "One or more invalid moves"}
end
end
defp make_many_moves(game = %Game{}, []) do
{:ok, game}
end
@spec legal_move?(column(), column_heights()) :: boolean()
defp legal_move?(column, column_heights) do
Enum.member?(list_legal_moves(column_heights), column)
end
@spec connected_four?(Game.t()) :: boolean()
defp connected_four?(game = %Game{}) do
bitboard = Map.get(game.bitboards, color_last_moved(game))
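# Offsets in the 7-bits-per-column encoding: 1 = vertical, 7 = horizontal,
# 6 and 8 = the two diagonals.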
direction_offsets = [1, 7, 6, 8]
Enum.any?(direction_offsets, fn direction_offset ->
intermediate_bitboard = bitboard &&& bitboard >>> direction_offset
(intermediate_bitboard &&& intermediate_bitboard >>> (2 * direction_offset)) != 0
end)
end
defp winning_color(%Game{plies: plies}) do
case plies &&& 1 do
1 -> :yellow_wins
0 -> :red_wins
end
end
@spec list_legal_moves(column_heights()) :: [integer()]
defp list_legal_moves(column_heights) do
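# Bit 6 of each 7-bit column group is a sentinel row: once a column's height
# pointer reaches it, that column is full.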
full_top = 0b1000000_1000000_1000000_1000000_1000000_1000000_1000000
Enum.reduce(0..6, [], fn column, legal_moves_ ->
if (full_top &&& 1 <<< column_heights[column]) == 0 do
legal_moves_ ++ [column]
else
legal_moves_
end
end)
end
end
| lib/connect_four/game.ex | 0.929103 | 0.767625 | game.ex | starcoder |
defmodule MyList do
require MyEnum
require Integer
@doc """
MyList.span(from, to, step) returns a list starting at from, stepping by step, and not passing to; the resulting list can be empty.
## Examples
iex> MyList.span(6, 20, 5)
[6, 11, 16]
iex> MyList.span(0, 20, 2)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
iex> MyList.span(1, 10)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
iex> MyList.span(20, 0, 2)
[]
iex> MyList.span(1, 2, 3)
[1]
iex> MyList.span(1, 1, 3)
[1]
MyList.span(1, 10, 0)
[]
MyList.span(10, 0, -1)
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
MyList.span(10, 0, -10)
[10, 0]
MyList.span(10, 0, -12)
[10]
MyList.span(-10, 10, 10)
[-10, 0, 10]
MyList.span(-10, 10, 11)
[-10, 1]
MyList.span(-10, 10, -11)
[]
"""
def span(from, to, step \\ 1)
def span(_, _, 0), do: []
def span(from, to, step) when not(is_integer(from)) or not(is_integer(to)) or not(is_integer(step)) do
[]
end
def span(from, to, step) when from > to and step > 0 do
[]
end
def span(from, to, step) when from < 0 and to < 0 and step < 0 and from > to do
[]
end
def span(from, to, step) do
_span([], from, from, to, step)
end
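# _span/5 builds the result with a tail-recursive accumulator (constant-time
# prepends), then reverses once in the terminating clauses below.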
defp _span(lr, curr, _, to, step) when curr > to and step > 0 do
MyEnum.reverse(lr)
end
defp _span(lr, curr, _, to, step) when curr < to and step < 0 do
MyEnum.reverse(lr)
end
# Note: IEx displays MyList.span(10, 0, -12), i.e. [10], as the charlist '\n'.
defp _span(lr, curr, from, to, step) do
_span([curr | lr], curr + step, from, to, step)
end
@doc """
MyList.gen_prime(from, to) returns the list of primes between from and to.
## Examples
iex> MyList.gen_prime(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
iex> MyList.gen_prime(2, 50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
MyList.gen_prime(2, 20)
[2, 3, 5, 7, 11, 13, 17, 19]
MyList.gen_prime(20, 0)
[]
MyList.gen_prime(-20, 0)
[]
MyList.gen_prime(-20, -10)
[]
"""
def gen_prime(from \\ 2, to)
def gen_prime(from, to) when not(is_integer(from)) or not(is_integer(to)) do
[]
end
def gen_prime(from, to) when from > to do
[]
end
def gen_prime(from, _) when from < 0 do
[]
end
def gen_prime(0, to), do: gen_prime(2, to)
def gen_prime(1, to), do: gen_prime(2, to)
def gen_prime(from, to) when from == 2 do
for x <- span(from + 1, to, 2), is_prime?(x), into: [from], do: x
end
def gen_prime(from, to) when Integer.is_even(from) do
for x <- span(from + 1, to, 2), is_prime?(x), into: [], do: x
end
def gen_prime(from, to) when Integer.is_odd(from) do
for x <- span(from, to, 2), is_prime?(x), into: [], do: x
end
defp is_prime?(x) do
# Integer square root: the largest integer not exceeding sqrt(x).
limit = trunc(:math.sqrt(x))
MyEnum.all? span(3, limit), &(rem(x, &1) != 0)
end
end
| my_enum/lib/my_list.ex | 0.713032 | 0.534005 | my_list.ex | starcoder |
defmodule HTTPStream do
@moduledoc """
Main API interface.
HTTPStream is a tiny tiny library for streaming big big files. It works by
wrapping HTTP requests onto a Stream. You can use it with Flow, write it to
disk through regular streams and more!
```
HTTPStream.get(large_image_url)
|> Stream.into(File.stream!("large_image.png"))
|> Stream.run()
```
The adapter can be configured by setting in your `config/config.exs`:
```
config :http_stream, adapter: HTTPStream.Adapter.Mint
```
At the moment, only two adapters are supported: Mint (default) and HTTPoison.
"""
alias HTTPStream.Adapter
alias HTTPStream.Request
@type method :: String.t()
@doc """
Performs a GET request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:query` (optional) - Keyword list of query params to add to the request.
"""
@spec get(String.t(), keyword()) :: Enumerable.t()
def get(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
query = Keyword.get(opts, :query, [])
request("GET", url, headers, query)
end
@doc """
Performs a DELETE request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:query` (optional) - Keyword list of query params to add to the request.
"""
@spec delete(String.t(), keyword()) :: Enumerable.t()
def delete(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
query = Keyword.get(opts, :query, [])
request("DELETE", url, headers, query)
end
@doc """
Performs a OPTIONS request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:query` (optional) - Keyword list of query params to add to the request.
"""
@spec options(String.t(), keyword()) :: Enumerable.t()
def options(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
query = Keyword.get(opts, :query, [])
request("OPTIONS", url, headers, query)
end
@doc """
Performs a TRACE request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:query` (optional) - Keyword list of query params to add to the request.
"""
@spec trace(String.t(), keyword()) :: Enumerable.t()
def trace(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
query = Keyword.get(opts, :query, [])
request("TRACE", url, headers, query)
end
@doc """
Performs a HEAD request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:query` (optional) - Keyword list of query params to add to the request.
"""
@spec head(String.t(), keyword()) :: Enumerable.t()
def head(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
query = Keyword.get(opts, :query, [])
request("HEAD", url, headers, query)
end
@doc """
Performs a POST request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:params` (optional) - Keyword list of params, JSON-encoded as the request body.
"""
@spec post(String.t(), keyword()) :: Enumerable.t()
def post(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
params = Keyword.get(opts, :params, "") |> to_json()
request("POST", url, headers, params)
end
@doc """
Performs a PUT request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:params` (optional) - Keyword list of params, JSON-encoded as the request body.
"""
@spec put(String.t(), keyword()) :: Enumerable.t()
def put(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
params = Keyword.get(opts, :params, "") |> to_json()
request("PUT", url, headers, params)
end
@doc """
Performs a PATCH request.
Supported options:
* `:headers` (optional) - Keyword list of HTTP headers to add to the request.
* `:params` (optional) - Keyword list of params, JSON-encoded as the request body.
"""
@spec patch(String.t(), keyword()) :: Enumerable.t()
def patch(url, opts \\ []) do
headers = Keyword.get(opts, :headers, []) |> to_keyword()
params = Keyword.get(opts, :params, "") |> to_json()
request("PATCH", url, headers, params)
end
@doc """
Performs an HTTP request.
Supported methods: GET, OPTIONS, HEAD, TRACE, POST, PUT, PATCH and DELETE
"""
@spec request(method(), String.t(), keyword(), binary()) :: Enumerable.t()
def request(method, url, headers \\ [], body \\ "") do
Request.new(method, url, headers: headers, body: body)
|> do_request()
end
defp do_request(%Request{} = request) do
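# Lazily drive the configured adapter: open the request on first demand,
# emit chunks as they are parsed, and close the connection when the stream
# halts or completes.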
Stream.resource(
fn -> adapter().request(request) end,
&adapter().parse_chunks/1,
&adapter().close/1
)
end
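# Note: despite the name, this returns a list of {String.t(), value} tuples
# (HTTP header style), not a keyword list of atoms.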
defp to_keyword(enum) do
Stream.map(enum, fn {k, v} -> {to_string(k), v} end)
|> Enum.to_list()
end
defp to_json(term), do: Jason.encode!(term)
defp adapter, do: Application.get_env(:http_stream, :adapter, Adapter.Mint)
end
| lib/http_stream.ex | 0.882959 | 0.639975 | http_stream.ex | starcoder |
defmodule Expyplot.Plot do
@moduledoc """
<b>This is the end-user API for pyplot.</b>
See the matplotlib.plot docs at:
http://matplotlib.org/api/pyplot_api.html
<b>Most of these functions are UNTESTED. I know. That's terrible. But you can test them by using them! Then, if they don't work, open an issue on the github:</b>
https://github.com/MaxStrange/expyplot/issues
<b>Or better yet, you could write some tests or a patch and open a pull request!</b>
Also, since the return values are simply the string representations of whatever was returned to the python server, all of the return values
will need to be converted into actual Elixir datastructures. I would love to get around to changing this behavior in the future, but for now, it is
as it is. <b>This also means that numpy arrays that are returned are likely to be truncated</b>.
This documentation is mostly just copied from the matplotlib.plot docs, and much of it isn't translated to Elixir like it should be.
"""
@doc """
Plot the autocorrelation of x.
"""
def acorr(x, opts \\ [hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axis", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
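# Every plotting function in this module follows the same pattern: build the
# Python call as a string with Codebuilder.build_code/1 and queue it on the
# Python server via Server.Commapi.add_code/1.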
@doc """
Plot the angle spectrum.
Compute the angle spectrum (wrapped phase spectrum) of x. Data is padded to a length of pad_to and the windowing function <i>window</i> <b>Not used yet
in Expyplot</b> is applied to the signal.
Example call:
```elixir
1..1_000_000 |> Enum.to_list |> Expyplot.Plot.angle_spectrum([_Fs: 2, _Fc: 0, pad_to: nil, sides: :default])
```
"""
def angle_spectrum(x, opts \\ [_Fs: 2, _Fc: 0, pad_to: nil, sides: :default], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.angle_spectrum", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Annotate the point xy with text s.
Additional kwargs are passed to Text.
"""
def annotate(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.annotate", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add an arrow to the axes.
Draws arrow on specified axis from (x, y) to (x + dx, y + dy). Uses FancyArrow patch to construct the arrow.
"""
def arrow(x, y, dx, dy, opts \\ [hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.arrow", nonnamed: [x, y, dx, dy], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Autoscale the axis view to the data (toggle).
Convenience method for simple axis view autoscaling. It turns autoscaling on or off, and then, if autoscaling for either axis is on, it
performs the autoscaling on the specified axis or axes.
"""
def autoscale(opts \\ [enable: true, axis: :both, tight: nil]) do
Codebuilder.build_code(funcname: "plt.autoscale", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to autumn and apply to current image if any. See help(colormaps) for more information
"""
def autumn do
Codebuilder.build_code(funcname: "plt.autumn", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Add an axes to the figure.
The axes is added at position rect specified by:
- axes() by itself creates a default full subplot(111) window axis.
- axes(rect, [facecolor: :w]), where rect = [left, bottom, width, height] in normalized (0, 1) units. facecolor is the background
color for the axis, default white.
- axes(h) where h is an axes instance makes h the current axis.
"""
def axes(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axes", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add a horizontal line across the axis.
Typical calls:
```elixir
Expyplot.Plot.axhline(linewidth: 4, color: :r) # Draw a thick red hline at 'y'=0 that spans the xrange
Expyplot.Plot.axhline(y: 1) # Draw a default hline at 'y'=1 that spans the xrange
Expyplot.Plot.axhline(y: 0.5, xmin: 0.25, xmax: 0.75) # Draw a default hline at 'y'=0.5 that spans the middle half of the xrange
```
"""
def axhline(opts \\ [y: 0, xmin: 0, xmax: 1, hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axhline", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Add a horizontal span (rectangle) across the axis.
Draw a horizontal span (rectangle) from ymin to ymax. With the default values of xmin = 0 and xmax = 1, this always spans the xrange,
regardless of the xlim settings, even if you change them, e.g., with the set_xlim() command. That is, the horizontal extent is in axes
coords: 0=left, 0.5=middle, 1.0=right but the y location is in data coordinates.
"""
def axhspan(ymin, ymax, opts \\ [xmin: 0, xmax: 1, hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axhspan", nonnamed: [ymin, ymax], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
<b>This is how <i>axis</i> has been implemented in this library: as two functions - a get and a set, rather than just the one</b>
Convenience method to get or set axis properties.
This function is of limited usefulness at this stage, as it simply returns the string representation of the current axis.
iex> Expyplot.Plot.axis_get()
"(0.0, 1.0, 0.0, 1.0)"
"""
def axis_get(kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axis", nonnamed: [], named: kwargs) |> Server.Commapi.add_code
end
@doc """
<b>This is how <i>axis</i> has been implemented in this library: as two functions - a get and a set, rather than just the one</b>
Convenience method to get or set axis properties.
## Some examples:
iex> Expyplot.Plot.axis_set("off") # Turn off the axis lines and labels
"(0.0, 1.0, 0.0, 1.0)"
iex> Expyplot.Plot.axis_set("equal") # Changes limits of x and y axis so that equal increments of x and y have the same length
"(-0.055, 0.055, -0.055, 0.055)"
"""
def axis_set(v, kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axis", nonnamed: [v], named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add a vertical line across the axes.
## Examples
- Draw a thick red vline at <i>x</i> = 0 that spans the yrange:
```elixir
Expyplot.Plot.axvline(linewidth: 4, color: :r)
```
- Draw a default vline at <i>x</i> = 1 that spans the yrange:
```elixir
Expyplot.Plot.axvline(x: 1)
```
- Draw a default vline at <i>x</i> = 0.5 that spans the middle half of the yrange
```elixir
Expyplot.Plot.axvline(x: 0.5, ymin: 0.25, ymax: 0.75)
```
"""
def axvline(opts \\ [x: 0, ymin: 0, ymax: 1, hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axvline", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Add a vertical span (rectangle) across the axes.
Draw a vertical span (rectangle) from xmin to xmax. With the default values of ymin = 0 and ymax = 1, this always spans the yrange,
regardless of the ylim settings, even if you change them, e.g., with the set_ylim() command. That is, the vertical extent is in axes
coords: 0=bottom, 0.5=middle, 1.0=top but the y location is in data coordinates.
## Examples
Draw a vertical, green, translucent rectangle from x = 1.25 to x = 1.55 that spans the yrange of the axes.
iex> Expyplot.Plot.axvspan(1.25, 1.55, facecolor: :g, alpha: 0.5)
""
"""
def axvspan(xmin, xmax, opts \\ [ymin: 0, ymax: 1, hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.axvspan", nonnamed: [xmin, xmax], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a bar plot.
Make a bar plot with rectangles bounded by: <i>left</i>, <i>left + width</i>, <i>bottom</i>, <i>bottom + height</i>
(left, right, bottom and top edges).
"""
def bar(left, height, opts \\ [width: 0.8, bottom: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.bar", nonnamed: [left, height], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a 2-D field of barbs.
"""
def barbs(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.barbs", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make a horizontal bar plot.
Make a horizontal bar plot with rectangles bounded by: <i>left</i>, <i>left + width</i>, <i>bottom</i>, <i>bottom + height</i>
(left, right, bottom and top edges).
<i>bottom, width, height</i>, and <i>left</i> can be either scalars or sequences
"""
def barh(bottom, width, opts \\ [height: 0.8, left: nil, hold: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.barh", nonnamed: [bottom, width], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to bone and apply to current image if any.
"""
def bone do
Codebuilder.build_code(funcname: "plt.bone", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Make a box and whisker plot.
Make a box and whisker plot for each column of <i>x</i> or each vector in sequence <i>x</i>. The box extends from the lower to upper quartile values
of the data, with a line at the median. The whiskers extend from the box to show the range of the data. Flier points are those past the end
of the whiskers.
"""
def boxplot(x, opts \\ [notch: nil, sym: nil, vert: nil, whis: nil, positions: nil, widths: nil, patch_artist: nil, bootstrap: nil, usermedians: nil,
conf_intervals: nil, meanline: nil, showmeans: nil, showcaps: nil, showbox: nil, showfliers: nil, boxprops: nil, labels: nil,
flierprops: nil, medianprops: nil, meanprops: nil, capprops: nil, whiskerprops: nil, manage_xticks: true, autorange: false,
zorder: nil, hold: nil, data: nil]) do
Codebuilder.build_code(funcname: "plt.boxplot", nonnamed: [x], named: opts) |> Server.Commapi.add_code
end
@doc """
Plot horizontal bars.
A collection of horizontal bars spanning <i>yrange</i> with a sequence of <i>xranges</i>.
"""
def broken_barh(xranges, yrange, opts \\ [hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.broken_barh", nonnamed: [xranges, yrange], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Clear the current axes.
"""
def cla do
Codebuilder.build_code(funcname: "plt.cla", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Label a contour plot.
"""
def clabel(cs, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.clabel", nonnamed: [cs] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Clear the current figure.
"""
def clf do
Codebuilder.build_code(funcname: "plt.clf", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the color limits of the current image.
To apply clim to all axes images do:
```elixir
clim(vmin: 0, vmax: 0.5)
```
If either <i>vmin</i> or <i>vmax</i> is nil, the image min/max respectively will be used for color scaling.
"""
def clim(opts \\ [vmin: nil, vmax: nil]) do
Codebuilder.build_code(funcname: "plt.clim", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Close a figure window.
<i>close()</i> by itself closes the current figure
<i>close(num)</i> closes figure number <i>num</i>
<i>close(name)</i> where <i>name</i> is a string, closes figure with that label
<i>close(:all)</i> closes all the figure windows
"""
def close(opts \\ []) do
Codebuilder.build_code(funcname: "plt.close", nonnamed: opts, named: []) |> Server.Commapi.add_code
end
@doc """
Plot the coherence between <i>x</i> and <i>y</i>.
Plot the coherence between <i>x</i> and <i>y</i>. Coherence is the normalized cross spectral density.
"""
def cohere(x, y, opts \\ [nfft: 256, _Fs: 2, _Fc: 0, noverlap: 0, pad_to: nil, sides: :default, scale_by_freq: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.cohere", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Add a colorbar to a plot.
"""
def colorbar(opts \\ [mappable: nil, cax: nil, ax: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.colorbar", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
This is a do-nothing function to provide you with help on how matplotlib handles colors.
"""
def colors do
Codebuilder.build_code(funcname: "plt.colors", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot contours.
contour() and contourf() draw contour lines and filled contours, respectively. Except as noted, function signatures and return values
are the same for both versions.
contourf() differs from the MATLAB version in that it does not draw the polygon edges. To draw edges, add line contours with calls to contour().
"""
def contour(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.contour", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Plot contours.
contour() and contourf() draw contour lines and filled contours, respectively. Except as noted, function signatures and return values
are the same for both versions.
contourf() differs from the MATLAB version in that it does not draw the polygon edges. To draw edges, add line contours with calls to contour().
"""
def contourf(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.contourf", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to cool and apply to current image if any.
"""
def cool do
Codebuilder.build_code(funcname: "plt.cool", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to copper and apply to current image if any.
"""
def copper do
Codebuilder.build_code(funcname: "plt.copper", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot the cross-spectral density.
"""
def csd(x, y, opts \\ [nfft: 256, _Fs: 2, _Fc: 0, noverlap: 0, pad_to: nil, sides: :default, scale_by_freq: nil, return_line: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.csd", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Remove an axes from the current figure. If <i>ax</i> doesn't exist, an error will be raised.
"""
def delaxes(opts \\ []) do
Codebuilder.build_code(funcname: "plt.delaxes", nonnamed: opts, named: []) |> Server.Commapi.add_code
end
@doc """
Redraw the current figure.
This is used to update a figure that has been altered, but not automatically re-drawn. If interactive mode is on (ion()), this should be only
rarely needed, but there may be ways to modify the state of a figure without marking it as <i>stale</i>.
"""
def draw do
Codebuilder.build_code(funcname: "plt.draw", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot an errorbar graph.
Plot x versus y with error deltas in yerr and xerr. Vertical errorbars are plotted if yerr is not nil. Horizontal errorbars are plotted if xerr is
not nil.
x, y, xerr, and yerr can all be scalars, which plots a single error bar at x, y.
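For example (a sketch; note that passing an `opts` list replaces the entire default list):
```elixir
x = 1..5 |> Enum.to_list
y = Enum.map(x, fn v -> v * v end)
Expyplot.Plot.errorbar(x, y, yerr: 0.5)
```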
"""
def errorbar(x, y, opts \\ [yerr: nil, xerr: nil, fmt: "", ecolor: nil, elinewidth: nil, capsize: nil, barsabove: false, lolims: false, uplims: false,
xlolims: false, xuplims: false, errorevery: 1, capthick: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.errorbar", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot identical parallel lines at specific positions.
Plot parallel lines at the given <i>positions</i>. <i>positions</i> should be a 1D or 2D array-like object, with each row corresponding to a row or
column of lines.
This type of plot is commonly used in neuroscience for representing neural events, where it is commonly called a spike raster, dot raster, or raster plot.
However, it is useful in any situation where you wish to show the timing or position of multiple sets of discrete events, such as the arrival
times of people to a business on each day of the month or the date of hurricanes each year of the last century.
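For example (a sketch with three rows of random event positions):
```elixir
positions = for _ <- 1..3, do: (for _ <- 1..10, do: :rand.uniform())
Expyplot.Plot.eventplot(positions)
```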
"""
def eventplot(positions, opts \\ [orientation: :horizontal, lineoffsets: 1, linelengths: 1, linewidths: nil, colors: nil, linestyles: :solid,
hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.eventplot", nonnamed: [positions], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Adds a non-resampled image to the figure.
"""
def figimage(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.figimage", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Place a legend in the figure.
"""
def figlegend(handles, labels, loc, kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.figlegend", nonnamed: [handles, labels, loc], named: kwargs) |> Server.Commapi.add_code
end
@doc """
"""
def fignum_exists(num) do
Codebuilder.build_code(funcname: "plt.fignum_exists", nonnamed: [num], named: []) |> Server.Commapi.add_code
end
@doc """
Add text to figure.
Add text to figure at location <i>x, y</i> (relative 0-1 coords).
"""
def figtext(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.figtext", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Creates a new figure.
"""
def figure(opts \\ [num: nil, figsize: nil, dpi: nil, facecolor: nil, edgecolor: nil, frameon: true], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.figure", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot filled polygons.
"""
def fill(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.fill", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make filled polygons between two curves.
"""
def fill_between_vertical(x, y1, opts \\ [y2: 0, where: nil, interpolate: false, step: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.fill_between", nonnamed: [x, y1], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make filled polygons between two horizontal curves.
"""
def fill_between_horizontal(y, x1, opts \\ [x2: 0, where: nil, step: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.fill_between", nonnamed: [y, x1], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Find artist objects.
Recursively find all Artist instances contained in self.
"""
def findobj(opts \\ [o: nil, match: nil, include_self: true]) do
Codebuilder.build_code(funcname: "plt.findobj", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to flag and apply to current image if any.
"""
def flag do
Codebuilder.build_code(funcname: "plt.flag", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Get the current Axes instance on the current figure matching the given keyword args, or create one.
"""
def gca(kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.gca", named: [], nonnamed: kwargs) |> Server.Commapi.add_code
end
@doc """
Get a reference to the current figure.
"""
def gcf do
Codebuilder.build_code(funcname: "plt.gcf", named: [], nonnamed: []) |> Server.Commapi.add_code
end
@doc """
Get the current colorable artist. Specifically, returns the current ScalarMappable instance (image or patch collection), or "Nothing" if no
images or patch collections have been defined. The commands imshow() and figimage() create Image instances, and the commands
pcolor() and scatter() create Collection instances. The current image is an attribute of the current axes, or the nearest earlier axes
in the current figure that contains an image.
"""
def gci do
Codebuilder.build_code(funcname: "plt.gci", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
"""
def get_current_fig_manager do
Codebuilder.build_code(funcname: "plt.get_current_fig_manager", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Return a list of existing figure labels.
"""
def get_figlabels do
Codebuilder.build_code(funcname: "plt.get_figlabels", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Return a list of existing figure numbers.
"""
def get_fignums do
Codebuilder.build_code(funcname: "plt.get_fignums", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Get a sorted list of all of the plotting commands.
"""
def get_plot_commands do
Codebuilder.build_code(funcname: "plt.get_plot_commands", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
This will wait for <i>n</i> clicks from the user and return a list of the coordinates of each click.
If <i>timeout</i> is zero or negative, does not timeout.
If <i>n</i> is zero or negative, clicks are accumulated until a middle click (or potentially both mouse buttons at once) terminates the input.
Right clicking cancels the last input.
The buttons used for the various actions (adding points, removing points, terminating the inputs) can be overridden via the arguments
<i>mouse_add, mouse_pop</i> and <i>mouse_stop</i>, which give the associated mouse button: 1 for left, 2 for middle, 3 for right.
The keyboard can also be used to select points in case your mouse does not have one or more of the buttons. The delete and backspace
keys act like right clicking (i.e., remove last point), the enter key terminates input and any other key (not already used by the window
manager) selects a point.
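For example (a sketch; the click count is passed positionally):
```elixir
Expyplot.Plot.ginput([3], timeout: 0) # wait for 3 clicks, never timing out
```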
"""
def ginput(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.ginput", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to gray and apply to current image if any.
"""
def gray do
Codebuilder.build_code(funcname: "plt.gray", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Turn the axes grids on or off.
Set the axes grids on or off; <i>b</i> is a boolean. (For MATLAB compatibility, <i>b</i> may also be an atom :on or :off.)
"""
def grid(opts \\ [b: nil, which: :major, axis: :both], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.grid", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a hexagonal binning plot.
Make a hexagonal binning plot of <i>x</i> versus <i>y</i>, where <i>x, y</i> are 1-D sequences of the same length, <i>N</i>. If <i>C</i> is
nil (the default), this is a histogram of the number of occurrences of the observations at (x[i], y[i]).
If <i>C</i> is specified, it specifies values at the coordinate (x[i], y[i]). These values are accumulated for each hexagonal bin and then reduced
according to <i>reduce_C_function</i>, which defaults to numpy's mean function (np.mean). (If <i>C</i> is specified, it must also be a 1-D sequence
of the same length as <i>x</i> and <i>y</i>.)
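For example (a sketch using normally distributed samples):
```elixir
x = for _ <- 1..500, do: :rand.normal()
y = for _ <- 1..500, do: :rand.normal()
Expyplot.Plot.hexbin(x, y, gridsize: 20)
```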
"""
def hexbin(x, y, opts \\ [c: nil, gridsize: 100, bins: nil, xscale: :linear, yscale: :linear, extent: nil, cmap: nil, norm: nil, vmin: nil, vmax: nil,
alpha: nil, linewidths: nil, edgecolors: :none, mincnt: nil, marginals: false, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.hexbin", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a histogram.
Compute and draw the histogram of <i>x</i>. The return value is a tuple (<i>n, bins, patches</i>) or (<i>[n0, n1, ...], bins, [patches0, patches1, ...]</i>)
if the input contains multiple data.
Multiple data can be provided via <i>x</i> as a list of datasets of potentially different length, or as a 2-D ndarray in which each column is a dataset.
Note that the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
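For example (a sketch):
```elixir
data = for _ <- 1..1000, do: :rand.normal()
Expyplot.Plot.hist(data, bins: 30)
```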
"""
def hist(x, opts \\ [bins: nil, range: nil, normed: false, weights: nil, cumulative: false, bottom: nil, histtype: :bar, align: :mid, orientation: :vertical,
rwidth: nil, log: false, color: nil, label: nil, stacked: false, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.hist", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a 2D histogram plot.
"""
def hist2d(x, y, opts \\ [bins: 10, range: nil, normed: false, weights: nil, cmin: nil, cmax: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.hist2d", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot horizontal lines at each <i>y</i> from <i>xmin</i> to <i>xmax</i>.
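For example (a sketch using the default options):
```elixir
Expyplot.Plot.hlines([1.0, 2.0, 3.0], 0, 10)
```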
"""
def hlines(y, xmin, xmax, opts \\ [colors: :k, linestyles: :solid, label: "", hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.hlines", nonnamed: [y, xmin, xmax], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to hot and apply to current image if any.
"""
def hot do
Codebuilder.build_code(funcname: "plt.hot", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to hsv and apply to current image if any.
"""
def hsv do
Codebuilder.build_code(funcname: "plt.hsv", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Read an image from a file into an array.
<i>fname</i> may be a string path, or a valid URL.
If <i>format</i> is provided, will try to read file of that type, otherwise the format is deduced from the filename. If nothing can be deduced, PNG
is tried.
Return value is a numpy.array <b>But in expyplot, this will return a string, and it probably won't work</b>. For grayscale images, the return array is MxN.
For RGB images, the return value is MxNx4.
matplotlib can only read PNGs natively, but if PIL is installed, it will use it to load the image and return an array (if possible) which can
be used with imshow(). Note, URL strings may not be compatible with PIL. Check the PIL documentation for more information.
"""
def imread(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.imread", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Save an array as an image file.
The output formats available depend on the backend being used.
"""
def imsave(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.imsave", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Display an image on the axes.
"""
def imshow(x, opts \\ [cmap: nil, norm: nil, aspect: nil, interpolation: nil, alpha: nil, vmin: nil, vmax: nil,
origin: nil, extent: nil, shape: nil, filternorm: 1, filterrad: 4.0, imlim: nil, resample: nil,
url: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.imshow", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to inferno and apply to current image if any.
"""
def inferno do
Codebuilder.build_code(funcname: "plt.inferno", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Turn interactive mode off.
"""
def ioff do
Codebuilder.build_code(funcname: "plt.ioff", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Turn interactive mode on.
"""
def ion do
Codebuilder.build_code(funcname: "plt.ion", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Return status of interactive mode.
"""
def isinteractive do
Codebuilder.build_code(funcname: "plt.isinteractive", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to jet and apply to current image if any.
"""
def jet do
Codebuilder.build_code(funcname: "plt.jet", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Places a legend on the axes.
"""
def legend(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.legend", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Control behavior of tick locators.
"""
def locator_params(opts \\ [axis: :both, tight: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.locator_params", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a plot with log scaling on both the <i>x</i> and <i>y</i> axis.
loglog() supports all the keyword arguments of plot() and matplotlib.axes.Axes.set_xscale() / matplotlib.axes.Axes.set_yscale().
"""
def loglog(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.loglog", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to magma and apply to current image if any.
"""
def magma do
Codebuilder.build_code(funcname: "plt.magma", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot the magnitude spectrum.
"""
def magnitude_spectrum(x, opts \\ [_Fs: nil, _Fc: nil, window: nil, pad_to: nil, sides: nil, scale: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.magnitude_spectrum", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set or retrieve autoscaling margins.
"""
def margins(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.margins", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first dimension of the array) are displayed horizontally. The aspect ratio of the
figure window is that of the array, unless this would make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
"""
def matshow(a, opts \\ [fignum: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.matshow", nonnamed: [a], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Remove minor ticks from the current plot.
"""
def minorticks_off do
Codebuilder.build_code(funcname: "plt.minorticks_off", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to nipy_spectral and apply to current image if any.
"""
def nipy_spectral do
Codebuilder.build_code(funcname: "plt.nipy_spectral", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Pause for <i>interval</i> seconds.
If there is an active figure it will be updated and displayed, and the GUI event loop will run during the pause.
If there is no active figure, or if a non-interactive backend is in use, this executes time.sleep(interval).
This can be used for crude animation.
"""
def pause(interval) do
Codebuilder.build_code(funcname: "plt.interval", nonnamed: [interval], named: []) |> Server.Commapi.add_code
end
@doc """
Create a pseudocolor plot of a 2-D array.
"""
def pcolor(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.pcolor", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a quadrilateral mesh.
"""
def pcolormesh(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.pcolormesh", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Plot the phase spectrum.
"""
def phase_spectrum(x, opts \\ [_Fs: nil, _Fc: nil, window: nil, pad_to: nil, sides: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.phase_spectrum", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a pie chart.
Make a pie chart of array x. The fractional area of each wedge is given by x / sum(x). If sum(x) <= 1, then the values of x give the fractional
area directly and the array will not be normalized. The wedges are plotted counterclockwise, by default starting from the x-axis.
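For example (a sketch):
```elixir
Expyplot.Plot.pie([30, 20, 50], labels: ["a", "b", "c"])
```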
"""
def pie(x, opts \\ [explode: nil, labels: nil, colors: nil, autopct: nil, pctdistance: 0.6, shadow: false, labeldistance: 1.1, startangle: nil,
radius: nil, counterclock: true, wedgeprops: nil, textprops: nil, center: {0, 0}, frame: false, hold: nil, data: nil]) do
Codebuilder.build_code(funcname: "plt.pie", nonnamed: [x], named: opts) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to pink and apply to current image if any.
"""
def pink do
Codebuilder.build_code(funcname: "plt.pink", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to plasma and apply to current image if any.
"""
def plasma do
Codebuilder.build_code(funcname: "plt.plasma", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot lines and/or markers to the Axes. <i>args</i> is a variable length argument, allowing for multiple <i>x, y</i> pairs with an optional format string.
Each of the following is legal:
```elixir
x = 1..10 |> Enum.to_list
y = 11..20 |> Enum.to_list
Expyplot.Plot.plot([x, y]) # plot x and y using default line style and color
Expyplot.Plot.plot([x, y, "bo"]) # plot x and y using blue circle markers
Expyplot.Plot.plot([y]) # plot y using x as index array 0..N-1
Expyplot.Plot.plot([y, "r+"]) # ditto, but with red plusses
x2 = 1..5 |> Enum.to_list
y2 = 3..7 |> Enum.to_list
Expyplot.Plot.plot([x, y, "g^", x2, y2, "g-"])
```
Due to the differences between function signatures in Python and Elixir, the typical usage of this function is a little different than what you would
expect:
iex> Expyplot.Plot.plot([[1, 2, 3, 4, 5]])
nil
or
iex> Expyplot.Plot.plot([1..5])
nil
Notice the nesting of the list or range.
## Examples with keyword args
```elixir
Expyplot.Plot.plot([[1, 2, 3], [1, 2, 3], "go-"], label: "line 1", linewidth: 2)
Expyplot.Plot.plot([[1, 2, 3], [1, 4, 9], "rs"], label: "line 2")
Expyplot.Plot.axis_set([0, 4, 0, 10]) # Notice that this signature is 'axis_set', not 'axis' as in matplotlib
Expyplot.Plot.legend()
```
"""
def plot(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.plot", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
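@doc """
Plot a candlestick chart. This is not part of pyplot itself: the five lists are zipped into
(timestamp, open, high, low, close) tuples and passed to a <i>candlestick</i> function that is assumed
to be available on the Python side.
"""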
def candlestick(timestamp, open, high, low, close) do
Codebuilder.build_code(funcname: "candlestick", nonnamed: [List.zip([timestamp, open, high, low, close])], named: []) |> Server.Commapi.add_code
end
@doc """
A plot with data that contains dates.
"""
def plot_date(x, y, opts \\ [fmt: :o, tz: nil, xdate: true, ydate: false, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.plot_date", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot the data in a file.
"""
def plotfile(fname, opts \\ [cols: {0}, plotfuncs: nil, comments: "#", skiprows: 0, checkrows: 5, delimiter: ",", names: nil, subplots: true,
newfig: true], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.plotfile", nonnamed: [fname], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a polar plot.
"""
def polar(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.polar", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to prism and apply to current image if any.
"""
def prism do
Codebuilder.build_code(funcname: "plt.prism", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot the power spectral density.
"""
def psd(x, opts \\ [nfft: nil, _Fs: nil, _Fc: nil, detrend: nil, window: nil, noverlap: nil, pad_to: nil, sides: nil, scale_by_freq: nil,
return_line: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.psd", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a 2-D field of arrows.
"""
def quiver(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.quiver", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add a key to a quiver plot.
"""
def quiverkey(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.quiverkey", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the current rc params. Group is the grouping for the rc.
"""
def rc(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.rc", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Restore the default rc params. These are not the params loaded by the rc file, but mpl's internal params. See rc_file_defaults for reloading
the default params from the rc file.
"""
def rcdefaults do
Codebuilder.build_code(funcname: "plt.rcdefaults", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Get or set the radial gridlines on a polar plot.
"""
def rgrids(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.rgrids", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Save the current figure.
"""
def savefig(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.savefig", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make a scatter plot of x vs y.
Marker size is scaled by s and marker color is mapped to c.
"""
def scatter(x, y, opts \\ [s: nil, c: nil, marker: nil, cmap: nil, norm: nil, vmin: nil, vmax: nil, alpha: nil,
linewidths: nil, verts: nil, edgecolors: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.scatter", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Make a plot with log scaling on the x axis.
"""
def semilogx(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.semilogx", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make a plot with log scaling on the y axis.
"""
def semilogy(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.semilogy", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap. Applies to the current image if any.
<i>cmap</i> must be the name of a registered colormap.
"""
def set_cmap(cmap) do
Codebuilder.build_code(funcname: "plt.set_cmap", nonnamed: [cmap], named: []) |> Server.Commapi.add_code
end
@doc """
Display a figure.
"""
def show(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.show", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Plot a spectrogram.
"""
def specgram(x, opts \\ [nfft: nil, _Fs: nil, _Fc: nil, detrend: nil, window: nil, noverlap: nil, cmap: nil, xextent: nil, pad_to: nil,
sides: nil, scale_by_freq: nil, mode: nil, scale: nil, vmin: nil, vmax: nil, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.specgram", nonnamed: [x], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to spectral and apply to current image if any.
"""
def spectral do
Codebuilder.build_code(funcname: "plt.spectral", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to spring and apply to current image if any.
"""
def spring do
Codebuilder.build_code(funcname: "plt.spring", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot the sparsity pattern of a 2-D array.
spy(z) plots the sparsity pattern of the 2-D array <i>z</i>.
"""
def spy(z, opts \\ [precision: 0, marker: nil, markersize: nil, aspect: :equal], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.spy", nonnamed: [z], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Draws a stacked area plot.
"""
def stackplot(x, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.stackplot", nonnamed: [x] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Create a stem plot.
"""
def stem(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.stem", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make a step plot.
"""
def step(x, y, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.step", nonnamed: [x, y] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Draw streamlines of a vector flow.
"""
def streamplot(x, y, u, v, opts \\ [density: 1, linewidth: nil, color: nil, cmap: nil, norm: nil, arrowsize: 1, arrowstyle: "-|>", minlength: 0.1, transform: nil,
zorder: nil, start_points: nil, hold: nil, data: nil]) do
Codebuilder.build_code(funcname: "plt.streamplot", nonnamed: [x, y, u, v], named: opts) |> Server.Commapi.add_code
end
@doc """
Return a subplot axes positioned by the given grid definition.
"""
def subplot(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.subplot", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Create a subplot in a grid. The grid is specified by <i>shape</i>, at location of <i>loc</i>, spanning <i>rowspan, colspan</i> cells in each direction.
The index for loc is 0-based.
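For example (a sketch of a 3x3 grid with one axes spanning the whole top row):
```elixir
Expyplot.Plot.subplot2grid({3, 3}, {0, 0}, colspan: 3)
```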
"""
def subplot2grid(shape, loc, opts \\ [rowspan: 1, colspan: 1], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.subplot2grid", nonnamed: [shape, loc], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Launch a subplot tool window for a figure.
"""
def subplot_tool(opts \\ [targetfig: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.subplot_tool", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Create a figure and a set of subplots.
This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call.
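For example (a sketch; note that passing an `opts` list replaces the entire default list):
```elixir
Expyplot.Plot.subplots(nrows: 2, ncols: 2, sharex: true)
```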
"""
def subplots(opts \\ [nrows: 1, ncols: 1, sharex: false, sharey: false, squeeze: true, subplot_kw: nil, gridspec_kw: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.subplots", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Tune the subplot layout.
"""
def subplots_adjust(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.subplots_adjust", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to summer and apply to current image if any.
"""
def summer do
Codebuilder.build_code(funcname: "plt.summer", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Add a centered title to the figure.
"""
def suptitle(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.suptitle", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add a table to the current axes.
"""
def table(kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.table", nonnamed: [], named: kwargs) |> Server.Commapi.add_code
end
@doc """
Add text to the axes.
Add text in string <i>s</i> to axis at location <i>x, y</i> data coordinates.
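For example (a sketch):
```elixir
Expyplot.Plot.text(0.5, 0.5, "center of the data")
```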
"""
def text(x, y, s, opts \\ [fontdict: nil, withdash: false], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.text", nonnamed: [x, y, s], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Get or set the theta locations of the gridlines in a polar plot.
If no arguments are passed, return a tuple (<i>lines, labels</i>) where <i>lines</i> is an array of radial gridlines (Line2D instances) and <i>labels</i>
is an array of tick labels (Text instances).
"""
def thetagrids(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.thetagrids", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Change the appearance of ticks and tick labels.
"""
def tick_parameters(opts \\ [axis: :both], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.tick_parameters", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Change the ScalarFormatter used by default for linear axes.
"""
def ticklabel_format(kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.ticklabel_format", nonnamed: [], named: kwargs) |> Server.Commapi.add_code
end
@doc """
Automatically adjust subplot parameters to give specified padding.
"""
def tight_layout(opts \\ [pad: 1.08, h_pad: nil, w_pad: nil, rect: nil]) do
Codebuilder.build_code(funcname: "plt.tight_layout", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Set a title of the current axes.
Set one of the three available axes titles. The available titles are positioned above the axes in the center, flush with the left edge, and flush
with the right edge.
"""
def title(s, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.title", nonnamed: [s] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Draw contours on an unstructured triangular grid. tricontour() and tricontourf() draw contour lines and filled contours,
respectively.
"""
def tricontour(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.tricontour", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Draw contours on an unstructured triangular grid. tricontour() and tricontourf() draw contour lines and filled contours,
respectively.
"""
def tricontourf(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.tricontourf", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Create a pseudocolor plot of an unstructured triangular grid.
"""
def tripcolor(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.tripcolor", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Draw an unstructured triangular grid as lines and/or markers.
"""
def triplot(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.triplot", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Make a second axes that shares the <i>x</i>-axis. The new axes will overlay <i>ax</i> (or the current axes if <i>ax</i> is nil). The ticks for
<i>ax2</i> will be placed on the right, and the <i>ax2</i> instance is returned.
"""
def twinx(opts \\ [ax: nil]) do
Codebuilder.build_code(funcname: "plt.twinx", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Make a second axes that shares the <i>y</i>-axis. The new axis will overlay <i>ax</i> (or the current axes if <i>ax</i> is nil). The ticks for
<i>ax2</i> will be placed on the top, and the <i>ax2</i> instance is returned.
"""
def twiny(opts \\ [ax: nil]) do
Codebuilder.build_code(funcname: "plt.twiny", nonnamed: [], named: opts) |> Server.Commapi.add_code
end
@doc """
Uninstalls the matplotlib display hook.
"""
def uninstall_repl_displayhook do
Codebuilder.build_code(funcname: "plt.uninstall_repl_displayhook", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Make a violin plot.
Make a violin plot for each column of <i>dataset</i> or each vector in sequence <i>dataset</i>. Each filled area extends to represent the entire data
range, with optional lines at the mean, the median, the minimum, and the maximum.
"""
def violinplot(dataset, opts \\ [positions: nil, vert: true, widths: 0.5, showmeans: false, showextrema: true, showmedians: false, points: 100, bw_method: nil,
hold: nil, data: nil]) do
Codebuilder.build_code(funcname: "plt.violinplot", nonnamed: [dataset], named: opts) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to viridis and apply to current image if any.
"""
def viridis do
Codebuilder.build_code(funcname: "plt.viridis", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot vertical lines.
Plot vertical lines at each <i>x</i> from <i>ymin</i> to <i>ymax</i>.
"""
def vlines(x, ymin, ymax, opts \\ [colors: :k, linestyles: :solid, label: "", hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.vlines", nonnamed: [x, ymin, ymax], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Blocking call to interact with the figure.
This will return "True" if a key was pressed, "False" if a mouse button was pressed and "None" if timeout was reached without either being pressed.
If <i>timeout</i> is negative, does not timeout.
"""
def waitforbuttonpress(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.waitforbuttonpress", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the default colormap to winter and apply to current image if any.
"""
def winter do
Codebuilder.build_code(funcname: "plt.winter", nonnamed: [], named: []) |> Server.Commapi.add_code
end
@doc """
Plot the cross correlation between <i>x</i> and <i>y</i>.
"""
def xcorr(x, y, opts \\ [normed: true, usevlines: true, maxlags: 10, hold: nil, data: nil], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xcorr", nonnamed: [x, y], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Turn on xkcd sketch-style drawing mode. This will only have an effect on things drawn after this function is called.
For best results, the "Humor Sans" font should be installed: it is not included with matplotlib.
"""
def xkcd(opts \\ [scaled: 1, length: 100, randomness: 2], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xkcd", nonnamed: [], named: opts ++ kwargs) |> Server.Commapi.add_code
end
@doc """
Set the x axis label of the current axis.
"""
def xlabel(s, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xlabel", nonnamed: [s] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Get or set the <i>x</i> limits of the current axes.
"""
def xlim(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xlim", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the scaling of the x-axis.
"""
def xscale(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xscale", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Get or set the x-limits of the current tick locations and labels.
"""
def xticks(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.xticks", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the y-axis label of the current axis.
"""
def ylabel(s, opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.ylabel", nonnamed: [s] ++ opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Get or set the y-limits of the current axes.
"""
def ylim(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.ylim", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Set the scaling of the y-axis.
"""
def yscale(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.yscale", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
@doc """
Get or set the y-limits of the current tick locations and labels.
"""
def yticks(opts \\ [], kwargs \\ []) do
Codebuilder.build_code(funcname: "plt.yticks", nonnamed: opts, named: kwargs) |> Server.Commapi.add_code
end
end
# lib/plot.ex
defmodule ElixirConsole.Sandbox do
@moduledoc """
Provides a sandbox where Elixir code from untrusted sources can be executed
"""
@type sandbox() :: %__MODULE__{}
alias ElixirConsole.Sandbox.CodeExecutor
@max_command_length 500
@max_memory_kb_default 256
@max_binary_memory_kb_default 50 * 1024
@timeout_ms_default 5000
@check_every_ms_default 20
@bytes_in_kb 1024
@enforce_keys [:pid, :bindings]
defstruct [:pid, :bindings]
@doc """
Initialize and returns a process where Elixir code will run in "sandbox mode".
This is useful when we want to let the user run more than one individual
command while assuming they all execute in the same process (as
happens when someone runs different commands in iex).
Returns a Sandbox struct including the dedicated process and an empty list of
bindings.
"""
@spec init() :: sandbox()
def init() do
loop = fn loop_func ->
receive do
{:command, command, bindings, parent_pid} ->
result = CodeExecutor.execute_code(command, bindings)
send(parent_pid, {:result, result})
loop_func.(loop_func)
end
end
creator_pid = self()
pid =
spawn(fn ->
# Add some metadata to this process to identify it, allowing further
# analysis
Process.put(:sandbox_owner, creator_pid)
loop.(loop)
end)
%__MODULE__{pid: pid, bindings: []}
end
@doc """
Executes a command (Elixir code in a string) in the process given by the
Sandbox struct provided.
Returns the result of the execution and a Sandbox struct with the changes in
the bindings, if the command succeeded. In case of errors, it returns an
`{:error, error_message}` tuple where the second element is a string with an
explanation.
If the execution takes more time than the specified timeout, an error is
returned. In addition, if the execution uses more memory than the allowed
amount, it is interrupted and an error is returned.
You can use the following options in the `opts` argument:
`timeout`: Time limit to run the command (in milliseconds). The default is
5000.
`max_memory_kb`: Memory usage limit (expressed in KB). The default is
256.
`check_every`: Determines the time elapsed between checks where memory
usage is measured (expressed in milliseconds). The default is 20.
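For example, a minimal sketch:
    sandbox = ElixirConsole.Sandbox.init()
    {:success, {result, sandbox}} =
      ElixirConsole.Sandbox.execute("1 + 2", sandbox, timeout: 1_000)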
"""
@typep execution_result() :: {binary(), sandbox()}
@spec execute(binary(), sandbox(), keyword()) ::
{:success, execution_result()} | {:error, execution_result()}
def execute(command, sandbox, opts \\ [])
def execute(command, sandbox, _) when byte_size(command) > @max_command_length do
{:error, {"Command is too long. Try running a shorter piece of code.", sandbox}}
end
def execute(command, sandbox, opts) do
task = Task.async(fn -> do_execute(command, sandbox, opts) end)
Task.await(task, :infinity)
end
defp do_execute(command, sandbox, opts) do
send(sandbox.pid, {:command, command, sandbox.bindings, self()})
case check_execution_status(sandbox.pid, normalize_options(opts)) do
{:ok, {:success, {result, bindings}}} ->
{:success, {result, %{sandbox | bindings: Enum.sort(bindings)}}}
{:ok, {:error, result}} ->
{:error, {result, sandbox}}
:timeout ->
{:error, {"The command was cancelled due to timeout", restore(sandbox)}}
:memory_abuse ->
{:error, {"The command used more memory than allowed", restore(sandbox)}}
end
end
@doc """
The sandbox process is exited. This function should be used when the sandbox
is no longer needed, so that resources are properly disposed of.
"""
def terminate(%__MODULE__{pid: pid}) do
Process.exit(pid, :kill)
end
defp normalize_options(opts) do
timeout = Keyword.get(opts, :timeout, @timeout_ms_default)
check_every = Keyword.get(opts, :check_every, @check_every_ms_default)
ticks = floor(timeout / check_every)
max_memory_kb = Keyword.get(opts, :max_memory_kb, @max_memory_kb_default) * @bytes_in_kb
max_binary_memory_kb =
Keyword.get(opts, :max_binary_memory_kb, @max_binary_memory_kb_default) * @bytes_in_kb
[
ticks: ticks,
check_every: check_every,
max_memory_kb: max_memory_kb,
max_binary_memory_kb: max_binary_memory_kb
]
end
defp restore(sandbox) do
%__MODULE__{sandbox | pid: init().pid}
end
defp check_execution_status(pid, [{:ticks, 0} | _]) do
Process.exit(pid, :kill)
:timeout
end
defp check_execution_status(
pid,
[
ticks: ticks,
check_every: check_every,
max_memory_kb: max_memory_kb,
max_binary_memory_kb: max_binary_memory_kb
] = opts
) do
receive do
{:result, result} ->
{:ok, result}
after
check_every ->
if allowed_memory_usage_by_process?(pid, max_memory_kb) and
allowed_memory_usage_in_binaries?(max_binary_memory_kb) do
check_execution_status(pid, Keyword.put(opts, :ticks, ticks - 1))
else
Process.exit(pid, :kill)
:memory_abuse
end
end
end
defp allowed_memory_usage_by_process?(pid, memory_limit) do
{:memory, memory} = Process.info(pid, :memory)
memory <= memory_limit
end
defp allowed_memory_usage_in_binaries?(binaries_memory_limit) do
:erlang.memory(:binary) <= binaries_memory_limit
end
end
# lib/elixir_console/sandbox.ex
defmodule IntelligentBrute do
@moduledoc """
Following advice from some mathematician, this brute will make more intelligent guesses when solving.
"""
import Acros
@doc """
notation - List :: Either(String, Float)
limits - List :: {Integer, Integer}
precision - Float
variables - List :: String
line - List :: List :: Float. Only 2 elements (on the first level), the start and end points.
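A hypothetical call, assuming the notation is a postfix form understood by `Executor.execute/1`
(here searching for a root of x - 2 on the interval {0, 10}):
    IntelligentBrute.init(["x", 2.0, "-"], [{0, 10}], 0.01, ["x"])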
"""
defstruct notation: [], limits: [], precision: 0.5, variables: [], line: []
def init(notation, limits, precision, variables) do
loop(%IntelligentBrute{notation: notation, limits: limits, precision: precision, variables: variables})
end
def loop(brute) do
_loop(brute)
end
# The first clause supplies the defaults, in the best Java tradition
defp _loop(brute) do
_loop(1, [], %IntelligentBrute{brute | line: _find_points(brute)})
end
defp _loop(val, history, %IntelligentBrute{precision: precision}) when inside_precision(val, precision),
do: {:ok, history}
defp _loop(_, history, brute) do
# calculating new point
mean_point = brute.line |>
Stream.zip |>
Enum.map(fn {lx, rx} -> (rx + lx) / 2 end)
# calibrating scope of search
new_val = _execute(brute.notation, brute.variables, mean_point)
[lp, rp] = brute.line
new_line =
  cond do
    new_val > 0 -> [lp, mean_point]
    # the fallback also covers new_val == 0; a two-clause cond with only
    # new_val > 0 and new_val < 0 would raise CondClauseError on an exact root
    true -> [mean_point, rp]
  end
_loop(new_val, [mean_point | history], %IntelligentBrute{brute | line: new_line})
end
# Finding points in different areas, i.e. so that val < 0 at the first point and val > 0 at the second
defp _find_points(brute) do
[_find_point(_generate_point(brute.limits), -1, brute), _find_point(_generate_point(brute.limits), 1, brute)]
end
defp _find_point(point, sign, brute) do
cond do
sign(_execute(brute.notation, brute.variables, point)) === sign(sign) -> point
true -> _find_point(_generate_point(brute.limits), sign, brute)
end
end
defp _generate_point(limits) do
limits |>
Enum.map(fn {l, r} -> :rand.uniform() * (r - l) + l end)
end
defp _execute(notation, vars, for_point) do
Stream.zip(vars, for_point) |>
Enum.reduce(
notation,
fn ({var, val}, acc) -> acc |> Enum.map(fn el -> if el === var, do: val, else: el end) end
) |>
Executor.execute
end
end
# lib/intelligent_brute.ex
defmodule Intcode.Computer do
alias Intcode.{Computer, Instruction, Memory}
@moduledoc """
A process that runs a single Intcode program to completion.
"""
@typedoc """
The PID for an Intcode computer process.
"""
@type t :: pid
@typedoc """
The PID for a process that is able to handle messages from the Intcode computer.
## Received messages
A handler can be any process as long as it understands the following messages:
* `{:input, pid}`: The `t:Intcode.Computer.t/0` at `pid` is waiting for an input value.
Execution of the Intcode program will pause until the handler sends an input value to
`pid` using `Intcode.send_input/2`.
* `{:output, pid, value}`: The `t:Intcode.Computer.t/0` at `pid` produced an output
`value`. The handler can deal with this as needed.
* `{:halt, pid}`: The `t:Intcode.Computer.t/0` at `pid` has halted. The handler can
use this signal to perform post-processing and terminate if it hasn't already.
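A minimal handler sketch (hypothetical), feeding queued inputs and printing outputs:
    defmodule EchoHandler do
      def loop(inputs) do
        receive do
          {:input, pid} ->
            [value | rest] = inputs
            Intcode.Computer.send_input(pid, value)
            loop(rest)
          {:output, _pid, value} ->
            IO.inspect(value, label: "intcode output")
            loop(inputs)
          {:halt, _pid} ->
            :ok
        end
      end
    end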
"""
@type handler :: pid
use Task
use Bitwise
require Logger
defstruct [:memory, :handler, :input_dest, pc: 0, relative_base: 0]
def start_link(data, handler) do
Task.start_link(__MODULE__, :run, [data, handler])
end
@doc """
Start an Intcode computer as an async `Task`.
This allows waiting for the computer to halt using `Task.await/2`.
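For example (a sketch; `data` is whatever `Memory.start_link/1` accepts):
    task = Intcode.Computer.async(data, self())
    {:ok, _memory} = Task.await(task, :infinity)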
"""
@spec async(Memory.data(), handler) :: Task.t()
def async(data, handler) do
Task.async(__MODULE__, :run, [data, handler])
end
@doc """
Runs the Intcode computer.
This should generally not be called directly, as it will block the process until the program
in the memory data finishes. Instead, you can use `Intcode.Computer.async` to spawn a task
to run the computer in another process.
"""
@spec run(Memory.data(), handler) :: {:ok, Memory.t()}
def run(data, handler) do
{:ok, memory} = Memory.start_link(data)
send(self(), :pop_inst)
:ok = loop(%Computer{memory: memory, handler: handler})
{:ok, memory}
end
@doc """
Provides an input value to the computer.
This should only be called by the handler when it receives a request for input from the
computer. Sending input when the computer hasn't asked for it will result in undefined
behavior.
"""
@spec send_input(t, number) :: any
def send_input(computer, value) do
send(computer, {:input, value})
end
defp loop(computer) do
receive do
:pop_inst ->
{:ok, inst, computer} = next_instruction(computer)
send(self(), {:exec_inst, inst})
loop(computer)
{:exec_inst, inst} ->
loop(execute_instruction(inst, computer))
{:input, value} ->
set_param(computer, computer.input_dest, value)
send(self(), :pop_inst)
loop(%{computer | input_dest: nil})
:halt ->
:ok
end
end
defp next_instruction(computer) do
%Computer{memory: memory, pc: pc} = computer
value = Memory.get(memory, pc)
{opcode, modes} = Instruction.decode(value)
params =
for {m, i} <- Enum.with_index(modes) do
{Memory.get(memory, pc + i + 1), m}
end
{:ok, {opcode, List.to_tuple(params)}, %{computer | pc: pc + 1 + Enum.count(params)}}
end
defp execute_instruction(inst, computer) do
%Computer{pc: pc} = computer
Logger.metadata(inst: inspect(inst), pc: pc)
Logger.debug("executing instruction")
case inst do
{:add, {x, y, z}} ->
xx = get_param(computer, x)
yy = get_param(computer, y)
set_param(computer, z, xx + yy)
send(self(), :pop_inst)
computer
{:mult, {x, y, z}} ->
xx = get_param(computer, x)
yy = get_param(computer, y)
set_param(computer, z, xx * yy)
send(self(), :pop_inst)
computer
{:input, {x}} ->
send(computer.handler, {:input, self()})
%{computer | input_dest: x}
{:output, {x}} ->
send(computer.handler, {:output, self(), get_param(computer, x)})
send(self(), :pop_inst)
computer
{:jump_true, {p, addr}} ->
send(self(), :pop_inst)
case get_param(computer, p) do
0 -> computer
_ -> %{computer | pc: get_param(computer, addr)}
end
{:jump_false, {p, addr}} ->
send(self(), :pop_inst)
case get_param(computer, p) do
0 -> %{computer | pc: get_param(computer, addr)}
_ -> computer
end
{:less_than, {x, y, z}} ->
cond do
get_param(computer, x) < get_param(computer, y) ->
set_param(computer, z, 1)
true ->
set_param(computer, z, 0)
end
send(self(), :pop_inst)
computer
{:equals, {x, y, z}} ->
cond do
get_param(computer, x) == get_param(computer, y) ->
set_param(computer, z, 1)
true ->
set_param(computer, z, 0)
end
send(self(), :pop_inst)
computer
{:add_rb, {x}} ->
send(self(), :pop_inst)
Map.update!(computer, :relative_base, fn rb ->
rb + get_param(computer, x)
end)
{:halt, _} ->
send(self(), :halt)
if computer.handler != nil do
send(computer.handler, {:halt, self()})
end
computer
end
end
defp get_param(%Computer{memory: memory}, {i, :abs}) do
Memory.get(memory, i)
end
defp get_param(_computer, {i, :imm}) do
i
end
defp get_param(%Computer{memory: memory, relative_base: base}, {i, :rel}) do
Memory.get(memory, base + i)
end
defp set_param(%Computer{memory: memory}, {i, :abs}, value) do
Memory.set(memory, i, value)
end
defp set_param(%Computer{memory: memory, relative_base: base}, {i, :rel}, value) do
Memory.set(memory, base + i, value)
end
end
# aoc2019_elixir/apps/aoc/lib/intcode/computer.ex
defprotocol DeepMerge.Resolver do
@moduledoc """
Protocol defining how conflicts during deep_merge should be resolved.
As part of the DeepMerge library this protocol is already implemented for
`Map` and `List` as well as a fallback to `Any` (which just always takes the
override).
If you want your custom structs to also be deeply mergable and not just
override one another (default behaviour) you can derive the protocol:
defmodule Derived do
@derive [DeepMerge.Resolver]
defstruct [:attrs]
end
It will then automatically be deeply merged with structs of its own kind, not
with other structs or maps though.
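For example (a sketch using the library's `DeepMerge.deep_merge/2` entry point):
    DeepMerge.deep_merge(%Derived{attrs: %{a: 1}}, %Derived{attrs: %{b: 2}})
    #=> %Derived{attrs: %{a: 1, b: 2}}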
"""
@fallback_to_any true
@doc """
Defines what happens when a merge conflict occurs on this struct during a
deep_merge.
Can be implemented for additional data types to implement custom deep merging
behavior.
The passed in values are:
* `original` - the value in the original data structure, usually left side
argument
* `override` - the value with which `original` would be overridden in a
normal `Map.merge/2`
* `resolver` - the function used by DeepMerge to resolve merge conflicts,
i.e. what you can pass to `Map.merge/3` and `Keyword.merge/3` to continue
deeply merging.
An example implementation might look like this if you want to deeply merge
your struct, but only take non-`nil` values from the override (because all keys are always present)
when merging against the same struct (while still merging with plain maps):
defimpl DeepMerge.Resolver, for: MyStruct do
def resolve(original, override = %MyStruct{}, resolver) do
cleaned_override =
override
|> Map.from_struct()
|> Enum.reject(fn {_key, value} -> is_nil(value) end)
|> Map.new()
Map.merge(original, cleaned_override, resolver)
end
def resolve(original, override, resolver) when is_map(override) do
Map.merge(original, override, resolver)
end
end
"""
def resolve(original, override, resolver)
end
defimpl DeepMerge.Resolver, for: Map do
@doc """
Resolve the merge between two maps by continuing to deeply merge them.
Don't merge structs or if its any other type take the override value.
"""
def resolve(_original, override = %{__struct__: _}, _fun) do
override
end
def resolve(original, override, resolver) when is_map(override) do
Map.merge(original, override, resolver)
end
def resolve(_original, override, _fun), do: override
end
defimpl DeepMerge.Resolver, for: List do
@doc """
Deeply merge keyword lists, but avoid overriding a keyword list with an empty list.
"""
def resolve(original = [{_k, _v} | _], override = [{_, _} | _], resolver) do
Keyword.merge(original, override, resolver)
end
def resolve(original = [{_k, _v} | _tail], _override = [], _fun) do
original
end
def resolve(_original, override, _fun), do: override
end
defimpl DeepMerge.Resolver, for: Any do
@doc """
Fall back to always taking the override.
Also the implementation for `@derive [DeepMerge.Resolver]`, where structs of the same type that
implement the protocol are deeply merged.
"""
def resolve(original = %{__struct__: struct}, override = %{__struct__: struct}, resolver) do
implementors = get_implementors(DeepMerge.Resolver.__protocol__(:impls))
if Enum.member?(implementors, struct) do
Map.merge(original, override, resolver)
else
override
end
end
def resolve(_original, override, _fun), do: override
defp get_implementors({:consolidated, implementors}), do: implementors
defp get_implementors(:not_consolidated) do
IO.warn(
"Protocols not consolidated and trying to merge two structs of the same type. Not supported!"
)
# let the code work with override semantics without being intrusive
[]
end
end
# lib/deep_merge/resolver.ex
defmodule EctoTablestore.Migration do
@moduledoc """
Migrations are used to create your tables.
An autoincrementing partition key is supported through this library's wrapper. For this use case,
we can use the migration to automatically create another, separate table that generates the serial value
on `:insert` (viz `ExAliyunOts.put_row/5`) or `:batch_write` (viz `ExAliyunOts.batch_write/3`) with the `:put` option.
In practice, we don't create migration files by hand either, we typically use `mix ecto.ots.gen.migration` to
generate the file with the proper timestamp and then we just fill in its contents:
$ mix ecto.ots.gen.migration create_posts_table
And then we can fill the table definition details:
defmodule EctoTablestore.TestRepo.Migrations.CreatePostsTable do
use EctoTablestore.Migration
def change do
create table("ecto_ots_test_posts") do
add :post_id, :integer, partition_key: true, auto_increment: true
end
end
end
After we filled the above migration content, you can run the migration above by going to the root of your project
and typing:
$ mix ecto.ots.migrate
Finally, we successfully create the "ecto_ots_test_posts" table. Since the above definition added an autoincrementing
column for the partition key, an "ecto_ots_test_posts_seq" table is created automatically to generate a serial integer
for the `:post_id` field when a new record is inserted.
"""
require ExAliyunOts.Const.PKType, as: PKType
require Logger
alias EctoTablestore.Migration.Runner
alias EctoTablestore.Sequence
alias Ecto.MigrationError
defmodule Table do
@moduledoc false
defstruct name: nil, prefix: nil, partition_key: true, meta: []
@type t :: %__MODULE__{
name: String.t(),
prefix: atom | nil,
partition_key: boolean(),
meta: Keyword.t()
}
end
@doc false
defmacro __using__(_) do
quote location: :keep do
import EctoTablestore.Migration,
only: [
table: 1,
table: 2,
create: 2,
add: 2,
add: 3,
add_pk: 2,
add_pk: 3
]
def __migration__, do: :ok
end
end
@doc """
Returns a table struct that can be given to `create/2`.
Since Tablestore is a NoSQL service, up to 4 primary keys can be
added at creation. The first added key is the partition key when the `partition_key`
option is set to `false`.
## Examples
create table("products") do
add :name, :string
add :price, :integer
end
create table("products", partition_key: false) do
add :name, :string
add :price, :integer
end
## Options
* `:partition_key` - `true` by default, which adds an `:id` field as the partition key
with a large autoincrementing integer type (like `bigserial`). Tablestore does not support
a `bigserial` type for primary keys, but the `ex_aliyun_ots` lib's wrapper - Sequence -
can be used to implement it; when `false`, no partition key field is generated on table creation.
* `:prefix` - the prefix for the table.
* `:meta` - define the meta information when create table, can see Tablestore's document for details:
* `:reserved_throughput_write` - reserve the throughput for writes when creating the table, an integer,
the default value is 0;
* `:reserved_throughput_read` - reserve the throughput for reads when creating the table, an integer,
the default value is 0;
* `:time_to_live` - the survival time of the saved data, a.k.a TTL; an integer, unit as second,
the default value is -1 (permanent preservation);
* `:deviation_cell_version_in_sec` - maximum version deviation, the default value is 86400
seconds, which is 1 day;
* `stream_spec` - set the stream specification of Tablestore:
- `is_enabled`, open or close stream
- `expiration_time`, the expriration time of the table's stream
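For example (a hypothetical definition using the `:meta` option):
    create table("posts", partition_key: false, meta: [time_to_live: 86400]) do
      add :title, :string, partition_key: true
    end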
"""
def table(name, opts \\ [])
def table(name, opts) when is_atom(name) do
table(Atom.to_string(name), opts)
end
def table(name, opts) when is_binary(name) and is_list(opts) do
struct(%Table{name: name}, opts)
end
@doc """
Define the primary key(s) of the table to create.
By default, the table will also include an `:id` primary key field (which is also the partition key)
that has a type of `:integer` and is an autoincrementing column. Check the `table/2` docs for
more information.
Up to 4 primary keys can be added at creation.
## Example
create table("posts") do
add :title, :string
end
# The above is equivalent to
create table("posts") do
add :id, :integer, partition_key: true, auto_increment: true
add :title, :string
end
"""
defmacro create(table, do: block), do: _create_table(table, block)
defp _create_table(table, block) do
columns =
case block do
{:__block__, _, columns} -> columns
column -> [column]
end
quote do
map = unquote(__MODULE__).__create_table__(unquote(table), unquote(columns))
Runner.push_command(&unquote(__MODULE__).do_create_table(&1, map))
end
end
def __create_table__(%Table{} = table, columns) do
partition_key_count = Enum.count(columns, & &1.partition_key)
columns =
cond do
partition_key_count == 1 ->
columns
# Use `:id` as the partition key, backed by an autoincrementing integer sequence
partition_key_count == 0 and table.partition_key ->
opts = Runner.repo_config(:migration_primary_key, [])
{name, opts} = Keyword.pop(opts, :name, :id)
{type, _opts} = Keyword.pop(opts, :type, :integer)
[%{pk_name: name, type: type, partition_key: true, auto_increment: true} | columns]
# No partition key defined
partition_key_count == 0 ->
raise MigrationError,
message: "Please define at least one partition primary keys for table: " <> table.name
# Only one partition key can be defined
true ->
raise MigrationError,
message:
"The maximum number of partition primary keys is 1, now is #{partition_key_count} defined on table: " <>
table.name <> " columns:\n" <> inspect(columns)
end
case Enum.count(columns) do
# The number of primary keys can not be more than 4
pk_count when pk_count > 4 ->
raise MigrationError,
message:
"The maximum number of primary keys is 4, now is #{pk_count} defined on table: " <>
table.name <> " columns:\n" <> inspect(columns)
# Only one primary key may be defined as an auto_increment integer
pk_count ->
left_columns = Enum.reject(columns, & &1.auto_increment)
auto_increment_count = pk_count - length(left_columns)
hashids_count = Enum.count(left_columns, &match?(:hashids, &1.type))
total_increment_count = auto_increment_count + hashids_count
if total_increment_count > 1 do
raise MigrationError,
message:
"The maximum number of [auto_increment & hashids] primary keys is 1, but found #{total_increment_count} defined on table: " <>
table.name
else
seq_type =
cond do
auto_increment_count > 0 -> :self_seq
hashids_count > 0 -> :default_seq
true -> :none_seq
end
%{table: table, columns: columns, seq_type: seq_type}
end
end
end
@doc false
def do_create_table(repo, %{table: table, columns: columns, seq_type: seq_type}) do
table_name = get_table_name(table, repo.config())
repo_meta = Ecto.Adapter.lookup_meta(repo)
instance = repo_meta.instance
table_names = Runner.list_table_names(instance)
# check if not exists
if table_name not in table_names do
primary_keys = Enum.map(columns, &transform_table_column/1)
Logger.info(fn -> ">> table: #{table_name}, primary_keys: #{inspect(primary_keys)}" end)
options = Keyword.put(table.meta, :max_versions, 1)
case ExAliyunOts.create_table(instance, table_name, primary_keys, options) do
:ok ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn -> "create table: #{table_name} result: #{result_str}" end)
:ok
result ->
Logger.error(fn -> "create table: #{table_name} result: #{inspect(result)}" end)
elem(result, 0)
end
else
result_str = IO.ANSI.format([:yellow, "exists", :reset])
Logger.info(fn -> ">> table: #{table_name} already #{result_str}" end)
:already_exists
end
|> case do
:ok ->
create_seq_table_by_type(seq_type, table_name, table_names, repo, instance)
{table_name, :ok}
result ->
{table_name, result}
end
end
def create_seq_table_by_type(:none_seq, _table_name, _table_names, _repo, _instance),
do: :ignore
def create_seq_table_by_type(seq_type, table_name, table_names, repo, instance) do
seq_table_name =
case seq_type do
:self_seq -> repo.__adapter__.bound_sequence_table_name(table_name)
:default_seq -> Sequence.default_table()
end
# check if not exists
if seq_table_name not in table_names do
Logger.info(fn ->
">> auto create table: #{seq_table_name} for table: " <> table_name
end)
Sequence.create(instance, seq_table_name)
else
:already_exists
end
end
@doc false
defp get_table_name(table, repo_config) do
prefix = table.prefix || Keyword.get(repo_config, :migration_default_prefix)
if prefix do
prefix <> table.name
else
table.name
end
end
defp transform_table_column(%{
type: type,
pk_name: field_name,
partition_key: partition_key?,
auto_increment: auto_increment?
}) do
field_name =
if is_binary(field_name) do
field_name
else
Atom.to_string(field_name)
end
case type do
:integer when auto_increment? and not partition_key? ->
{field_name, PKType.integer(), PKType.auto_increment()}
_ ->
type_mapping = %{
hashids: PKType.string(),
integer: PKType.integer(),
string: PKType.string(),
binary: PKType.binary()
}
{field_name, type_mapping[type]}
end
end
@doc """
Adds a primary key when creating a table.
This function only accepts the types `:string` | `:binary` | `:integer` | `:hashids`.
About the `:auto_increment` option:
* when `:auto_increment` is set to `true` and the field is a primary key other than the partition
key, Tablestore's auto-increment column feature is used for it.
* when `:auto_increment` is set to `true` and the field is the partition key, `ex_aliyun_ots`'s
built-in Sequence feature is used; the principle behind it is an atomic update operation
through another, separate table when generating the serial integer. By default an `:id`
partition key of `:integer` type is added, the initial value of the sequence is 0, and the increment step is 1.
Tablestore can have up to 4 primary keys, and the first defined primary key is the
partition key. Note that the order in which the primary keys are defined is mapped directly to
the created table.
About the `:hashids` type for defining the partition key:
* setting `partition_key` as `true` is required.
* setting `auto_increment` as `true` is required.
## Examples
The auto generated serial integer for partition key:
create table("posts") do
add :title, :string
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :id, :integer, partition_key: true, auto_increment: true
add :title, :string
end
The explicitly defined field with `partition_key`:
create table("posts") do
add :title, :string, partition_key: true
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :title, :string, partition_key: true
end
The `:auto_increment` integer for primary key of non-partitioned key:
create table("posts") do
add :tag, :integer, auto_increment: true
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :id, :integer, partition_key: true, auto_increment: true
add :tag, :integer, auto_increment: true
end
The `:hashids` type for the partition key with the built-in sequence feature:
create table("posts") do
add :id, :hashids, auto_increment: true, partition_key: true
end
## Options
* `:partition_key` - when `true`, marks this field as the partition key; only the first explicitly defined field may use this option.
* `:auto_increment` - when `true` and this field is a non-partitioned key, Tablestore automatically generates the primary key value, which is unique
within the partition key and increases monotonically; when `true` and this field is a partition key, `ex_aliyun_ots`'s Sequence is used to build
a serial number for this field. The `auto_increment: true` option can be bound to only one primary key.
"""
defmacro add(column, type, opts \\ []), do: _add_pk(column, type, opts)
defmacro add_pk(column, type, opts \\ []), do: _add_pk(column, type, opts)
defp _add_pk(column, type, opts)
when (is_atom(column) or is_binary(column)) and is_list(opts) do
validate_pk_type!(column, type)
quote location: :keep do
%{
pk_name: unquote(column),
type: unquote(type),
partition_key: Keyword.get(unquote(opts), :partition_key, false),
auto_increment: Keyword.get(unquote(opts), :auto_increment, false)
}
end
end
defp validate_pk_type!(column, type) do
if type in [:integer, :string, :binary, :hashids] do
:ok
else
raise ArgumentError,
"#{inspect(type)} is not a valid primary key type for column: `#{inspect(column)}`, " <>
"please use an atom as :integer | :string | :binary | :hashids ."
end
end
end
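A minimal usage sketch of the macros above (hedged: the module naming and the `use EctoTablestore.Migration` / `change/0` convention are assumed to follow Ecto's migration style):
```
defmodule MyApp.Repo.Migrations.CreatePosts do
  use EctoTablestore.Migration

  def change do
    # Relies on the default `:id` integer partition key described in `table/2`.
    create table("posts") do
      add :title, :string
    end
  end
end
```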
|
lib/ecto_tablestore/migration.ex
| 0.852291
| 0.46952
|
migration.ex
|
starcoder
|
defmodule Advent.Y2016.D01 do
@moduledoc """
https://adventofcode.com/2016/day/1
"""
@typep directions :: [{integer(), non_neg_integer()}]
@doc """
How far is the shortest path to the destination?
"""
@spec part_one(String.t()) :: non_neg_integer()
def part_one(input) do
{x, y, _d} =
input
|> parse_directions()
|> Enum.reduce({0, 0, 0}, fn {rot, s}, {x, y, d} ->
t = rotate(d, rot)
case t do
0 -> {x, y + s, t}
90 -> {x + s, y, t}
180 -> {x, y - s, t}
270 -> {x - s, y, t}
end
end)
manhattan_distance({0, 0}, {x, y})
end
@doc """
How many blocks away is the first location you visit twice?
"""
@spec part_two(String.t()) :: non_neg_integer()
def part_two(input) do
# |> (&((&1 -- Enum.uniq(&1)) |> Enum.uniq())).()
{x, y} =
input
|> parse_directions()
|> Enum.reduce_while({MapSet.new([{0, 0}]), {0, 0, 0}}, fn {rot, s}, {seen, {x, y, d}} ->
t = rotate(d, rot)
ns =
Enum.map(1..s, fn s ->
case t do
0 -> {x, y + s}
90 -> {x + s, y}
180 -> {x, y - s}
270 -> {x - s, y}
end
end)
case Enum.find(ns, &MapSet.member?(seen, &1)) do
nil ->
seen = Enum.reduce(ns, seen, &MapSet.put(&2, &1))
{x, y} = List.last(ns)
{:cont, {seen, {x, y, t}}}
repeat ->
{:halt, repeat}
end
end)
manhattan_distance({0, 0}, {x, y})
end
@spec parse_directions(String.t()) :: directions()
defp parse_directions(input) do
input
|> String.split(",")
|> Enum.map(&String.trim/1)
|> Enum.map(fn <<t, n::binary>> ->
turn =
case t do
?R -> 90
?L -> -90
end
step = String.to_integer(n)
{turn, step}
end)
end
# Normalize rotation
@spec rotate(integer(), integer()) :: non_neg_integer()
defp rotate(start, degree) do
rem(rem(start + degree, 360) + 360, 360)
end
# https://xlinux.nist.gov/dads/HTML/manhattanDistance.html
defp manhattan_distance({x_a, y_a}, {x_b, y_b}) do
abs(x_a - x_b) + abs(y_a - y_b)
end
end
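Two quick checks against the puzzle's published sample answers (illustrative calls, not doctests shipped with the module):
```
iex> Advent.Y2016.D01.part_one("R2, L3")
5
iex> Advent.Y2016.D01.part_two("R8, R4, R4, R8")
4
```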
|
lib/advent/y2016/d01.ex
| 0.796174
| 0.544862
|
d01.ex
|
starcoder
|
defmodule Cased.Sensitive.Processor do
@moduledoc """
Processes audit events for sensitive data.
"""
@default_process_opts [
return: :embedded,
handlers: []
]
@type process_opts :: [process_opt()]
@type process_opt ::
{:return, :embedded | :pii}
| {:handlers, [Cased.Sensitive.Handler.spec()]}
@type address :: [String.t() | non_neg_integer()]
@doc """
Process an audit event, collecting any sensitive data found.
## Examples
Process an audit event, returning any sensitive data in a new :".cased" key:
```
iex> audit_event = %{action: "comment.create", body: "Hi, @username"}
iex> Cased.Sensitive.Processor.process(audit_event, handlers: [
iex> {Cased.Sensitive.RegexHandler, :username, ~r/@\\w+/}
iex> ])
%{
".cased": %{
pii: %{
".body" => [
%Cased.Sensitive.Range{
begin_offset: 4,
end_offset: 13,
key: :body,
label: :username
}
]
}
},
action: "comment.create",
body: "Hi, @username"
}
```
Return just the sensitive data:
```
iex> audit_event = %{action: "comment.create", body: "Hi, @username"}
iex> Cased.Sensitive.Processor.process(audit_event, handlers: [
iex> {Cased.Sensitive.RegexHandler, :username, ~r/@\\w+/}
iex> ], return: :pii)
%{
".body" => [
%Cased.Sensitive.Range{
begin_offset: 4,
end_offset: 13,
key: :body,
label: :username
}
]
}
```
"""
@spec process(
audit_event :: map(),
opts :: process_opts()
) :: map()
def process(audit_event, opts \\ []) do
opts =
@default_process_opts
|> Keyword.merge(opts)
handlers =
opts[:handlers]
|> Enum.map(&Cased.Sensitive.Handler.from_spec/1)
{processed_audit_event, pii_data} =
audit_event
|> collect(handlers)
case {opts[:return], pii_data} do
{:pii, _} ->
pii_data
{:embedded, d} when map_size(d) == 0 ->
processed_audit_event
{:embedded, _} ->
processed_audit_event
|> Map.put(:".cased", %{pii: pii_data})
end
end
# Collect data and PII from an audit event, using handlers
@spec collect(audit_event :: map(), handlers :: [Cased.Sensitive.Handler.t()]) :: {map(), map()}
defp collect(audit_event, handlers) do
collect_from_node(audit_event, audit_event, handlers)
end
# Collect data and PII from a node, using handlers
@spec collect_from_node(
node :: any(),
audit_event :: map(),
handlers :: [Cased.Sensitive.Handler.t()]
) :: {processed_node :: map(), pii :: map()}
defp collect_from_node(node, audit_event, handlers) do
node
|> Enum.reduce({%{}, %{}, []}, &do_collect_from_node(&1, &2, audit_event, handlers))
|> Tuple.delete_at(2)
end
@spec do_collect_from_node(
{key :: any(), value :: any()},
acc :: {results :: map(), pii :: map(), parent_address :: address()},
audit_event :: map(),
handlers :: [Cased.Sensitive.Handler.t()]
) :: {results :: map(), pii :: map(), address :: address()}
# Value is manually marked as sensitive; split data and ranges
defp do_collect_from_node(
{key, %Cased.Sensitive.String{} = value},
{processed_node, pii, parent_address} = _acc,
_audit_event,
_handlers
) do
range =
value
|> Cased.Sensitive.String.to_range(key)
address = [key | parent_address]
{
Map.put(processed_node, key, value.data),
Map.put(pii, build_path(address), [range]),
parent_address
}
end
# Value is another type of struct; just store the value
defp do_collect_from_node(
{key, value},
{results, pii, parent_address} = _acc,
_audit_event,
_handlers
)
when is_struct(value) do
{
Map.put(results, key, value),
pii,
parent_address
}
end
# Value is a list; recurse
defp do_collect_from_node(
{key, values},
{results, pii, parent_address} = _acc,
audit_event,
handlers
)
when is_list(values) do
address = [key | parent_address]
acc = {_result = [], pii}
{result, pii} =
values
|> Enum.with_index()
|> Enum.reduce(
acc,
&collect_from_list_element(&1, &2, address, audit_event, handlers)
)
{
Map.put(results, key, result |> Enum.reverse()),
pii,
parent_address
}
end
# Value is a map; recurse
defp do_collect_from_node(
{key, values},
{results, pii, parent_address} = _acc,
audit_event,
handlers
)
when is_map(values) do
address = [key | parent_address]
acc = {_result = %{}, pii}
{result, pii} =
values
|> Enum.reduce(
acc,
&collect_from_map_pair(&1, &2, address, audit_event, handlers)
)
{
Map.put(results, key, result),
pii,
parent_address
}
end
# Value is a scalar; extract ranges
defp do_collect_from_node(
{key, value},
{results, pii, parent_address} = _acc,
audit_event,
handlers
) do
address = [key | parent_address]
case ranges(handlers, audit_event, key, value) do
[] ->
{
Map.put(results, key, value),
pii,
parent_address
}
key_pii ->
{
Map.put(results, key, value),
Map.put(pii, build_path(address), key_pii),
parent_address
}
end
end
@spec collect_from_map_pair(
{key :: any(), value :: any()},
acc :: {results :: map(), pii :: map()},
parent_address :: address(),
audit_event :: map(),
handlers :: [Cased.Sensitive.Handler.t()]
) :: {results :: map(), pii :: map()}
defp collect_from_map_pair(pair, {results, pii}, parent_address, audit_event, handlers) do
do_collect_from_node(pair, {results, pii, parent_address}, audit_event, handlers)
|> Tuple.delete_at(2)
end
@spec collect_from_list_element(
{value :: any(), offset :: non_neg_integer()},
acc :: {results :: map(), pii :: map()},
parent_address :: address(),
audit_event :: map(),
handlers :: [Cased.Sensitive.Handler.t()]
) :: {results :: list(), pii :: map()}
defp collect_from_list_element(
{value, offset},
{results, pii},
parent_address,
audit_event,
handlers
) do
{collected_result, pii, _} =
do_collect_from_node(
{offset, value},
{%{}, pii, parent_address},
audit_event,
handlers
)
result = collected_result[offset]
{[result | results], pii}
end
# Extract the sensitive value ranges from a value, using handlers
@spec ranges(
handlers :: [Cased.Sensitive.Handler.t()],
audit_event :: map(),
key :: atom() | String.t(),
value :: any()
) :: [Cased.Sensitive.Range.t()]
defp ranges(handlers, audit_event, key, value) do
handlers
|> Enum.flat_map(fn %module{} = handler ->
module.ranges(handler, audit_event, {key, value})
end)
end
@doc false
@spec build_path(address :: address()) :: String.t()
def build_path(address) do
address
|> Enum.reverse()
|> Enum.map(fn
value when is_integer(value) ->
"[#{value}]"
value ->
# Normalize atoms
value = to_string(value)
key =
if String.contains?(value, ".") do
~s("#{value}")
else
value
end
".#{key}"
end)
|> Enum.join("")
end
end
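Since `build_path/1` receives addresses innermost-key-first and reverses them before joining, the path syntax can be sketched directly (illustrative calls, traced through the clauses above):
```
iex> Cased.Sensitive.Processor.build_path([0, :comments])
".comments[0]"
iex> Cased.Sensitive.Processor.build_path(["user.name"])
".\"user.name\""
```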
|
lib/cased/sensitive/processor.ex
| 0.907421
| 0.741089
|
processor.ex
|
starcoder
|
defmodule BPXE.Engine.Process.Log do
defmodule NewProcessActivation do
defstruct pid: nil, id: nil, activation: nil
end
defmodule FlowNodeActivated do
defstruct pid: nil, id: nil, token_id: nil, token: nil
end
defmodule FlowNodeForward do
defstruct pid: nil, id: nil, token_id: nil, to: []
end
defmodule FlowNodeErrorOccurred do
defstruct pid: nil, id: nil, token_id: nil, error: nil
end
defmodule ExpressionErrorOccurred do
defstruct pid: nil, id: nil, token_id: nil, expression: nil, error: nil
end
defmodule ExclusiveGatewayActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule EventBasedGatewayActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule EventBasedGatewayCompleted do
defstruct pid: nil, id: nil, token_id: nil, to: []
end
defmodule ParallelGatewayReceived do
defstruct pid: nil, id: nil, token_id: nil, from: nil
end
defmodule ParallelGatewayCompleted do
defstruct pid: nil, id: nil, token_id: nil, to: []
end
defmodule InclusiveGatewayReceived do
defstruct pid: nil, id: nil, token_id: nil, from: nil
end
defmodule InclusiveGatewayCompleted do
defstruct pid: nil, id: nil, token_id: nil, fired: []
end
defmodule PrecedenceGatewayActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule PrecedenceGatewayPrecedenceEstablished do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule PrecedenceGatewayTokenDiscarded do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule SensorGatewayActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule SensorGatewayCompleted do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule EventActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule EventTriggered do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule EventCompleted do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule SequenceFlowStarted do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule SequenceFlowCompleted do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule TaskActivated do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule TaskCompleted do
defstruct pid: nil, id: nil, token_id: nil
end
defmodule ScriptTaskErrorOccurred do
defstruct pid: nil, id: nil, token_id: nil, error: nil
end
defmodule ServiceTimeoutOccurred do
defstruct pid: nil, id: nil, token_id: nil, timeout: nil
end
end
|
lib/bpxe/engine/process/log.ex
| 0.5083
| 0.466542
|
log.ex
|
starcoder
|
defmodule ICalendar.Recurrence do
@moduledoc """
Adds support for recurring events.
Events can recur by frequency, count, interval, and/or start/end date. To
see the specific rules and examples, see `get_recurrences/2` below.
Credit to @fazibear for this module.
"""
alias ICalendar.Event
# ignore :byhour, :bymonthday, :byyearday, :byweekno, :bymonth for now
@supported_by_x_rrules [:byday]
@doc """
Given an event, return a stream of recurrences for that event.
Warning: this may create a very large sequence of event recurrences.
## Parameters
- `event`: The event that may contain an rrule. See `ICalendar.Event`.
- `end_date` *(optional)*: A date time that represents the fallback end date
for a recurring event. This value is only used when the options specified
in rrule result in an infinite recurrence (i.e. when neither `count` nor
`until` is set). If no end_date is set, it will default to
`DateTime.utc_now()`.
## Event rrule options
Event recurrence details are specified in the `rrule`. The following options
are considered:
- `freq`: Represents how frequently the event recurs. Allowed frequencies
are `DAILY`, `WEEKLY`, `MONTHLY`, and `YEARLY`. These can be further modified by
the `interval` option.
- `count` *(optional)*: Represents the number of times that an event will
recur. This takes precedence over the `end_date` parameter and the
`until` option.
- `interval` *(optional)*: Represents the interval at which events occur.
This option works in concert with `freq` above; by using the `interval`
option, an event could recur every 5 days or every 3 weeks.
- `until` *(optional)*: Represents the end date for a recurring event.
This takes precedence over the `end_date` parameter.
- `byday` *(optional)*: Represents the days of the week at which events occur.
The `freq` option is required for a valid rrule, but the others are
optional. They may be used either individually (ex. just `freq`) or in
concert (ex. `freq` + `interval` + `until`).
## Future rrule options (not yet supported)
- `byhour` *(optional)*: Represents the hours of the day at which events occur.
- `byweekno` *(optional)*: Represents the week number at which events occur.
- `bymonthday` *(optional)*: Represents the days of the month at which events occur.
- `bymonth` *(optional)*: Represents the months at which events occur.
- `byyearday` *(optional)*: Represents the days of the year at which events occur.
## Examples
iex> dt = Timex.to_datetime({2016, 8, 13})
iex> dt_end = Timex.to_datetime({2016, 8, 23})
iex> event = %ICalendar.Event{rrule: %{freq: "DAILY"}, dtstart: dt, dtend: dt}
iex> recurrences =
...>   ICalendar.Recurrence.get_recurrences(event, dt_end)
...>   |> Enum.to_list()
"""
@spec get_recurrences(%Event{}) :: %Stream{}
@spec get_recurrences(%Event{}, %DateTime{}) :: %Stream{}
def get_recurrences(event, end_date \\ DateTime.utc_now()) do
by_x_rrules =
if is_map(event.rrule), do: Map.take(event.rrule, @supported_by_x_rrules), else: %{}
reference_events =
if by_x_rrules != %{} do
# If there are any by_x modifiers in the rrule, build reference events based on them
# Remove the invalid reference events later on
build_reference_events_by_x_rules(event, by_x_rrules)
else
[event]
end
case event.rrule do
nil ->
Stream.map([nil], fn _ -> [] end)
%{freq: "DAILY", count: count, interval: interval} ->
add_recurring_events_count(event, reference_events, count, days: interval)
%{freq: "DAILY", until: until, interval: interval} ->
add_recurring_events_until(event, reference_events, until, days: interval)
%{freq: "DAILY", count: count} ->
add_recurring_events_count(event, reference_events, count, days: 1)
%{freq: "DAILY", until: until} ->
add_recurring_events_until(event, reference_events, until, days: 1)
%{freq: "DAILY", interval: interval} ->
add_recurring_events_until(event, reference_events, end_date, days: interval)
%{freq: "DAILY"} ->
add_recurring_events_until(event, reference_events, end_date, days: 1)
%{freq: "WEEKLY", until: until, interval: interval} ->
add_recurring_events_until(event, reference_events, until, days: interval * 7)
%{freq: "WEEKLY", count: count} ->
add_recurring_events_count(event, reference_events, count, days: 7)
%{freq: "WEEKLY", until: until} ->
add_recurring_events_until(event, reference_events, until, days: 7)
%{freq: "WEEKLY", interval: interval} ->
add_recurring_events_until(event, reference_events, end_date, days: interval * 7)
%{freq: "WEEKLY"} ->
add_recurring_events_until(event, reference_events, end_date, days: 7)
%{freq: "MONTHLY", count: count, interval: interval} ->
add_recurring_events_count(event, reference_events, count, months: interval)
%{freq: "MONTHLY", until: until, interval: interval} ->
add_recurring_events_until(event, reference_events, until, months: interval)
%{freq: "MONTHLY", count: count} ->
add_recurring_events_count(event, reference_events, count, months: 1)
%{freq: "MONTHLY", until: until} ->
add_recurring_events_until(event, reference_events, until, months: 1)
%{freq: "MONTHLY", interval: interval} ->
add_recurring_events_until(event, reference_events, end_date, months: interval)
%{freq: "MONTHLY"} ->
add_recurring_events_until(event, reference_events, end_date, months: 1)
%{freq: "YEARLY", count: count, interval: interval} ->
add_recurring_events_count(event, reference_events, count, years: interval)
%{freq: "YEARLY", until: until, interval: interval} ->
add_recurring_events_until(event, reference_events, until, years: interval)
%{freq: "YEARLY", count: count} ->
add_recurring_events_count(event, reference_events, count, years: 1)
%{freq: "YEARLY", until: until} ->
add_recurring_events_until(event, reference_events, until, years: 1)
%{freq: "YEARLY", interval: interval} ->
add_recurring_events_until(event, reference_events, end_date, years: interval)
%{freq: "YEARLY"} ->
add_recurring_events_until(event, reference_events, end_date, years: 1)
end
end
defp add_recurring_events_until(original_event, reference_events, until, shift_opts) do
Stream.resource(
fn -> [reference_events] end,
fn acc_events ->
# Use the previous batch of the events as the reference for the next batch
[prev_event_batch | _] = acc_events
case prev_event_batch do
[] ->
{:halt, acc_events}
prev_event_batch ->
new_events =
Enum.map(prev_event_batch, fn reference_event ->
new_event = shift_event(reference_event, shift_opts)
case Timex.compare(new_event.dtstart, until) do
1 -> []
_ -> [new_event]
end
end)
|> List.flatten()
{remove_excluded_dates(new_events, original_event), [new_events | acc_events]}
end
end,
fn recurrences ->
recurrences
end
)
end
defp add_recurring_events_count(original_event, reference_events, count, shift_opts) do
Stream.resource(
fn -> {[reference_events], count} end,
fn {acc_events, count} ->
# Use the previous batch of the events as the reference for the next batch
[prev_event_batch | _] = acc_events
case prev_event_batch do
[] ->
{:halt, acc_events}
prev_event_batch ->
new_events =
Enum.map(prev_event_batch, fn reference_event ->
new_event = shift_event(reference_event, shift_opts)
if count > 1 do
[new_event]
else
[]
end
end)
|> List.flatten()
{remove_excluded_dates(new_events, original_event),
{[new_events | acc_events], count - 1}}
end
end,
fn recurrences ->
recurrences
end
)
end
defp shift_event(event, shift_opts) do
Map.merge(event, %{
dtstart: shift_date(event.dtstart, shift_opts),
dtend: shift_date(event.dtend, shift_opts),
rrule: Map.put(event.rrule, :is_recurrence, true)
})
end
defp shift_date(date, shift_opts) do
case Timex.shift(date, shift_opts) do
%Timex.AmbiguousDateTime{} = new_date ->
new_date.after
new_date ->
new_date
end
end
defp build_reference_events_by_x_rules(event, by_x_rrules) do
by_x_rrules
|> Map.keys()
|> Enum.map(fn by_x ->
build_reference_events_by_x_rule(event, by_x)
end)
|> List.flatten()
end
@valid_days ["SU", "MO", "TU", "WE", "TH", "FR", "SA"]
@day_values %{su: 0, mo: 1, tu: 2, we: 3, th: 4, fr: 5, sa: 6}
defp build_reference_events_by_x_rule(
%{rrule: %{byday: bydays}} = event,
:byday
) do
bydays
|> Enum.map(fn byday ->
if byday in @valid_days do
day_atom = byday |> String.downcase() |> String.to_atom()
# determine the difference between the byday and the event's dtstart
day_offset_for_reference = Map.get(@day_values, day_atom) - Timex.weekday(event.dtstart)
Map.merge(event, %{
dtstart: Timex.shift(event.dtstart, days: day_offset_for_reference),
dtend: Timex.shift(event.dtend, days: day_offset_for_reference)
})
else
# Ignore the invalid byday value
nil
end
end)
|> Enum.filter(&(!is_nil(&1)))
end
defp remove_excluded_dates(recurrences, original_event) do
Enum.filter(recurrences, fn new_event ->
# Make sure new event doesn't fall on an EXDATE
falls_on_exdate = not is_nil(new_event) and new_event.dtstart in new_event.exdates
# This removes any events which were created as references
is_invalid_reference_event =
DateTime.compare(new_event.dtstart, original_event.dtstart) == :lt
!falls_on_exdate &&
!is_invalid_reference_event
end)
end
end
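A hedged sketch of a finite rule (the `%ICalendar.Event{}` field shapes, including the `exdates` list consulted in `remove_excluded_dates/2`, are assumed to match what the parser produces):
```
event = %ICalendar.Event{
  dtstart: ~U[2016-08-13 10:00:00Z],
  dtend: ~U[2016-08-13 11:00:00Z],
  exdates: [],
  rrule: %{freq: "WEEKLY", count: 3}
}
event |> ICalendar.Recurrence.get_recurrences() |> Enum.to_list()
# => two events, one and two weeks after the original
# (an RRULE count includes the original occurrence)
```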
|
lib/icalendar/recurrence.ex
| 0.93223
| 0.533519
|
recurrence.ex
|
starcoder
|
defmodule Zappa.OptionParser do
@moduledoc false
# The name of the index variable should match up with the index helper.
@index_var "index___helper"
@valid_variable_name_regex ~r/^[a-zA-Z]{1}[a-zA-Z0-9_]+$/
@typep variable :: String.t()
@typep iterator :: String.t()
@typep index :: String.t()
@doc ~S"""
This function is a lo-fi knock-off of the original
[OptionParser.split/1](https://hexdocs.pm/elixir/1.3.4/OptionParser.html#split/1) tweaked specifically to support
Handlebars [Hash Arguments](https://handlebarsjs.com/guide/block-helpers.html#hash-arguments).
This function splits a raw string into a list of arguments and keyword arguments. The arguments encountered are
classified as being either quoted or unquoted so that downstream parsing can know whether to treat the value as a
variable or as a string constant.
The `split/1` function returns a tuple containing a list of value maps and a map containing the "hash arguments".
## Examples
iex> Zappa.OptionParser.split("foo bar")
{[%{quoted?: false, value: "foo"}, %{quoted?: false, value: "bar"}], %{}}
iex> Zappa.OptionParser.split("foo \"bar baz\"")
{[%{quoted?: false, value: "foo"}, %{quoted?: true, value: "bar baz"}], %{}}
"""
@spec split(String.t()) :: {list, map}
def split(string) when is_binary(string) do
do_split(String.trim_leading(string, " "), "", [], "", %{}, nil, false)
end
# Did we find an equals sign?
defp do_split("=" <> t, args_buffer, args_acc, _kwargs_buffer, kwargs_acc, nil, false) do
do_split(String.trim_leading(t, " "), "", args_acc, args_buffer, kwargs_acc, nil, true)
end
# If we have a quote and we were not in a quote, start one
defp do_split(
<<quote, t::binary>>,
args_buffer,
args_acc,
kwargs_buffer,
kwargs_acc,
nil,
equals
)
when quote in [?", ?'] do
do_split(t, args_buffer, args_acc, kwargs_buffer, kwargs_acc, quote, equals)
end
# If we have a quote and we were inside it, close it
# defp do_split(<<quote, t::binary>>, args_buffer, args_acc, quote), do: do_split(t, args_buffer, args_acc, nil)
defp do_split(
<<quote, t::binary>>,
args_buffer,
args_acc,
kwargs_buffer,
kwargs_acc,
quote,
false
) do
do_split(
String.trim_leading(t, " "),
"",
[%{value: args_buffer, quoted?: true} | args_acc],
kwargs_buffer,
kwargs_acc,
nil,
false
)
end
# If we are in a key/value declaration and we end a quote, track the hash value and reset the buffers
defp do_split(
<<quote, t::binary>>,
args_buffer,
args_acc,
kwargs_buffer,
kwargs_acc,
quote,
true
) do
do_split(
String.trim_leading(t, " "),
"",
args_acc,
"",
Map.put(kwargs_acc, String.to_atom(kwargs_buffer), args_buffer),
nil,
false
)
end
# If we have an escaped quote/space, simply remove the escape as long as we are not inside a quote
# (I have no idea when someone would use this)
defp do_split(
<<?\\, h, t::binary>>,
args_buffer,
args_acc,
kwargs_buffer,
kwargs_acc,
nil,
equals
)
when h in [?\s, ?', ?"] do
do_split(t, <<args_buffer::binary, h>>, args_acc, kwargs_buffer, kwargs_acc, nil, equals)
end
# If we have a space and we are outside of a quote, start new segment
defp do_split(<<?\s, t::binary>>, args_buffer, args_acc, kwargs_buffer, kwargs_acc, nil, false) do
do_split(
String.trim_leading(t, " "),
"",
[%{value: args_buffer, quoted?: false} | args_acc],
kwargs_buffer,
kwargs_acc,
nil,
false
)
end
# If we are in a key/value declaration and we find a space outside a quote, track the hash value and reset the buffers
defp do_split(<<?\s, t::binary>>, args_buffer, args_acc, kwargs_buffer, kwargs_acc, nil, true) do
do_split(
String.trim_leading(t, " "),
"",
args_acc,
"",
Map.put(kwargs_acc, String.to_atom(kwargs_buffer), args_buffer),
nil,
false
)
end
# All other characters are moved to args_buffer
defp do_split(<<h, t::binary>>, args_buffer, args_acc, kwargs_buffer, kwargs_acc, quote, equals) do
do_split(t, <<args_buffer::binary, h>>, args_acc, kwargs_buffer, kwargs_acc, quote, equals)
end
# Finish the string expecting a nil marker
defp do_split(<<>>, "", args_acc, _kwargs_buffer, kwargs_acc, nil, _equals),
do: {Enum.reverse(args_acc), kwargs_acc}
defp do_split(<<>>, args_buffer, args_acc, _kwargs_buffer, kwargs_acc, nil, false),
do: {Enum.reverse([%{value: args_buffer, quoted?: false} | args_acc]), kwargs_acc}
defp do_split(<<>>, args_buffer, args_acc, kwargs_buffer, kwargs_acc, nil, true),
do: {Enum.reverse(args_acc), Map.put(kwargs_acc, String.to_atom(kwargs_buffer), args_buffer)}
# Otherwise raise
defp do_split(<<>>, _, _args_acc, _kwargs_buffer, _kwargs_acc, marker, _equals) do
raise "Tag options string did not terminate properly, a #{<<marker>>} was opened but never closed"
end
@doc """
This function exists to parse the weird Ruby-esque "block" closures that are used by Handlebars to specify custom
iterators and indexes in loops. For example, you might see something like:
```
{{#each notes as |note|}}
{{note}}
{{/each}}
```
This function takes a string (the `raw_options` from a `Zappa.Tag` struct) and determines the variable being
enumerated, the iterator variable, and the index variable. The result is returned as a tuple in the
common `{:ok, {variable, iterator, index}}` shape. If no customizations are specified, default values are returned.
## Examples
iex> Zappa.OptionParser.split_block("notes")
{:ok, {"notes", "this", "index___helper"}}
iex> Zappa.OptionParser.split_block("notes as |note|")
{:ok, {"notes", "note", "index___helper"}}
iex> Zappa.OptionParser.split_block("notes as |note, scaleDegree|")
{:ok, {"notes", "note", "scaleDegree"}}
iex> Zappa.OptionParser.split_block("$$$ for rich white men")
{:error, "Invalid variable name"}
"""
@spec split_block(String.t()) :: {:ok, {variable, iterator, index}} | {:error, String.t()}
def split_block(string) when is_binary(string) do
with {:ok, variable, tail} <- find_variable(string),
:ok <- validate_variable_name(variable),
{:ok, iterator, index} <- find_iterator_index(tail) do
{:ok, {variable, iterator, index}}
else
{:error, msg} -> {:error, msg}
end
end
# Get the initial variable
defp find_variable(string) do
case String.split(String.trim(string), ~r/\p{Zs}/u, parts: 2) do
[""] -> {:error, "Missing variable"}
[variable] -> {:ok, variable, ""}
[variable, tail] -> {:ok, variable, String.trim(tail)}
end
end
defp find_iterator_index("") do
{:ok, "this", @index_var}
end
defp find_iterator_index(string) do
# Get the contents of the |block|
with [_, block] <- Regex.run(~r/^as\p{Zs}+\|(.*)\|.*$/, string),
{iterator, index} <- split_index_iterator(block),
:ok <- validate_variable_name(iterator),
:ok <- validate_variable_name(index) do
{:ok, iterator, index}
else
nil -> {:error, "Invalid syntax"}
{:error, msg} -> {:error, msg}
end
end
# We return tuples for smoother flow within the with clauses
@spec validate_variable_name(String.t()) :: :ok | {:error, String.t()}
defp validate_variable_name(string) do
case Regex.match?(@valid_variable_name_regex, string) do
false -> {:error, "Invalid variable name"}
true -> :ok
end
end
defp split_index_iterator(string) do
case String.split(string, ",") do
[iterator] -> {String.trim(iterator), @index_var}
[iterator, index] -> {String.trim(iterator), String.trim(index)}
_ -> {:error, "Invalid |block| contents"}
end
end
end
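Hash and positional arguments can mix; a quick sketch of the kwargs side, traced through the clauses above:
```
iex> Zappa.OptionParser.split(~S(class="foo" id=bar))
{[], %{class: "foo", id: "bar"}}
```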
|
lib/zappa/option_parser.ex
| 0.777131
| 0.504089
|
option_parser.ex
|
starcoder
|
defmodule Liveness do
@moduledoc """
`Liveness` offers the `eventually` higher-order function, which can be used
to specify liveness properties, or to busy-wait for a particular condition.
"""
defexception [:message]
@doc """
Runs `f` repeatedly until `f` succeeds or the number of `tries` is
reached.
Particular runs are separated in time by an interval of `interval`
milliseconds. The interval period begins when the function begins
execution. This means that if execution takes longer than `interval`
milliseconds, the next try will be attempted immediately after `f` returns.
A function is deemed to have failed if it returns `false` or `nil` (a falsy
value), or if it crashes (exits, raises, or `:erlang.error`s out).
If the function returns successfully, its return value becomes the value of
the call to `eventually`.
If the function returns a falsy value (`false` or `nil`) upon the last try,
then the `Liveness` exception is raised.
If the function raises an exception upon the last try, this exception is
re-raised by `eventually` with the *original* stacktrace.
"""
def eventually(f, tries \\ 250, interval \\ 20) do
eventually(f, tries, interval, %RuntimeError{}, nil)
end
defp eventually(_, 0, _, last_exception, last_stacktrace) do
case last_stacktrace do
nil -> raise(last_exception)
stacktrace -> reraise_or_exit(last_exception, stacktrace)
end
end
defp eventually(f, tries, interval, _, _) do
started_at = System.os_time(:millisecond)
try do
case f.() do
x when is_nil(x) or false == x ->
sleep_remaining(started_at, interval)
exception = %__MODULE__{message: "function returned #{inspect(x)}"}
eventually(f, tries - 1, interval, exception, nil)
other ->
other
end
rescue
e in __MODULE__ ->
reraise e, __STACKTRACE__
exception ->
sleep_remaining(started_at, interval)
eventually(f, tries - 1, interval, exception, __STACKTRACE__)
catch
class, reason ->
sleep_remaining(started_at, interval)
eventually(f, tries - 1, interval, {class, reason}, __STACKTRACE__)
end
end
defp sleep_remaining(started_at, interval) do
case interval - (System.os_time(:millisecond) - started_at) do
remaining when remaining > 0 -> Process.sleep(remaining)
_other -> :noop
end
end
defp reraise_or_exit({class, reason}, stacktrace) do
:erlang.raise(class, reason, stacktrace)
end
defp reraise_or_exit(exception, stacktrace) do
reraise(exception, stacktrace)
end
end
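A usage sketch: busy-wait for a (hypothetical) named process to register, trying up to 50 times at 100 ms intervals; the pid becomes the return value once `Process.whereis/1` stops returning `nil`:
```
pid = Liveness.eventually(fn -> Process.whereis(MyApp.Server) end, 50, 100)
```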
|
lib/liveness.ex
| 0.811527
| 0.724773
|
liveness.ex
|
starcoder
|
defmodule Tensorflow.AllocationRecord do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
alloc_micros: integer,
alloc_bytes: integer
}
defstruct [:alloc_micros, :alloc_bytes]
field(:alloc_micros, 1, type: :int64)
field(:alloc_bytes, 2, type: :int64)
end
defmodule Tensorflow.AllocatorMemoryUsed do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
allocator_name: String.t(),
total_bytes: integer,
peak_bytes: integer,
live_bytes: integer,
allocation_records: [Tensorflow.AllocationRecord.t()],
allocator_bytes_in_use: integer
}
defstruct [
:allocator_name,
:total_bytes,
:peak_bytes,
:live_bytes,
:allocation_records,
:allocator_bytes_in_use
]
field(:allocator_name, 1, type: :string)
field(:total_bytes, 2, type: :int64)
field(:peak_bytes, 3, type: :int64)
field(:live_bytes, 4, type: :int64)
field(:allocation_records, 6,
repeated: true,
type: Tensorflow.AllocationRecord
)
field(:allocator_bytes_in_use, 5, type: :int64)
end
defmodule Tensorflow.NodeOutput do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
slot: integer,
tensor_description: Tensorflow.TensorDescription.t() | nil
}
defstruct [:slot, :tensor_description]
field(:slot, 1, type: :int32)
field(:tensor_description, 3, type: Tensorflow.TensorDescription)
end
defmodule Tensorflow.MemoryStats do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
temp_memory_size: integer,
persistent_memory_size: integer,
persistent_tensor_alloc_ids: [integer],
device_temp_memory_size: integer,
device_persistent_memory_size: integer,
device_persistent_tensor_alloc_ids: [integer]
}
defstruct [
:temp_memory_size,
:persistent_memory_size,
:persistent_tensor_alloc_ids,
:device_temp_memory_size,
:device_persistent_memory_size,
:device_persistent_tensor_alloc_ids
]
field(:temp_memory_size, 1, type: :int64)
field(:persistent_memory_size, 3, type: :int64)
field(:persistent_tensor_alloc_ids, 5, repeated: true, type: :int64)
field(:device_temp_memory_size, 2, type: :int64, deprecated: true)
field(:device_persistent_memory_size, 4, type: :int64, deprecated: true)
field(:device_persistent_tensor_alloc_ids, 6,
repeated: true,
type: :int64,
deprecated: true
)
end
defmodule Tensorflow.NodeExecStats do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
node_name: String.t(),
all_start_micros: integer,
op_start_rel_micros: integer,
op_end_rel_micros: integer,
all_end_rel_micros: integer,
memory: [Tensorflow.AllocatorMemoryUsed.t()],
output: [Tensorflow.NodeOutput.t()],
timeline_label: String.t(),
scheduled_micros: integer,
thread_id: non_neg_integer,
referenced_tensor: [Tensorflow.AllocationDescription.t()],
memory_stats: Tensorflow.MemoryStats.t() | nil,
all_start_nanos: integer,
op_start_rel_nanos: integer,
op_end_rel_nanos: integer,
all_end_rel_nanos: integer,
scheduled_nanos: integer
}
defstruct [
:node_name,
:all_start_micros,
:op_start_rel_micros,
:op_end_rel_micros,
:all_end_rel_micros,
:memory,
:output,
:timeline_label,
:scheduled_micros,
:thread_id,
:referenced_tensor,
:memory_stats,
:all_start_nanos,
:op_start_rel_nanos,
:op_end_rel_nanos,
:all_end_rel_nanos,
:scheduled_nanos
]
field(:node_name, 1, type: :string)
field(:all_start_micros, 2, type: :int64)
field(:op_start_rel_micros, 3, type: :int64)
field(:op_end_rel_micros, 4, type: :int64)
field(:all_end_rel_micros, 5, type: :int64)
field(:memory, 6, repeated: true, type: Tensorflow.AllocatorMemoryUsed)
field(:output, 7, repeated: true, type: Tensorflow.NodeOutput)
field(:timeline_label, 8, type: :string)
field(:scheduled_micros, 9, type: :int64)
field(:thread_id, 10, type: :uint32)
field(:referenced_tensor, 11,
repeated: true,
type: Tensorflow.AllocationDescription
)
field(:memory_stats, 12, type: Tensorflow.MemoryStats)
field(:all_start_nanos, 13, type: :int64)
field(:op_start_rel_nanos, 14, type: :int64)
field(:op_end_rel_nanos, 15, type: :int64)
field(:all_end_rel_nanos, 16, type: :int64)
field(:scheduled_nanos, 17, type: :int64)
end
defmodule Tensorflow.DeviceStepStats.ThreadNamesEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: non_neg_integer,
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, type: :uint32)
field(:value, 2, type: :string)
end
defmodule Tensorflow.DeviceStepStats do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
device: String.t(),
node_stats: [Tensorflow.NodeExecStats.t()],
thread_names: %{non_neg_integer => String.t()}
}
defstruct [:device, :node_stats, :thread_names]
field(:device, 1, type: :string)
field(:node_stats, 2, repeated: true, type: Tensorflow.NodeExecStats)
field(:thread_names, 3,
repeated: true,
type: Tensorflow.DeviceStepStats.ThreadNamesEntry,
map: true
)
end
defmodule Tensorflow.StepStats do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
dev_stats: [Tensorflow.DeviceStepStats.t()]
}
defstruct [:dev_stats]
field(:dev_stats, 1, repeated: true, type: Tensorflow.DeviceStepStats)
end
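A hedged round-trip sketch, relying on the `encode/1`/`decode/1` functions that the Elixir `protobuf` library injects into generated modules:
```
record = %Tensorflow.AllocationRecord{alloc_micros: 1, alloc_bytes: 2_048}
binary = Tensorflow.AllocationRecord.encode(record)
^record = Tensorflow.AllocationRecord.decode(binary)
```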
|
lib/tensorflow/core/framework/step_stats.pb.ex
| 0.769297
| 0.450178
|
step_stats.pb.ex
|
starcoder
|
defmodule Liquex.Parser.Object do
@moduledoc false
import NimbleParsec
alias Liquex.Parser.Field
alias Liquex.Parser.Literal
@spec arguments(NimbleParsec.t()) :: NimbleParsec.t()
def arguments(combinator \\ empty()) do
choice([
combinator
|> Literal.argument()
|> lookahead_not(string(":"))
|> repeat(
ignore(Literal.whitespace())
|> ignore(string(","))
|> ignore(Literal.whitespace())
|> concat(Literal.argument())
|> lookahead_not(string(":"))
)
|> optional(
ignore(Literal.whitespace())
|> ignore(string(","))
|> ignore(Literal.whitespace())
|> keyword_fields()
),
keyword_fields()
])
end
def keyword_fields(combinator \\ empty()) do
combinator
|> keyword_field()
|> repeat(
ignore(Literal.whitespace())
|> ignore(string(","))
|> ignore(Literal.whitespace())
|> keyword_field()
)
end
defp keyword_field(combinator) do
combinator
|> concat(Field.identifier())
|> ignore(string(":"))
|> ignore(Literal.whitespace())
|> concat(Literal.argument())
|> tag(:keyword)
end
@spec filter(NimbleParsec.t()) :: NimbleParsec.t()
def filter(combinator \\ empty()) do
combinator
|> ignore(Literal.whitespace())
|> ignore(utf8_char([?|]))
|> ignore(Literal.whitespace())
|> concat(Field.identifier())
|> tag(
optional(
ignore(string(":"))
|> ignore(Literal.whitespace())
|> concat(arguments())
),
:arguments
)
|> tag(:filter)
end
@spec object(NimbleParsec.t()) :: NimbleParsec.t()
def object(combinator \\ empty()) do
combinator
|> ignore(string("{{"))
|> ignore(optional(string("-")))
|> ignore(Literal.whitespace())
|> Literal.argument()
|> optional(tag(repeat(filter()), :filters))
|> ignore(Literal.whitespace())
|> ignore(choice([close_object_remove_whitespace(), string("}}")]))
|> tag(:object)
end
def close_object_remove_whitespace(combinator \\ empty()) do
combinator
|> string("-}}")
|> Literal.whitespace()
end
end
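A hedged sketch of mounting the combinator behind `defparsec` (the wrapper module here is hypothetical; Liquex's real entry point composes more rules than this):
```
defmodule ObjectDemo do
  import NimbleParsec
  # Compile the object combinator into a parser entry point.
  defparsec(:parse_object, Liquex.Parser.Object.object())
end
ObjectDemo.parse_object("{{ name | upcase }}")
# => {:ok, [object: [...]], "", _context, _line, _offset}
```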
|
lib/liquex/parser/object.ex
| 0.710829
| 0.430207
|
object.ex
|
starcoder
|
defmodule Exi.Connect do
@moduledoc """
On Nerves startup, connects to the specified node via `Node.connect()` and tries to reconnect whenever the connection is lost.
## Usage
Add the following to application.ex:
```
{Exi.Connect, [node_name, cookie, conn_node]}
```
- node_name: your own node name
- cookie: the cookie (shared key)
- conn_node: the node to `Node.connect` to
## Example
```
{Exi.Connect, ["my_node_name", "comecomeeverybody", "node_server@192.168.0.7"]}
```
"""
use GenServer
require Logger
@interval_init_ms 1_000
@interval_wakeup_ms 1_000
@interval_alive_ms 60_000
@interval_alive_false_ms 1_000
def start_link(node_option \\ []) do
# to init/1
GenServer.start_link(__MODULE__, node_option, name: __MODULE__)
end
def init(node_option) do
set_interval(:init, @interval_init_ms)
{:ok, node_option}
end
def set_interval(msg, ms) do
# to handle_info/2
Process.send_after(self(), msg, ms)
end
def handle_info(:init, node_option) do
init_nodeconn(eth0_ready?(), node_option)
{:noreply, node_option}
end
def handle_info(:wakeup, node_option) do
nodeconn(node_option)
{:noreply, node_option}
end
def handle_info(:alive, node_option) do
re_nodeconn(conn_node_alive?(node_option), node_option)
{:noreply, node_option}
end
defp init_nodeconn(true, [node_name, cookie, _]) do
node_host = get_ipaddr_eth0_static()
System.cmd("epmd", ["-daemon"])
Node.start(:"#{node_name}@#{node_host}")
Node.set_cookie(:"#{cookie}")
Logger.info("=== Node.start -> #{node_name}@#{node_host} ===")
Logger.info("=== Node.set_cookie -> #{cookie} ===")
case [node_start?(), node_set_cookie?()] do
[true, true] ->
Logger.info("=== init_nodeconn -> success! Node.start & Node.set ===")
set_interval(:wakeup, @interval_wakeup_ms)
[_, _] ->
Logger.info(
"=== init_nodeconn -> false, node_start(#{inspect(node_start?())}), node_set_cookie(#{inspect(node_set_cookie?())}) ==="
)
set_interval(:init, @interval_init_ms)
end
end
defp init_nodeconn(false, [_, _, _]) do
Logger.info("=== init_nodeconn -> false, eth0_ready(#{inspect(eth0_ready?())}) ===")
set_interval(:init, @interval_init_ms)
end
defp nodeconn([_, _, conn_node]) do
conn = Node.connect(:"#{conn_node}")
Logger.info("=== Node.connect -> try connect to #{conn_node} ===")
case conn do
true ->
Logger.info("=== nodeconn -> #{conn} ===")
set_interval(:alive, @interval_alive_ms)
_ ->
set_interval(:wakeup, @interval_wakeup_ms)
end
end
defp re_nodeconn(:node_alive, _) do
set_interval(:alive, @interval_alive_ms)
end
defp re_nodeconn(:node_re_conn, [_, _, conn_node]) do
conn = Node.connect(:"#{conn_node}")
Logger.info("=== re_nodeconn Node.connect -> #{conn_node} ===")
case conn do
true ->
Logger.info("=== re_nodeconn -> #{conn} ===")
set_interval(:alive, @interval_alive_ms)
_ ->
set_interval(:alive, @interval_alive_false_ms)
end
end
defp re_nodeconn(:node_down, [_, _, conn_node]) do
Logger.debug("=== re_nodeconn -> false... try connect to #{conn_node} ====")
set_interval(:alive, @interval_alive_false_ms)
end
def node_start?() do
case Node.self() do
:nonode@nohost -> false
_ -> true
end
end
def node_set_cookie?() do
case Node.get_cookie() do
:nocookie -> false
_ -> true
end
end
def conn_node_alive?([_, _, conn_node]) do
case [conn_node_list_find?(conn_node), conn_node_ping?(conn_node)] do
[true, true] -> :node_alive
[false, true] -> :node_re_conn
[_, _] -> :node_down
end
end
def conn_node_list_find?(conn_node) do
case Node.list() |> Enum.find(fn x -> x == :"#{conn_node}" end) do
nil -> false
_ -> true
end
end
def conn_node_ping?(conn_node) do
case Node.ping(:"#{conn_node}") do
:pang -> false
:pong -> true
end
end
def eth0_ready?() do
case get_ipaddr_eth0_static() do
nil -> false
_ -> true
end
end
def wlan0_ready?() do
case get_ipaddr_wlan0() do
nil -> false
_ -> true
end
end
def get_ipaddr_eth0_static() do
case VintageNet.get_by_prefix(["interface", "eth0", "config"]) do
[] ->
nil
[tuple_int_eth0_config] ->
tuple_int_eth0_config
|> (fn {_, list_settings} -> list_settings end).()
|> Map.get(:ipv4)
|> Map.get(:address)
end
end
def get_ipaddr_wlan0() do
case VintageNet.get_by_prefix(["interface", "wlan0", "addresses"]) do
[] ->
nil
[tuple_int_wlan0_addr] ->
tuple_int_wlan0_addr
|> (fn {_, list_settings} -> list_settings end).()
|> hd()
|> Map.get(:address)
|> VintageNet.IP.ip_to_string()
end
end
end
|
dio/exibee/lib/exi/exi_connect.ex
| 0.67971
| 0.722796
|
exi_connect.ex
|
starcoder
|
defmodule WebSockex do
alias WebSockex.{Utils}
@handshake_guid "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
@moduledoc ~S"""
A client handles negotiating the connection, then sending frames, receiving
frames, closing, and reconnecting that connection.
A simple client implementation would be:
```
defmodule WsClient do
use WebSockex
def start_link(url, state) do
WebSockex.start_link(url, __MODULE__, state)
end
def handle_frame({:text, msg}, state) do
IO.puts "Received a message: #{msg}"
{:ok, state}
end
def handle_cast({:send, {type, msg} = frame}, state) do
IO.puts "Sending #{type} frame with payload: #{msg}"
{:reply, frame, state}
end
end
```
## Supervision
WebSockex is implemented as an OTP Special Process and as a result will fit
into supervision trees.
WebSockex also supports the Supervisor children format introduced in Elixir
1.5. Meaning that a child specification could be `{ClientModule, [state]}`.
However, since there is a possibility that you would like to provide a
`t:WebSockex.Conn.t/0` or a url as well as the state, there are two versions of
the `child_spec` function. If you need functionality beyond that it is
recommended that you override the function or define your own.
Just remember to use the version that corresponds with your `start_link`'s
arity.
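For example, a supervisor could use the two-argument version explicitly
(a hedged sketch; the URL is a placeholder):
```
children = [WsClient.child_spec("wss://example.com/ws", %{})]
Supervisor.start_link(children, strategy: :one_for_one)
```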
"""
@type client :: pid | atom | {:via, module, term} | {:global, term}
@type frame ::
:ping
| :pong
| {:ping | :pong, nil | (message :: binary)}
| {:text | :binary, message :: binary}
@typedoc """
The frame sent when negotiating a connection closure.
"""
@type close_frame :: {close_code, message :: binary}
@typedoc """
An integer between 1000 and 4999 that specifies the reason for closing the connection.
"""
@type close_code :: integer
@typedoc """
Debug options to be parsed by `:sys.debug_options/1`.
These options can also be set after the process is running using the functions in
the Erlang `:sys` module.
"""
@type debug_opts :: [
:trace
| :log
| {:log, log_depth :: pos_integer}
| :statistics
| {:log_to_file, Path.t()}
]
@type options :: [option]
@typedoc """
Options values for `start_link`.
- `:async` - Replies with `{:ok, pid}` before establishing the connection.
This is useful when attempting to connect indefinitely; this way the
process doesn't block while trying to establish a connection.
- `:handle_initial_conn_failure` - When set to `true` a connection failure
while establishing the initial connection won't immediately return an error
and instead will invoke the `c:handle_disconnect/2` callback. This option
only matters during process initialization. The `handle_disconnect`
callback is always invoked if an established connection is lost.
- `:debug` - Options to set the debug options for `:sys.handle_debug`.
- `:name` - An atom that registers the process under that name locally.
Can also be a `{:via, module, term}` or `{:global, term}` tuple.
Other possible option values include: `t:WebSockex.Conn.connection_option/0`
"""
@type option ::
WebSockex.Conn.connection_option()
| {:async, boolean}
| {:debug, debug_opts}
| {:name, atom | {:global, term} | {:via, module, term}}
| {:handle_initial_conn_failure, boolean}
@typedoc """
The reason a connection was closed.
A `:normal` reason is the same as a `1000` reason with no payload.
If the peer closes the connection abruptly without a close frame then the
close reason is `{:remote, :closed}`.
"""
@type close_reason ::
{:remote | :local, :normal}
| {:remote | :local, close_code, message :: binary}
| {:remote, :closed}
| {:error, term}
@typedoc """
The error returned when a connection fails to be established.
"""
@type close_error ::
%WebSockex.RequestError{}
| %WebSockex.ConnError{}
| %WebSockex.InvalidFrameError{}
| %WebSockex.FrameEncodeError{}
@typedoc """
A map that contains information about the failure to connect.
This map contains the error, attempt number, and the `t:WebSockex.Conn.t/0`
that was used to attempt the connection.
"""
@type connection_status_map :: %{
reason: close_reason | close_error,
attempt_number: integer,
conn: WebSockex.Conn.t()
}
@doc """
Invoked after a connection is established.
This is invoked after both the initial connection and a reconnect.
"""
@callback handle_connect(conn :: WebSockex.Conn.t(), state :: term) :: {:ok, new_state :: term}
@doc """
Invoked on the reception of a frame on the socket.
Control frames may carry payloads; when they don't have a payload,
the frame will have `nil` as the payload, e.g. `{:ping, nil}`.
"""
@callback handle_frame(frame, state :: term) ::
{:ok, new_state}
| {:reply, frame, new_state}
| {:close, new_state}
| {:close, close_frame, new_state}
when new_state: term
@doc """
Invoked to handle asynchronous `cast/2` messages.
"""
@callback handle_cast(msg :: term, state :: term) ::
{:ok, new_state}
| {:reply, frame, new_state}
| {:close, new_state}
| {:close, close_frame, new_state}
when new_state: term
@doc """
Invoked to handle all other non-WebSocket messages.
"""
@callback handle_info(msg :: term, state :: term) ::
{:ok, new_state}
| {:reply, frame, new_state}
| {:close, new_state}
| {:close, close_frame, new_state}
when new_state: term
@doc """
Invoked when the WebSocket disconnects from the server.
This callback is only invoked in the event of a connection failure. In cases
of crashes or other errors, the process will terminate immediately,
skipping this callback.
If the `handle_initial_conn_failure: true` option is provided during process
startup, then this callback will be invoked if the process fails to establish
an initial connection.
If a connection is established by reconnecting, the `c:handle_connect/2`
callback will be invoked.
The possible returns for this callback are:
- `{:ok, state}` will continue the process termination.
- `{:reconnect, state}` will attempt to reconnect instead of terminating.
- `{:reconnect, conn, state}` will attempt to reconnect with the connection
data in `conn`. `conn` is expected to be a `t:WebSockex.Conn.t/0`.
"""
@callback handle_disconnect(connection_status_map, state :: term) ::
{:ok, new_state}
| {:reconnect, new_state}
| {:reconnect, new_conn :: WebSockex.Conn.t(), new_state}
when new_state: term
@doc """
Invoked when the WebSocket receives a ping frame.
"""
@callback handle_ping(ping_frame :: :ping | {:ping, binary}, state :: term) ::
{:ok, new_state}
| {:reply, frame, new_state}
| {:close, new_state}
| {:close, close_frame, new_state}
when new_state: term
@doc """
Invoked when the WebSocket receives a pong frame.
"""
@callback handle_pong(pong_frame :: :pong | {:pong, binary}, state :: term) ::
{:ok, new_state}
| {:reply, frame, new_state}
| {:close, new_state}
| {:close, close_frame, new_state}
when new_state: term
@doc """
Invoked when the process is terminating.
"""
@callback terminate(close_reason, state :: term) :: any
@doc """
Invoked when a new version of the module is loaded during runtime.
"""
@callback code_change(old_vsn :: term | {:down, term}, state :: term, extra :: term) ::
{:ok, new_state :: term}
| {:error, reason :: term}
@doc """
Invoked to retrieve a formatted status of the state in a WebSockex process.
This optional callback is used when you want to edit the values returned when
invoking `:sys.get_status`.
The second argument is a two-element list with the order of `[pdict, state]`.
"""
@callback format_status(:normal, [process_dictionary | state]) :: status :: term
when process_dictionary: [{key :: term, val :: term}], state: term
@optional_callbacks format_status: 2
defmacro __using__(opts) do
quote location: :keep do
@behaviour WebSockex
if Kernel.function_exported?(Supervisor, :child_spec, 2) do
@doc false
def child_spec(conn_info, state) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [conn_info, state]}
}
|> Supervisor.child_spec(unquote(Macro.escape(opts)))
end
@doc false
def child_spec(state) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [state]}
}
|> Supervisor.child_spec(unquote(Macro.escape(opts)))
end
defoverridable child_spec: 2, child_spec: 1
end
@doc false
def handle_connect(_conn, state) do
{:ok, state}
end
@doc false
def handle_frame(frame, _state) do
raise "No handle_frame/2 clause in #{__MODULE__} provided for #{inspect(frame)}"
end
@doc false
def handle_cast(message, _state) do
raise "No handle_cast/2 clause in #{__MODULE__} provided for #{inspect(message)}"
end
@doc false
def handle_info(message, state) do
require Logger
Logger.error("No handle_info/2 clause in #{__MODULE__} provided for #{inspect(message)}")
{:ok, state}
end
@doc false
def handle_disconnect(_connection_status_map, state) do
{:ok, state}
end
@doc false
def handle_ping(:ping, state) do
{:reply, :pong, state}
end
def handle_ping({:ping, msg}, state) do
{:reply, {:pong, msg}, state}
end
@doc false
def handle_pong(:pong, state), do: {:ok, state}
def handle_pong({:pong, _}, state), do: {:ok, state}
@doc false
def terminate(_close_reason, _state), do: :ok
@doc false
def code_change(_old_vsn, state, _extra), do: {:ok, state}
defoverridable handle_connect: 2,
handle_frame: 2,
handle_cast: 2,
handle_info: 2,
handle_ping: 2,
handle_pong: 2,
handle_disconnect: 2,
terminate: 2,
code_change: 3
end
end
@doc """
Starts a `WebSockex` process.
Acts like `start_link/4`, except it doesn't link to the current process.
See `start_link/4` for more information.
"""
@spec start(url :: String.t() | WebSockex.Conn.t(), module, term, options) ::
{:ok, pid} | {:error, term}
def start(conn_info, module, state, opts \\ [])
def start(%WebSockex.Conn{} = conn, module, state, opts) do
Utils.spawn(:no_link, conn, module, state, opts)
end
def start(url, module, state, opts) do
case WebSockex.Conn.parse_url(url) do
{:ok, uri} ->
conn = WebSockex.Conn.new(uri, opts)
start(conn, module, state, opts)
{:error, error} ->
{:error, error}
end
end
@doc """
Starts a `WebSockex` process linked to the current process.
For available option values see `t:option/0`.
If a `WebSockex.Conn.t` is used in place of a url string, then the options
available in `t:WebSockex.Conn.connection_option/0` have effect.
The callback `c:handle_connect/2` is invoked after the connection is
established.
"""
@spec start_link(url :: String.t() | WebSockex.Conn.t(), module, term, options) ::
{:ok, pid} | {:error, term}
def start_link(conn_info, module, state, opts \\ [])
def start_link(conn = %WebSockex.Conn{}, module, state, opts) do
Utils.spawn(:link, conn, module, state, opts)
end
def start_link(url, module, state, opts) do
case WebSockex.Conn.parse_url(url) do
{:ok, uri} ->
conn = WebSockex.Conn.new(uri, opts)
start_link(conn, module, state, opts)
{:error, error} ->
{:error, error}
end
end
@doc """
Asynchronously sends a message to a client that is handled by `c:handle_cast/2`.
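For example (a sketch; `client` stands for a pid or registered name returned by
`start_link/4`):
    WebSockex.cast(client, {:send_later, "hello"})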
"""
@spec cast(client, term) :: :ok
def cast(client, message) do
Utils.send(client, {:"$websockex_cast", message})
:ok
end
@doc """
Sends a frame through the WebSocket.
If the connection is either connecting or closing then this will return an
error tuple with a `WebSockex.NotConnectedError` exception struct as the
second element.
If a connection failure is discovered while sending then it will return an
error tuple with a `WebSockex.ConnError` exception struct as the second
element.
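A minimal usage sketch (`client` is a hypothetical connected WebSockex process):
    :ok = WebSockex.send_frame(client, {:text, "hello"})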
"""
@spec send_frame(client, frame) ::
:ok
| {:error,
%WebSockex.FrameEncodeError{}
| %WebSockex.ConnError{}
| %WebSockex.NotConnectedError{}
| %WebSockex.InvalidFrameError{}}
| none
def send_frame(client, _) when client == self() do
raise %WebSockex.CallingSelfError{function: :send_frame}
end
def send_frame(client, frame) do
try do
{:ok, res} = :gen.call(client, :"$websockex_send", frame)
res
catch
_, reason ->
exit({reason, {__MODULE__, :call, [client, frame]}})
end
end
@doc false
@spec init(pid, WebSockex.Conn.t(), module, term, options) :: {:ok, pid} | {:error, term}
def init(parent, conn, module, module_state, opts) do
do_init(parent, self(), conn, module, module_state, opts)
end
@spec init(pid, atom, WebSockex.Conn.t(), module, term, options) :: {:ok, pid} | {:error, term}
def init(parent, name, conn, module, module_state, opts) do
case Utils.register(name) do
true ->
do_init(parent, name, conn, module, module_state, opts)
{:error, _} = error ->
:proc_lib.init_ack(parent, error)
end
end
## OTP Stuffs
@doc false
def system_continue(parent, debug, %{connection_status: :connected} = state) do
websocket_loop(parent, debug, Map.delete(state, :connection_status))
end
def system_continue(parent, debug, %{connection_status: :connecting} = state) do
open_loop(parent, debug, Map.delete(state, :connection_status))
end
def system_continue(parent, debug, %{connection_status: {:closing, reason}} = state) do
close_loop(reason, parent, debug, Map.delete(state, :connection_status))
end
@doc false
@spec system_terminate(term, pid, any, any) :: no_return
def system_terminate(reason, parent, debug, state) do
terminate(reason, parent, debug, state)
end
@doc false
def system_get_state(%{module_state: module_state}) do
{:ok, module_state}
end
@doc false
def system_replace_state(fun, state) do
new_module_state = fun.(state.module_state)
{:ok, new_module_state, %{state | module_state: new_module_state}}
end
@doc false
def system_code_change(state, _mod, old_vsn, extra) do
case apply(state.module, :code_change, [old_vsn, state.module_state, extra]) do
{:ok, new_module_state} ->
{:ok, %{state | module_state: new_module_state}}
other ->
other
end
catch
other -> other
end
@doc false
def format_status(opt, [pdict, sys_state, parent, debug, state]) do
log = :sys.get_debug(:log, debug, [])
module_misc = module_status(opt, state.module, pdict, state.module_state)
[
{:header, 'Status for WebSockex process #{inspect(self())}'},
{:data,
[
{"Status", sys_state},
{"Parent", parent},
{"Log", log},
{"Connection Status", state.connection_status},
{"Socket Buffer", state.buffer},
{"Socket Module", state.module}
]}
| module_misc
]
end
defp module_status(opt, module, pdict, module_state) do
default = [{:data, [{"State", module_state}]}]
if function_exported?(module, :format_status, 2) do
result = try_callback(module, :format_status, [opt, [pdict, module_state]])
case result do
{:"$EXIT", _} ->
require Logger
Logger.error("There was an error while invoking #{module}.format_status/2")
default
other when is_list(other) ->
other
other ->
[other]
end
else
default
end
end
# Internals! Yay
defp do_init(parent, name, conn, module, module_state, opts) do
# OTP stuffs
debug = Utils.parse_debug_options(self(), opts)
reply_fun =
case Keyword.get(opts, :async, false) do
true ->
:proc_lib.init_ack(parent, {:ok, self()})
&async_init_fun/1
false ->
&sync_init_fun(parent, &1)
end
state = %{
conn: conn,
module: module,
module_state: module_state,
name: name,
reply_fun: reply_fun,
buffer: <<>>,
fragment: nil
}
handle_conn_failure = Keyword.get(opts, :handle_initial_conn_failure, false)
case open_connection(parent, debug, state) do
{:ok, new_state} ->
debug = Utils.sys_debug(debug, :connected, state)
module_init(parent, debug, new_state)
{:error, error, new_state} when handle_conn_failure == true ->
init_conn_failure(error, parent, debug, new_state)
{:error, error, _} ->
state.reply_fun.({:error, error})
end
end
# Loops
defp open_loop(parent, debug, state) do
%{task: %{ref: ref}} = state
receive do
{:system, from, req} ->
state = Map.put(state, :connection_status, :connecting)
:sys.handle_system_msg(req, from, parent, __MODULE__, debug, state)
{:"$websockex_send", from, _frame} ->
:gen.reply(from, {:error, %WebSockex.NotConnectedError{connection_state: :opening}})
open_loop(parent, debug, state)
{:EXIT, ^parent, reason} ->
case state do
%{reply_fun: reply_fun} ->
reply_fun.(reason)
exit(reason)
_ ->
terminate(reason, parent, debug, state)
end
{^ref, {:ok, new_conn}} ->
Process.demonitor(ref, [:flush])
new_state =
Map.delete(state, :task)
|> Map.put(:conn, new_conn)
{:ok, new_state}
{^ref, {:error, reason}} ->
Process.demonitor(ref, [:flush])
new_state = Map.delete(state, :task)
{:error, reason, new_state}
end
end
defp websocket_loop(parent, debug, state) do
case WebSockex.Frame.parse_frame(state.buffer) do
{:ok, frame, buffer} ->
debug = Utils.sys_debug(debug, {:in, :frame, frame}, state)
execute_telemetry([:websockex, :frame, :received], state, %{frame: frame})
handle_frame(frame, parent, debug, %{state | buffer: buffer})
:incomplete ->
transport = state.conn.transport
socket = state.conn.socket
receive do
{:system, from, req} ->
state = Map.put(state, :connection_status, :connected)
:sys.handle_system_msg(req, from, parent, __MODULE__, debug, state)
{:"$websockex_cast", msg} ->
debug = Utils.sys_debug(debug, {:in, :cast, msg}, state)
common_handle({:handle_cast, msg}, parent, debug, state)
{:"$websockex_send", from, frame} ->
sync_send(frame, from, parent, debug, state)
{^transport, ^socket, message} ->
buffer = <<state.buffer::bitstring, message::bitstring>>
websocket_loop(parent, debug, %{state | buffer: buffer})
{:tcp_closed, ^socket} ->
handle_close({:remote, :closed}, parent, debug, state)
{:ssl_closed, ^socket} ->
handle_close({:remote, :closed}, parent, debug, state)
{:EXIT, ^parent, reason} ->
terminate(reason, parent, debug, state)
msg ->
debug = Utils.sys_debug(debug, {:in, :msg, msg}, state)
common_handle({:handle_info, msg}, parent, debug, state)
end
end
end
defp close_loop(reason, parent, debug, %{conn: conn, timer_ref: timer_ref} = state) do
transport = state.conn.transport
socket = state.conn.socket
receive do
{:system, from, req} ->
state = Map.put(state, :connection_status, {:closing, reason})
:sys.handle_system_msg(req, from, parent, __MODULE__, debug, state)
{:EXIT, ^parent, reason} ->
terminate(reason, parent, debug, state)
{^transport, ^socket, _} ->
close_loop(reason, parent, debug, state)
{:"$websockex_send", from, _frame} ->
:gen.reply(from, {:error, %WebSockex.NotConnectedError{connection_state: :closing}})
close_loop(reason, parent, debug, state)
{close_mod, ^socket} when close_mod in [:tcp_closed, :ssl_closed] ->
new_conn = %{conn | socket: nil}
debug = Utils.sys_debug(debug, :closed, state)
purge_timer(timer_ref, :websockex_close_timeout)
state = Map.delete(state, :timer_ref)
on_disconnect(reason, parent, debug, %{state | conn: new_conn})
:"$websockex_close_timeout" ->
new_conn = WebSockex.Conn.close_socket(conn)
debug = Utils.sys_debug(debug, :timeout_closed, state)
on_disconnect(reason, parent, debug, %{state | conn: new_conn})
end
end
# Frame Handling
defp handle_frame(:ping, parent, debug, state) do
common_handle({:handle_ping, :ping}, parent, debug, state)
end
defp handle_frame({:ping, msg}, parent, debug, state) do
common_handle({:handle_ping, {:ping, msg}}, parent, debug, state)
end
defp handle_frame(:pong, parent, debug, state) do
common_handle({:handle_pong, :pong}, parent, debug, state)
end
defp handle_frame({:pong, msg}, parent, debug, state) do
common_handle({:handle_pong, {:pong, msg}}, parent, debug, state)
end
defp handle_frame(:close, parent, debug, state) do
handle_close({:remote, :normal}, parent, debug, state)
end
defp handle_frame({:close, code, reason}, parent, debug, state) do
handle_close({:remote, code, reason}, parent, debug, state)
end
defp handle_frame({:fragment, _, _} = fragment, parent, debug, state) do
handle_fragment(fragment, parent, debug, state)
end
defp handle_frame({:continuation, _} = fragment, parent, debug, state) do
handle_fragment(fragment, parent, debug, state)
end
defp handle_frame({:finish, _} = fragment, parent, debug, state) do
handle_fragment(fragment, parent, debug, state)
end
defp handle_frame(frame, parent, debug, state) do
common_handle({:handle_frame, frame}, parent, debug, state)
end
defp handle_fragment({:fragment, type, part}, parent, debug, %{fragment: nil} = state) do
websocket_loop(parent, debug, %{state | fragment: {type, part}})
end
defp handle_fragment({:fragment, _, _}, parent, debug, state) do
handle_close(
{:local, 1002, "Endpoint tried to start a fragment without finishing another"},
parent,
debug,
state
)
end
defp handle_fragment({:continuation, _}, parent, debug, %{fragment: nil} = state) do
handle_close(
{:local, 1002, "Endpoint sent a continuation frame without starting a fragment"},
parent,
debug,
state
)
end
defp handle_fragment({:continuation, next}, parent, debug, %{fragment: {type, part}} = state) do
websocket_loop(parent, debug, %{state | fragment: {type, <<part::binary, next::binary>>}})
end
defp handle_fragment({:finish, next}, parent, debug, %{fragment: {type, part}} = state) do
frame = {type, <<part::binary, next::binary>>}
debug = Utils.sys_debug(debug, {:in, :completed_fragment, frame}, state)
handle_frame(frame, parent, debug, %{state | fragment: nil})
end
defp handle_close({:remote, :closed} = reason, parent, debug, state) do
debug = Utils.sys_debug(debug, {:close, :remote, :unexpected}, state)
new_conn = %{state.conn | socket: nil}
on_disconnect(reason, parent, debug, %{state | conn: new_conn})
end
defp handle_close({:remote, _} = reason, parent, debug, state) do
handle_remote_close(reason, parent, debug, state)
end
defp handle_close({:remote, _, _} = reason, parent, debug, state) do
handle_remote_close(reason, parent, debug, state)
end
defp handle_close({:local, _} = reason, parent, debug, state) do
handle_local_close(reason, parent, debug, state)
end
defp handle_close({:local, _, _} = reason, parent, debug, state) do
handle_local_close(reason, parent, debug, state)
end
defp handle_close({:error, _} = reason, parent, debug, state) do
handle_error_close(reason, parent, debug, state)
end
defp common_handle({function, msg}, parent, debug, state) do
result = try_callback(state.module, function, [msg, state.module_state])
case result do
{:ok, new_state} ->
websocket_loop(parent, debug, %{state | module_state: new_state})
{:reply, frame, new_state} ->
# A `with` that includes `else` clause isn't tail recursive (elixir-lang/elixir#6251)
res =
with {:ok, binary_frame} <- WebSockex.Frame.encode_frame(frame),
do: WebSockex.Conn.socket_send(state.conn, binary_frame)
case res do
:ok ->
debug = Utils.sys_debug(debug, {:reply, function, frame}, state)
websocket_loop(parent, debug, %{state | module_state: new_state})
{:error, error} ->
handle_close({:error, error}, parent, debug, %{state | module_state: new_state})
end
{:close, new_state} ->
handle_close({:local, :normal}, parent, debug, %{state | module_state: new_state})
{:close, {close_code, message}, new_state} ->
handle_close({:local, close_code, message}, parent, debug, %{
state
| module_state: new_state
})
{:"$EXIT", reason} ->
handle_terminate_close(reason, parent, debug, state)
badreply ->
error = %WebSockex.BadResponseError{
module: state.module,
function: function,
args: [msg, state.module_state],
response: badreply
}
terminate(error, parent, debug, state)
end
end
defp handle_remote_close(reason, parent, debug, state) do
debug = Utils.sys_debug(debug, {:close, :remote, reason}, state)
# If the socket is already closed then that's ok, but the spec says to send
# the close frame back in response to receiving it.
debug =
case send_close_frame(reason, state.conn) do
:ok -> Utils.sys_debug(debug, {:socket_out, :close, reason}, state)
_ -> debug
end
timer_ref = Process.send_after(self(), :"$websockex_close_timeout", 5000)
close_loop(reason, parent, debug, Map.put(state, :timer_ref, timer_ref))
end
defp handle_local_close(reason, parent, debug, state) do
debug = Utils.sys_debug(debug, {:close, :local, reason}, state)
case send_close_frame(reason, state.conn) do
:ok ->
debug = Utils.sys_debug(debug, {:socket_out, :close, reason}, state)
timer_ref = Process.send_after(self(), :"$websockex_close_timeout", 5000)
close_loop(reason, parent, debug, Map.put(state, :timer_ref, timer_ref))
{:error, %WebSockex.ConnError{original: reason}} when reason in [:closed, :einval] ->
handle_close({:remote, :closed}, parent, debug, state)
end
end
defp handle_error_close(reason, parent, debug, state) do
send_close_frame(:error, state.conn)
timer_ref = Process.send_after(self(), :"$websockex_close_timeout", 5000)
close_loop(reason, parent, debug, Map.put(state, :timer_ref, timer_ref))
end
@spec handle_terminate_close(any, pid, any, any) :: no_return
def handle_terminate_close(reason, parent, debug, state) do
debug = Utils.sys_debug(debug, {:close, :error, reason}, state)
debug =
case send_close_frame(:error, state.conn) do
:ok -> Utils.sys_debug(debug, {:socket_out, :close, :error}, state)
_ -> debug
end
# I'm not supposed to do this, but I'm going to go ahead and close the
# socket here. If people complain I'll come up with something else.
new_conn = WebSockex.Conn.close_socket(state.conn)
terminate(reason, parent, debug, %{state | conn: new_conn})
end
# Frame Sending
defp sync_send(frame, from, parent, debug, %{conn: conn} = state) do
res =
with {:ok, binary_frame} <- WebSockex.Frame.encode_frame(frame),
do: WebSockex.Conn.socket_send(conn, binary_frame)
case res do
:ok ->
execute_telemetry([:websockex, :frame, :sent], state, %{frame: frame})
:gen.reply(from, :ok)
debug = Utils.sys_debug(debug, {:socket_out, :sync_send, frame}, state)
websocket_loop(parent, debug, state)
{:error, %WebSockex.ConnError{original: reason}} = error
when reason in [:closed, :einval] ->
:gen.reply(from, error)
handle_close(error, parent, debug, state)
{:error, _} = error ->
:gen.reply(from, error)
websocket_loop(parent, debug, state)
end
end
defp send_close_frame(reason, conn) do
with {:ok, binary_frame} <- build_close_frame(reason),
do: WebSockex.Conn.socket_send(conn, binary_frame)
end
defp build_close_frame({_, :normal}) do
WebSockex.Frame.encode_frame(:close)
end
defp build_close_frame({_, code, msg}) do
WebSockex.Frame.encode_frame({:close, code, msg})
end
defp build_close_frame(:error) do
WebSockex.Frame.encode_frame({:close, 1011, ""})
end
# Connection Handling
defp init_conn_failure(reason, parent, debug, state, attempt \\ 1) do
case handle_disconnect(reason, state, attempt) do
{:ok, new_module_state} ->
init_failure(reason, parent, debug, %{state | module_state: new_module_state})
{:reconnect, new_conn, new_module_state} ->
state = %{state | conn: new_conn, module_state: new_module_state}
debug = Utils.sys_debug(debug, :reconnect, state)
case open_connection(parent, debug, state) do
{:ok, new_state} ->
debug = Utils.sys_debug(debug, :connected, state)
module_init(parent, debug, new_state)
{:error, new_reason, new_state} ->
init_conn_failure(new_reason, parent, debug, new_state, attempt + 1)
end
{:"$EXIT", reason} ->
init_failure(reason, parent, debug, state)
end
end
defp on_disconnect(reason, parent, debug, state, attempt \\ 1) do
case handle_disconnect(reason, state, attempt) do
{:ok, new_module_state} when is_tuple(reason) and elem(reason, 0) == :error ->
terminate(elem(reason, 1), parent, debug, %{state | module_state: new_module_state})
{:ok, new_module_state} ->
terminate(reason, parent, debug, %{state | module_state: new_module_state})
{:reconnect, new_conn, new_module_state} ->
state = %{state | conn: new_conn, module_state: new_module_state}
debug = Utils.sys_debug(debug, :reconnect, state)
case open_connection(parent, debug, state) do
{:ok, new_state} ->
debug = Utils.sys_debug(debug, :reconnected, state)
reconnect(parent, debug, new_state)
{:error, new_reason, new_state} ->
on_disconnect(new_reason, parent, debug, new_state, attempt + 1)
end
{:"$EXIT", reason} ->
terminate(reason, parent, debug, state)
end
end
defp reconnect(parent, debug, state) do
result = try_callback(state.module, :handle_connect, [state.conn, state.module_state])
case result do
{:ok, new_module_state} ->
state = Map.merge(state, %{buffer: <<>>, fragment: nil, module_state: new_module_state})
websocket_loop(parent, debug, state)
{:"$EXIT", reason} ->
terminate(reason, parent, debug, state)
badreply ->
reason = %WebSockex.BadResponseError{
module: state.module,
function: :handle_connect,
args: [state.conn, state.module_state],
response: badreply
}
terminate(reason, parent, debug, state)
end
end
defp open_connection(parent, debug, %{conn: conn} = state) do
my_pid = self()
debug = Utils.sys_debug(debug, :connect, state)
task =
Task.async(fn ->
with {:ok, conn} <- WebSockex.Conn.open_socket(conn),
key <- :crypto.strong_rand_bytes(16) |> Base.encode64(),
{:ok, request} <- WebSockex.Conn.build_request(conn, key),
:ok <- WebSockex.Conn.socket_send(conn, request),
{:ok, headers} <- WebSockex.Conn.handle_response(conn, my_pid),
:ok <- validate_handshake(headers, key) do
:ok = WebSockex.Conn.controlling_process(conn, my_pid)
:ok = WebSockex.Conn.set_active(conn)
{:ok, %{conn | resp_headers: headers}}
end
end)
open_loop(parent, debug, Map.put(state, :task, task))
end
# Other State Functions
defp module_init(parent, debug, state) do
execute_telemetry([:websockex, :connected], state)
result = try_callback(state.module, :handle_connect, [state.conn, state.module_state])
case result do
{:ok, new_module_state} ->
state.reply_fun.({:ok, self()})
state =
Map.put(state, :module_state, new_module_state)
|> Map.delete(:reply_fun)
websocket_loop(parent, debug, state)
{:"$EXIT", reason} ->
state.reply_fun.(reason)
badreply ->
reason =
{:error,
%WebSockex.BadResponseError{
module: state.module,
function: :handle_connect,
args: [state.conn, state.module_state],
response: badreply
}}
state.reply_fun.(reason)
end
end
@spec terminate(any, pid, any, any) :: no_return
defp terminate(reason, parent, debug, state) do
execute_telemetry([:websockex, :terminate], state, %{reason: reason})
do_terminate(reason, parent, debug, state)
end
defp do_terminate(reason, parent, debug, %{conn: %{socket: socket}} = state)
when not is_nil(socket) do
handle_terminate_close(reason, parent, debug, state)
end
defp do_terminate(reason, _parent, _debug, %{module: mod, module_state: mod_state}) do
mod.terminate(reason, mod_state)
case reason do
{_, :normal} ->
exit(:normal)
{_, 1000, _} ->
exit(:normal)
_ ->
exit(reason)
end
end
defp handle_disconnect(reason, state, attempt) do
status_map = %{conn: state.conn, reason: reason, attempt_number: attempt}
execute_telemetry([:websockex, :disconnected], state, status_map)
result = try_callback(state.module, :handle_disconnect, [status_map, state.module_state])
case result do
{:ok, new_state} ->
{:ok, new_state}
{:reconnect, new_state} ->
{:reconnect, state.conn, new_state}
{:reconnect, new_conn, new_state} ->
{:reconnect, new_conn, new_state}
{:"$EXIT", _} = res ->
res
badreply ->
{:"$EXIT",
%WebSockex.BadResponseError{
module: state.module,
function: :handle_disconnect,
args: [status_map, state.module_state],
response: badreply
}}
end
end
# Helpers (aka everything else)
defp try_callback(module, function, args) do
apply(module, function, args)
catch
:error, payload ->
stacktrace = System.stacktrace()
reason = Exception.normalize(:error, payload, stacktrace)
{:"$EXIT", {reason, stacktrace}}
:exit, payload ->
{:"$EXIT", payload}
end
defp init_failure(reason, _parent, _debug, state) do
state.reply_fun.({:error, reason})
end
defp async_init_fun({:ok, _}), do: :noop
defp async_init_fun(exit_reason), do: exit(exit_reason)
defp sync_init_fun(parent, {error, stacktrace}) when is_list(stacktrace) do
:proc_lib.init_ack(parent, {:error, error})
end
defp sync_init_fun(parent, reply) do
:proc_lib.init_ack(parent, reply)
end
defp validate_handshake(headers, key) do
challenge = :crypto.hash(:sha, key <> @handshake_guid) |> Base.encode64()
{_, res} = List.keyfind(headers, "Sec-Websocket-Accept", 0)
if challenge == res do
:ok
else
{:error, %WebSockex.HandshakeError{response: res, challenge: challenge}}
end
end
defp purge_timer(ref, msg) do
case Process.cancel_timer(ref) do
i when is_integer(i) ->
:ok
false ->
receive do
^msg -> :ok
after
100 -> :ok
end
end
end
if WebSockex.Utils.otp_release() >= 21 do
defp execute_telemetry(event, state, extra_metadata \\ %{}) do
metadata = Map.merge(%{conn: state.conn, module: state.module}, extra_metadata)
:telemetry.execute(event, %{time: System.system_time()}, metadata)
end
else
defp execute_telemetry(_, _, _ \\ %{}), do: :ok
end
end
|
lib/websockex.ex
| 0.855338
| 0.777933
|
websockex.ex
|
starcoder
|
defmodule Bandit do
@moduledoc """
Bandit is an HTTP server for Plug apps.
As an HTTP server, Bandit's primary goal is to act as 'glue' between client connections managed
by [Thousand Island](https://github.com/mtrudel/thousand_island) and application code defined
via the [Plug API](https://github.com/elixir-plug/plug). As such there really isn't a whole lot
of user-visible surface area to Bandit, and as a consequence the API documentation presented here
is somewhat sparse. This is by design! Bandit is intended to 'just work' in almost all cases;
the only thought users typically have to put into Bandit comes in the choice of which options (if
any) they would like to change when starting a Bandit server. The sparseness of the Bandit API
should not be taken as an indicator of the comprehensiveness or robustness of the project.
## Using Bandit With Phoenix
Note that as of the 0.5.x branch Bandit supports Phoenix applications which use HTTP(S).
Phoenix applications which use WebSockets for features such as Channels or LiveView are not yet
supported (though this support is coming soon!).
That having been said, using Bandit to host your Phoenix application couldn't be simpler:
1. Add Bandit as a dependency in your Phoenix application's `mix.exs`:
```elixir
{:bandit, ">= 0.5.0"}
```
2. Add the following to your endpoint configuration in `config/config.exs`:
```elixir
config :your_app, YourAppWeb.Endpoint,
adapter: Bandit.PhoenixAdapter
```
3. That's it! You should now see messages at startup indicating that Phoenix is using Bandit to
serve your endpoint.
## Using Bandit With Plug Applications
Using Bandit to host your own Plug is very straightforward. Assuming you have a Plug module
implemented already, you can host it within Bandit by adding something similar to the following
to your application's `Application.start/2` function:
```elixir
def start(_type, _args) do
children = [
{Bandit, plug: MyApp.MyPlug, scheme: :http, options: [port: 4000]}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
```
For details about writing Plug-based applications, consult the excellent [Plug
documentation](https://hexdocs.pm/plug/) for plenty of examples & tips to get started. Note that
Bandit supports the complete Plug API & should work correctly with any Plug-based
application. If you encounter errors using Bandit with your Plug app, please do get in touch by
filing an issue on the Bandit GitHub project (especially if the error does not occur with
another HTTP server such as Cowboy).
## Config Options
Bandit takes a number of options at startup:
* `plug`: The plug to handle connections. Can be specified as `MyPlug` or `{MyPlug, plug_opts}`
* `scheme`: One of `:http` or `:https`. If `:https` is specified, you will need
to specify `certfile` and `keyfile` in the `transport_options` subsection of `options`.
* `read_timeout`: How long to wait for data from the client before timing out and closing the
connection, specified in milliseconds. Defaults to 60_000
* `options`: Options to pass to `ThousandIsland`. For an exhaustive list of options see the
`ThousandIsland` documentation, however some common options are:
* `port`: The port to bind to. Defaults to 4000
* `num_acceptors`: The number of acceptor processes to run. This is mostly a performance
tuning knob and can usually be left at the default value of 10
* `transport_module`: The name of the module which provides basic socket functions.
This overrides any value set for `scheme` and is intended for cases where control
over the socket at a fundamental level is needed.
* `transport_options`: A keyword list of options to be passed into the transport socket's listen function
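As a sketch, several of these options combined in a child spec (`MyApp.MyPlug` is a
placeholder module, not part of Bandit):
```elixir
{Bandit, plug: MyApp.MyPlug, scheme: :http, options: [port: 8080, num_acceptors: 50]}
```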
## Setting up an HTTPS Server
By far the most common stumbling block encountered with configuration involves setting up an
HTTPS server. Bandit is comparatively easy to set up in this regard, with a working example
looking similar to the following:
```elixir
def start(_type, _args) do
bandit_options = [
port: 4000,
transport_options: [
certfile: Path.join(__DIR__, "path/to/cert.pem"),
keyfile: Path.join(__DIR__, "path/to/key.pem")
]
]
children = [
{Bandit, plug: MyApp.MyPlug, scheme: :https, options: bandit_options}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
```
"""
require Logger
@typedoc "A Plug definition"
@type plug :: {module(), keyword()}
@spec child_spec(keyword()) :: Supervisor.child_spec()
def child_spec(arg) do
%{id: Bandit, start: {__MODULE__, :start_link, [arg]}}
end
@doc """
Starts a Bandit server using the provided arguments. See "Config Options" above for specific
options to pass to this function.
"""
def start_link(arg) do
{options, illegal_options} =
arg
|> Keyword.get(:options, [])
|> Keyword.split(~w(port num_acceptors transport_module transport_options)a)
if illegal_options != [] do
raise "Unsupported option(s) in Bandit config: #{inspect(illegal_options)}"
end
scheme = Keyword.get(arg, :scheme, :http)
{plug_mod, _} = plug = plug(arg)
{transport_module, extra_transport_options} =
case scheme do
:http -> {ThousandIsland.Transports.TCP, []}
:https -> {ThousandIsland.Transports.SSL, alpn_preferred_protocols: ["h2", "http/1.1"]}
end
handler_options = %{
plug: plug,
handler_module: Bandit.InitialHandler,
read_timeout: Keyword.get(arg, :read_timeout, 60_000)
}
options
|> Keyword.put_new(:transport_module, transport_module)
|> Keyword.update(
:transport_options,
extra_transport_options,
&Keyword.merge(&1, extra_transport_options)
)
|> Keyword.put(:handler_module, Bandit.DelegatingHandler)
|> Keyword.put(:handler_options, handler_options)
|> ThousandIsland.start_link()
|> case do
{:ok, pid} ->
Logger.info(info(scheme, plug_mod, pid))
{:ok, pid}
{:error, _} = error ->
error
end
end
defp plug(arg) do
arg
|> Keyword.fetch!(:plug)
|> case do
{plug, plug_options} -> {plug, plug.init(plug_options)}
plug -> {plug, plug.init([])}
end
end
defp info(scheme, plug, pid) do
server = "Bandit #{Application.spec(:bandit)[:vsn]}"
"Running #{inspect(plug)} with #{server} at #{bound_address(scheme, pid)}"
end
defp bound_address(scheme, pid) do
{:ok, %{address: address, port: port}} = ThousandIsland.listener_info(pid)
case address do
{:local, unix_path} ->
"#{unix_path} (#{scheme}+unix)"
address ->
"#{:inet.ntoa(address)}:#{port} (#{scheme})"
end
end
end
|
lib/bandit.ex
| 0.922062
| 0.902653
|
bandit.ex
|
starcoder
|
defmodule Day12 do
@facing ~w[north east south west]a
def part1(input) do
{{x, y}, _facing} =
input
|> String.splitter("\n", trim: true)
|> Stream.map(&parse_line/1)
|> Enum.reduce({{0, 0}, :east}, &follow_directions/2)
abs(x) + abs(y)
end
def parse_line(line) when is_binary(line) do
{op, num} = String.split_at(line, 1)
op =
case op do
"F" -> :forward
"R" -> :turn_right
"L" -> :turn_left
"N" -> :north
"E" -> :east
"S" -> :south
"W" -> :west
end
{op, String.to_integer(num)}
end
def follow_directions(instruction, acc) do
{{x, y}, facing} = acc
case instruction do
{:north, amount} ->
{{x, y + amount}, facing}
{:south, amount} ->
{{x, y - amount}, facing}
{:east, amount} ->
{{x + amount, y}, facing}
{:west, amount} ->
{{x - amount, y}, facing}
{:turn_right, amount} ->
turn_steps = div(amount, 90)
new_facing =
@facing
|> Stream.cycle()
|> Stream.drop_while(&(&1 != facing))
|> Enum.fetch!(turn_steps)
{{x, y}, new_facing}
{:turn_left, amount} ->
turn_steps = 4 - div(amount, 90)
new_facing =
@facing
|> Stream.cycle()
|> Stream.drop_while(&(&1 != facing))
|> Enum.fetch!(turn_steps)
{{x, y}, new_facing}
{:forward, amount} ->
follow_directions({facing, amount}, acc)
end
end
def part2(input) do
instructions =
input
|> String.splitter("\n", trim: true)
|> Enum.map(&parse_line/1)
ship = {0, 0}
waypoint = {10, 1}
{{x, y}, _} = Enum.reduce(instructions, {ship, waypoint}, &follow_waypoint_instructions/2)
abs(x) + abs(y)
end
def follow_waypoint_instructions(instruction, {ship, waypoint}) do
{waypoint_x, waypoint_y} = waypoint
case instruction do
{:north, amount} ->
{ship, {waypoint_x, waypoint_y + amount}}
{:south, amount} ->
{ship, {waypoint_x, waypoint_y - amount}}
{:east, amount} ->
{ship, {waypoint_x + amount, waypoint_y}}
{:west, amount} ->
{ship, {waypoint_x - amount, waypoint_y}}
{:turn_right, amount} ->
steps =
amount
|> div(90)
|> rem(4)
case steps do
0 ->
{ship, waypoint}
1 ->
{ship, {waypoint_y, -waypoint_x}}
2 ->
{ship, {-waypoint_x, -waypoint_y}}
3 ->
{ship, {-waypoint_y, waypoint_x}}
end
{:turn_left, amount} ->
follow_waypoint_instructions({:turn_right, 360 - amount}, {ship, waypoint})
{:forward, amount} ->
{ship_x, ship_y} = ship
{{ship_x + waypoint_x * amount, ship_y + waypoint_y * amount}, waypoint}
end
end
end
|
2020/day12/ex/day12.ex
| 0.69035
| 0.656662
|
day12.ex
|
starcoder
|
defmodule Faker.Lorem.Shakespeare.En do
import Faker, only: [sampler: 2]
@moduledoc """
Random quotes from William Shakespeare's plays, sonnets and poems in English.
"""
@doc """
Return random quote from "The Tragedy of Hamlet, Prince of Denmark" tragedy.
## Examples
iex> Faker.Lorem.Shakespeare.En.hamlet()
"Brevity is the soul of wit."
iex> Faker.Lorem.Shakespeare.En.hamlet()
"And it must follow, as the night the day, thou canst not then be false to any man."
iex> Faker.Lorem.Shakespeare.En.hamlet()
"Do you think I am easier to be played on than a pipe?"
iex> Faker.Lorem.Shakespeare.En.hamlet()
"Rich gifts wax poor when givers prove unkind."
"""
@spec hamlet() :: String.t()
sampler(:hamlet, [
"To be, or not to be: that is the question.",
"Neither a borrower nor a lender be; For loan oft loses both itself and friend, and borrowing dulls the edge of husbandry.",
"This above all: to thine own self be true.",
"Though this be madness, yet there is method in 't.",
"That it should come to this!",
"There is nothing either good or bad, but thinking makes it so.",
"What a piece of work is man! how noble in reason! how infinite in faculty! in form and moving how express and admirable! in action how like an angel! in apprehension how like a god! the beauty of the world, the paragon of animals! .",
"The lady doth protest too much, methinks.",
"In my mind's eye.",
"A little more than kin, and less than kind.",
"The play 's the thing wherein I'll catch the conscience of the king.",
"And it must follow, as the night the day, thou canst not then be false to any man.",
"Brevity is the soul of wit.",
"Doubt that the sun doth move, doubt truth to be a liar, but never doubt I love.",
"Rich gifts wax poor when givers prove unkind.",
"Do you think I am easier to be played on than a pipe?",
"I will speak daggers to her, but use none.",
"When sorrows come, they come not single spies, but in battalions."
])
@doc """
Return random quote from "As You Like It" comedy.
## Examples
iex> Faker.Lorem.Shakespeare.En.as_you_like_it()
"For ever and a day."
iex> Faker.Lorem.Shakespeare.En.as_you_like_it()
"Can one desire too much of a good thing?."
iex> Faker.Lorem.Shakespeare.En.as_you_like_it()
"How bitter a thing it is to look into happiness through another man's eyes!"
iex> Faker.Lorem.Shakespeare.En.as_you_like_it()
"All the world's a stage, and all the men and women merely players. They have their exits and their entrances; And one man in his time plays many parts."
"""
@spec as_you_like_it() :: String.t()
sampler(:as_you_like_it, [
"All the world's a stage, and all the men and women merely players. They have their exits and their entrances; And one man in his time plays many parts.",
"Can one desire too much of a good thing?.",
"I like this place and willingly could waste my time in it.",
"How bitter a thing it is to look into happiness through another man's eyes!",
"Blow, blow, thou winter wind! Thou art not so unkind as man's ingratitude.",
"True is it that we have seen better days.",
"For ever and a day.",
"The fool doth think he is wise, but the wise man knows himself to be a fool."
])
@doc """
Return random quote from "Richard III" play.
## Examples
iex> Faker.Lorem.Shakespeare.En.king_richard_iii()
"The king's name is a tower of strength."
iex> Faker.Lorem.Shakespeare.En.king_richard_iii()
"A horse! a horse! my kingdom for a horse!"
iex> Faker.Lorem.Shakespeare.En.king_richard_iii()
"So wise so young, they say, do never live long."
iex> Faker.Lorem.Shakespeare.En.king_richard_iii()
"Now is the winter of our discontent."
"""
@spec king_richard_iii() :: String.t()
sampler(:king_richard_iii, [
"Now is the winter of our discontent.",
"A horse! a horse! my kingdom for a horse!",
"Conscience is but a word that cowards use, devised at first to keep the strong in awe.",
"So wise so young, they say, do never live long.",
"Off with his head!",
"An honest tale speeds best, being plainly told.",
"The king's name is a tower of strength.",
"The world is grown so bad, that wrens make prey where eagles dare not perch."
])
@doc """
Return random quote from "Romeo and Juliet" tragedy.
## Examples
iex> Faker.Lorem.Shakespeare.En.romeo_and_juliet()
"What's in a name? That which we call a rose by any other name would smell as sweet."
iex> Faker.Lorem.Shakespeare.En.romeo_and_juliet()
"For you and I are past our dancing days."
iex> Faker.Lorem.Shakespeare.En.romeo_and_juliet()
"For you and I are past our dancing days."
iex> Faker.Lorem.Shakespeare.En.romeo_and_juliet()
"For you and I are past our dancing days."
"""
@spec romeo_and_juliet() :: String.t()
sampler(:romeo_and_juliet, [
"O Romeo, Romeo! wherefore art thou Romeo?.",
"It is the east, and Juliet is the sun.",
"Good Night, Good night! Parting is such sweet sorrow, that I shall say good night till it be morrow.",
"What's in a name? That which we call a rose by any other name would smell as sweet.",
"Wisely and slow; they stumble that run fast.",
"Tempt not a desperate man.",
"For you and I are past our dancing days.",
"O! she doth teach the torches to burn bright.",
"It seems she hangs upon the cheek of night like a rich jewel in an Ethiope's ear.",
"See, how she leans her cheek upon her hand! O that I were a glove upon that hand, that I might touch that cheek!.",
"Not stepping o'er the bounds of modesty."
])
end
|
lib/faker/lorem/shakespeare/en.ex
| 0.577138
| 0.470919
|
en.ex
|
starcoder
|
defmodule Posexional.File do
@moduledoc """
a Posexional.File is the main struct to manage a positional file
"""
alias Posexional.{Field, Row}
defstruct rows: [],
separator: "\n"
def new(rows, separator \\ nil)
def new(rows, nil) do
%Posexional.File{rows: rows, separator: "\n"}
end
def new(rows, separator) do
%Posexional.File{rows: rows, separator: separator}
end
@doc """
creates a file from values
## Examples
iex> Posexional.File.write(
...> Posexional.File.new([ Posexional.Row.new(:row_test, [ Posexional.Field.Value.new(:test1, 5) ]) ]),
...> [row_test: [test1: "test"], row_test: [test1: "t"]]
...> )
"test \\nt "
iex> Posexional.File.write(
...> Posexional.File.new([ Posexional.Row.new(:row_test, [ Posexional.Field.Value.new(:test1, 5) ]) ]),
...> [row_test: [test1: "test"], ne: [test1: "t"]]
...> )
** (RuntimeError) row ne not found
"""
@spec write(%Posexional.File{}, Keyword.t()) :: binary
def write(file = %Posexional.File{separator: separator}, values) do
file
|> manage_counters
|> get_lines(values)
|> Enum.join(separator)
end
@spec write_path!(%Posexional.File{}, Keyword.t(), binary) :: {:ok, binary} | {:error, any}
def write_path!(file = %Posexional.File{separator: separator}, values, path) do
with {:ok, _} <-
File.open(path, [:write], fn handle ->
file
|> manage_counters
|> get_lines(values)
|> Stream.intersperse(separator)
|> Stream.each(&IO.binwrite(handle, &1))
|> Stream.run()
end) do
{:ok, path}
end
end
@spec read(%Posexional.File{}, binary) :: [tuple() | String.t()]
def read(%Posexional.File{separator: separator, rows: rows}, content) do
content
|> String.split(separator)
|> Enum.filter(fn
"" -> false
_ -> true
end)
|> Enum.flat_map(fn content ->
row = guess_row(content, rows)
if is_nil(row) do
[content]
else
Row.read(row, content)
end
end)
end
@spec get_lines(%Posexional.File{}, Keyword.t()) :: Enumerable.t()
defp get_lines(file, values) do
values
|> Stream.map(fn {row_name, values} -> {find_row(file, row_name), row_name, values} end)
|> Stream.map(fn {row, row_name, values} ->
if is_nil(row) do
raise "row #{row_name} not found"
end
{:ok, out} = Row.write(row, values)
out
end)
end
@doc """
adds a generator for every progressive_number_field in the file.
The fields are grouped by name, so that you can specify many counters for every row
"""
@spec manage_counters(%Posexional.File{}) :: %Posexional.File{}
def manage_counters(file = %Posexional.File{rows: rows}) do
counters = get_counters(file)
%{file | rows: Stream.map(rows, &Row.manage_counters(&1, counters))}
end
@spec get_counters(%Posexional.File{}) :: [{atom, pid}]
def get_counters(%Posexional.File{rows: rows}) do
rows
|> Stream.flat_map(& &1.fields)
|> Stream.flat_map(fn
%Field.ProgressiveNumber{name: name} -> [name]
_ -> []
end)
|> Stream.uniq()
|> Enum.map(fn name ->
{:ok, pid} = Agent.start_link(fn -> 1 end)
{name, pid}
end)
end
@spec guess_row(binary, [%Row{}]) :: %Row{} | nil
defp guess_row(content, rows) do
Enum.find(rows, nil, fn
%Row{row_guesser: :always} -> true
%Row{row_guesser: :never} -> false
%Row{row_guesser: row_guesser} when is_function(row_guesser) -> row_guesser.(content)
end)
end
@spec find_row(%Posexional.File{}, atom) :: %Row{}
def find_row(%Posexional.File{rows: rows}, name) do
Enum.find(rows, nil, fn %Row{name: row_name} -> row_name == name end)
end
end
|
lib/posexional/file.ex
| 0.642208
| 0.485905
|
file.ex
|
starcoder
|
defmodule Day21 do
# Faster version for Day 21 (roughly 100-1000x faster).
# Inspired by sasa1977's version: https://gist.github.com/sasa1977/1246dc75886faf7da6b7956d158c2420
# This version uses boolean lists instead of binaries and makes heavy use of
# streams to transform the pixel grid.
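# As a concrete sketch of one iteration (comment added for clarity): a 4x4 grid
# is cut into four 2x2 squares, each square is looked up in the rules map to get
# a 3x3 replacement, and the four 3x3 squares are stitched back into a 6x6 grid.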
def solveA(filename), do: solve filename, 5
def solveB(filename), do: solve filename, 18
def solve(filename, niter) do
rules = parse filename
{3, ".#./..#/###" |> to_bin_square}
|> Stream.iterate(&iter(&1, rules))
|> Enum.at(niter)
|> count
end
def iter({size, pixels}, rules) do
square_size = if rem(size, 2) == 0, do: 2, else: 3
next_square_size = if square_size == 2, do: 3, else: 4
nsquares_by_row = div size, square_size
pixels =
pixels
|> squares(size, square_size, nsquares_by_row)
|> Stream.map(&Map.fetch!(rules, &1))
|> merge(next_square_size, nsquares_by_row)
{next_square_size * nsquares_by_row, pixels}
end
def count({_size, pixels}) do
Enum.count pixels, fn x -> x end
end
def squares(pixels, size, square_size, nsquares_by_row) do
pixels
|> Stream.chunk_every(square_size)
|> Stream.chunk_every(size)
|> Stream.map(&Stream.chunk_every(&1, nsquares_by_row))
|> Stream.flat_map(&Stream.zip/1)
|> Stream.map(fn tuple -> tuple |> Tuple.to_list |> Enum.concat end)
end
def merge(squares, square_size, nsquares_by_row) do
squares
|> Stream.chunk_every(nsquares_by_row)
|> Stream.flat_map(&merge_row(&1, square_size))
end
def merge_row(row, square_size) do
row
|> Stream.map(&Stream.chunk_every(&1, square_size))
|> Stream.zip
|> Stream.flat_map(fn tuple -> tuple |> Tuple.to_list |> Stream.concat end)
end
def parse(filename) do
filename
|> File.stream!
|> Stream.map(&String.trim/1)
|> Stream.map(&String.split &1, " => ")
|> Stream.map(fn line -> Enum.map line, &to_bin_square/1 end)
|> Enum.map(&List.to_tuple/1)
|> Map.new
|> enhance
end
def enhance(map) do
map
|> Map.keys
|> Enum.reduce(map, fn square, acc ->
enhanced_square = Map.fetch! map, square
square
|> variations
|> Stream.zip(Stream.cycle [enhanced_square])
|> Map.new
|> Map.merge(acc)
end)
end
def variations(square) do
square
|> rotations
|> Enum.flat_map(fn sq -> [sq, fliph(sq), flipv(sq)] end)
end
def rotations(square) do
rot1 = rotate square
rot2 = rotate rot1
rot3 = rotate rot2
[square, rot1, rot2, rot3]
end
def rotate([a, b,
c, d]) do
[c, a,
d, b]
end
def rotate([a, b, c,
d, e, f,
g, h, i]) do
[g, d, a,
h, e, b,
i, f, c]
end
def flipv([a, b,
c, d]) do
[b, a,
d, c]
end
def flipv([a, b, c,
d, e, f,
g, h, i]) do
[c, b, a,
f, e, d,
i, h, g]
end
def fliph([a, b,
c, d]) do
[c, d,
a, b]
end
def fliph([a, b, c,
d, e, f,
g, h, i]) do
[g, h, i,
d, e, f,
a, b, c]
end
def to_bin_square(square) do
square
|> String.codepoints
|> Stream.reject(fn c -> c == "/" end)
|> Enum.map(fn c ->
case c do
"." -> false
"#" -> true
end
end)
end
end
|
2017/elixir/day21/lib/day21.ex
| 0.673192
| 0.541348
|
day21.ex
|
starcoder
|
defmodule Rambla.Smtp do
@moduledoc """
Default connection implementation for 📧 SMTP.
It expects a message to be a map containing the following fields:
`:to`, `:subject`, `:body` _and_ the optional `:from` that otherwise would be
taken from the global settings (`releases.mix`) from `[:rambla, :pools, Rambla.Smtp]`.
For instance, this call would send an email to <EMAIL> with the
respective subject and body.
```elixir
Rambla.publish(
Rambla.Smtp,
%{to: "<EMAIL>", subject: "Hi there", body: "I ❤ SMTP"}
)
```
"""
@behaviour Rambla.Connection
@conn_params ~w|relay username password auth ssl tls tls_options hostname retries|a
@impl Rambla.Connection
def connect(params) when is_list(params) do
if is_nil(params[:hostname]),
do:
raise(Rambla.Exceptions.Connection,
value: params,
expected: "📧 configuration with :hostname key"
)
[defaults, opts] =
params
|> Keyword.split(@conn_params)
|> Tuple.to_list()
|> Enum.map(&Map.new/1)
%Rambla.Connection{
conn: %Rambla.Connection.Config{conn: params[:hostname], opts: opts, defaults: defaults},
conn_type: __MODULE__,
conn_pid: self(),
conn_params: params,
errors: []
}
end
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{} = conn, message) when is_binary(message),
do: publish(conn, Jason.decode!(message))
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{} = conn, message) when is_list(message),
do: publish(conn, Map.new(message))
@impl Rambla.Connection
def publish(%Rambla.Connection.Config{opts: opts, defaults: defaults}, message)
when is_map(opts) and is_map(message) do
{to, message} = Map.pop(message, :to)
{from, message} = Map.pop(message, :from, Map.get(opts, :from, []))
# defaults come from the connection opts as plain values (Map.get), mirroring :from above
{subject, message} = Map.pop(message, :subject, Map.get(opts, :subject, ""))
{body, _message} = Map.pop(message, :body, Map.get(opts, :body, ""))
from_with_name = for {name, email} <- from, do: "#{name} <#{email}>"
smtp_message =
["Subject: ", "From: ", "To: ", "\r\n"]
|> Enum.zip([subject, hd(from_with_name), to, body])
|> Enum.map(&(&1 |> Tuple.to_list() |> Enum.join()))
|> Enum.join("\r\n")
apply(:gen_smtp_client, :send, [
{to, Map.values(from), smtp_message},
defaults
|> Map.merge(Map.take(opts, @conn_params))
|> Map.to_list()
])
end
end
|
lib/rambla/connections/smtp.ex
| 0.842475
| 0.670649
|
smtp.ex
|
starcoder
|
defmodule Toolshed do
@moduledoc """
Making the IEx console friendlier one command at a time
To use the helpers, run:
iex> use Toolshed
Add this to your `.iex.exs` to load automatically.
The following is a list of helpers:
* `cat/1` - print out a file
* `cmd/1` - run a system command and print the output
* `date/0` - print out the current date and time
* `dmesg/0` - print kernel messages (Nerves-only)
* `exit/0` - exit out of an IEx session
* `fw_validate/0` - marks the current image as valid (check Nerves system if supported)
* `grep/2` - print out lines that match a regular expression
* `hex/1` - print a number as hex
* `hostname/0` - print our hostname
* `ifconfig/0` - print info on network interfaces
* `load_term!/2` - load a term that was saved by `save_term/2`
* `lsof/0` - print out open file handles by OS process
* `lsmod/0` - print out what kernel modules have been loaded (Nerves-only)
* `lsusb/0` - print info on USB devices
* `multicast_addresses/0` - print out all multicast addresses
* `nslookup/1` - query DNS to find an IP address
* `pastebin/1` - post text to a pastebin server (requires networking)
* `ping/2` - ping a remote host (but use TCP instead of ICMP)
* `qr_encode/1` - create a QR code (requires networking)
* `reboot/0` - reboots gracefully (Nerves-only)
* `reboot!/0` - reboots immediately (Nerves-only)
* `save_value/2` - save a value to a file as Elixir terms (uses inspect)
* `save_term!/2` - save a term as a binary
* `top/2` - list out the top processes
* `tping/2` - check if a host can be reached (like ping, but uses TCP)
* `tree/1` - pretty print a directory tree
* `uptime/0` - print out the current Erlang VM uptime
* `uname/0` - print information about the running system (Nerves-only)
* `weather/0` - get the local weather (requires networking)
"""
defmacro __using__(_) do
nerves =
if Code.ensure_loaded?(Toolshed.Nerves) do
quote do
import Toolshed.Nerves
end
else
quote do
end
end
quote do
import Toolshed
import Toolshed.Top
import Toolshed.Lsof
unquote(nerves)
import Toolshed.Unix
import Toolshed.Net
import Toolshed.Misc
import Toolshed.HW
import Toolshed.HTTP
import Toolshed.Multicast
# If module docs have been stripped, then don't tell the user that they can
# see them.
help_text =
case Code.fetch_docs(Toolshed) do
{:error, _anything} -> ""
_ -> " Run h(Toolshed) for more info."
end
IO.puts([
IO.ANSI.color(:rand.uniform(231) + 1),
"Toolshed",
IO.ANSI.reset(),
" imported.",
help_text
])
end
end
@doc """
Run a command and return the exit code. This function is intended to be run
interactively.
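For example (assuming a Unix-like shell is available):
    cmd("echo hello")
    # prints "hello" and returns 0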
"""
@spec cmd(String.t() | charlist()) :: integer()
def cmd(str) when is_binary(str) do
{_collectable, exit_code} =
System.cmd("sh", ["-c", str], stderr_to_stdout: true, into: IO.stream(:stdio, :line))
exit_code
end
def cmd(str) when is_list(str) do
str |> to_string |> cmd
end
@doc """
Inspect a value with all integers printed out in hex. This is useful for
one-off hex conversions. If you're doing a lot of work that requires
hexadecimal output, you should consider running:
`IEx.configure(inspect: [base: :hex])`
The drawback of doing the above is that strings print out as hex binaries.
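For example:
    hex(255)
    #=> "0xFF"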
"""
@spec hex(integer()) :: String.t()
def hex(value) do
inspect(value, base: :hex)
end
end
|
lib/toolshed.ex
| 0.672009
| 0.537163
|
toolshed.ex
|
starcoder
|
require Utils
defmodule D10 do
@moduledoc """
--- Day 10: Monitoring Station ---
You fly into the asteroid belt and reach the Ceres monitoring station. The Elves here have an emergency: they're having trouble tracking all of the asteroids and can't be sure they're safe.
The Elves would like to build a new monitoring station in a nearby area of space; they hand you a map of all of the asteroids in that region (your puzzle input).
The map indicates whether each position is empty (.) or contains an asteroid (#). The asteroids are much smaller than they appear on the map, and every asteroid is exactly in the center of its marked position. The asteroids can be described with X,Y coordinates where X is the distance from the left edge and Y is the distance from the top edge (so the top-left corner is 0,0 and the position immediately to its right is 1,0).
Your job is to figure out which asteroid would be the best place to build a new monitoring station. A monitoring station can detect any asteroid to which it has direct line of sight - that is, there cannot be another asteroid exactly between them. This line of sight can be at any angle, not just lines aligned to the grid or diagonally. The best location is the asteroid that can detect the largest number of other asteroids.
Find the best location for a new monitoring station. How many other asteroids can be detected from that location?
--- Part Two ---
Once you give them the coordinates, the Elves quickly deploy an Instant Monitoring Station to the location and discover the worst: there are simply too many asteroids.
The only solution is complete vaporization by giant laser.
Fortunately, in addition to an asteroid scanner, the new monitoring station also comes equipped with a giant rotating laser perfect for vaporizing asteroids. The laser starts by pointing up and always rotates clockwise, vaporizing any asteroid it hits.
If multiple asteroids are exactly in line with the station, the laser only has enough power to vaporize one of them before continuing its rotation. In other words, the same asteroids that can be detected can be vaporized, but if vaporizing one asteroid makes another one detectable, the newly-detected asteroid won't be vaporized until the laser has returned to the same position by rotating a full 360 degrees.
The Elves are placing bets on which will be the 200th asteroid to be vaporized. Win the bet by determining which asteroid that will be; what do you get if you multiply its X coordinate by 100 and then add its Y coordinate? (For example, 8,2 becomes 802.)
"""
@behaviour Day
def distance({x1, y1}, {x2, y2}), do: abs(x1 - x2) + abs(y1 - y2)
def to_angle({x, y}) do
rads = :math.atan2(y, x)
angle = rads * 180 / :math.pi() + 90
if x < 0 and y < 0, do: angle + 360, else: angle
end
def mx({x1, y1}, {x2, y2}) do
{dx, dy} = {x2 - x1, y2 - y1}
gcd = Integer.gcd(dx, dy)
{div(dx, gcd), div(dy, gcd)}
end
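# Worked examples (comments added for clarity, not in the original source):
#   mx({0, 0}, {4, 2}) reduces the offset {4, 2} by its gcd of 2 to the direction
#   {2, 1}, so every asteroid along that ray maps to the same key in count_visible/2.
#   to_angle({0, -1}) is 0.0, i.e. straight up, matching the laser's starting direction.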
def count_visible(asteroids, point) do
asteroids
|> Enum.reject(&(&1 == point))
|> Enum.map(fn point_2 -> mx(point, point_2) end)
|> MapSet.new()
|> MapSet.size()
end
def solve(input) do
asteroids =
input
|> Enum.with_index()
|> Enum.reduce([], fn {line, y}, acc ->
line
|> to_charlist
|> Enum.with_index()
|> Enum.reduce(acc, fn {c, x}, acc -> if c == ?#, do: [{x, y} | acc], else: acc end)
end)
best = Enum.max_by(asteroids, fn point -> count_visible(asteroids, point) end)
part_1 = count_visible(asteroids, best)
{part_2_x, part_2_y} =
asteroids
|> Enum.reject(&(&1 == best))
|> Enum.sort_by(fn point -> distance(best, point) end)
|> Enum.group_by(fn point -> to_angle(mx(best, point)) end)
|> Enum.flat_map(fn {angle, list} ->
list
|> Enum.with_index()
|> Enum.map(fn {point, index} -> {index * 360 + angle, point} end)
end)
|> Enum.sort()
|> Enum.at(199)
|> elem(1)
part_2 = 100 * part_2_x + part_2_y
{
part_1,
part_2
}
end
end
|
lib/days/10.ex
| 0.785103
| 0.879095
|
10.ex
|
starcoder
|
defmodule Grizzly.ZWave.CommandClasses.Powerlevel do
@moduledoc """
"Powerlevel" Command Class
The Powerlevel Command Class defines RF transmit power controlling Commands useful when
installing or testing a network. These Commands make it possible for supporting controllers to set/get
the RF transmit power level of a node and test specific links between nodes with a specific RF transmit
power level.
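For example, the level/byte mappings defined below round-trip like this (a small
illustration, not taken from the specification text):
    power_level_to_byte(:minus3dBm)
    #=> 0x03
    power_level_from_byte(0x03)
    #=> {:ok, :minus3dBm}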
"""
@behaviour Grizzly.ZWave.CommandClass
alias Grizzly.ZWave.DecodeError
@type power_level ::
:normal_power
| :minus1dBm
| :minus2dBm
| :minus3dBm
| :minus4dBm
| :minus5dBm
| :minus6dBm
| :minus7dBm
| :minus8dBm
| :minus9dBm
@type status_of_operation :: :test_failed | :test_success | :test_in_progress
@impl true
def byte(), do: 0x73
@impl true
def name(), do: :powerlevel
def power_level_to_byte(:normal_power), do: 0x00
def power_level_to_byte(:minus1dBm), do: 0x01
def power_level_to_byte(:minus2dBm), do: 0x02
def power_level_to_byte(:minus3dBm), do: 0x03
def power_level_to_byte(:minus4dBm), do: 0x04
def power_level_to_byte(:minus5dBm), do: 0x05
def power_level_to_byte(:minus6dBm), do: 0x06
def power_level_to_byte(:minus7dBm), do: 0x07
def power_level_to_byte(:minus8dBm), do: 0x08
def power_level_to_byte(:minus9dBm), do: 0x09
def power_level_from_byte(0x00), do: {:ok, :normal_power}
def power_level_from_byte(0x01), do: {:ok, :minus1dBm}
def power_level_from_byte(0x02), do: {:ok, :minus2dBm}
def power_level_from_byte(0x03), do: {:ok, :minus3dBm}
def power_level_from_byte(0x04), do: {:ok, :minus4dBm}
def power_level_from_byte(0x05), do: {:ok, :minus5dBm}
def power_level_from_byte(0x06), do: {:ok, :minus6dBm}
def power_level_from_byte(0x07), do: {:ok, :minus7dBm}
def power_level_from_byte(0x08), do: {:ok, :minus8dBm}
def power_level_from_byte(0x09), do: {:ok, :minus9dBm}
def power_level_from_byte(byte), do: {:error, %DecodeError{value: byte, param: :power_level}}
def status_of_operation_to_byte(:test_failed), do: 0x00
def status_of_operation_to_byte(:test_success), do: 0x01
def status_of_operation_to_byte(:test_in_progress), do: 0x02
def status_of_operation_from_byte(0x00), do: {:ok, :test_failed}
def status_of_operation_from_byte(0x01), do: {:ok, :test_success}
def status_of_operation_from_byte(0x02), do: {:ok, :test_in_progress}
def status_of_operation_from_byte(byte),
do: {:error, %DecodeError{value: byte, param: :status_of_operation}}
end
|
lib/grizzly/zwave/command_classes/powerlevel.ex
| 0.84691
| 0.592283
|
powerlevel.ex
|
starcoder
|
defmodule Toml.Lexer do
@moduledoc false
import __MODULE__.Guards
defstruct [:pid]
@type t :: %__MODULE__{pid: pid}
# The type of the token
@type type ::
:whitespace
| :newline
| :comment
| :digits
| :hex
| :octal
| :binary
| :alpha
| __MODULE__.String.type()
| boolean
| non_neg_integer
| :eof
# The number of bytes in the input to skip to reach the beginning of the token
@type skip :: non_neg_integer
# The data representation of a token (either size, a character, or string)
@type data :: non_neg_integer | binary
# The line number of the token
@type lines :: non_neg_integer
# The full shape of a token
@type token :: {type, skip, data, lines}
# The shape of errors the lexer produces
@type lexer_err :: {:error, term, skip, lines}
# The shape of the lexer stack
@type stack :: [token] | lexer_err
# The shape of replies which return tokens
@type token_reply ::
{:ok, token}
| lexer_err
@doc """
Creates a new Lexer with the given binary content.
The lexer is a process, which manages the state of the lexer,
and provides the following benefits:
- Only lexes content as the decoder walks the document, minimizing
the work performed, and resources (i.e. memory) used.
- Allows pushing arbitrary tokens back on the stack, allowing the
decoder to "rewind" the lexer and try an alternative path.
- Lexing the next token happens concurrently with the decoder handling the last token
Currently, the lexer will build up strings for most tokens and send them back to
the decoder; since these run in separate processes, all string data
contained in the tokens is copied. For some tokens, like comments, the lexer will send
only the token type (e.g. `:comment`), and indexes into the original input, so that the
content can be extracted only when needed, and in the most efficient manner possible. In
the future, the lexer will do this with all tokens, allowing us to only make copies or store
references into the original input when absolutely needed. We do not do this currently, as
strings in TOML have escapes, which need to be unescaped during parsing. This could be deferred
and done in the decoder, but that is not done right now.
Returns `{:ok, %#{__MODULE__}{}}`.
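A minimal usage sketch (the TOML snippet is an arbitrary example):
    {:ok, lexer} = Toml.Lexer.new("key = 1")
    {:ok, {_type, _skip, _data, _lines}} = Toml.Lexer.pop(lexer)
    :ok = Toml.Lexer.stop(lexer)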
"""
@spec new(binary) :: {:ok, t}
def new(content) when is_binary(content) do
{:ok, pid} = :proc_lib.start_link(__MODULE__, :init, [self(), content])
{:ok, %__MODULE__{pid: pid}}
end
@doc """
Pops the next token from the lexer. This advances the lexer to the next token.
"""
@spec pop(t) :: token_reply
def pop(%__MODULE__{pid: pid}) when is_pid(pid),
do: server_call(pid, :pop)
@doc """
Advances the lexer to the next token, without returning the current token on the stack,
effectively skipping the current token.
"""
@spec advance(t) :: :ok
def advance(%__MODULE__{pid: pid}) when is_pid(pid),
do: server_call(pid, :advance)
@doc """
Peeks at the next token the lexer will return from `pop/1`.
Always returns the same result until the lexer advances.
"""
@spec peek(t) :: token_reply
def peek(%__MODULE__{pid: pid}) when is_pid(pid),
do: server_call(pid, :peek)
@doc """
Pushes a token back on the lexer's stack.
You may push as many tokens back on the stack as desired.
"""
@spec push(t, token) :: :ok
def push(%__MODULE__{pid: pid}, {_type, _skip, _data, _lines} = token) when is_pid(pid),
do: server_call(pid, {:push, token})
@doc """
Retrieves the position of the lexer in the current input
"""
@spec pos(t) :: {:ok, skip, lines}
def pos(%__MODULE__{pid: pid}) when is_pid(pid),
do: server_call(pid, :pos)
@doc """
Terminates the lexer process.
"""
@spec stop(t) :: :ok
def stop(%__MODULE__{pid: pid}) when is_pid(pid) do
if Process.alive?(pid) do
server_call(pid, :stop)
else
:ok
end
end
@doc """
Converts the lexer into a `Stream`. Not currently used.
"""
@spec stream(t) :: Enumerable.t()
def stream(%__MODULE__{} = lexer) do
Stream.resource(
fn -> {lexer, false, false} end,
fn
{_lexer, true, _error?} = acc ->
{:halt, acc}
{_lexer, _eof?, true} = acc ->
{:halt, acc}
{lexer, false, false} ->
case pop(lexer) do
{:error, _, _, _} = err ->
{[err], {lexer, false, true}}
{:ok, {:eof, _, _, _}} = ok ->
{[ok], {lexer, true, false}}
{:ok, _} = ok ->
{[ok], {lexer, false, false}}
end
end,
fn {lexer, _, _} -> stop(lexer) end
)
end
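  # Hedged sketch (added; not from the original source): materializing every
  # token eagerly via the stream API above. The input is hypothetical; the
  # stream stops the lexer process when it halts.
  #
  #     {:ok, lexer} = Toml.Lexer.new("a = 1")
  #     tokens = lexer |> Toml.Lexer.stream() |> Enum.to_list()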
## Private
def init(parent, {:stream, stream}) when is_pid(parent) do
init(parent, Enum.into(stream, <<>>))
end
def init(parent, data) when is_pid(parent) and is_binary(data) do
Process.flag(:trap_exit, true)
:proc_lib.init_ack(parent, {:ok, self()})
lex(parent, :sys.debug_options([]), data, 0, 1, [])
end
# If an error is on the stack keep it there unless we push a valid token back on
@spec lex(pid, term, binary, skip, lines, stack) :: no_return
defp lex(parent, debug, data, skip, lines, {:error, _, eskip, elines} = err) do
receive do
{:EXIT, ^parent, reason} ->
exit(reason)
{from, :stop} ->
send(from, {self(), :ok})
exit(:normal)
{from, {:push, {_type, _tskip, _tsize, _tline} = token}} ->
send(from, {self(), :ok})
lex(parent, debug, data, skip, lines, [token])
{from, op} when op in [:pop, :peek, :advance] ->
send(from, {self(), err})
lex(parent, debug, data, skip, lines, err)
{from, :pos} ->
send(from, {self(), {:ok, eskip, elines}})
lex(parent, debug, data, skip, lines, err)
end
end
defp lex(parent, debug, data, skip, lines, []) do
case do_lex(data, skip, lines) do
{:error, _, _, _} = err ->
lex(parent, debug, data, skip, lines, err)
{:ok, data, {_type, skip, _size, lines} = token} ->
lex(parent, debug, data, skip, lines, [token])
end
end
defp lex(parent, debug, data, skip, lines, [{_, tskip, _, tlines} = token | stack] = ostack) do
receive do
{:EXIT, ^parent, reason} ->
exit(reason)
{from, :stop} ->
send(from, {self(), :ok})
exit(:normal)
{from, :pop} ->
send(from, {self(), {:ok, token}})
lex(parent, debug, data, skip, lines, stack)
{from, :advance} ->
send(from, {self(), :ok})
lex(parent, debug, data, skip, lines, stack)
{from, :peek} ->
send(from, {self(), {:ok, token}})
lex(parent, debug, data, skip, lines, ostack)
{from, {:push, pushed}} ->
send(from, {self(), :ok})
lex(parent, debug, data, skip, lines, [pushed | ostack])
{from, :pos} ->
send(from, {self(), {:ok, tskip, tlines}})
lex(parent, debug, data, skip, lines, ostack)
end
end
@spec do_lex(binary, skip, lines) :: {:ok, binary, token} | {:error, term, skip, lines}
defp do_lex(data, skip, lines)
defp do_lex(<<>> = data, skip, lines),
do: {:ok, data, {:eof, skip, 0, lines}}
defp do_lex(<<?\#, rest::binary>>, skip, lines),
do: lex_comment(rest, skip + 1, 0, lines)
defp do_lex(<<?\r, ?\n, rest::binary>>, skip, lines),
do: {:ok, rest, {:newline, skip + 2, 0, lines + 1}}
defp do_lex(<<?\n, rest::binary>>, skip, lines),
do: {:ok, rest, {:newline, skip + 1, 0, lines + 1}}
defp do_lex(<<c::utf8, rest::binary>>, skip, lines) when is_whitespace(c),
do: lex_whitespace(rest, skip + 1, lines)
defp do_lex(<<"true", rest::binary>>, skip, lines),
do: {:ok, rest, {true, skip + 4, 0, lines}}
defp do_lex(<<"false", rest::binary>>, skip, lines),
do: {:ok, rest, {false, skip + 5, 0, lines}}
defp do_lex(<<?=, rest::binary>>, skip, lines),
do: {:ok, rest, {?=, skip + 1, 0, lines}}
defp do_lex(<<?., rest::binary>>, skip, lines),
do: {:ok, rest, {?., skip + 1, 0, lines}}
defp do_lex(<<?\[, rest::binary>>, skip, lines),
do: {:ok, rest, {?\[, skip + 1, 0, lines}}
defp do_lex(<<?\], rest::binary>>, skip, lines),
do: {:ok, rest, {?\], skip + 1, 0, lines}}
defp do_lex(<<?\{, rest::binary>>, skip, lines),
do: {:ok, rest, {?\{, skip + 1, 0, lines}}
defp do_lex(<<?\}, rest::binary>>, skip, lines),
do: {:ok, rest, {?\}, skip + 1, 0, lines}}
defp do_lex(<<?+, rest::binary>>, skip, lines),
do: {:ok, rest, {?+, skip + 1, 0, lines}}
defp do_lex(<<?-, rest::binary>>, skip, lines),
do: {:ok, rest, {?-, skip + 1, 0, lines}}
defp do_lex(<<?:, rest::binary>>, skip, lines),
do: {:ok, rest, {?:, skip + 1, 0, lines}}
defp do_lex(<<?,, rest::binary>>, skip, lines),
do: {:ok, rest, {?,, skip + 1, 0, lines}}
defp do_lex(<<?_, rest::binary>>, skip, lines),
do: {:ok, rest, {?_, skip + 1, 0, lines}}
defp do_lex(<<?0, ?x, c::utf8, rest::binary>>, skip, lines) when is_hex(c),
do: lex_hex(rest, skip + 3, [c], lines)
defp do_lex(<<?0, ?o, c::utf8, rest::binary>>, skip, lines) when is_octal(c),
do: lex_octal(rest, skip + 3, [c], lines)
defp do_lex(<<?0, ?b, c::utf8, rest::binary>>, skip, lines) when is_bin(c),
do: lex_binary(rest, skip + 3, [c], lines)
defp do_lex(<<c::utf8, _::binary>> = data, skip, lines) when is_quote(c),
do: __MODULE__.String.lex(data, skip, lines)
defp do_lex(<<c::utf8, rest::binary>>, skip, lines) when is_digit(c),
do: lex_digits(rest, skip + 1, [c], lines)
defp do_lex(<<c::utf8, rest::binary>>, skip, lines) when is_alpha(c),
do: lex_alpha(rest, skip + 1, [c], lines)
defp do_lex(<<c::utf8, _::binary>>, skip, lines),
do: {:error, {:invalid_char, <<c::utf8>>}, skip + 1, lines}
defp lex_whitespace(<<c::utf8, rest::binary>>, skip, lines) when is_whitespace(c),
do: lex_whitespace(rest, skip + 1, lines)
defp lex_whitespace(rest, skip, lines),
do: {:ok, rest, {:whitespace, skip, 0, lines}}
defp lex_comment(<<?\r, ?\n, rest::binary>>, skip, size, lines),
do: {:ok, rest, {:comment, skip + 2, size, lines + 1}}
defp lex_comment(<<?\n, rest::binary>>, skip, size, lines),
do: {:ok, rest, {:comment, skip + 1, size, lines + 1}}
defp lex_comment(<<_::utf8, rest::binary>>, skip, size, lines),
do: lex_comment(rest, skip + 1, size + 1, lines)
defp lex_comment(<<>> = rest, skip, size, lines),
do: {:ok, rest, {:comment, skip, size, lines}}
defp lex_digits(<<c::utf8, rest::binary>>, skip, acc, lines) when is_digit(c),
do: lex_digits(rest, skip + 1, [c | acc], lines)
defp lex_digits(rest, skip, acc, lines) do
bin = acc |> Enum.reverse() |> IO.chardata_to_string()
{:ok, rest, {:digits, skip, bin, lines}}
end
defp lex_hex(<<c::utf8, ?_, d::utf8, rest::binary>>, skip, acc, lines)
when is_hex(c) and is_hex(d),
do: lex_hex(rest, skip + 3, [d, c | acc], lines)
defp lex_hex(<<c::utf8, rest::binary>>, skip, acc, lines) when is_hex(c),
do: lex_hex(rest, skip + 1, [c | acc], lines)
defp lex_hex(rest, skip, acc, lines) do
bin = acc |> Enum.reverse() |> IO.chardata_to_string()
{:ok, rest, {:hex, skip, bin, lines}}
end
defp lex_octal(<<c::utf8, ?_, d::utf8, rest::binary>>, skip, acc, lines)
when is_octal(c) and is_octal(d),
do: lex_octal(rest, skip + 3, [d, c | acc], lines)
defp lex_octal(<<c::utf8, rest::binary>>, skip, acc, lines) when is_octal(c),
do: lex_octal(rest, skip + 1, [c | acc], lines)
defp lex_octal(rest, skip, acc, lines) do
bin = acc |> Enum.reverse() |> IO.chardata_to_string()
{:ok, rest, {:octal, skip, bin, lines}}
end
defp lex_binary(<<c::utf8, ?_, d::utf8, rest::binary>>, skip, acc, lines)
when is_bin(c) and is_bin(d),
do: lex_binary(rest, skip + 3, [d, c | acc], lines)
defp lex_binary(<<c::utf8, rest::binary>>, skip, acc, lines) when is_bin(c),
do: lex_binary(rest, skip + 1, [c | acc], lines)
defp lex_binary(rest, skip, acc, lines) do
bin = acc |> Enum.reverse() |> IO.chardata_to_string()
{:ok, rest, {:binary, skip, bin, lines}}
end
defp lex_alpha(<<c::utf8, rest::binary>>, skip, acc, lines) when is_alpha(c),
do: lex_alpha(rest, skip + 1, [c | acc], lines)
defp lex_alpha(rest, skip, acc, lines) do
bin = acc |> Enum.reverse() |> IO.chardata_to_string()
{:ok, rest, {:alpha, skip, bin, lines}}
end
defp server_call(pid, msg) do
ref = Process.monitor(pid)
send(pid, {self(), msg})
receive do
{:DOWN, ^ref, _type, _pid, info} ->
{:error, info}
{^pid, reply} ->
Process.demonitor(ref, [:flush])
reply
end
end
end
|
lib/lexer.ex
|
defmodule ExWire.Struct.BlockQueue do
@moduledoc """
A structure to store and process blocks received by peers. The goal of this
module is to keep track of partial blocks until we're ready to add the block
to the chain.
There are three reasons we need to keep them stored in a queue:
1. Block headers are sent separately from block bodies. We need to store the
headers until we receive the bodies.
2. We shouldn't accept a block as canonical until we've heard from several
peers that the block is the most canonical block at that number. Thus,
we store the block and a number of commitments. Once the number of
commitments tips over some threshold, we process the block and add it
to our block tree.
3. We may be waiting on a parent block as we received the child first.
We add these blocks to a backlog map keyed by the parent hash.
"""
alias Block.Header
alias ExWire.Struct.Block, as: BlockStruct
alias Blockchain.{Block, Blocktree, Chain}
alias MerklePatriciaTree.Trie
require Logger
# These will be used to help us determine if a block is empty
@empty_trie MerklePatriciaTree.Trie.empty_trie_root_hash()
@empty_hash [] |> ExRLP.encode() |> ExthCrypto.Hash.Keccak.kec()
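  # Note (added): @empty_hash is the Keccak-256 of the RLP-encoded empty list,
  # i.e. the well-known empty ommers hash starting 0x1dcc4de8... that also
  # appears in the doctests below.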
defstruct queue: %{},
backlog: %{},
do_validation: true,
block_numbers: MapSet.new()
@type block_item :: %{
commitments: list(binary()),
block: Block.t(),
ready: boolean()
}
@type block_map :: %{
EVM.hash() => block_item
}
@type t :: %__MODULE__{
queue: %{integer() => block_map},
backlog: %{EVM.hash() => list(Block.t())},
do_validation: boolean(),
block_numbers: MapSet.t()
}
@doc """
Adds a given header received by a peer to a block queue. Returns whether or
not we should request the block body.
Note: we will process it if the block is empty (i.e. has neither transactions
nor ommers).
"""
@spec add_header(
t,
Blocktree.t(),
Header.t(),
EVM.hash(),
binary(),
Chain.t(),
Trie.t()
) :: {t, Blocktree.t(), Trie.t(), boolean()}
def add_header(
block_queue = %__MODULE__{queue: queue},
block_tree,
header,
header_hash,
remote_id,
chain,
trie
) do
block_map = Map.get(queue, header.number, %{})
{block_map, should_request_body} =
case Map.get(block_map, header_hash) do
nil ->
          # The block may already be ready (i.e. when it is empty).
is_empty = is_block_empty?(header)
block_map =
Map.put(block_map, header_hash, %{
commitments: MapSet.new([remote_id]),
block: %Block{header: header},
ready: is_empty
})
{block_map, not is_empty}
block_item ->
{Map.put(block_map, header_hash, %{
block_item
| commitments: MapSet.put(block_item.commitments, remote_id)
}), false}
end
updated_block_queue = %{
block_queue
| queue: Map.put(queue, header.number, block_map),
block_numbers: MapSet.put(block_queue.block_numbers, header.number)
}
{new_block_queue, new_block_tree, new_trie} =
process_block_queue(updated_block_queue, block_tree, chain, trie)
{new_block_queue, new_block_tree, new_trie, should_request_body}
end
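  # Hedged usage sketch (added; not from the original source): handling a
  # header received from a peer. `header`, `header_hash`, `peer_id`, `chain`,
  # `trie`, and `request_block_body/2` are hypothetical values and helpers
  # owned by the caller.
  #
  #     {queue, tree, trie, request_body?} =
  #       ExWire.Struct.BlockQueue.add_header(
  #         %ExWire.Struct.BlockQueue{}, tree, header, header_hash,
  #         peer_id, chain, trie)
  #
  #     # Only fetch the body when the header alone isn't enough (non-empty block).
  #     if request_body?, do: request_block_body(peer_id, header_hash)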
@doc """
Adds a given block struct received by a peer to a block queue.
Since we don't really know which block this belongs to, we'll just need to
look at every block and guess.
To guess, we'll compute the transactions root and ommers hash, and then try
to find a header that matches them. For empty blocks (ones with no transactions
and no ommers), there may be several matches. Otherwise, each block body should
be pretty much unique.
"""
@spec add_block_struct(
t(),
Blocktree.t(),
BlockStruct.t(),
Chain.t(),
Trie.t()
) :: {t(), Blocktree.t(), Trie.t()}
def add_block_struct(
block_queue = %__MODULE__{queue: queue},
block_tree,
block_struct,
chain,
trie
) do
transactions_root = get_transactions_root(block_struct.transactions_rlp)
ommers_hash = get_ommers_hash(block_struct.ommers_rlp)
updated_queue =
Enum.reduce(queue, queue, fn {number, block_map}, queue ->
updated_block_map =
Enum.reduce(block_map, block_map, fn {hash, block_item}, block_map ->
if block_item.block.header.transactions_root == transactions_root and
block_item.block.header.ommers_hash == ommers_hash do
# This is now ready! (though, it may not still have enough commitments)
block = %{
block_item.block
| transactions: block_struct.transactions,
ommers: block_struct.ommers
}
Map.put(block_map, hash, %{block_item | block: block, ready: true})
else
block_map
end
end)
Map.put(queue, number, updated_block_map)
end)
updated_block_queue = %{block_queue | queue: updated_queue}
process_block_queue(updated_block_queue, block_tree, chain, trie)
end
@doc """
Processes the block queue, adding any blocks which are complete and have
sufficient commitments to the block tree. These blocks are then removed
from the queue. Note: they may nonetheless end up in the backlog if we are
still waiting for the parent block.
"""
@spec process_block_queue(t(), Blocktree.t(), Chain.t(), Trie.t()) ::
{t(), Blocktree.t(), Trie.t()}
def process_block_queue(
block_queue = %__MODULE__{},
block_tree,
chain,
trie
) do
# First get ready to process blocks
{remaining_block_queue, blocks} = get_complete_blocks(block_queue)
# Then recursively process them
do_process_blocks(blocks, remaining_block_queue, block_tree, chain, trie)
end
@spec do_process_blocks(list(Block.t()), t(), Blocktree.t(), Chain.t(), Trie.t()) ::
{t(), Blocktree.t(), Trie.t()}
defp do_process_blocks([], block_queue, block_tree, _chain, trie),
do: {block_queue, block_tree, trie}
defp do_process_blocks([block | rest], block_queue, block_tree, chain, trie) do
{new_block_tree, new_trie, new_backlog, extra_blocks} =
case Blocktree.verify_and_add_block(
block_tree,
chain,
block,
trie,
block_queue.do_validation
) do
{:invalid, [:non_genesis_block_requires_parent]} ->
# Note: this is probably too slow since we see a lot of blocks without
# parents and, I think, we're running the full validity check.
# :ok = Logger.debug("[Block Queue] Failed to verify block due to missing parent")
updated_backlog =
Map.update(
block_queue.backlog,
block.header.parent_hash,
[block],
fn blocks -> [block | blocks] end
)
{block_tree, trie, updated_backlog, []}
{:invalid, reasons} ->
:ok =
Logger.debug(fn ->
"[Block Queue] Failed to verify block due to #{inspect(reasons)}"
end)
{block_tree, trie, block_queue.backlog, []}
{:ok, {new_block_tree, new_trie, block_hash}} ->
:ok =
Logger.debug(fn ->
"[Block Queue] Verified block #{block.header.number} (0x#{
Base.encode16(block_hash, case: :lower)
}) and added to new block tree"
end)
{backlogged_blocks, new_backlog} = Map.pop(block_queue.backlog, block_hash, [])
{new_block_tree, new_trie, new_backlog, backlogged_blocks}
end
new_block_queue = %{block_queue | backlog: new_backlog}
do_process_blocks(extra_blocks ++ rest, new_block_queue, new_block_tree, chain, new_trie)
end
@doc """
Returns the set of blocks which are complete in the block queue, along with a
new block queue with those blocks removed. This effectively dequeues blocks
once they have sufficient data and commitments. These blocks may still
fail to process or end up in a backlog if the parent is missing.
## Examples
iex> %ExWire.Struct.BlockQueue{
...> queue: %{
...> 5 => %{
...> <<1::256>> => %{
...> commitments: MapSet.new([1, 2]),
...> header: %Block.Header{number: 5},
...> block: %Blockchain.Block{block_hash: <<1::256>>},
...> ready: true,
...> },
...> <<2::256>> => %{
...> commitments: MapSet.new([]),
...> header: %Block.Header{number: 5},
...> block: %Blockchain.Block{block_hash: <<2::256>>},
...> ready: true,
...> },
...> <<3::256>> => %{
...> commitments: MapSet.new([1, 2]),
...> header: %Block.Header{number: 5, gas_used: 5},
...> block: %Blockchain.Block{block_hash: <<3::256>>},
...> ready: false,
...> },
...> <<4::256>> => %{
...> commitments: MapSet.new([1, 2]),
...> header: %Block.Header{number: 5, ommers_hash: <<5::256>>},
...> block: %Blockchain.Block{block_hash: <<4::256>>},
...> ready: false,
...> }
...> },
...> 6 => %{
...> <<5::256>> => %{
...> commitments: MapSet.new([1, 2]),
...> header: %Block.Header{number: 6},
...> block: %Blockchain.Block{block_hash: <<5::256>>},
...> ready: true,
...> }
...> }
...> }
...> }
...> |> ExWire.Struct.BlockQueue.get_complete_blocks()
{
%ExWire.Struct.BlockQueue{
queue: %{
5 => %{
<<2::256>> => %{
commitments: MapSet.new([]),
header: %Block.Header{number: 5},
block: %Blockchain.Block{block_hash: <<2::256>>},
ready: true
},
<<3::256>> => %{
commitments: MapSet.new([1, 2]),
header: %Block.Header{number: 5, gas_used: 5},
block: %Blockchain.Block{block_hash: <<3::256>>},
ready: false
},
<<4::256>> => %{
commitments: MapSet.new([1, 2]),
header: %Block.Header{number: 5, ommers_hash: <<5::256>>},
block: %Blockchain.Block{block_hash: <<4::256>>},
ready: false
}
}
}
},
[
%Blockchain.Block{block_hash: <<1::256>>},
%Blockchain.Block{block_hash: <<5::256>>}
]
}
"""
@spec get_complete_blocks(t) :: {t, [Block.t()]}
def get_complete_blocks(block_queue = %__MODULE__{queue: queue}) do
{queue, blocks} =
Enum.reduce(queue, {queue, []}, fn {number, block_map}, {queue, blocks} ->
{final_block_map, new_blocks} =
Enum.reduce(block_map, {block_map, []}, fn {hash, block_item}, {block_map, blocks} ->
if block_item.ready and
MapSet.size(block_item.commitments) >= ExWire.Config.commitment_count() do
{Map.delete(block_map, hash), [block_item.block | blocks]}
else
{block_map, blocks}
end
end)
total_blocks = blocks ++ new_blocks
if final_block_map == %{} do
{Map.delete(queue, number), total_blocks}
else
{Map.put(queue, number, final_block_map), total_blocks}
end
end)
{%{block_queue | queue: queue}, blocks}
end
@doc """
Determines if a block is empty. There's no reason to actually ask for a block
body if we know, a priori, that the block is empty.
## Examples
iex> %Block.Header{
...> transactions_root: MerklePatriciaTree.Trie.empty_trie_root_hash(),
...> ommers_hash: <<29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71>>
...> }
...> |> ExWire.Struct.BlockQueue.is_block_empty?
true
iex> %Block.Header{
...> transactions_root: MerklePatriciaTree.Trie.empty_trie_root_hash(),
...> ommers_hash: <<1>>
...> }
...> |> ExWire.Struct.BlockQueue.is_block_empty?
false
iex> %Block.Header{
...> transactions_root: <<1>>,
...> ommers_hash: <<29, 204, 77, 232, 222, 199, 93, 122, 171, 133, 181, 103, 182, 204, 212, 26, 211, 18, 69, 27, 148, 138, 116, 19, 240, 161, 66, 253, 64, 212, 147, 71>>
...> }
...> |> ExWire.Struct.BlockQueue.is_block_empty?
false
"""
@spec is_block_empty?(Header.t()) :: boolean()
def is_block_empty?(header) do
header.transactions_root == @empty_trie and header.ommers_hash == @empty_hash
end
# Tries to get the transaction root by encoding the transaction trie
@spec get_transactions_root([ExRLP.t()]) :: MerklePatriciaTree.Trie.root_hash()
defp get_transactions_root(transactions_rlp) do
# this is a throw-away
db = MerklePatriciaTree.Test.random_ets_db()
trie =
Enum.reduce(transactions_rlp |> Enum.with_index(), Trie.new(db), fn {trx, i}, trie ->
Trie.update_key(trie, ExRLP.encode(i), ExRLP.encode(trx))
end)
trie.root_hash
end
@spec get_ommers_hash(list(binary())) :: ExthCrypto.Hash.hash()
defp get_ommers_hash(ommers_rlp) do
ommers_rlp
|> ExRLP.encode()
|> ExthCrypto.Hash.Keccak.kec()
end
end
|
apps/ex_wire/lib/ex_wire/struct/block_queue.ex
|
defmodule Day22 do
@moduledoc """
AoC 2019, Day 22 - Slam Shuffle
The math for Part 2 was beyond me. Much inspiration was taken from:
https://github.com/bjorng/advent-of-code-2019/blob/master/day22/lib/day22.ex
https://www.reddit.com/r/adventofcode/comments/ee0rqi/2019_day_22_solutions/fbnkaju/?context=3
https://przybyl.io/solution-explanation-to-day-22-of-advent-of-code-2019.html
https://github.com/alexander-yu/adventofcode/blob/master/problems_2019/22.py
https://github.com/sasa1977/aoc/blob/master/lib/2019/201922.ex
"""
use Bitwise
@doc """
Shuffle deck from instructions, return position of card 2019
"""
def part1_orig do
input_file()
|> load()
|> shuffle()
|> Enum.find_index(&(&1 == 2019))
end
@doc """
Shuffle deck from instructions, return position of card 2019
"""
def part1 do
input_file()
|> load()
|> shuffle_funs()
|> apply([2019])
end
@big_deck_cnt 119315717514047
@shuffle_cnt 101741582076661
@doc """
Shuffle the extended deck, multiple times. What card is in position 2020?
"""
def part2 do
input_file()
|> load()
|> inv_shuffle_funs(@big_deck_cnt)
|> apply([2020, @shuffle_cnt])
end
@factory_deck Enum.into(0..10_006, [])
def input_file do
Util.priv_file(:day22, "day22_input.txt")
|> File.read!()
end
def load(str) do
String.split(str, "\n", trim: true)
|> Enum.map(&parse/1)
end
def normalize(val, deck_len) when val < 0 do
deck_len - normalize(-val, deck_len)
end
def normalize(val, deck_len), do: rem(val, deck_len)
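  # Note (added): every shuffle step is an affine map f(x) = a*x + b (mod deck_len),
  # so the whole shuffle composes into a single {a, b} pair. Applying it n times gives
  #   f^n(x) = a^n * x + b * (a^n - 1) / (a - 1)  (mod deck_len),
  # where the division is performed via the modular inverse of (a - 1).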
def inv_shuffle_funs(steps, deck_len) do
{a, b} = Enum.reverse(steps)
|> Enum.reduce({1, 0}, &(inv_lin_fun(&1, &2, deck_len)))
fn (card, shuffle_cnt) ->
normalize(pow(a, shuffle_cnt, deck_len) * card +
b * (pow(a, shuffle_cnt, deck_len) - 1) * Modular.inverse(a-1, deck_len),
deck_len)
end
end
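  # Note (added): pow/4 below is modular exponentiation by squaring.
  # For example, pow(2, 10, 1000) == 24, since 2^10 = 1024 and rem(1024, 1000) == 24.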
def pow(x, p, m, res \\ 1)
def pow(_, 0, _, res), do: res
def pow(x, p, m, res) do
next_x = rem(x * x, m)
next_p = bsr(p, 1)
case band(p, 1) do
0 ->
pow(next_x, next_p, m, rem(res, m))
1 ->
pow(next_x, next_p, m, rem(res*x, m))
end
end
def inv_lin_fun({:new_stack, nil}, {a, b}, len), do: {normalize(-a, len), normalize(-b - 1, len)}
def inv_lin_fun({:cut, val}, {a, b}, len), do: {a, normalize(b+val, len)}
def inv_lin_fun({:increment, val}, {a, b}, len) do
{normalize(a*Modular.inverse(val, len), len),
normalize(b*Modular.inverse(val,len), len)}
end
def shuffle_funs(steps, deck \\ @factory_deck) do
deck_len = Enum.count(deck)
{a, b} = Enum.reduce(steps, {1, 0}, &(lin_fun(&1, &2, deck_len)))
fn x -> normalize(a*x + b, deck_len) end
end
def lin_fun({:new_stack, nil}, {a, b}, len), do: {normalize(-a, len), normalize(-b - 1, len)}
def lin_fun({:cut, val}, {a, b}, len), do: {a, normalize(b-val, len)}
def lin_fun({:increment, val}, {a, b}, len), do: {normalize(a*val, len), normalize(b*val, len)}
def parse(<<"deal into new stack">>), do: {:new_stack, nil}
def parse(<<"cut ", cnt::binary>>), do: {:cut, String.to_integer(cnt)}
def parse(<<"deal with increment ", cnt::binary>>), do: {:increment, String.to_integer(cnt)}
def shuffle(steps, deck \\ @factory_deck)
def shuffle([], deck), do: deck
def shuffle([{:new_stack, _} | rest], deck) do
shuffle(rest, Enum.reverse(deck))
end
def shuffle([{:cut, cnt} | rest], deck) do
f = Enum.take(deck, cnt)
b = Enum.drop(deck, cnt)
new_list = if cnt < 0, do: [f | b], else: [b | f]
shuffle(rest, List.flatten(new_list))
end
def shuffle([{:increment, cnt} | rest], deck) do
m = mod_shuffle(tl(deck), Enum.count(deck), 0, cnt, %{0 => hd(deck)})
result = Enum.reduce(Enum.sort(Map.keys(m)), [], &([Map.get(m, &1) | &2]))
|> Enum.reverse()
shuffle(rest, result)
end
defp mod_shuffle([], _size, _curr, _cnt, map), do: map
defp mod_shuffle([v | rest], size, curr, cnt, map) do
loc = Integer.mod((curr + cnt), size)
mod_shuffle(rest, size, loc, cnt, Map.put(map, loc, v))
end
end
|
apps/day22/lib/day22.ex
|
defmodule Etso.ETS.MatchSpecification do
@moduledoc """
The ETS Match Specifications module contains various functions which convert Ecto queries to
ETS Match Specifications in order to execute the given queries.
"""
def build(query, params) do
{_, schema} = query.from.source
field_names = Etso.ETS.TableStructure.field_names(schema)
match_head = build_head(field_names)
match_conditions = build_conditions(field_names, params, query.wheres)
match_body = [build_body(field_names, query.select.fields)]
{match_head, match_conditions, match_body}
end
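  # Hedged sketch (added; not from the original source): the rough shape of the
  # output. For a hypothetical schema with fields [:id, :name] and a query like
  # `from p in Post, where: p.name == ^"foo", select: [p.id, p.name]`, build/2
  # returns approximately:
  #
  #     {{:"$1", :"$2"}, [{:==, :"$2", "foo"}], [[:"$1", :"$2"]]}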
defp build_head(field_names) do
List.to_tuple(Enum.map(1..length(field_names), fn x -> :"$#{x}" end))
end
defp build_conditions(field_names, params, query_wheres) do
Enum.reduce(query_wheres, [], fn %Ecto.Query.BooleanExpr{expr: expression}, acc ->
[build_condition(field_names, params, expression) | acc]
end)
end
defmacrop guard_operator(:and), do: :andalso
defmacrop guard_operator(:or), do: :orelse
defmacrop guard_operator(:!=), do: :"/="
defmacrop guard_operator(:<=), do: :"=<"
defmacrop guard_operator(operator), do: operator
for operator <- ~w(== != < > <= >= and or)a do
defp build_condition(field_names, params, {unquote(operator), [], [lhs, rhs]}) do
lhs_condition = build_condition(field_names, params, lhs)
rhs_condition = build_condition(field_names, params, rhs)
{guard_operator(unquote(operator)), lhs_condition, rhs_condition}
end
end
for operator <- ~w(not)a do
defp build_condition(field_names, params, {unquote(operator), [], [clause]}) do
condition = build_condition(field_names, params, clause)
{guard_operator(unquote(operator)), condition}
end
end
defp build_condition(field_names, params, {:in, [], [field, value]}) do
field_name = resolve_field_name(field)
field_index = get_field_index(field_names, field_name)
case resolve_field_values(params, value) do
[] -> []
values -> List.to_tuple([:orelse | Enum.map(values, &{:==, :"$#{field_index}", &1})])
end
end
defp build_condition(field_names, _, {{:., [], [{:&, [], [0]}, field_name]}, [], []}) do
:"$#{get_field_index(field_names, field_name)}"
end
defp build_condition(_, params, {:^, [], [index]}) do
Enum.at(params, index)
end
defp build_condition(_, _, value) when not is_tuple(value) do
value
end
defp build_body(field_names, query_select_fields) do
for select_field <- query_select_fields do
field_name = resolve_field_name(select_field)
field_index = get_field_index(field_names, field_name)
:"$#{field_index}"
end
end
defp resolve_field_name(field) do
{{:., _, [{:&, [], [0]}, field_name]}, [], []} = field
field_name
end
defp resolve_field_values(params, {:^, [], [index, count]}) do
for index <- index..(index + count - 1) do
Enum.at(params, index)
end
end
defp resolve_field_values(params, {:^, [], [index]}) do
Enum.at(params, index)
end
defp get_field_index(field_names, field_name) do
1 + Enum.find_index(field_names, fn x -> x == field_name end)
end
end
|
lib/etso/ets/match_specification.ex
|
defmodule NimblePool do
@external_resource "README.md"
@moduledoc "README.md"
|> File.read!()
|> String.split("<!-- MDOC !-->")
|> Enum.fetch!(1)
use GenServer
require Logger
@type from :: {pid, reference}
@type init_arg :: term
@type pool_state :: term
@type worker_state :: term
@type client_state :: term
@type user_reason :: term
@doc """
Initializes the worker.
It receives the worker argument passed to `start_link/1`. It must
return `{:ok, worker_state, pool_state}` or `{:async, fun, pool_state}`, where `fun`
is a zero-arity function that must return the worker state.
Note this callback is synchronous and therefore will block the pool.
If you need to perform long initialization, consider using the
`{:async, fun}` return type.
"""
@doc callback: :worker
@callback init_worker(pool_state) ::
{:ok, worker_state, pool_state} | {:async, (() -> worker_state), pool_state}
@doc """
Initializes the pool.
It receives the worker argument passed to `start_link/1` and must
return `{:ok, pool_state}` upon successful initialization,
`:ignore` to exit normally, or `{:stop, reason}` to exit with `reason`
and return `{:error, reason}`.
This is a good place to perform a registration for example.
It must return the `pool_state`. The `pool_state` is given to
`init_worker`. By default, it simply returns the arguments given.
This callback is optional.
"""
@doc callback: :pool
@callback init_pool(init_arg) :: {:ok, pool_state} | :ignore | {:stop, reason :: any()}
@doc """
Checks a worker out.
It receives `maybe_wrapped_command`. The `command` is given to the `checkout!/4`
call and may optionally be wrapped by `c:handle_enqueue/2`. It must return either
`{:ok, client_state, worker_state, pool_state}`, `{:remove, reason, pool_state}`,
or `{:skip, Exception.t(), pool_state}`.
If `:remove` is returned, `NimblePool` will attempt to checkout another
worker.
If `:skip` is returned, `NimblePool` will skip the checkout, the client will
raise the returned exception, and the worker will be left ready for the next
checkout attempt.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here, instead do as much work as
possible on the client.
Once a worker is checked out, it won't receive any
messages targeted to `c:handle_info/2`.
"""
@doc callback: :worker
@callback handle_checkout(maybe_wrapped_command :: term, from, worker_state, pool_state) ::
{:ok, client_state, worker_state, pool_state}
| {:remove, user_reason, pool_state}
| {:skip, Exception.t(), pool_state}
@doc """
Checks a worker in.
It receives the `client_state`, returned by the `checkout!/4`
anonymous function and it must return either
`{:ok, worker_state, pool_state}` or `{:remove, reason, pool_state}`.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here, instead do as much work as
possible on the client.
Once a worker is checked in, it may immediately be handed
to another client, without traversing any of the messages in the
pool inbox.
This callback is optional.
"""
@doc callback: :worker
@callback handle_checkin(client_state, from, worker_state, pool_state) ::
{:ok, worker_state, pool_state} | {:remove, user_reason, pool_state}
@doc """
Handles update instruction from checked out worker.
See `update/2` for more information.
This callback is optional.
"""
@doc callback: :worker
@callback handle_update(message :: term, worker_state, pool_state) ::
{:ok, worker_state, pool_state}
@doc """
Receives a message in the worker.
It receives the `message` and it must return either
`{:ok, worker_state}` or `{:remove, reason}`.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here.
This callback is optional.
"""
@doc callback: :worker
@callback handle_info(message :: term, worker_state) ::
{:ok, worker_state} | {:remove, user_reason}
@doc """
Executed by the pool, whenever a request to checkout a worker is enqueued.
The `command` argument should be treated as an opaque value, but it can be
wrapped with some data to be used in `c:handle_checkout/4`.
It must return either `{:ok, maybe_wrapped_command, pool_state}` or
`{:skip, Exception.t(), pool_state}` if checkout is to be skipped.
Note this callback is synchronous and therefore will block the pool.
Avoid performing long work in here.
This callback is optional.
"""
@doc callback: :pool
@callback handle_enqueue(command :: term, pool_state) ::
{:ok, maybe_wrapped_command :: term, pool_state}
| {:skip, Exception.t(), pool_state}
@doc """
Terminates a worker.
This callback is invoked with `:DOWN` whenever the client
link breaks, with `:timeout` whenever the client times out,
with one of `:throw`, `:error`, `:exit` whenever the client
crashes with one of the reasons above.
If at any point you return `{:remove, reason}`, the `reason`
will also be given to `terminate`. If any callback raises,
the raised exception will be given as `reason`.
It receives the latest known `worker_state`, which may not
be the latest state. For example, if a client checks out the
state and crashes, we don't fully know the `client_state`,
so the `terminate_worker` callback needs to take such scenarios
into account.
This callback is optional.
"""
@doc callback: :pool
@callback terminate_worker(
:DOWN | :timeout | :throw | :error | :exit | user_reason,
worker_state,
pool_state
) ::
{:ok, pool_state}
@doc """
Handle pings due to inactivity on worker.
Executed whenever the idle worker periodic timer verifies that a worker has been idle
on the pool for longer than `:worker_idle_timeout` pool configuration milliseconds.
This callback must return one of the following values:
* `{:ok, worker_state}`: Updates worker state.
* `{:remove, user_reason}`: The pool will proceed to the standard worker termination
defined in `terminate_worker/3`.
* `{:stop, user_reason}`: The entire pool process will be terminated, and `terminate_worker/3`
will be called for every worker on the pool.
This callback is optional.
## Max idle pings
The `:max_idle_pings` pool option is useful to prevent sequential termination of a large number
of workers. But it is important to keep in mind the following behaviours whenever utilizing it.
* If you are not terminating workers with `handle_ping/2`, you may end up pinging only the same
workers over and over again, because each cycle will ping only the first `:max_idle_pings` workers.
* If you are terminating workers with `handle_ping/2`, the last worker may be terminated after up to
`worker_idle_timeout + worker_idle_timeout * ceil(number_of_workers/max_idle_pings)`
instead of `2 * worker_idle_timeout` milliseconds of idle time.
For instance, consider a pool with 10 workers and a ping of 1 second, assuming negligible
worker termination time and a worst-case scenario where all the workers go idle right after a
verification cycle has started:
* Without `max_idle_pings`, the last worker will be terminated in the next cycle (2 seconds).
* With a `max_idle_pings` of 2, the last worker will be terminated only in the 5th cycle (6 seconds).
## Disclaimers
* On lazy pools, if no worker is currently on the pool, the callback will never be called.
Therefore you cannot rely on this callback to terminate empty lazy pools.
* On non-lazy pools, if you return `{:remove, user_reason}` you may end up
terminating and initializing workers at the same time every idle verification cycle.
* On large pools, if many resources go idle in the same cycle, you may end up terminating
a large number of workers sequentially, which could lead to the pool being unable to
fulfill requests. See the `:max_idle_pings` option to prevent this.
"""
@doc callback: :worker
@callback handle_ping(
worker_state,
pool_state
) ::
{:ok, worker_state} | {:remove, user_reason()} | {:stop, user_reason()}
@optional_callbacks init_pool: 1,
handle_checkin: 4,
handle_info: 2,
handle_enqueue: 2,
handle_update: 3,
handle_ping: 2,
terminate_worker: 3
@doc """
Defines a pool to be started under the supervision tree.
It accepts the same options as `start_link/1` with the
addition of `:restart` and `:shutdown`, which control the
"Child Specification".
"""
def child_spec(opts)
def child_spec(opts) do
{worker, _} = Keyword.fetch!(opts, :worker)
{restart, opts} = Keyword.pop(opts, :restart, :permanent)
{shutdown, opts} = Keyword.pop(opts, :shutdown, 5_000)
%{
id: worker,
start: {__MODULE__, :start_link, [opts]},
shutdown: shutdown,
restart: restart
}
end
@doc """
Starts a pool.
## Options
* `:worker` - a `{worker_mod, worker_init_arg}` tuple with the worker
module that implements the `NimblePool` behaviour and the worker
initial argument. This argument is required.
* `:pool_size` - how many workers in the pool. Defaults to 10.
* `:lazy` - When `true`, workers are started lazily, only when necessary.
Defaults to `false`.
* `:worker_idle_timeout` - Timeout in milliseconds to tag a worker as idle.
If not nil, starts a periodic timer with the same frequency that will ping
all idle workers using the optional `handle_ping/2` callback.
Defaults to no timeout.
* `:max_idle_pings` - Defines a limit to the number of workers that can be pinged
for each cycle of the `handle_ping/2` optional callback.
Defaults to no limit. See `handle_ping/2` for more details.
"""
def start_link(opts) do
{{worker, arg}, opts} = Keyword.pop(opts, :worker)
{pool_size, opts} = Keyword.pop(opts, :pool_size, 10)
{lazy, opts} = Keyword.pop(opts, :lazy, false)
{worker_idle_timeout, opts} = Keyword.pop(opts, :worker_idle_timeout, nil)
{max_idle_pings, opts} = Keyword.pop(opts, :max_idle_pings, -1)
unless is_atom(worker) do
raise ArgumentError, "worker must be an atom, got: #{inspect(worker)}"
end
unless pool_size > 0 do
raise ArgumentError, "pool_size must be more than 0, got: #{inspect(pool_size)}"
end
GenServer.start_link(
__MODULE__,
{worker, arg, pool_size, lazy, worker_idle_timeout, max_idle_pings},
opts
)
end
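  # Hedged usage sketch (added; not from the original source): starting a pool
  # under a supervisor. `MyWorker` is a hypothetical module implementing the
  # NimblePool behaviour; remaining options are forwarded to GenServer.start_link/3.
  #
  #     children = [
  #       {NimblePool, worker: {MyWorker, :some_arg}, pool_size: 5, name: MyPool}
  #     ]
  #     Supervisor.start_link(children, strategy: :one_for_one)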
@doc """
Stops a pool.
"""
def stop(pool, reason \\ :normal, timeout \\ :infinity) do
GenServer.stop(pool, reason, timeout)
end
@doc """
Checks out from the pool.
It expects a command, which will be passed to the `c:handle_checkout/4`
callback. The `c:handle_checkout/4` callback will return a client state,
which is given to the `function`.
The `function` receives two arguments, the pool `{pid(), reference()}` and the `client_state`.
The function must return a two-element tuple, where the first element is the
return value for `checkout!`, and the second element is the updated `client_state`,
which will be given as the first argument to `c:handle_checkin/4`.
`checkout!` also takes an optional `timeout` value; this value is applied
to the checkout operation itself. `checkin` happens asynchronously.
"""
def checkout!(pool, command, function, timeout \\ 5_000) when is_function(function, 2) do
# Reimplementation of gen.erl call to avoid multiple monitors.
pid = GenServer.whereis(pool)
unless pid do
exit!(:noproc, :checkout, [pool])
end
ref = Process.monitor(pid)
send_call(pid, ref, {:checkout, command, deadline(timeout)})
receive do
{^ref, {:skipped, exception}} ->
raise exception
{^ref, client_state} ->
Process.demonitor(ref, [:flush])
try do
function.({pid, ref}, client_state)
catch
kind, reason ->
send(pid, {__MODULE__, :cancel, ref, kind})
:erlang.raise(kind, reason, __STACKTRACE__)
else
{result, client_state} ->
send(pid, {__MODULE__, :checkin, ref, client_state})
result
end
{:DOWN, ^ref, _, _, :noconnection} ->
exit!({:nodedown, get_node(pid)}, :checkout, [pool])
{:DOWN, ^ref, _, _, reason} ->
exit!(reason, :checkout, [pool])
after
timeout ->
Process.demonitor(ref, [:flush])
exit!(:timeout, :checkout, [pool])
end
end
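  # Hedged usage sketch (added; not from the original source): checking out a
  # worker, doing work on the client side, and checking it back in. `MyPool`,
  # `:my_command`, and `do_work/1` are hypothetical.
  #
  #     result =
  #       NimblePool.checkout!(MyPool, :my_command, fn _from, client_state ->
  #         {do_work(client_state), client_state}
  #       end)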
@doc """
Sends an `update` instruction to the pool about the checked out worker.
This must be called inside the `checkout!` callback with
the `from` value given to `checkout`.
This is useful to update the pool state before effectively
checking the state in, which is handy when transferring
resources that requires two steps.
"""
def update({pid, ref}, command) do
send(pid, {__MODULE__, :update, ref, command})
end
defp deadline(timeout) when is_integer(timeout) do
System.monotonic_time() + System.convert_time_unit(timeout, :millisecond, :native)
end
defp deadline(:infinity), do: :infinity
defp get_node({_, node}), do: node
defp get_node(pid) when is_pid(pid), do: node(pid)
defp send_call(pid, ref, message) do
# Auto-connect is asynchronous. But we still use :noconnect to make sure
# we send on the monitored connection, and not trigger a new auto-connect.
Process.send(pid, {:"$gen_call", {self(), ref}, message}, [:noconnect])
end
defp exit!(reason, fun, args) do
exit({reason, {__MODULE__, fun, args}})
end
## Callbacks
@impl true
def init({worker, arg, pool_size, lazy, worker_idle_timeout, max_idle_pings}) do
Process.flag(:trap_exit, true)
_ = Code.ensure_loaded(worker)
lazy = if lazy, do: pool_size, else: nil
if worker_idle_timeout do
if function_exported?(worker, :handle_ping, 2) do
Process.send_after(self(), :check_idle, worker_idle_timeout)
else
IO.warn(
":worker_idle_timeout was given but the worker does not export a handle_ping/2 callback"
)
end
end
with {:ok, pool_state} <- do_init_pool(worker, arg) do
{pool_state, resources, async} =
if is_nil(lazy) do
Enum.reduce(1..pool_size, {pool_state, :queue.new(), %{}}, fn
_, {pool_state, resources, async} ->
init_worker(worker, pool_state, resources, async, worker_idle_timeout)
end)
else
{pool_state, :queue.new(), %{}}
end
state = %{
worker: worker,
queue: :queue.new(),
requests: %{},
monitors: %{},
resources: resources,
async: async,
state: pool_state,
lazy: lazy,
worker_idle_timeout: worker_idle_timeout,
max_idle_pings: max_idle_pings
}
{:ok, state}
end
end
@impl true
def handle_call({:checkout, command, deadline}, {pid, ref} = from, state) do
%{requests: requests, monitors: monitors, worker: worker, state: pool_state} = state
mon_ref = Process.monitor(pid)
requests = Map.put(requests, ref, {pid, mon_ref, :command, command, deadline})
monitors = Map.put(monitors, mon_ref, ref)
state = %{state | requests: requests, monitors: monitors}
case handle_enqueue(worker, command, pool_state) do
{:ok, command, pool_state} ->
{:noreply, maybe_checkout(command, mon_ref, deadline, from, %{state | state: pool_state})}
{:skip, exception, pool_state} ->
state = remove_request(%{state | state: pool_state}, ref, mon_ref)
{:reply, {:skipped, exception}, state}
end
end
@impl true
def handle_info({__MODULE__, :update, ref, command}, state) do
%{requests: requests, state: pool_state, worker: worker} = state
case requests do
%{^ref => {pid, mon_ref, :state, worker_state}} ->
{:ok, worker_state, pool_state} = worker.handle_update(command, worker_state, pool_state)
requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_state})
{:noreply, %{state | requests: requests, state: pool_state}}
%{} ->
exit(:unexpected_precheckin)
end
end
@impl true
def handle_info({__MODULE__, :checkin, ref, worker_client_state}, state) do
%{
requests: requests,
resources: resources,
worker: worker,
state: pool_state,
worker_idle_timeout: worker_idle_timeout
} = state
case requests do
%{^ref => {pid, mon_ref, :state, worker_server_state}} ->
checkin =
if function_exported?(worker, :handle_checkin, 4) do
args = [worker_client_state, {pid, ref}, worker_server_state, pool_state]
apply_worker_callback(pool_state, worker, :handle_checkin, args)
else
{:ok, worker_server_state, pool_state}
end
{resources, state} =
case checkin do
{:ok, worker_server_state, pool_state} ->
{:queue.in({worker_server_state, get_metadata(worker_idle_timeout)}, resources),
%{state | state: pool_state}}
{:remove, reason, pool_state} ->
{resources,
remove_worker(reason, worker_server_state, %{state | state: pool_state})}
end
state = remove_request(state, ref, mon_ref)
{:noreply, maybe_checkout(%{state | resources: resources})}
%{} ->
exit(:unexpected_checkin)
end
end
@impl true
def handle_info({__MODULE__, :cancel, ref, reason}, state) do
cancel_request_ref(ref, reason, state)
end
@impl true
def handle_info({__MODULE__, :init_worker}, state) do
%{
async: async,
resources: resources,
worker: worker,
state: pool_state,
worker_idle_timeout: worker_idle_timeout
} = state
{pool_state, resources, async} =
init_worker(worker, pool_state, resources, async, worker_idle_timeout)
{:noreply, maybe_checkout(%{state | async: async, resources: resources, state: pool_state})}
end
@impl true
def handle_info({:DOWN, ref, _, _, _} = down, state) do
%{monitors: monitors, async: async} = state
case monitors do
%{^ref => request_ref} ->
cancel_request_ref(request_ref, :DOWN, state)
%{} ->
case async do
%{^ref => _} -> remove_async_ref(ref, state)
%{} -> maybe_handle_info(down, state)
end
end
end
@impl true
def handle_info({:EXIT, pid, _reason} = exit, state) do
%{async: async} = state
case async do
%{^pid => _} -> {:noreply, %{state | async: Map.delete(async, pid)}}
%{} -> maybe_handle_info(exit, state)
end
end
@impl true
def handle_info({ref, worker_state} = reply, state) when is_reference(ref) do
%{async: async, resources: resources, worker_idle_timeout: worker_idle_timeout} = state
case async do
%{^ref => _} ->
Process.demonitor(ref, [:flush])
resources = :queue.in({worker_state, get_metadata(worker_idle_timeout)}, resources)
async = Map.delete(async, ref)
state = %{state | async: async, resources: resources}
{:noreply, maybe_checkout(state)}
%{} ->
maybe_handle_info(reply, state)
end
end
@impl true
def handle_info(
:check_idle,
%{resources: resources, worker_idle_timeout: worker_idle_timeout} = state
) do
case check_idle_resources(resources, state) do
{:ok, new_resources, new_state} ->
Process.send_after(self(), :check_idle, worker_idle_timeout)
{:noreply, %{new_state | resources: new_resources}}
{:stop, reason, state} ->
{:stop, {:shutdown, reason}, state}
end
end
@impl true
def handle_info(msg, state) do
maybe_handle_info(msg, state)
end
@impl true
def terminate(reason, %{resources: resources} = state) do
for {worker_server_state, _} <- :queue.to_list(resources) do
maybe_terminate_worker(reason, worker_server_state, state)
end
:ok
end
defp do_init_pool(worker, arg) do
if function_exported?(worker, :init_pool, 1) do
worker.init_pool(arg)
else
{:ok, arg}
end
end
defp remove_async_ref(ref, state) do
%{
async: async,
resources: resources,
worker: worker,
state: pool_state,
worker_idle_timeout: worker_idle_timeout
} = state
# If an async worker failed to start, we try to start another one
# immediately, even if the pool is lazy, as we assume there is an
# immediate need for this resource.
{pool_state, resources, async} =
init_worker(worker, pool_state, resources, Map.delete(async, ref), worker_idle_timeout)
{:noreply, %{state | resources: resources, async: async, state: pool_state}}
end
defp cancel_request_ref(ref, reason, %{requests: requests} = state) do
case requests do
# Exited or timed out before we could serve it
%{^ref => {_, mon_ref, :command, _command, _deadline}} ->
{:noreply, remove_request(state, ref, mon_ref)}
# Exited or errored during client processing
%{^ref => {_, mon_ref, :state, worker_server_state}} ->
state = remove_request(state, ref, mon_ref)
{:noreply, remove_worker(reason, worker_server_state, state)}
%{} ->
exit(:unexpected_remove)
end
end
defp maybe_handle_info(msg, state) do
%{resources: resources, worker: worker, worker_idle_timeout: worker_idle_timeout} = state
if function_exported?(worker, :handle_info, 2) do
{resources, state} =
Enum.reduce(:queue.to_list(resources), {:queue.new(), state}, fn
{worker_server_state, _}, {resources, state} ->
case apply_worker_callback(worker, :handle_info, [msg, worker_server_state]) do
{:ok, worker_server_state} ->
{:queue.in({worker_server_state, get_metadata(worker_idle_timeout)}, resources),
state}
{:remove, reason} ->
{resources, remove_worker(reason, worker_server_state, state)}
end
end)
{:noreply, %{state | resources: resources}}
else
{:noreply, state}
end
end
defp maybe_checkout(%{queue: queue, requests: requests} = state) do
case :queue.out(queue) do
{{:value, {pid, ref}}, queue} ->
case requests do
# The request still exists, so we are good to go
%{^ref => {^pid, mon_ref, :command, command, deadline}} ->
maybe_checkout(command, mon_ref, deadline, {pid, ref}, %{state | queue: queue})
# It should never happen
%{^ref => _} ->
exit(:unexpected_checkout)
# The request is no longer active, do nothing
%{} ->
maybe_checkout(%{state | queue: queue})
end
{:empty, _queue} ->
state
end
end
defp maybe_checkout(command, mon_ref, deadline, {pid, ref} = from, state) do
if past_deadline?(deadline) do
state = remove_request(state, ref, mon_ref)
maybe_checkout(state)
else
%{resources: resources, requests: requests, worker: worker, queue: queue, state: pool_state} =
state = init_worker_if_lazy_and_empty(state)
case :queue.out(resources) do
{{:value, {worker_server_state, _}}, resources} ->
args = [command, from, worker_server_state, pool_state]
case apply_worker_callback(pool_state, worker, :handle_checkout, args) do
{:ok, worker_client_state, worker_server_state, pool_state} ->
GenServer.reply({pid, ref}, worker_client_state)
requests = Map.put(requests, ref, {pid, mon_ref, :state, worker_server_state})
%{state | resources: resources, requests: requests, state: pool_state}
{:remove, reason, pool_state} ->
state = remove_worker(reason, worker_server_state, %{state | state: pool_state})
maybe_checkout(command, mon_ref, deadline, from, %{state | resources: resources})
{:skip, exception, pool_state} ->
GenServer.reply({pid, ref}, {:skipped, exception})
remove_request(%{state | state: pool_state}, ref, mon_ref)
other ->
raise """
unexpected return from #{inspect(worker)}.handle_checkout/4.
Expected: {:ok, client_state, server_state, pool_state} | {:remove, reason, pool_state} | {:skip, Exception.t(), pool_state}
Got: #{inspect(other)}
"""
end
{:empty, _} ->
%{state | queue: :queue.in(from, queue)}
end
end
end
defp init_worker_if_lazy_and_empty(%{lazy: nil} = state), do: state
defp init_worker_if_lazy_and_empty(
%{lazy: lazy, resources: resources, worker_idle_timeout: worker_idle_timeout} = state
) do
if lazy > 0 and :queue.is_empty(resources) do
%{async: async, worker: worker, state: pool_state} = state
{pool_state, resources, async} =
init_worker(worker, pool_state, resources, async, worker_idle_timeout)
%{state | async: async, resources: resources, state: pool_state, lazy: lazy - 1}
else
state
end
end
defp past_deadline?(deadline) when is_integer(deadline) do
System.monotonic_time() >= deadline
end
defp past_deadline?(:infinity), do: false
defp remove_worker(reason, worker_server_state, state) do
state = maybe_terminate_worker(reason, worker_server_state, state)
if lazy = state.lazy do
%{state | lazy: lazy + 1}
else
schedule_init()
state
end
end
defp check_idle_resources(resources, state) do
now_in_ms = System.monotonic_time(:millisecond)
do_check_idle_resources(resources, now_in_ms, state, :queue.new(), state.max_idle_pings)
end
defp do_check_idle_resources(resources, _now_in_ms, state, new_resources, 0) do
{:ok, :queue.join(new_resources, resources), state}
end
defp do_check_idle_resources(resources, now_in_ms, state, new_resources, remaining_pings) do
case :queue.out(resources) do
{:empty, _} ->
{:ok, new_resources, state}
{{:value, resource_data}, next_resources} ->
{worker_server_state, worker_metadata} = resource_data
time_diff = now_in_ms - worker_metadata
if time_diff >= state.worker_idle_timeout do
case maybe_ping_worker(worker_server_state, state) do
{:ok, new_worker_state} ->
new_resource_data = {new_worker_state, worker_metadata}
new_resources = :queue.in(new_resource_data, new_resources)
do_check_idle_resources(
next_resources,
now_in_ms,
state,
new_resources,
remaining_pings - 1
)
{:remove, user_reason} ->
new_state = remove_worker(user_reason, worker_server_state, state)
do_check_idle_resources(
next_resources,
now_in_ms,
new_state,
new_resources,
remaining_pings - 1
)
{:stop, reason} ->
{:stop, reason, state}
end
else
{:ok, :queue.join(new_resources, resources), state}
end
end
end
defp maybe_ping_worker(worker_server_state, state) do
%{worker: worker, state: pool_state} = state
args = [worker_server_state, pool_state]
case apply_worker_callback(worker, :handle_ping, args) do
{:ok, worker_state} ->
{:ok, worker_state}
{:remove, user_reason} ->
{:remove, user_reason}
{:stop, user_reason} ->
{:stop, user_reason}
other ->
raise """
unexpected return from #{inspect(worker)}.handle_ping/2.
Expected:
{:remove, reason}
| {:ok, worker_state}
| {:stop, reason}
Got: #{inspect(other)}
"""
end
end
defp maybe_terminate_worker(reason, worker_server_state, state) do
%{worker: worker, state: pool_state} = state
if function_exported?(worker, :terminate_worker, 3) do
args = [reason, worker_server_state, pool_state]
case apply_worker_callback(worker, :terminate_worker, args) do
{:ok, pool_state} ->
%{state | state: pool_state}
{:remove, _reason} ->
state
other ->
raise """
unexpected return from #{inspect(worker)}.terminate_worker/3.
Expected:
{:ok, pool_state}
Got: #{inspect(other)}
"""
end
else
state
end
end
defp init_worker(worker, pool_state, resources, async, worker_idle_timeout) do
case apply_worker_callback(worker, :init_worker, [pool_state]) do
{:ok, worker_state, pool_state} ->
{pool_state, :queue.in({worker_state, get_metadata(worker_idle_timeout)}, resources),
async}
{:async, fun, pool_state} when is_function(fun, 0) ->
%{ref: ref, pid: pid} = Task.Supervisor.async(NimblePool.TaskSupervisor, fun)
{pool_state, resources, async |> Map.put(ref, pid) |> Map.put(pid, ref)}
{:remove, _reason} ->
send(self(), {__MODULE__, :init_worker})
{pool_state, resources, async}
other ->
raise """
unexpected return from #{inspect(worker)}.init_worker/1.
Expected:
{:ok, worker_state, pool_state}
| {:async, (() -> worker_state), pool_state}
Got: #{inspect(other)}
"""
end
end
defp schedule_init() do
send(self(), {__MODULE__, :init_worker})
end
defp apply_worker_callback(worker, fun, args) do
do_apply_worker_callback(worker, fun, args, &{:remove, &1})
end
defp apply_worker_callback(pool_state, worker, fun, args) do
do_apply_worker_callback(worker, fun, args, &{:remove, &1, pool_state})
end
defp do_apply_worker_callback(worker, fun, args, catch_fun) do
try do
apply(worker, fun, args)
catch
kind, reason ->
reason = Exception.normalize(kind, reason, __STACKTRACE__)
Logger.error(
[
"Error during #{inspect(worker)}.#{fun}/#{length(args)} callback:\n"
| Exception.format(kind, reason, __STACKTRACE__)
],
crash_reason: {crash_reason(kind, reason), __STACKTRACE__}
)
catch_fun.(reason)
end
end
defp crash_reason(:throw, value), do: {:nocatch, value}
defp crash_reason(_, value), do: value
defp remove_request(pool_state, ref, mon_ref) do
requests = Map.delete(pool_state.requests, ref)
monitors = Map.delete(pool_state.monitors, mon_ref)
Process.demonitor(mon_ref, [:flush])
%{pool_state | requests: requests, monitors: monitors}
end
defp handle_enqueue(worker, command, pool_state) do
if function_exported?(worker, :handle_enqueue, 2) do
worker.handle_enqueue(command, pool_state)
else
{:ok, command, pool_state}
end
end
defp get_metadata(nil), do: nil
defp get_metadata(_worker_idle_timeout), do: System.monotonic_time(:millisecond)
end
|
lib/nimble_pool.ex
|
defmodule Robolia.Competitions.RandomGroupedAllAgainstAll do
@doc """
Generate a list of tuples where each tuple has the players that are going to battle
against each other.
The players are put in random groups, which means that every time this function
is called, the players might battle against different opponents.
If the number of players doesn't divide evenly by per_group, then the remaining
players will be distributed equally between the other groups. For example, if 8 players are given
and 3 per_group are asked, the result will be 2 groups with 4 players each. As another example,
if 7 players are given and 3 per_group are asked, the result will be 2 groups, with 4 players in
one group and 3 in the other.
The players can be anything; the function only organizes them.
### Usage
iex> players = [player1, player2, player3, player4]
iex> GroupedAllAgainstAll.generate_matches(%{players: players, per_group: 2})
[{player1, player2}, {player2, player1}, {player3, player4}, {player4, player3}]
iex> players = [player1, player2, player3]
iex> GroupedAllAgainstAll.generate_matches(%{players: players, per_group: 3})
[{player1, player2}, {player1, player3}, {player2, player1}, {player2, player3}, {player3, player1}, {player3, player2}]
Note that on the examples above, players are not shuffled. But in practice they will be.
"""
@spec generate_matches(%{players: list(), per_group: integer()}) :: list(tuple())
def generate_matches(%{players: players, per_group: per_group}) when is_integer(per_group) do
generate_groups(%{players: players, per_group: per_group})
|> Enum.flat_map(&organize_matches/1)
end
defp generate_groups(%{players: players, per_group: per_group}) do
case players |> Enum.count() <= per_group do
true ->
[players]
false ->
groups =
players
|> Enum.shuffle()
|> Enum.chunk_every(per_group)
case Enum.count(groups) > expected_number_of_groups(players, per_group) do
true ->
groups
|> distribute_last_group_between_others(per_group)
false ->
groups
end
end
end
defp organize_matches(players) do
for p1 <- players,
p2 <- players,
p1 != p2,
do: {p1, p2}
end
defp expected_number_of_groups(players, per_group),
do: (Enum.count(players) / per_group) |> trunc
defp distribute_last_group_between_others(groups, per_group) do
last_group = groups |> List.last() |> fill_group_with_nil(per_group)
Enum.drop(groups, -1)
|> Enum.zip(last_group)
|> Enum.map(fn group ->
(elem(group, 0) ++ [elem(group, 1)]) |> Enum.reject(&is_nil/1)
end)
end
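  # Note (added): worked example of the distribution above. With 7 players and
  # per_group: 3, chunking yields [[p1, p2, p3], [p4, p5, p6], [p7]]. The last
  # group is padded to [p7, nil, nil], zipped onto the other two groups, and the
  # nils are rejected, giving [[p1, p2, p3, p7], [p4, p5, p6]].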
defp fill_group_with_nil(group, per_group),
do: group ++ List.duplicate(nil, per_group - Enum.count(group))
end
# Source file: lib/robolia/competitions/random_grouped_all_against_all.ex
% Date represents dates, with years, months, and days. Date has no concept of timezones,
% while DateTime does (though DateTime is not currently implemented). This implementation is based on Erlang's date BIF:
% http://www.erlang.org/doc/man/erlang.html#date-0
module Date
def new(date_tuple)
{year, month, day} = date_tuple
#Date::Behavior(year, month, day)
end
def new(year, month, day)
#Date::Behavior(year, month, day)
end
% Return the current date according to the operating system,
% but in UTC. This means that, if you are on 17th April but
% in UTC it still is 16th April, it will return 16th April.
def today
{date,_} = Erlang.calendar.universal_time
Date.new(date)
end
% Return tomorrow's date according to the operating system,
% but in UTC.
def tomorrow
Date.today.tomorrow
end
% Return yesterday's date according to the operating system,
% but in UTC.
def yesterday
Date.today.yesterday
end
% Return the number of days in the month of *date*.
def days_in_month(date)
if [1,3,5,7,8,10,12].member?(date.month)
31
elsif [4,6,9,11].member?(date.month)
30
elsif date.month == 2 && date.leap_year?
29
else
28
end
end
module Behavior
attr_reader ['day, 'month, 'year]
def __bound__(year, month, day)
@('year: year, 'month: month, 'day: day)
end
def weekday
Erlang.calendar.day_of_the_week(to_tuple)
end
def weekday_name
day_name(weekday)
end
def month_name
month_name(@month)
end
% Return a string representation of the Date.
%
% ## Example
%
% date = Date.new(2012, 12, 21)
% date.inspect % => 2012-12-21
%
def inspect
"#{@year}-#{convert_to_double_digit(@month)}-#{convert_to_double_digit(@day)}"
end
% Determine whether or not the Date occurs within a leap year
def leap_year?
Erlang.calendar.is_leap_year(@year)
end
% Return tomorrow's date, relative to the current date
def tomorrow
gregorian_addition(1)
end
% Return yesterday's date, relative to the current date
def yesterday
gregorian_addition(-1)
end
% Subtract the number of *days* from the current date
def -(days)
gregorian_addition(-days)
end
% Add the number of *days* to the current date
def +(days)
gregorian_addition(days)
end
% Converts the Date object to a tuple, with year, month, and day
def to_tuple
{@year, @month, @day}
end
private
def day_name(1) "Mon"; end
def day_name(2) "Tue"; end
def day_name(3) "Wed"; end
def day_name(4) "Thu"; end
def day_name(5) "Fri"; end
def day_name(6) "Sat"; end
def day_name(7) "Sun"; end
def month_name(1) "Jan"; end
def month_name(2) "Feb"; end
def month_name(3) "Mar"; end
def month_name(4) "Apr"; end
def month_name(5) "May"; end
def month_name(6) "Jun"; end
def month_name(7) "Jul"; end
def month_name(8) "Aug"; end
def month_name(9) "Sep"; end
def month_name(10) "Oct"; end
def month_name(11) "Nov"; end
def month_name(12) "Dec"; end
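% Shift the date by whole days: convert to Gregorian seconds at midnight,
% add 86400 seconds per day, then convert back to a date tuple.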
def gregorian_addition(days)
time = {0,0,0}
seconds = Erlang.calendar.datetime_to_gregorian_seconds({to_tuple,time})
{ date, _time } = Erlang.calendar.gregorian_seconds_to_datetime(seconds + (86400 * days))
Date.new(date)
end
def convert_to_double_digit(unit)
if unit < 10
"0" + unit.to_s
else
unit.to_s
end
end
end
end
# Source file: lib/date.ex
defmodule Collidex.Utils do
@moduledoc """
Assorted utilities and geometric transformations.
"""
alias Collidex.Geometry.Polygon
alias Collidex.Geometry.Rect
alias Collidex.Geometry.Circle
alias Graphmath.Vec2
@doc """
Returns a vector normal to each edge of the shape, in a right-handed
coordinate space.
"""
def normals_of_edges(shape = %Polygon{}) do
{ _, sides } = shape.vertices
|> Enum.reduce( {List.last(shape.vertices), []},
fn (vertex, {prev, list}) ->
{vertex, [ Vec2.subtract(vertex, prev) | list] }
end )
sides |> Enum.map(&(Vec2.perp(&1)))
end
@doc """
Given two numeric ranges as a pair of 2-tuples `{a1, a2}, {b1, b2}`, or
a list containing a pair of 2-tuples `[{a1, a2}, {b1, b2}]` returns
true if those ranges overlap.
## Examples
```
iex> Collidex.Utils.overlap?({0.0,5.0}, {5.0, 10.0})
true
iex> Collidex.Utils.overlap?({-1.0, -3.0}, {-6.1, 3.5})
true
iex> Collidex.Utils.overlap?({-1.0, 0.0}, {0.01, 1.0} )
false
```
"""
def overlap?({min1, max1}, {min2, max2}) do
in_range?(min1, min2, max2)
or in_range?(max1, min2, max2)
or in_range?(min2, min1, max1)
or in_range?(max2, min1, max1)
end
def overlap?([tuple1, tuple2]) do
overlap?(tuple1, tuple2)
end
@doc """
Find the projection of the vertices of a shape (Rect or Polygon)
on an arbitrary axis. Returns a list of floats representing
the projection of each vertex.
"""
def project_onto_axis(poly = %Polygon{}, axis ) do
poly.vertices |> Enum.map(&(Vec2.dot(&1,axis)))
end
def project_onto_axis(r = %Rect{}, axis) do
project_onto_axis(Polygon.make(r), axis)
end
@doc """
Find the minimum and maximum point of a shape, as that shape is
projected along an arbitrary axis. Supports Rects, Circles, and Polygons,
but note that the projection of a Circle will only be accurate if
the axis of projection is a unit vector.
Returns a 2-tuple representing the minimum and maximum point of the
shape's "shadow" as projected onto `axis`.
"""
def extent_on_axis(circle = %Circle{}, axis) do
# TODO raise an exception if this isn't a unit vector axis
projected_center = Vec2.dot(circle.center, axis)
{ projected_center - circle.radius,
projected_center + circle.radius }
end
def extent_on_axis(shape, axis ) do
project_onto_axis(shape, axis) |> Enum.min_max
end
@doc """
Returns a unit-vector in the same direction as the
argument.
"""
def unit_vector({x,y}) do
len = Vec2.length({x,y})
{x / len, y / len}
end
@doc """
Convert the numeric parts of arguments to floats. Accepts
a single number, a 2-tuple of numbers, or a list of 2-tuples
of numbers.
## Examples
```
iex> Collidex.Utils.coerce_floats [ {1, 3}, {-1.5, -2} ]
[ {1.0, 3.0}, {-1.5, -2.0} ]
iex> Collidex.Utils.coerce_floats {1, 3}
{1.0, 3.0}
iex> Collidex.Utils.coerce_floats 6
6.0
```
"""
def coerce_floats(list) when is_list(list) do
list |> Enum.map(fn({a, b}) -> { a/1, b/1} end)
end
def coerce_floats({a, b}) do
{ a / 1, b / 1 }
end
def coerce_floats(num) do
num / 1
end
defp in_range?(a,b,c) when b > c do
in_range?(a,c,b)
end
defp in_range?(a,b,c) do
a >= b and a <= c
end
end
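# A minimal sketch (not part of the library) of a separating-axis overlap
# test composed from the helpers above; `Polygon.make/1` accepting a vertex
# list is an assumption based on its use in project_onto_axis/2.
alias Collidex.Geometry.Polygon
alias Collidex.Utils

a = Polygon.make([{0.0, 0.0}, {2.0, 0.0}, {1.0, 2.0}])
b = Polygon.make([{1.5, 0.5}, {3.0, 0.0}, {2.5, 2.0}])

# Candidate separating axes are the edge normals of both polygons.
axes = Utils.normals_of_edges(a) ++ Utils.normals_of_edges(b)

# The polygons collide if no axis separates their projected extents.
collided? =
  not Enum.any?(axes, fn axis ->
    not Utils.overlap?(Utils.extent_on_axis(a, axis), Utils.extent_on_axis(b, axis))
  end)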
# Source file: lib/collidex/utils.ex
defmodule Crawlie.PqueueWrapper do
@moduledoc """
Wraps the Erlang [pqueue](https://github.com/okeuday/pqueue)
`:pqueue`, `:pqueue2`, `:pqueue3` and `:pqueue4` modules
functionality for easier use in Elixir and easier swapping of the implementations
"""
alias __MODULE__, as: This
alias Crawlie.Page
@valid_pqueue_modules [:pqueue, :pqueue2, :pqueue3, :pqueue4]
@type t :: %This{
module: atom,
data: term,
}
@enforce_keys [:module, :data]
defstruct [
:module,
:data,
]
#===========================================================================
# API functions
#===========================================================================
@spec new(atom) :: This.t
@doc """
Constructs a new `Crawlie.PqueueWrapper` priority queue with `module` as the
underlying implementation.
"""
def new(module) when module in @valid_pqueue_modules do
%This{
module: module,
data: module.new(),
}
end
@spec len(This.t) :: integer
@doc """
Returns the size of the underlying queue.
"""
def len(%This{module: module, data: data}) do
module.len(data)
end
@spec empty?(This.t) :: boolean
@doc """
Checks if this priority queue is empty.
"""
def empty?(%This{module: module, data: data}) do
module.is_empty(data)
end
@spec add_page(This.t, Page.t) :: This.t
@doc """
Adds a `Crawlie.Page` to this priority queue, treating its `depth` as priority -
the bigger the depth, the bigger the priority.
"""
def add_page(%This{module: module, data: data} = this, %Page{depth: depth} = page) do
p = get_priority(this, depth)
data = module.in(page, p, data)
%This{this | data: data}
end
@spec take(This.t) :: {This.t, term}
@doc """
Takes the element with the highest priority from the priority queue and returns
the priority queue without the element, along with the element itself.
"""
def take(%This{module: module, data: data} = this) do
{{_priority, item}, data} = module.out(data)
{%This{this | data: data}, item}
end
#---------------------------------------------------------------------------
# Public helper functions
#---------------------------------------------------------------------------
@doc """
based on the page depht gets a priority to use with the queue,
to utilize the particular pqueue's tusage best
"""
def get_priority(%This{module: :pqueue}, depth), do: -depth + 20
def get_priority(%This{module: :pqueue2}, depth), do: -depth
def get_priority(%This{module: :pqueue3}, depth), do: -depth
def get_priority(%This{module: :pqueue4}, depth), do: -depth + 128
@spec all(This.t) :: [term]
def all(this) do
_all(this, [])
end
defp _all(this, acc) do
if empty?(this) do
acc
else
{this, item} = take(this)
_all(this, [item | acc])
end
end
end
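# A minimal usage sketch (not part of this module); Crawlie.Page fields
# other than :depth are omitted and assumed to have defaults.
alias Crawlie.PqueueWrapper

pq = PqueueWrapper.new(:pqueue)
pq = PqueueWrapper.add_page(pq, %Crawlie.Page{depth: 0})
{pq, _page} = PqueueWrapper.take(pq)
true = PqueueWrapper.empty?(pq)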
# Source file: lib/crawlie/pqueue_wrapper.ex
defmodule Co2Offset.Donations.DonationCreationSchema do
use Ecto.Schema
import Ecto.Changeset
alias __MODULE__
alias Co2Offset.Converters
alias Co2Offset.Geo
schema "donations" do
field :iata_from, :string
field :iata_to, :string
field :original_city_from, :string
field :original_city_to, :string
field :original_distance, :integer
field :original_co2, :float
field :original_donation, :integer
field :airport_from, :map, virtual: true
field :airport_to, :map, virtual: true
timestamps()
end
def changeset(%DonationCreationSchema{} = donation_schema, attrs) do
donation_schema
|> cast(attrs, [:iata_from, :iata_to])
|> validate_required([:iata_from, :iata_to])
|> validate_length(:iata_from, is: 3)
|> validate_length(:iata_to, is: 3)
|> put_airports()
|> validate_required([:airport_from, :airport_to])
|> put_cities()
|> validate_required([:original_city_from, :original_city_to])
|> put_original_distance()
|> validate_required([:original_distance])
|> put_original_co2()
|> put_original_donation()
|> validate_required([:original_co2, :original_donation])
end
defp put_airports(changeset) do
case changeset do
%Ecto.Changeset{valid?: true, changes: %{iata_from: iata_from, iata_to: iata_to}} ->
airport_from = Geo.get_airport_by_iata(iata_from)
airport_to = Geo.get_airport_by_iata(iata_to)
changeset
|> put_change(:airport_from, airport_from)
|> put_change(:airport_to, airport_to)
_ ->
changeset
end
end
defp put_cities(changeset) do
case changeset do
%Ecto.Changeset{
valid?: true,
changes: %{airport_from: airport_from, airport_to: airport_to}
} ->
changeset
|> put_change(:original_city_from, airport_from.city)
|> put_change(:original_city_to, airport_to.city)
_ ->
changeset
end
end
defp put_original_distance(changeset) do
case changeset do
%Ecto.Changeset{
valid?: true,
changes: %{airport_from: airport_from, airport_to: airport_to}
} ->
original_distance = Geo.distance_between_airports(airport_from, airport_to)
changeset
|> put_change(:original_distance, original_distance)
_ ->
changeset
end
end
defp put_original_co2(changeset) do
case changeset do
%Ecto.Changeset{
valid?: true,
changes: %{original_distance: distance}
} ->
original_co2 = Converters.co2_from_plane_km(distance)
changeset
|> put_change(:original_co2, original_co2)
_ ->
changeset
end
end
defp put_original_donation(changeset) do
case changeset do
%Ecto.Changeset{
valid?: true,
changes: %{original_co2: original_co2}
} ->
original_donation = Converters.money_from_co2(original_co2)
changeset
|> put_change(:original_donation, original_donation)
_ ->
changeset
end
end
end
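# A minimal usage sketch (not part of this module); the IATA codes are
# illustrative and Repo handling is left out.
changeset =
  Co2Offset.Donations.DonationCreationSchema.changeset(
    %Co2Offset.Donations.DonationCreationSchema{},
    %{"iata_from" => "TXL", "iata_to" => "LIS"}
  )
# changeset.valid? is true only if both IATA codes resolve to known airports.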
# Source file: lib/co2_offset/donations/donation_creation_schema.ex
defmodule Calendar.ISO.Extension do
@moduledoc """
A module to extend the calendar implementation that follows ISO 8601 with methods found in
Elixir 1.5.1. This is to allow ESpec to support Elixir >= 1.3.4 more easily.
"""
@type year :: 0..9999
@type month :: 1..12
@type day :: 1..31
@seconds_per_minute 60
@seconds_per_hour 60 * 60
# Note that this does _not_ handle leap seconds.
@seconds_per_day 24 * 60 * 60
@microseconds_per_second 1_000_000
@parts_per_day @seconds_per_day * @microseconds_per_second
@days_per_nonleap_year 365
@days_per_leap_year 366
@doc """
Returns the `t:Calendar.iso_days` format of the specified date.
## Examples
iex> Calendar.ISO.Extension.naive_datetime_to_iso_days(0, 1, 1, 0, 0, 0, {0, 6})
{0, {0, 86400000000}}
iex> Calendar.ISO.Extension.naive_datetime_to_iso_days(2000, 1, 1, 12, 0, 0, {0, 6})
{730485, {43200000000, 86400000000}}
iex> Calendar.ISO.Extension.naive_datetime_to_iso_days(2000, 1, 1, 13, 0, 0, {0, 6})
{730485, {46800000000, 86400000000}}
"""
@spec naive_datetime_to_iso_days(
Calendar.year(),
Calendar.month(),
Calendar.day(),
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
) :: Calendar.iso_days()
def naive_datetime_to_iso_days(year, month, day, hour, minute, second, microsecond) do
{date_to_iso_days(year, month, day), time_to_day_fraction(hour, minute, second, microsecond)}
end
@doc """
Returns the normalized day fraction of the specified time.
## Examples
iex> Calendar.ISO.Extension.time_to_day_fraction(0, 0, 0, {0, 6})
{0, 86400000000}
iex> Calendar.ISO.Extension.time_to_day_fraction(12, 34, 56, {123, 6})
{45296000123, 86400000000}
"""
@spec time_to_day_fraction(
Calendar.hour(),
Calendar.minute(),
Calendar.second(),
Calendar.microsecond()
) :: Calendar.day_fraction()
def time_to_day_fraction(0, 0, 0, {0, _}) do
{0, @parts_per_day}
end
def time_to_day_fraction(hour, minute, second, {microsecond, _}) do
combined_seconds = hour * @seconds_per_hour + minute * @seconds_per_minute + second
{combined_seconds * @microseconds_per_second + microsecond, @parts_per_day}
end
# Converts year, month, day to count of days since 0000-01-01.
@doc false
def date_to_iso_days(0, 1, 1) do
0
end
def date_to_iso_days(1970, 1, 1) do
719_528
end
def date_to_iso_days(year, month, day) when year in 0..9999 do
true = day <= days_in_month(year, month)
days_in_previous_years(year) + days_before_month(month) +
  leap_day_offset(year, month) + day - 1
end
@doc """
Returns how many days there are in the given year-month.
## Examples
iex> Calendar.ISO.Extension.days_in_month(1900, 1)
31
iex> Calendar.ISO.Extension.days_in_month(1900, 2)
28
iex> Calendar.ISO.Extension.days_in_month(2000, 2)
29
iex> Calendar.ISO.Extension.days_in_month(2001, 2)
28
iex> Calendar.ISO.Extension.days_in_month(2004, 2)
29
iex> Calendar.ISO.Extension.days_in_month(2004, 4)
30
"""
@spec days_in_month(year, month) :: 28..31
def days_in_month(year, month)
def days_in_month(year, 2) do
if leap_year?(year), do: 29, else: 28
end
def days_in_month(_, month) when month in [4, 6, 9, 11], do: 30
def days_in_month(_, month) when month in 1..12, do: 31
@spec leap_year?(year) :: boolean()
def leap_year?(year) when is_integer(year) and year >= 0 do
rem(year, 4) === 0 and (rem(year, 100) > 0 or rem(year, 400) === 0)
end
# Note that this function does not add the extra leap day for a leap year.
# If you want to add that leap day when appropriate,
# add the result of leap_day_offset/2 to the result of days_before_month/1.
defp days_before_month(1), do: 0
defp days_before_month(2), do: 31
defp days_before_month(3), do: 59
defp days_before_month(4), do: 90
defp days_before_month(5), do: 120
defp days_before_month(6), do: 151
defp days_before_month(7), do: 181
defp days_before_month(8), do: 212
defp days_before_month(9), do: 243
defp days_before_month(10), do: 273
defp days_before_month(11), do: 304
defp days_before_month(12), do: 334
defp leap_day_offset(_year, month) when month < 3, do: 0
defp leap_day_offset(year, _month) do
if leap_year?(year), do: 1, else: 0
end
defp days_in_previous_years(0), do: 0
defp days_in_previous_years(year) do
previous_year = year - 1
Integer.Extension.floor_div(previous_year, 4) -
Integer.Extension.floor_div(previous_year, 100) +
Integer.Extension.floor_div(previous_year, 400) + previous_year * @days_per_nonleap_year +
@days_per_leap_year
end
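# Worked check of the formula above, for year 2000:
# floor_div(1999, 4) - floor_div(1999, 100) + floor_div(1999, 400) = 499 - 19 + 4 = 484
# leap days, plus 1999 * 365 = 729_635 ordinary days, plus the 366 days of
# year 0, giving 730_485 -- which matches the naive_datetime_to_iso_days
# doctest for 2000-01-01 above.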
@doc false
def iso_days_to_unit({days, {parts, ppd}}, unit) do
day_microseconds = days * @parts_per_day
microseconds = div(parts * @parts_per_day, ppd)
System.convert_time_unit(day_microseconds + microseconds, :microsecond, unit)
end
@doc false
# Note: A method to call naive_datetime_to_iso_days, which Elixir 1.5.1 has, but 1.3.4 does not
def to_iso_days(%{
calendar: _calendar,
year: year,
month: month,
day: day,
hour: hour,
minute: minute,
second: second,
microsecond: microsecond
}) do
Calendar.ISO.Extension.naive_datetime_to_iso_days(
year,
month,
day,
hour,
minute,
second,
microsecond
)
end
end
# Source file: lib/espec/extension/calendar_extension.ex
defmodule EctoAsStateMachine do
@moduledoc """
This package allows to use [finite state machine pattern](https://en.wikipedia.org/wiki/Finite-state_machine) in Ecto.
1. Add ecto_as_state_machine to your list of dependencies in `mix.exs`:
```elixir
def deps do
[{:ecto_as_state_machine, "~> 1.0"}]
end
```
2. Ensure ecto_as_state_machine is started before your application:
```elixir
def application do
[applications: [:ecto_as_state_machine]]
end
```
"""
alias EctoAsStateMachine.State
alias Ecto.Repo
alias Mix.Project
defmodule Helpers do
@moduledoc """
``` elixir
## Example model:
defmodule User do
use Web, :model
use EctoAsStateMachine
easm column: :state,
initial: :unconfirmed,
states: [:unconfirmed, :confirmed, :blocked, :admin],
events: [
[
name: :confirm,
from: [:unconfirmed],
to: :confirmed,
callback: fn(model) ->
# yeah, you can bring your own code to these functions.
Ecto.Changeset.change(model, confirmed_at: DateTime.utc_now |> DateTime.to_naive)
end
], [
name: :block,
from: [:confirmed, :admin],
to: :blocked
], [
name: :make_admin,
from: [:confirmed],
to: :admin
]
]
schema "users" do
field :state, :string
end
end
```
## Examples
user = Repo.get_by(User, id: 1)
#=> %User{}
new_user_changeset = User.confirm(user)
#=> %{changes: %{state: "confirmed"}}
Repo.update(new_user_changeset)
#=> true
new_user = User.confirm!(user)
#=> Or auto-transition user state to "confirmed". We can make him admin!
User.confirmed?(new_user)
#=> true
User.admin?(new_user)
#=> false
User.can_confirm?(new_user)
#=> false
User.can_make_admin?(new_user)
#=> true
new_user = User.make_admin!(new_user)
User.admin?(new_user)
#=> true
"""
@spec easm([repo: Repo, initial: String.t(),
column: atom, events: List.t(), states: List.t()]) :: term
defmacro easm(opts) do
app = Project.config[:app]
default_repo = Application.get_env(app, :ecto_repos, []) |> List.first
repo = Keyword.get(opts, :repo, default_repo)
valid_states = Keyword.get(opts, :states)
column = Keyword.get(opts, :column, :state)
initial = Keyword.get(opts, :initial)
events = Keyword.get(opts, :events)
|> Enum.map(fn(event) ->
Keyword.put_new(event, :callback, quote(do: fn(model) -> model end))
end)
|> Enum.map(fn(event) ->
Keyword.update!(event, :callback, &Macro.escape/1)
end)
function_prefix = if column == :state, do: nil, else: "#{column}_"
quote bind_quoted: [
valid_states: valid_states,
events: events,
column: column,
repo: repo,
initial: initial,
function_prefix: function_prefix
] do
def unquote(:"#{function_prefix}states")() do
unquote(valid_states)
end
def unquote(:"#{function_prefix}events")() do
unquote(events) |> Enum.map(fn(x) -> x[:name] end)
end
events
|> Enum.each(fn(event) ->
unless event[:to] in valid_states do
raise "Target state :#{event[:to]} is not present in states"
end
def unquote(event[:name])(model) do
State.update(%{
event: unquote(event),
model: model,
column: unquote(column),
states: unquote(valid_states),
initial: unquote(initial)
})
end
def unquote(:"#{event[:name]}!")(model) do
State.update!(%{
repo: unquote(repo),
event: unquote(event),
model: model,
column: unquote(column),
states: unquote(valid_states),
initial: unquote(initial)
})
end
def unquote(:"can_#{event[:name]}?")(model) do
State.can_event?(%{
event: unquote(event),
model: model,
column: unquote(column),
states: unquote(valid_states),
initial: unquote(initial)
})
end
end)
def unquote(:"#{function_prefix}next_state")(model) do
State.next_state(%{
events: unquote(events),
model: model,
column: unquote(column),
states: unquote(valid_states),
initial: unquote(initial)
})
end
valid_states
|> Enum.each(fn(state) ->
def unquote(:"#{state}?")(model) do
State.is_state?(%{
model: model,
column: unquote(column),
state: unquote(state),
states: unquote(valid_states),
initial: unquote(initial)
})
end
end)
def unquote(column)(model) do
"#{State.state_with_initial(
Map.get(model, unquote(column)),
%{states: unquote(valid_states), initial: unquote(initial)}
)}"
end
end
end
end
@doc false
defmacro __using__(_) do
quote do
import Helpers
end
end
end
# Source file: lib/ecto_as_state_machine.ex
defmodule AWS.AutoScaling do
@moduledoc """
Amazon EC2 Auto Scaling
Amazon EC2 Auto Scaling is designed to automatically launch or terminate
EC2 instances based on user-defined scaling policies, scheduled actions,
and health checks. Use this service with AWS Auto Scaling, Amazon
CloudWatch, and Elastic Load Balancing.
For more information, including information about granting IAM users
required permissions for Amazon EC2 Auto Scaling actions, see the [Amazon
EC2 Auto Scaling User
Guide](https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html).
"""
@doc """
Attaches one or more EC2 instances to the specified Auto Scaling group.
When you attach instances, Amazon EC2 Auto Scaling increases the desired
capacity of the group by the number of instances being attached. If the
number of instances being attached plus the desired capacity of the group
exceeds the maximum size of the group, the operation fails.
If there is a Classic Load Balancer attached to your Auto Scaling group,
the instances are also registered with the load balancer. If there are
target groups attached to your Auto Scaling group, the instances are also
registered with the target groups.
For more information, see [Attach EC2 Instances to Your Auto Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-instance-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_instances(client, input, options \\ []) do
request(client, "AttachInstances", input, options)
end
@doc """
Attaches one or more target groups to the specified Auto Scaling group.
To describe the target groups for an Auto Scaling group, call the
`DescribeLoadBalancerTargetGroups` API. To detach the target group from the
Auto Scaling group, call the `DetachLoadBalancerTargetGroups` API.
With Application Load Balancers and Network Load Balancers, instances are
registered as targets with a target group. With Classic Load Balancers,
instances are registered with the load balancer. For more information, see
[Attaching a Load Balancer to Your Auto Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_load_balancer_target_groups(client, input, options \\ []) do
request(client, "AttachLoadBalancerTargetGroups", input, options)
end
@doc """
Note: To attach an Application Load Balancer or a Network Load Balancer,
use the `AttachLoadBalancerTargetGroups` API operation instead.
Attaches one or more Classic Load Balancers to the specified Auto
Scaling group. Amazon EC2 Auto Scaling registers the running instances with
these Classic Load Balancers.
To describe the load balancers for an Auto Scaling group, call the
`DescribeLoadBalancers` API. To detach the load balancer from the Auto
Scaling group, call the `DetachLoadBalancers` API.
For more information, see [Attaching a Load Balancer to Your Auto Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def attach_load_balancers(client, input, options \\ []) do
request(client, "AttachLoadBalancers", input, options)
end
@doc """
Deletes one or more scheduled actions for the specified Auto Scaling group.
"""
def batch_delete_scheduled_action(client, input, options \\ []) do
request(client, "BatchDeleteScheduledAction", input, options)
end
@doc """
Creates or updates one or more scheduled scaling actions for an Auto
Scaling group. If you leave a parameter unspecified when updating a
scheduled scaling action, the corresponding value remains unchanged.
"""
def batch_put_scheduled_update_group_action(client, input, options \\ []) do
request(client, "BatchPutScheduledUpdateGroupAction", input, options)
end
@doc """
Cancels an instance refresh operation in progress. Cancellation does not
roll back any replacements that have already been completed, but it
prevents new replacements from being started.
For more information, see [Replacing Auto Scaling Instances Based on an
Instance
Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html).
"""
def cancel_instance_refresh(client, input, options \\ []) do
request(client, "CancelInstanceRefresh", input, options)
end
@doc """
Completes the lifecycle action for the specified token or instance with the
specified result.
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto
Scaling launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role
allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the
target.
3. Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.
4. If you need more time, record the lifecycle action heartbeat to
keep the instance in a pending state.
5. **If you finish before the timeout period ends, complete the
lifecycle action.**
For more information, see [Amazon EC2 Auto Scaling Lifecycle
Hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def complete_lifecycle_action(client, input, options \\ []) do
request(client, "CompleteLifecycleAction", input, options)
end
@doc """
Creates an Auto Scaling group with the specified name and attributes.
If you exceed your maximum limit of Auto Scaling groups, the call fails. To
query this limit, call the `DescribeAccountLimits` API. For information
about updating this limit, see [Amazon EC2 Auto Scaling Service
Quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
For introductory exercises for creating an Auto Scaling group, see [Getting
Started with Amazon EC2 Auto
Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/GettingStartedTutorial.html)
and [Tutorial: Set Up a Scaled and Load-Balanced
Application](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-register-lbs-with-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*. For more information, see
[Auto Scaling
Groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html)
in the *Amazon EC2 Auto Scaling User Guide*.
Every Auto Scaling group has three size parameters (`DesiredCapacity`,
`MaxSize`, and `MinSize`). Usually, you set these sizes based on a specific
number of instances. However, if you configure a mixed instances policy
that defines weights for the instance types, you must specify these sizes
with the same units that you use for weighting instances.
"""
def create_auto_scaling_group(client, input, options \\ []) do
request(client, "CreateAutoScalingGroup", input, options)
end
@doc """
Creates a launch configuration.
If you exceed your maximum limit of launch configurations, the call fails.
To query this limit, call the `DescribeAccountLimits` API. For information
about updating this limit, see [Amazon EC2 Auto Scaling Service
Quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
For more information, see [Launch
Configurations](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def create_launch_configuration(client, input, options \\ []) do
request(client, "CreateLaunchConfiguration", input, options)
end
@doc """
Creates or updates tags for the specified Auto Scaling group.
When you specify a tag with a key that already exists, the operation
overwrites the previous tag definition, and you do not get an error
message.
For more information, see [Tagging Auto Scaling Groups and
Instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def create_or_update_tags(client, input, options \\ []) do
request(client, "CreateOrUpdateTags", input, options)
end
@doc """
Deletes the specified Auto Scaling group.
If the group has instances or scaling activities in progress, you must
specify the option to force the deletion in order for it to succeed.
If the group has policies, deleting the group deletes the policies, the
underlying alarm actions, and any alarm that no longer has an associated
action.
To remove instances from the Auto Scaling group before deleting it, call
the `DetachInstances` API with the list of instances and the option to
decrement the desired capacity. This ensures that Amazon EC2 Auto Scaling
does not launch replacement instances.
To terminate all instances before deleting the Auto Scaling group, call the
`UpdateAutoScalingGroup` API and set the minimum size and desired capacity
of the Auto Scaling group to zero.
"""
def delete_auto_scaling_group(client, input, options \\ []) do
request(client, "DeleteAutoScalingGroup", input, options)
end
@doc """
Deletes the specified launch configuration.
The launch configuration must not be attached to an Auto Scaling group.
When this call completes, the launch configuration is no longer available
for use.
"""
def delete_launch_configuration(client, input, options \\ []) do
request(client, "DeleteLaunchConfiguration", input, options)
end
@doc """
Deletes the specified lifecycle hook.
If there are any outstanding lifecycle actions, they are completed first
(`ABANDON` for launching instances, `CONTINUE` for terminating instances).
"""
def delete_lifecycle_hook(client, input, options \\ []) do
request(client, "DeleteLifecycleHook", input, options)
end
@doc """
Deletes the specified notification.
"""
def delete_notification_configuration(client, input, options \\ []) do
request(client, "DeleteNotificationConfiguration", input, options)
end
@doc """
Deletes the specified scaling policy.
Deleting either a step scaling policy or a simple scaling policy deletes
the underlying alarm action, but does not delete the alarm, even if it no
longer has an associated action.
For more information, see [Deleting a Scaling
Policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def delete_policy(client, input, options \\ []) do
request(client, "DeletePolicy", input, options)
end
@doc """
Deletes the specified scheduled action.
"""
def delete_scheduled_action(client, input, options \\ []) do
request(client, "DeleteScheduledAction", input, options)
end
@doc """
Deletes the specified tags.
"""
def delete_tags(client, input, options \\ []) do
request(client, "DeleteTags", input, options)
end
@doc """
Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS
account.
For information about requesting an increase, see [Amazon EC2 Auto Scaling
Service
Quotas](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_account_limits(client, input, options \\ []) do
request(client, "DescribeAccountLimits", input, options)
end
@doc """
Describes the available adjustment types for Amazon EC2 Auto Scaling
scaling policies. These settings apply to step scaling policies and simple
scaling policies; they do not apply to target tracking scaling policies.
The following adjustment types are supported:
* ChangeInCapacity
* ExactCapacity
* PercentChangeInCapacity
"""
def describe_adjustment_types(client, input, options \\ []) do
request(client, "DescribeAdjustmentTypes", input, options)
end
@doc """
Describes one or more Auto Scaling groups.
"""
def describe_auto_scaling_groups(client, input, options \\ []) do
request(client, "DescribeAutoScalingGroups", input, options)
end
@doc """
Describes one or more Auto Scaling instances.
"""
def describe_auto_scaling_instances(client, input, options \\ []) do
request(client, "DescribeAutoScalingInstances", input, options)
end
@doc """
Describes the notification types that are supported by Amazon EC2 Auto
Scaling.
"""
def describe_auto_scaling_notification_types(client, input, options \\ []) do
request(client, "DescribeAutoScalingNotificationTypes", input, options)
end
@doc """
Describes one or more instance refreshes.
You can determine the status of a request by looking at the `Status`
parameter. The following are the possible statuses:
* `Pending` - The request was created, but the operation has not
started.
* `InProgress` - The operation is in progress.
* `Successful` - The operation completed successfully.
* `Failed` - The operation failed to complete. You can
troubleshoot using the status reason and the scaling activities.
* `Cancelling` - An ongoing operation is being cancelled.
Cancellation does not roll back any replacements that have already been
completed, but it prevents new replacements from being started.
* `Cancelled` - The operation is cancelled.
For more information, see [Replacing Auto Scaling Instances
Based on an Instance
Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html).
"""
def describe_instance_refreshes(client, input, options \\ []) do
request(client, "DescribeInstanceRefreshes", input, options)
end
@doc """
Describes one or more launch configurations.
"""
def describe_launch_configurations(client, input, options \\ []) do
request(client, "DescribeLaunchConfigurations", input, options)
end
@doc """
Describes the available types of lifecycle hooks.
The following hook types are supported:
* autoscaling:EC2_INSTANCE_LAUNCHING
* autoscaling:EC2_INSTANCE_TERMINATING
"""
def describe_lifecycle_hook_types(client, input, options \\ []) do
request(client, "DescribeLifecycleHookTypes", input, options)
end
@doc """
Describes the lifecycle hooks for the specified Auto Scaling group.
"""
def describe_lifecycle_hooks(client, input, options \\ []) do
request(client, "DescribeLifecycleHooks", input, options)
end
@doc """
Describes the target groups for the specified Auto Scaling group.
"""
def describe_load_balancer_target_groups(client, input, options \\ []) do
request(client, "DescribeLoadBalancerTargetGroups", input, options)
end
@doc """
Describes the load balancers for the specified Auto Scaling group.
This operation describes only Classic Load Balancers. If you have
Application Load Balancers or Network Load Balancers, use the
`DescribeLoadBalancerTargetGroups` API instead.
"""
def describe_load_balancers(client, input, options \\ []) do
request(client, "DescribeLoadBalancers", input, options)
end
@doc """
Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling.
The `GroupStandbyInstances` metric is not returned by default. You must
explicitly request this metric when calling the `EnableMetricsCollection`
API.
"""
def describe_metric_collection_types(client, input, options \\ []) do
request(client, "DescribeMetricCollectionTypes", input, options)
end
@doc """
Describes the notification actions associated with the specified Auto
Scaling group.
"""
def describe_notification_configurations(client, input, options \\ []) do
request(client, "DescribeNotificationConfigurations", input, options)
end
@doc """
Describes the policies for the specified Auto Scaling group.
"""
def describe_policies(client, input, options \\ []) do
request(client, "DescribePolicies", input, options)
end
@doc """
Describes one or more scaling activities for the specified Auto Scaling
group.
"""
def describe_scaling_activities(client, input, options \\ []) do
request(client, "DescribeScalingActivities", input, options)
end
@doc """
Describes the scaling process types for use with the `ResumeProcesses` and
`SuspendProcesses` APIs.
"""
def describe_scaling_process_types(client, input, options \\ []) do
request(client, "DescribeScalingProcessTypes", input, options)
end
@doc """
Describes the actions scheduled for your Auto Scaling group that haven't
run or that have not reached their end time. To describe the actions that
have already run, call the `DescribeScalingActivities` API.
"""
def describe_scheduled_actions(client, input, options \\ []) do
request(client, "DescribeScheduledActions", input, options)
end
@doc """
Describes the specified tags.
You can use filters to limit the results. For example, you can query for
the tags for a specific Auto Scaling group. You can specify multiple values
for a filter. A tag must match at least one of the specified values for it
to be included in the results.
You can also specify multiple filters. The result includes information for
a particular tag only if it matches all the filters. If there's no match,
no special message is returned.
For more information, see [Tagging Auto Scaling Groups and
Instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_tags(client, input, options \\ []) do
request(client, "DescribeTags", input, options)
end
@doc """
Describes the termination policies supported by Amazon EC2 Auto Scaling.
For more information, see [Controlling Which Auto Scaling Instances
Terminate During Scale
In](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def describe_termination_policy_types(client, input, options \\ []) do
request(client, "DescribeTerminationPolicyTypes", input, options)
end
@doc """
Removes one or more instances from the specified Auto Scaling group.
After the instances are detached, you can manage them independent of the
Auto Scaling group.
If you do not specify the option to decrement the desired capacity, Amazon
EC2 Auto Scaling launches instances to replace the ones that are detached.
If there is a Classic Load Balancer attached to the Auto Scaling group, the
instances are deregistered from the load balancer. If there are target
groups attached to the Auto Scaling group, the instances are deregistered
from the target groups.
For more information, see [Detach EC2 Instances from Your Auto Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/detach-instance-asg.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def detach_instances(client, input, options \\ []) do
request(client, "DetachInstances", input, options)
end
@doc """
Detaches one or more target groups from the specified Auto Scaling group.
"""
def detach_load_balancer_target_groups(client, input, options \\ []) do
request(client, "DetachLoadBalancerTargetGroups", input, options)
end
@doc """
Detaches one or more Classic Load Balancers from the specified Auto Scaling
group.
This operation detaches only Classic Load Balancers. If you have
Application Load Balancers or Network Load Balancers, use the
`DetachLoadBalancerTargetGroups` API instead.
When you detach a load balancer, it enters the `Removing` state while
deregistering the instances in the group. When all instances are
deregistered, then you can no longer describe the load balancer using the
`DescribeLoadBalancers` API call. The instances remain running.
"""
def detach_load_balancers(client, input, options \\ []) do
request(client, "DetachLoadBalancers", input, options)
end
@doc """
Disables group metrics for the specified Auto Scaling group.
"""
def disable_metrics_collection(client, input, options \\ []) do
request(client, "DisableMetricsCollection", input, options)
end
@doc """
Enables group metrics for the specified Auto Scaling group. For more
information, see [Monitoring Your Auto Scaling Groups and
Instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-monitoring.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def enable_metrics_collection(client, input, options \\ []) do
request(client, "EnableMetricsCollection", input, options)
end
@doc """
Moves the specified instances into the standby state.
If you choose to decrement the desired capacity of the Auto Scaling group,
the instances can enter standby as long as the desired capacity of the Auto
Scaling group after the instances are placed into standby is equal to or
greater than the minimum capacity of the group.
If you choose not to decrement the desired capacity of the Auto Scaling
group, the Auto Scaling group launches new instances to replace the
instances on standby.
For more information, see [Temporarily Removing Instances from Your Auto
Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def enter_standby(client, input, options \\ []) do
request(client, "EnterStandby", input, options)
end
@doc """
Executes the specified policy. This can be useful for testing the design of
your scaling policy.
"""
def execute_policy(client, input, options \\ []) do
request(client, "ExecutePolicy", input, options)
end
@doc """
Moves the specified instances out of the standby state.
After you put the instances back in service, the desired capacity is
incremented.
For more information, see [Temporarily Removing Instances from Your Auto
Scaling
Group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enter-exit-standby.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def exit_standby(client, input, options \\ []) do
request(client, "ExitStandby", input, options)
end
@doc """
Creates or updates a lifecycle hook for the specified Auto Scaling group.
A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an
instance when the instance launches (before it is put into service) or as
the instance terminates (before it is fully terminated).
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto
Scaling launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role
allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the
target.
3. **Create the lifecycle hook. Specify whether the hook is used
when the instances launch or terminate.**
4. If you need more time, record the lifecycle action heartbeat to
keep the instance in a pending state using the
`RecordLifecycleActionHeartbeat` API call.
5. If you finish before the timeout period ends, complete the
lifecycle action using the `CompleteLifecycleAction` API call.
For more information, see [Amazon EC2 Auto Scaling Lifecycle
Hooks](https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html)
in the *Amazon EC2 Auto Scaling User Guide*.
If you exceed your maximum limit of lifecycle hooks, which by default is 50
per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using the
`DescribeLifecycleHooks` API call. If you are no longer using a lifecycle
hook, you can delete it by calling the `DeleteLifecycleHook` API.
"""
def put_lifecycle_hook(client, input, options \\ []) do
request(client, "PutLifecycleHook", input, options)
end
@doc """
Configures an Auto Scaling group to send notifications when specified
events take place. Subscribers to the specified topic can have messages
delivered to an endpoint such as a web server or an email address.
This configuration overwrites any existing configuration.
For more information, see [Getting Amazon SNS Notifications When Your Auto
Scaling Group
Scales](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def put_notification_configuration(client, input, options \\ []) do
request(client, "PutNotificationConfiguration", input, options)
end
@doc """
Creates or updates a scaling policy for an Auto Scaling group.
For more information about using scaling policies to scale your Auto
Scaling group, see [Target Tracking Scaling
Policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html)
and [Step and Simple Scaling
Policies](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def put_scaling_policy(client, input, options \\ []) do
request(client, "PutScalingPolicy", input, options)
end
@doc """
Creates or updates a scheduled scaling action for an Auto Scaling group. If
you leave a parameter unspecified when updating a scheduled scaling action,
the corresponding value remains unchanged.
For more information, see [Scheduled
Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def put_scheduled_update_group_action(client, input, options \\ []) do
request(client, "PutScheduledUpdateGroupAction", input, options)
end
@doc """
Records a heartbeat for the lifecycle action associated with the specified
token or instance. This extends the timeout by the length of time defined
using the `PutLifecycleHook` API call.
This step is a part of the procedure for adding a lifecycle hook to an Auto
Scaling group:
1. (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2 Auto
Scaling launches or terminates instances.
2. (Optional) Create a notification target and an IAM role. The
target can be either an Amazon SQS queue or an Amazon SNS topic. The role
allows Amazon EC2 Auto Scaling to publish lifecycle notifications to the
target.
3. Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.
4. **If you need more time, record the lifecycle action heartbeat
to keep the instance in a pending state.**
5. If you finish before the timeout period ends, complete the
lifecycle action.
For more information, see [Auto Scaling
Lifecycle](https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def record_lifecycle_action_heartbeat(client, input, options \\ []) do
request(client, "RecordLifecycleActionHeartbeat", input, options)
end
@doc """
Resumes the specified suspended automatic scaling processes, or all
suspended processes, for the specified Auto Scaling group.
For more information, see [Suspending and Resuming Scaling
Processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def resume_processes(client, input, options \\ []) do
request(client, "ResumeProcesses", input, options)
end
@doc """
Sets the size of the specified Auto Scaling group.
If a scale-in activity occurs as a result of a new `DesiredCapacity` value
that is lower than the current size of the group, the Auto Scaling group
uses its termination policy to determine which instances to terminate.
For more information, see [Manual
Scaling](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def set_desired_capacity(client, input, options \\ []) do
request(client, "SetDesiredCapacity", input, options)
end
@doc """
Sets the health status of the specified instance.
For more information, see [Health Checks for Auto Scaling
Instances](https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def set_instance_health(client, input, options \\ []) do
request(client, "SetInstanceHealth", input, options)
end
@doc """
Updates the instance protection settings of the specified instances.
For more information about preventing instances that are part of an Auto
Scaling group from terminating on scale in, see [Instance
Protection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def set_instance_protection(client, input, options \\ []) do
request(client, "SetInstanceProtection", input, options)
end
@doc """
Starts a new instance refresh operation, which triggers a rolling
replacement of all previously launched instances in the Auto Scaling group
with a new group of instances.
If successful, this call creates a new instance refresh request with a
unique ID that you can use to track its progress. To query its status, call
the `DescribeInstanceRefreshes` API. To describe the instance refreshes
that have already run, call the `DescribeInstanceRefreshes` API. To cancel
an instance refresh operation in progress, use the `CancelInstanceRefresh`
API.
For more information, see [Replacing Auto Scaling Instances Based on an
Instance
Refresh](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html).
"""
def start_instance_refresh(client, input, options \\ []) do
request(client, "StartInstanceRefresh", input, options)
end
@doc """
Suspends the specified automatic scaling processes, or all processes, for
the specified Auto Scaling group.
If you suspend either the `Launch` or `Terminate` process types, it can
prevent other process types from functioning properly. For more
information, see [Suspending and Resuming Scaling
Processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html)
in the *Amazon EC2 Auto Scaling User Guide*.
To resume processes that have been suspended, call the `ResumeProcesses`
API.
"""
def suspend_processes(client, input, options \\ []) do
request(client, "SuspendProcesses", input, options)
end
@doc """
Terminates the specified instance and optionally adjusts the desired group
size.
This call simply makes a termination request. The instance is not
terminated immediately. When an instance is terminated, the instance status
changes to `terminated`. You can't connect to or start an instance after
you've terminated it.
If you do not specify the option to decrement the desired capacity, Amazon
EC2 Auto Scaling launches instances to replace the ones that are
terminated.
By default, Amazon EC2 Auto Scaling balances instances across all
Availability Zones. If you decrement the desired capacity, your Auto
Scaling group can become unbalanced between Availability Zones. Amazon EC2
Auto Scaling tries to rebalance the group, and rebalancing might terminate
instances in other zones. For more information, see [Rebalancing
Activities](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage)
in the *Amazon EC2 Auto Scaling User Guide*.
"""
def terminate_instance_in_auto_scaling_group(client, input, options \\ []) do
request(client, "TerminateInstanceInAutoScalingGroup", input, options)
end
@doc """
Updates the configuration for the specified Auto Scaling group.
To update an Auto Scaling group, specify the name of the group and the
parameter that you want to change. Any parameters that you don't specify
are not changed by this update request. The new settings take effect on any
scaling activities after this call returns.
If you associate a new launch configuration or template with an Auto
Scaling group, all new instances will get the updated configuration.
Existing instances continue to run with the configuration that they were
originally launched with. When you update a group to specify a mixed
instances policy instead of a launch configuration or template, existing
instances may be replaced to match the new purchasing options that you
specified in the policy. For example, if the group currently has 100%
On-Demand capacity and the policy specifies 50% Spot capacity, this means
that half of your instances will be gradually terminated and relaunched as
Spot Instances. When replacing instances, Amazon EC2 Auto Scaling launches
new instances before terminating the old ones, so that updating your group
does not compromise the performance or availability of your application.
Note the following about changing `DesiredCapacity`, `MaxSize`, or
`MinSize`:
* If a scale-in activity occurs as a result of a new
`DesiredCapacity` value that is lower than the current size of the group,
the Auto Scaling group uses its termination policy to determine which
instances to terminate.
* If you specify a new value for `MinSize` without specifying a
value for `DesiredCapacity`, and the new `MinSize` is larger than the
current size of the group, this sets the group's `DesiredCapacity` to the
new `MinSize` value.
* If you specify a new value for `MaxSize` without specifying a
value for `DesiredCapacity`, and the new `MaxSize` is smaller than the
current size of the group, this sets the group's `DesiredCapacity` to the
new `MaxSize` value.
To see which parameters have been set, call the
`DescribeAutoScalingGroups` API. To view the scaling policies for an Auto
Scaling group, call the `DescribePolicies` API. If the group has scaling
policies, you can update them by calling the `PutScalingPolicy` API.
"""
def update_auto_scaling_group(client, input, options \\ []) do
request(client, "UpdateAutoScalingGroup", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "autoscaling"}
host = build_host("autoscaling", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-www-form-urlencoded"}
]
input = Map.merge(input, %{"Action" => action, "Version" => "2011-01-01"})
payload = AWS.Util.encode_query(input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
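# A minimal usage sketch (not part of this module). The %AWS.Client{}
# field names below are assumptions inferred from build_host/2 and
# build_url/2 above; credential handling depends on your AWS client setup.
client = %AWS.Client{
  access_key_id: System.get_env("AWS_ACCESS_KEY_ID"),
  secret_access_key: System.get_env("AWS_SECRET_ACCESS_KEY"),
  region: "us-east-1",
  endpoint: "amazonaws.com",
  proto: "https",
  port: 443
}

{:ok, result, _response} =
  AWS.AutoScaling.describe_auto_scaling_groups(client, %{"MaxRecords" => 10})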
# Source file: lib/aws/auto_scaling.ex
defmodule Broker.Collector.Distributor do
@moduledoc """
Documentation for Broker.Collector.Distributor.
This lightweight processor will receive all the
flow of transactions from tx_feeder(s) or sn_feeder(s) and it will
insert them to FIFO queue and will receive ask
requests from tx_validator(s) or sn_feeder(s).
"""
alias Broker.Collector.Distributor.Helper
@interval Application.get_env(:broker, :__INTERVAL__) || 1000
require Logger
use GenStage
@spec start_link(Keyword.t) :: tuple
def start_link(args) do
name = name_by_topic(args)
GenStage.start_link(__MODULE__, [name: name]++args, name: name)
end
@spec init(Keyword.t) :: tuple
def init(args) do
Process.put(:name, args[:name])
state = %{queue: :queue.new(), demand: 0}
{:producer, state}
end
@spec handle_subscribe(atom, tuple | list, tuple, tuple) :: tuple
def handle_subscribe(:consumer, _, _from, state) do
Logger.info("Distributor: #{Process.get(:name)} got subscribed_to Validator")
{:automatic, state}
end
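# Serve as much of the current demand as the queue allows; any unmet
# demand is retried on @interval until new events arrive from the feeder(s).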
@spec handle_demand(integer,tuple) :: tuple
def handle_demand(demand,%{queue: queue,demand: pending_demand} = state) do
total_demand = pending_demand+demand
{events, queue} = Helper.take_unique(queue, total_demand)
pending_demand = total_demand-length(events)
if pending_demand != 0 do
Process.send_after(self(), :demand, @interval)
end
{:noreply, events, %{state | demand: pending_demand, queue: queue}}
end
def handle_info(:demand, %{demand: 0} = state) do
{:noreply, [], state}
end
def handle_info(:demand, %{queue: queue, demand: pending_demand} = state) do
{events, queue} = Helper.take_unique(queue, pending_demand)
pending_demand = pending_demand - length(events)
if pending_demand != 0 do
Process.send_after(self(), :demand, @interval)
end
{:noreply, events, %{state | demand: pending_demand, queue: queue}}
end
@doc """
Here the Distributor receives the transaction flow from the feeder(s)
and inserts it into the queue.
"""
@spec handle_cast(tuple, map) :: tuple
def handle_cast({:event, event}, %{queue: queue} = state) do
queue = :queue.in(event, queue)
{:noreply, [], %{state | queue: queue}}
end
defp name_by_topic(args) do
case args[:topic] do
:tx_trytes ->
:tx_distributor
:sn_trytes ->
:sn_distributor
end
end
def child_spec(args) do
%{
id: name_by_topic(args),
start: {__MODULE__, :start_link, [args]},
type: :worker,
restart: :permanent,
shutdown: 500
}
end
end
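# --- Usage sketch (assumptions noted inline) ---
# A feeder pushes events into the distributor with `GenStage.cast/2`, which is
# handled by the `handle_cast/2` clause above; a validator consumer then
# subscribes to drain the FIFO queue. The event payload shape is a placeholder.
{:ok, _pid} = Broker.Collector.Distributor.start_link(topic: :tx_trytes)
GenStage.cast(:tx_distributor, {:event, %{hash: "TX1", trytes: "..."}})
# A validator stage would then subscribe, e.g.:
#   GenStage.sync_subscribe(validator_pid, to: :tx_distributor)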
|
apps/broker/lib/collector/distributor/distributor.ex
| 0.777469
| 0.402891
|
distributor.ex
|
starcoder
|
defmodule Snap.Indexes do
@moduledoc """
Helper functions around index management.
"""
alias Snap.Bulk
alias Snap
@doc """
Creates an index.
"""
@spec create(module(), String.t(), map(), Keyword.t()) :: Snap.Cluster.result()
def create(cluster, index, mapping, opts \\ []) do
Snap.put(cluster, "/#{index}", mapping, [], [], opts)
end
@doc """
Deletes an index.
"""
@spec delete(module(), String.t(), Keyword.t()) :: Snap.Cluster.result()
def delete(cluster, index, opts \\ []) do
Snap.delete(cluster, "/#{index}", [], [], opts)
end
@doc """
Creates and loads a new index, switching the alias to it with zero downtime.
Takes an `Enumerable` of `Snap.Bulk` actions and builds a new timestamped
index from it. It then refreshes the index, points the alias at it, and
cleans up older indexes, keeping only the new index and its immediate
predecessor.
May return `t:Snap.Cluster.error/0` or a `Snap.BulkError` containing a list
of failed bulk actions.
"""
@spec hotswap(Enumerable.t(), module(), String.t(), map(), Keyword.t()) ::
:ok | Snap.Cluster.error() | {:error, Snap.BulkError.t()}
def hotswap(stream, cluster, alias, mapping, opts \\ []) do
index = generate_index_name(alias)
with {:ok, _} <- create(cluster, index, mapping, opts),
:ok <- Bulk.perform(stream, cluster, index, opts),
:ok <- refresh(cluster, index),
:ok <- alias(cluster, index, alias),
:ok <- cleanup(cluster, alias, 2, opts) do
:ok
end
end
@doc """
Refreshes an index.
"""
@spec refresh(cluster :: module(), index :: String.t(), opts :: Keyword.t()) ::
:ok | Snap.Cluster.error()
def refresh(cluster, index, opts \\ []) do
with {:ok, _} <- Snap.post(cluster, "/#{index}/_refresh", nil, [], [], opts) do
:ok
end
end
@doc """
Creates an alias for a versioned index, removing any existing aliases.
"""
@spec alias(module(), String.t(), String.t(), Keyword.t()) :: :ok | Snap.Cluster.error()
def alias(cluster, index, alias, opts \\ []) do
with {:ok, indexes} <- list_starting_with(cluster, alias, opts) do
indexes = Enum.reject(indexes, &(&1 == index))
remove_actions =
Enum.map(indexes, fn i ->
%{"remove" => %{"index" => i, "alias" => alias}}
end)
actions = %{
"actions" => remove_actions ++ [%{"add" => %{"index" => index, "alias" => alias}}]
}
with {:ok, _response} <- Snap.post(cluster, "/_aliases", actions), do: :ok
end
end
@doc """
Lists all the indexes in the cluster.
"""
@spec list(module(), Keyword.t()) :: {:ok, list(String.t())} | Snap.Cluster.error()
def list(cluster, opts \\ []) do
with {:ok, indexes} <- Snap.get(cluster, "/_cat/indices", [format: "json"], [], opts) do
indexes =
indexes
|> Enum.map(& &1["index"])
|> Enum.sort()
{:ok, indexes}
end
end
@doc """
Lists all the timestamp versioned indexes starting with the prefix.
"""
@spec list_starting_with(module(), String.t(), Keyword.t()) ::
{:ok, list(String.t())} | Snap.Cluster.error()
def list_starting_with(cluster, prefix, opts \\ []) do
with {:ok, indexes} <- Snap.get(cluster, "/_cat/indices", [format: "json"], [], opts) do
prefix = prefix |> to_string() |> Regex.escape()
{:ok, regex} = Regex.compile("^#{prefix}-[0-9]+$")
indexes =
indexes
|> Enum.map(& &1["index"])
|> Enum.filter(&Regex.match?(regex, &1))
|> Enum.sort_by(&sort_index_by_timestamp/1)
{:ok, indexes}
end
end
@doc """
Deletes older timestamped indexes.
"""
@spec cleanup(module(), String.t(), non_neg_integer(), Keyword.t()) ::
:ok | Snap.Cluster.error()
def cleanup(cluster, alias, preserve \\ 2, opts \\ []) do
with {:ok, indexes} <- list_starting_with(cluster, alias, opts) do
indexes
|> Enum.sort_by(&sort_index_by_timestamp/1, &>=/2)
|> Enum.drop(preserve)
|> Enum.reduce_while(:ok, fn index, ok ->
case delete(cluster, index, opts) do
{:ok, _} -> {:cont, ok}
{:error, _} = err -> {:halt, err}
end
end)
end
end
defp generate_index_name(alias) do
ts = generate_alias_timestamp()
"#{alias}-#{ts}"
end
defp generate_alias_timestamp do
DateTime.to_unix(DateTime.utc_now(), :microsecond)
end
defp sort_index_by_timestamp(index) do
index
|> String.split("-")
|> Enum.at(-1)
|> String.to_integer()
end
end
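# --- Usage sketch ---
# Hotswapping a "products" alias onto a freshly built index. `MyApp.Cluster`
# is a hypothetical `Snap.Cluster` module, and the exact fields of
# `Snap.Bulk.Action.Create` are assumed from the Snap library's bulk API.
actions = [
  %Snap.Bulk.Action.Create{_id: 1, doc: %{name: "Red shoe"}},
  %Snap.Bulk.Action.Create{_id: 2, doc: %{name: "Blue shoe"}}
]

mapping = %{"mappings" => %{"properties" => %{"name" => %{"type" => "text"}}}}

:ok = Snap.Indexes.hotswap(actions, MyApp.Cluster, "products", mapping)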
|
lib/snap/indexes/indexes.ex
| 0.753013
| 0.669493
|
indexes.ex
|
starcoder
|
defmodule Wargaming.Warships.Account do
@moduledoc """
Account provides functions for interacting with the
WarGaming.net World of Warships Account API.
"""
use Wargaming.ApiEndpoint, api: Wargaming.Warships
@account_list "/account/list/"
@account_info "/account/info/"
@account_achievements "/account/achievements/"
@account_stats "/account/statsbydate/"
@doc """
Account.search/2 searches WarGaming accounts (in the configured region) and returns a list of matching nicknames with account IDs.
[Official Reference](https://developers.wargaming.net/reference/all/wows/account/info/?application_id=123456&run=1)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
* `limit` : Number of entries to return. No more than 100, but fewer can be returned.
* `type` : Search type. Default is "startswith". Available options:
- "startswith" — Search by initial characters of name. Min-length: 3. Max-length: 24
- "exact" - Search by exact (case-insensitive)
"""
def search(names, opts \\ %{}) do
constructed_get(:search, names, @account_list, opts)
end
@doc """
Account.by_id/2 looks up WarGaming accounts (in the configured region) by a list of account_ids and returns detailed information.
[Official Reference](https://developers.wargaming.net/reference/all/wows/account/info/?application_id=123456&r_realm=na)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `access_token` : Token for accessing private data on account.
* `extra` : Additional fields to return. See [Official Reference](https://developers.wargaming.net/reference/all/wows/account/info/?application_id=123456&r_realm=na) for more information.
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def by_id(account_ids, opts \\ %{}) do
constructed_get(:account_id, account_ids, @account_info, opts)
end
@doc """
Account.achievements/2 returns information about a player's
in-game achievements.
[Official References](https://developers.wargaming.net/reference/all/wows/account/achievements/?application_id=123456&r_realm=na)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `access_token` : Token for accessing private data on account.
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def achievements(account_ids, opts \\ %{}) do
constructed_get(:account_id, account_ids, @account_achievements, opts)
end
@doc """
Account.stats_for_dates/3 returns the stats for the given accounts in the given date or date range.
[Official Reference](https://developers.wargaming.net/reference/all/wows/account/statsbydate/?application_id=123456&r_realm=na)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `access_token` : Token for accessing private data on account.
* `dates` : List of dates to return statistics for. Max date range of 28 days from the current date. Defaults to yesterday. Maximum number of days is 10.
* `extra` : Additional fields to return. See [Official Reference](https://developers.wargaming.net/reference/all/wows/account/statsbydate/?application_id=123456&r_realm=na) for more information.
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def stats_for_dates(account_ids, dates, opts \\ %{}) do
date_strings =
dates
|> Enum.map(fn date ->
Date.to_iso8601(date, :basic)
end)
|> Enum.join(",")
opts =
opts
|> Map.merge(%{dates: date_strings})
constructed_get(:account_id, account_ids, @account_stats, opts)
end
@doc """
See [stats_for_dates](#stats_for_dates/3). Uses the API's default date of yesterday.
"""
def stats_for_yesterday(account_ids, opts \\ %{}) do
constructed_get(:account_id, account_ids, @account_stats, opts)
end
end
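# --- Usage sketch (hypothetical account IDs) ---
# Searching for accounts, fetching details, and pulling stats for a date
# range. The "data" envelope in the response map follows the WarGaming API
# convention but is not guaranteed by this module.
{:ok, %{"data" => _matches}} = Wargaming.Warships.Account.search("nickname")
{:ok, _info} = Wargaming.Warships.Account.by_id("1000000000", %{language: "en"})

{:ok, _stats} =
  Wargaming.Warships.Account.stats_for_dates(
    "1000000000",
    [~D[2020-01-01], ~D[2020-01-02]]
  )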
|
lib/wargaming/warships/account.ex
| 0.861145
| 0.571288
|
account.ex
|
starcoder
|
defmodule ExPokerEval do
@moduledoc """
Documentation for ExPokerEval.
"""
alias ExPokerEval.Card
alias ExPokerEval.Rank
@doc """
Gets the highest ranked hand from the two given hands
"""
def get_highest(black: b_hand, white: w_hand) do
with {:ok, b_cards} <- Card.parse_hand(b_hand),
{:ok, w_cards} <- Card.parse_hand(w_hand),
comparison <- compare(b_cards, w_cards)
do
comparison |> decorate_result
else
{:error, msg} -> {:error, msg}
_ -> {:error, :other}
end
end
@doc """
Compares two sets of parsed cards recursively until
a winner is found, or returns :tie
"""
def compare(b, w), do: compare(b, w, 0)
def compare(b_cards, w_cards, offset) do
{b_idx, b_rank, b_value} = Rank.highest(b_cards, offset)
{w_idx, w_rank, w_value} = Rank.highest(w_cards, offset)
cond do
b_idx < w_idx -> {:black, b_rank, b_value}
b_idx > w_idx -> {:white, w_rank, w_value}
b_value > w_value -> {:black, b_rank, b_value}
b_value < w_value -> {:white, w_rank, w_value}
b_rank == :high_card -> compare_high_cards(b_cards, w_cards)
true -> compare(b_cards, w_cards, offset + 1)
end
end
@doc """
Compares two sets of cards just by their highest cards
"""
def compare_high_cards(b_cards, w_cards) do
b_values = b_cards |> Enum.map(&(&1[:value])) |> Enum.reverse
w_values = w_cards |> Enum.map(&(&1[:value])) |> Enum.reverse
distinct_pair = Enum.zip(b_values, w_values)
|> Enum.reject(fn {b, w} -> b == w end)
|> List.first
case distinct_pair do
{b, w} when b > w -> {:black, :high_card, b}
{b, w} when b < w -> {:white, :high_card, w}
_ -> :tie
end
end
@doc """
Processes the result to use symbolic values instead of numeric unless is a tie.
## Examples
```
iex>ExPokerEval.decorate_result(:tie)
:tie
iex>ExPokerEval.decorate_result({:white, :flush, 14})
{:white, :flush, "Ace"}
```
"""
def decorate_result(:tie), do: :tie
def decorate_result({winner, rank, value}), do: {winner, rank, Card.num_to_sym(value)}
end
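# --- Usage sketch ---
# Comparing two hands given as strings. The "2H 3D 5S 9C KD" hand format is an
# assumption about `Card.parse_hand/1`, which is defined in another file.
case ExPokerEval.get_highest(black: "2H 3D 5S 9C KD", white: "2C 3H 4S 8C AH") do
  {:white, :high_card, "Ace"} -> IO.puts("White wins with high card: Ace")
  {winner, rank, value} -> IO.puts("#{winner} wins: #{rank} #{value}")
  :tie -> IO.puts("Tie")
  {:error, msg} -> IO.puts("Invalid hand: #{inspect(msg)}")
end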
|
lib/ex_poker_eval.ex
| 0.832679
| 0.653061
|
ex_poker_eval.ex
|
starcoder
|
defmodule LinePay.PreapprovedPay do
@moduledoc """
Functions for working with pre-approved payments at LINE Pay. Through this
API you can create, check, and expire pre-approved payments using the reg
key returned by the Reserve API.
"""
@endpoint "payments/preapprovedPay"
@doc """
Create a pre approved payment.
Creates a pre approved payment by reg key returned on Reserve API.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.create("reg_key", %{productName: "foobar", amount: 1000, currency: "JPY", orderId: "#1"})
{:ok, payment} = LinePay.PreapprovedPay.create("reg_key", %{productName: "foobar", amount: 1000, currency: "JPY", orderId: "#1", capture: false})
"""
def create(reg_key, params \\ %{}) do
create(LinePay.config_or_env_channel_id(), LinePay.config_or_env_key(), reg_key, params)
end
@doc """
Create a pre approved payment.
Creates a pre approved payment by reg key returned on Reserve API.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.create("channel_id", "api_key", "reg_key", %{productName: "foobar", amount: 1000, currency: "JPY", orderId: "#1"})
{:ok, payment} = LinePay.PreapprovedPay.create("channel_id", "api_key", "reg_key", %{productName: "foobar", amount: 1000, currency: "JPY", orderId: "#1", capture: false})
"""
def create(channel_id, key, reg_key, params) do
LinePay.make_request_with_key(
:post,
"#{@endpoint}/#{reg_key}/payment",
channel_id,
key,
params
)
|> LinePay.Util.handle_line_pay_response()
end
@doc """
Check a pre approved payment.
Checks a pre approved payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.check("reg_key")
{:ok, payment} = LinePay.PreapprovedPay.check("reg_key", %{creditCardAuth: true})
"""
def check(reg_key, params \\ []) do
check(LinePay.config_or_env_channel_id(), LinePay.config_or_env_key(), reg_key, params)
end
@doc """
Check a pre approved payment.
Checks a pre approved payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.check("channel_id", "my_key", "reg_key")
{:ok, payment} = LinePay.PreapprovedPay.check("channel_id", "my_key", "reg_key", %{creditCardAuth: true})
"""
def check(channel_id, key, reg_key, params) do
LinePay.make_request_with_key(
:get,
"#{@endpoint}/#{reg_key}/check",
channel_id,
key,
%{},
%{},
params: params
)
|> LinePay.Util.handle_line_pay_response()
end
@doc """
Expire a pre approved payment.
Expires a pre approved payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.expire("reg_key")
"""
def expire(reg_key) do
expire(LinePay.config_or_env_channel_id(), LinePay.config_or_env_key(), reg_key)
end
@doc """
Expire a pre approved payment.
Expires a pre approved payment.
Returns a `{:ok, payment}` tuple.
## Examples
{:ok, payment} = LinePay.PreapprovedPay.expire("channel_id", "my_key", "reg_key")
"""
def expire(channel_id, key, reg_key) do
LinePay.make_request_with_key(:post, "#{@endpoint}/#{reg_key}/expire", channel_id, key)
|> LinePay.Util.handle_line_pay_response()
end
end
|
lib/line_pay/preapproved_pay.ex
| 0.881088
| 0.433442
|
preapproved_pay.ex
|
starcoder
|
defmodule DataTree do
alias DataTree.{Node, TreePath}
def normalize(tree) do
Map.keys(tree) |> Enum.reduce(tree, &normalize(&2, &1))
end
def normalize(tree, %TreePath{} = path) do
tree = Map.put_new_lazy(tree, path, &Node.new/0)
parent = TreePath.parent(path)
case TreePath.level(parent) do
0 -> tree
_ -> normalize(tree, parent)
end
end
def size(tree) do
map_size(tree)
end
def node(tree, %TreePath{} = path) do
Map.fetch(tree, path)
end
def children(tree, %TreePath{} = path) do
children_level = TreePath.level(path) + 1
Map.filter(tree, fn {key, _} ->
TreePath.starts_with?(key, path) && TreePath.level(key) <= children_level
end)
end
def subtree(tree, %TreePath{} = path) do
Map.filter(tree, fn {key, _} -> TreePath.starts_with?(key, path) end)
end
def update_value(tree, value) when is_map(tree) do
timestamp = system_time()
update(tree, fn {k, v} -> {k, %Node{v | value: value, modified: timestamp}} end)
end
def update_value(tree, %TreePath{} = path, value) when is_map(tree) do
update(tree, path, fn v -> %Node{v | value: value, modified: system_time()} end)
end
def update_status(tree, status) when is_map(tree) do
timestamp = system_time()
update(tree, fn {k, v} -> {k, %Node{v | status: status, modified: timestamp}} end)
end
def update_status(tree, %TreePath{} = path, status) when is_map(tree) do
update(tree, path, fn v -> %Node{v | status: status, modified: system_time()} end)
end
def update_time_modified(tree, modified) when is_map(tree) do
update(tree, fn {k, v} -> {k, %Node{v | modified: modified}} end)
end
def update_time_modified(tree, %TreePath{} = path, modified) when is_map(tree) do
update(tree, path, fn v -> %Node{v | modified: modified} end)
end
defp update(tree, fun) do
tree |> Enum.into(%{}, fun)
end
defp update(tree, %TreePath{} = path, fun) do
# Map.update/4 takes a default value for missing keys, not a lazy function.
Map.update(tree, path, Node.new(), fun)
end
def delete(tree, %TreePath{} = path) do
Map.reject(tree, fn {k, _} -> TreePath.starts_with?(k, path) end)
end
defp system_time() do
System.system_time()
end
end
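# --- Usage sketch (API assumptions flagged inline) ---
# `DataTree.TreePath.new/1` taking a list of segments is an assumption; the
# module above only pattern-matches on `%TreePath{}` structs.
path = DataTree.TreePath.new(["plant", "line1", "sensor"])

tree =
  %{}
  |> DataTree.normalize(path)
  |> DataTree.update_value(path, 42)

{:ok, _node} = DataTree.node(tree, path)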
|
lib/data_tree.ex
| 0.593374
| 0.661359
|
data_tree.ex
|
starcoder
|
defmodule Geometry.PolygonM do
@moduledoc """
A polygon struct, representing a 2D polygon with a measurement.
A non-empty polygon requires at least one ring with four points.
"""
alias Geometry.{GeoJson, LineStringM, PolygonM, WKB, WKT}
defstruct rings: []
@type t :: %PolygonM{rings: [Geometry.coordinates()]}
@doc """
Creates an empty `PolygonM`.
## Examples
iex> PolygonM.new()
%PolygonM{rings: []}
"""
@spec new :: t()
def new, do: %PolygonM{}
@doc """
Creates a `PolygonM` from the given `rings`.
## Examples
iex> PolygonM.new([
...> LineStringM.new([
...> PointM.new(35, 10, 14),
...> PointM.new(45, 45, 24),
...> PointM.new(10, 20, 34),
...> PointM.new(35, 10, 14)
...> ]),
...> LineStringM.new([
...> PointM.new(20, 30, 14),
...> PointM.new(35, 35, 24),
...> PointM.new(30, 20, 34),
...> PointM.new(20, 30, 14)
...> ])
...> ])
%PolygonM{
rings: [
[[35, 10, 14], [45, 45, 24], [10, 20, 34], [35, 10, 14]],
[[20, 30, 14], [35, 35, 24], [30, 20, 34], [20, 30, 14]]
]
}
iex> PolygonM.new()
%PolygonM{}
"""
@spec new([LineStringM.t()]) :: t()
def new(rings) when is_list(rings) do
%PolygonM{rings: Enum.map(rings, fn line_string -> line_string.points end)}
end
@doc """
Returns `true` if the given `PolygonM` is empty.
## Examples
iex> PolygonM.empty?(PolygonM.new())
true
iex> PolygonM.empty?(
...> PolygonM.new([
...> LineStringM.new([
...> PointM.new(35, 10, 14),
...> PointM.new(45, 45, 24),
...> PointM.new(10, 20, 34),
...> PointM.new(35, 10, 14)
...> ])
...> ])
...> )
false
"""
@spec empty?(t()) :: boolean
def empty?(%PolygonM{rings: rings}), do: Enum.empty?(rings)
@doc """
Creates a `PolygonM` from the given coordinates.
## Examples
iex> PolygonM.from_coordinates([
...> [[1, 1, 1], [2, 1, 3], [2, 2, 2], [1, 1, 1]]
...> ])
%PolygonM{
rings: [
[[1, 1, 1], [2, 1, 3], [2, 2, 2], [1, 1, 1]]
]
}
"""
@spec from_coordinates([Geometry.coordinate()]) :: t()
def from_coordinates(rings) when is_list(rings), do: %PolygonM{rings: rings}
@doc """
Returns an `:ok` tuple with the `PolygonM` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10, 12],
...> [45, 45, 22],
...> [15, 40, 33],
...> [10, 20, 55],
...> [35, 10, 12]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> PolygonM.from_geo_json()
{:ok, %PolygonM{
rings: [
[
[35, 10, 12],
[45, 45, 22],
[15, 40, 33],
[10, 20, 55],
[35, 10, 12]
]
]
}}
iex> ~s(
...> {
...> "type": "Polygon",
...> "coordinates": [
...> [[35, 10, 12],
...> [45, 45, 22],
...> [15, 40, 33],
...> [10, 20, 55],
...> [35, 10, 12]],
...> [[20, 30, 11],
...> [35, 35, 55],
...> [30, 20, 45],
...> [20, 30, 11]]
...> ]
...> }
...> )
iex> |> Jason.decode!()
iex> |> PolygonM.from_geo_json()
{:ok, %PolygonM{
rings: [[
[35, 10, 12],
[45, 45, 22],
[15, 40, 33],
[10, 20, 55],
[35, 10, 12]
], [
[20, 30, 11],
[35, 35, 55],
[30, 20, 45],
[20, 30, 11]
]]
}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_polygon(json, PolygonM)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_polygon(json, PolygonM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `PolygonM`.
## Examples
iex> PolygonM.to_geo_json(
...> PolygonM.new([
...> LineStringM.new([
...> PointM.new(35, 10, 14),
...> PointM.new(45, 45, 24),
...> PointM.new(10, 20, 34),
...> PointM.new(35, 10, 14)
...> ]),
...> LineStringM.new([
...> PointM.new(20, 30, 14),
...> PointM.new(35, 35, 24),
...> PointM.new(30, 20, 34),
...> PointM.new(20, 30, 14)
...> ])
...> ])
...> )
%{
"type" => "Polygon",
"coordinates" => [
[
[35, 10, 14],
[45, 45, 24],
[10, 20, 34],
[35, 10, 14]
], [
[20, 30, 14],
[35, 35, 24],
[30, 20, 34],
[20, 30, 14]
]
]
}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%PolygonM{rings: rings}) do
%{
"type" => "Polygon",
"coordinates" => rings
}
end
@doc """
Returns an `:ok` tuple with the `PolygonM` from the given WKT string.
Otherwise returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
## Examples
iex> PolygonM.from_wkt("
...> POLYGON M (
...> (35 10 22, 45 45 33, 15 40 44, 10 20 66, 35 10 22),
...> (20 30 55, 35 35 66, 30 20 99, 20 30 55)
...> )
...> ")
{:ok,
%PolygonM{
rings: [
[
[35, 10, 22],
[45, 45, 33],
[15, 40, 44],
[10, 20, 66],
[35, 10, 22]
], [
[20, 30, 55],
[35, 35, 66],
[30, 20, 99],
[20, 30, 55]
]
]
}}
iex> "
...> SRID=789;
...> POLYGON M (
...> (35 10 22, 45 45 33, 15 40 44, 10 20 66, 35 10 22),
...> (20 30 55, 35 35 66, 30 20 99, 20 30 55)
...> )
...> "
iex> |> PolygonM.from_wkt()
{:ok, {
%PolygonM{
rings: [
[
[35, 10, 22],
[45, 45, 33],
[15, 40, 44],
[10, 20, 66],
[35, 10, 22]
], [
[20, 30, 55],
[35, 35, 66],
[30, 20, 99],
[20, 30, 55]
]
]
},
789
}}
iex> PolygonM.from_wkt("Polygon M EMPTY")
{:ok, %PolygonM{}}
"""
@spec from_wkt(Geometry.wkt()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, PolygonM)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, PolygonM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKT representation for a `PolygonM`. With option `:srid` an
EWKT representation with the SRID is returned.
## Examples
iex> PolygonM.to_wkt(PolygonM.new())
"Polygon M EMPTY"
iex> PolygonM.to_wkt(PolygonM.new(), srid: 1123)
"SRID=1123;Polygon M EMPTY"
iex> PolygonM.to_wkt(
...> PolygonM.new([
...> LineStringM.new([
...> PointM.new(35, 10, 14),
...> PointM.new(45, 45, 24),
...> PointM.new(10, 20, 34),
...> PointM.new(35, 10, 14)
...> ]),
...> LineStringM.new([
...> PointM.new(20, 30, 14),
...> PointM.new(35, 35, 24),
...> PointM.new(30, 20, 34),
...> PointM.new(20, 30, 14)
...> ])
...> ])
...> )
"Polygon M ((35 10 14, 45 45 24, 10 20 34, 35 10 14), (20 30 14, 35 35 24, 30 20 34, 20 30 14))"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%PolygonM{rings: rings}, opts \\ []) do
WKT.to_ewkt(<<"Polygon M ", to_wkt_rings(rings)::binary()>>, opts)
end
@doc """
Returns the WKB representation for a `PolygonM`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointM.to_wkb/1` function.
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%PolygonM{rings: rings}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
mode = Keyword.get(opts, :mode, Geometry.default_mode())
srid = Keyword.get(opts, :srid)
to_wkb(rings, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `PolygonM` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
The optional second argument determines if a `:hex`-string or a `:binary`
input is expected. The default is `:binary`.
An example of a simpler geometry can be found in the description for the
`Geometry.PointM.from_wkb/2` function.
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}} | Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, PolygonM)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, PolygonM) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc false
@compile {:inline, to_wkt_rings: 1}
@spec to_wkt_rings(list()) :: String.t()
def to_wkt_rings([]), do: "EMPTY"
def to_wkt_rings([ring | rings]) do
<<
"(",
LineStringM.to_wkt_points(ring)::binary(),
Enum.reduce(rings, "", fn ring, acc ->
<<acc::binary(), ", ", LineStringM.to_wkt_points(ring)::binary()>>
end)::binary(),
")"
>>
end
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(coordinates, srid, endian, mode) :: wkb
when coordinates: [Geometry.coordinates()],
srid: Geometry.srid() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb(rings, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary(),
WKB.srid(srid, endian, mode)::binary(),
to_wkb_rings(rings, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_rings: 3}
defp to_wkb_rings(rings, endian, mode) do
Enum.reduce(rings, WKB.length(rings, endian, mode), fn ring, acc ->
<<acc::binary(), LineStringM.to_wkb_points(ring, endian, mode)::binary()>>
end)
end
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "40000003"
{:ndr, false} -> "03000040"
{:xdr, true} -> "60000003"
{:ndr, true} -> "03000060"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x40000003::big-integer-size(32)>>
{:ndr, false} -> <<0x40000003::little-integer-size(32)>>
{:xdr, true} -> <<0x60000003::big-integer-size(32)>>
{:ndr, true} -> <<0x60000003::little-integer-size(32)>>
end
end
end
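# --- Usage sketch ---
# Serializing a polygon to an EWKT string, mirroring the doctests above.
polygon =
  Geometry.PolygonM.from_coordinates([
    [[35, 10, 14], [45, 45, 24], [10, 20, 34], [35, 10, 14]]
  ])

"SRID=4326;Polygon M ((35 10 14, 45 45 24, 10 20 34, 35 10 14))" =
  Geometry.PolygonM.to_wkt(polygon, srid: 4326)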
|
lib/geometry/polygon_m.ex
| 0.92883
| 0.722008
|
polygon_m.ex
|
starcoder
|
defmodule Jaxon.Decoders.Value do
alias Jaxon.ParseError
@array 0
@object 1
def decode(events) do
value(events, [])
end
defp parse_error(got, expected) do
{:error,
%ParseError{
unexpected: got,
expected: expected
}}
end
@compile {:inline, value: 2}
defp value(e, stack) do
case e do
[:start_object | rest] ->
object(rest, [[] | stack])
[:start_array | rest] ->
array(rest, [[] | stack])
[{:incomplete, {_, value}, _}] ->
add_value([], stack, value)
[{:incomplete, _} = other] ->
parse_error(other, [:value])
[{type, value} | rest] when type in ~w(string decimal integer boolean)a ->
add_value(rest, stack, value)
[value = nil | rest] ->
add_value(rest, stack, value)
[other | _] ->
parse_error(other, [:value])
[] ->
parse_error(:end_stream, [:value])
end
end
@compile {:inline, array: 2}
defp array_next([:comma | rest], stack) do
value(rest, [@array | stack])
end
defp array_next([:end_array | rest], [array | stack]) do
add_value(rest, stack, :lists.reverse(array))
end
defp array_next([other | _rest], _stack) do
parse_error(other, [:value, :end_array])
end
defp array_next([], _stack) do
parse_error(:end_stream, [:value, :end_array])
end
# empty array
defp array([:end_array | rest], [array | stack]) do
add_value(rest, stack, array)
end
defp array(rest, stack) do
value(rest, [@array | stack])
end
@compile {:inline, add_value: 3}
defp add_value(e, stack, value) do
case stack do
[key, @object, object | stack] ->
object(e, [[{key, value} | object] | stack])
[@array, array | stack] ->
array_next(e, [[value | array] | stack])
[] ->
case e do
[] ->
{:ok, value}
[event | _rest] ->
parse_error(event, [:end_stream])
end
end
end
@compile {:inline, object: 2}
defp object([:end_object | rest], [object | stack]) do
add_value(rest, stack, :maps.from_list(object))
end
defp object([:comma | _rest], [[] | _stack]) do
parse_error(:comma, [:value, :end_object])
end
defp object([:comma | rest], stack) do
key(rest, stack)
end
defp object(rest, stack) do
key(rest, stack)
end
@compile {:inline, key: 2}
defp key(e, stack) do
case e do
[{:string, key}, :colon | rest] ->
value(rest, [key, @object | stack])
[other | _rest] ->
parse_error(other, [:key])
[] ->
parse_error(:end_stream, [:key])
end
end
end
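# --- Usage sketch ---
# Decoding a hand-built event stream into an Elixir term; the event shapes
# mirror the clauses above.
events = [
  :start_object,
  {:string, "name"}, :colon, {:string, "Jane"},
  :comma,
  {:string, "tags"}, :colon,
  :start_array, {:integer, 1}, :comma, {:integer, 2}, :end_array,
  :end_object
]

{:ok, %{"name" => "Jane", "tags" => [1, 2]}} = Jaxon.Decoders.Value.decode(events)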
|
lib/jaxon/decoders/value.ex
| 0.740456
| 0.728917
|
value.ex
|
starcoder
|
defmodule OpenTelemetryJaeger do
@moduledoc """
`OpenTelemetryJaeger` is a library for exporting [OpenTelemetry](https://opentelemetry.io/)
trace data, as modeled by [opentelemetry-erlang](https://github.com/open-telemetry/opentelemetry-erlang),
to a [Jaeger](https://www.jaegertracing.io/) endpoint.
The configuration is passed through the options specified when configuring the `:opentelemetry` application:
```elixir
config :opentelemetry,
processors: [
otel_batch_processor: %{
exporter: {OpenTelemetryJaeger, %{
# Defaults to `:agent`.
endpoint_type: :agent,
# Defaults to `"localhost"`.
host: "localhost",
# Defaults to `6832`.
port: 6832,
# Used only when `endpoint_type` is set to `:collector`.
http_headers: [{"X-Foo", "Bar"}],
# Defaults to `OpenTelemetryJaeger.SpanRefTypeMapperDefault`. If set, the module must implement the
# `OpenTelemetryJaeger.SpanRefTypeMapper` protocol. It is used when linking spans together; the
# implementation for `Any` returns `SpanRefType.child_of()`.
span_ref_type_mapper: MySpanRefTypeMapper,
# https://hexdocs.pm/finch/Finch.html#start_link/1
finch_pool_settings: [],
# Defaults to `Mix.Project.config()[:app]`, in PascalCase.
service_name: "MyService",
# Defaults to `Mix.Project.config()[:version]`.
service_version: "MyServiceVersion"
}}
}
]
```
When the project is compiled, the [Jaeger Thrift IDL](https://github.com/jaegertracing/jaeger-idl/tree/master/thrift)
files, stored in the `priv` directory, are compiled and the output is stored in `lib/jaeger/thrift`.
For the meaning of the configuration key `span_ref_type_mapper`, see `OpenTelemetryJaeger.SpanRefTypeMapper`.
Internally, `OpenTelemetryJaeger` starts a `DynamicSupervisor` to supervise the connection processes started by `Finch`.
"""
require Jaeger.Thrift.TagType, as: TagType
@keys [
:endpoint_type,
:host,
:port,
:http_headers,
:span_ref_type_mapper,
:finch_pool_settings,
:service_name,
:service_version
]
@enforce_keys @keys
defstruct @keys
@type t :: %__MODULE__{
endpoint_type: :agent | :collector,
host: charlist(),
port: pos_integer(),
http_headers: [{String.t(), String.t()}],
span_ref_type_mapper: struct() | nil,
finch_pool_settings: [
protocol: :http1 | :http2,
size: pos_integer(),
count: pos_integer(),
max_idle_time: pos_integer() | :infinity,
conn_opts: list()
],
service_name: String.t(),
service_version: String.t()
}
alias Jaeger.Thrift.{Agent, Batch, Log, Process, Span, SpanRef, Tag}
alias OpenTelemetryJaeger.{SpanRefTypeMapper, SpanRefTypeMapperDefault}
alias Thrift.Protocol.Binary
@doc """
Initializes the exporter's configuration by constructing a `t:OpenTelemetryJaeger.t/0`.
"""
@spec init(map()) :: {:ok, t()} | {:error, term()}
def init(opts) when is_map(opts) do
with {:ok, _} <- init_dynamic_supervisor(),
%__MODULE__{} = opts = init_opts(opts),
:ok <- init_http_client(opts) do
{:ok, opts}
end
end
@doc """
Transforms a batch of `t:OpenTelemetry.span_ctx/0`s into a batch of `Jaeger.Thrift.Span`s.
Then, it sends the batch to the specified Jaeger endpoint.
"""
@spec export(atom() | :ets.tid(), :otel_resource.t(), term()) :: :ok | {:error, term()}
def export(ets_table, resource, opts) do
_ = :otel_resource.attributes(resource)
fn span, acc -> [span | acc] end
|> :ets.foldl([], ets_table)
|> prepare_payload(opts)
|> send_payload(opts)
:ok
end
@doc """
Shuts down an `OpenTelemetryJaeger` exporter.
"""
@spec shutdown(term()) :: :ok
def shutdown(_), do: :ok
@doc """
Converts an `:opentelemetry.trace_id()` into its lowercase hexadecimal string representation.
"""
@spec to_hex_trace_id(non_neg_integer()) :: String.t()
def to_hex_trace_id(trace_id) when is_integer(trace_id) and trace_id > 0 do
# Base.encode16/2 zero-pads every byte; converting byte-by-byte with
# Integer.to_string/2 would drop leading zeros and shorten the string.
Base.encode16(<<trace_id::128>>, case: :lower)
end
@spec init_dynamic_supervisor() :: Supervisor.on_start()
defp init_dynamic_supervisor() do
DynamicSupervisor.start_link(
strategy: :one_for_one,
name: OpenTelemetryJaeger.DynamicSupervisor
)
end
@spec init_opts(map()) :: t()
defp init_opts(opts) when is_map(opts) do
endpoint_type = Map.get(opts, :endpoint_type, :agent)
host =
opts
|> Map.get(:host, "localhost")
|> to_charlist()
port = Map.get(opts, :port, 6832)
http_headers = Map.get(opts, :http_headers, [])
span_ref_type_mapper = Map.get(opts, :span_ref_type_mapper, SpanRefTypeMapperDefault)
span_ref_type_mapper =
cond do
span_ref_type_mapper == nil -> nil
Code.ensure_loaded?(span_ref_type_mapper) -> struct!(span_ref_type_mapper)
true -> raise("#{inspect(span_ref_type_mapper)} is not a loaded module.")
end
finch_pool_settings =
opts
|> Map.get(:finch_pool_settings, [])
|> Keyword.put_new(:protocol, :http1)
|> Keyword.put_new(:size, 10)
|> Keyword.put_new(:count, 1)
|> Keyword.put_new(:max_idle_time, :infinity)
|> Keyword.put_new(:conn_opts, [])
service_name =
Map.get_lazy(opts, :service_name, fn ->
Mix.Project.config()
|> Keyword.get(:app)
|> to_string()
|> Macro.camelize()
end)
service_version =
Map.get_lazy(opts, :service_version, fn ->
Mix.Project.config()[:version]
end)
%__MODULE__{
endpoint_type: endpoint_type,
host: host,
port: port,
http_headers: http_headers,
span_ref_type_mapper: span_ref_type_mapper,
finch_pool_settings: finch_pool_settings,
service_name: service_name,
service_version: service_version
}
end
@spec init_http_client(t()) :: :ok | {:error, term()}
defp init_http_client(opts)
defp init_http_client(%__MODULE__{endpoint_type: :agent}), do: :ok
defp init_http_client(%__MODULE__{endpoint_type: :collector} = opts) do
init_http_client_if_not_started(:persistent_term.get(__MODULE__, false), opts)
end
@spec init_http_client_if_not_started(boolean(), t()) :: :ok | {:error, term()}
defp init_http_client_if_not_started(started, opts)
defp init_http_client_if_not_started(true, _opts), do: :ok
defp init_http_client_if_not_started(false, opts) do
%__MODULE__{host: host, port: port, finch_pool_settings: finch_pool_settings} = opts
DynamicSupervisor.start_child(
OpenTelemetryJaeger.DynamicSupervisor,
{
Finch,
name: OpenTelemetryJaeger.Finch,
pools: %{
"#{host}:#{port}" => finch_pool_settings
}
}
)
|> case do
{:ok, _pid} ->
:ok
{:ok, _pid, _info} ->
:ok
:ignore ->
{:error, {:finch_start_error, :ignore}}
{:error, {:already_started, _pid}} ->
:ok
{:error, _} = error ->
error
end
end
@spec prepare_payload([tuple()], t()) :: binary
defp prepare_payload(spans, opts)
defp prepare_payload(spans, %__MODULE__{endpoint_type: :agent} = opts) do
batch = %{
Agent.EmitBatchArgs.new()
| batch: prepare_batch(spans, opts)
}
batch
|> Agent.EmitBatchArgs.serialize()
|> IO.iodata_to_binary()
end
defp prepare_payload(spans, %__MODULE__{endpoint_type: :collector} = opts) do
spans
|> prepare_batch(opts)
|> Batch.serialize()
|> IO.iodata_to_binary()
end
@spec prepare_batch([tuple()], t()) :: map()
defp prepare_batch(spans, opts) do
process = %{
Process.new()
| service_name: opts.service_name,
tags: [
%Tag{
Tag.new()
| key: "client.version",
v_str: opts.service_version,
v_type: TagType.string()
}
]
}
%{
Batch.new()
| process: process,
spans: to_jaeger_spans(spans, opts)
}
end
@spec send_payload(binary(), t()) :: :ok
defp send_payload(data, opts)
defp send_payload(data, %__MODULE__{endpoint_type: :agent} = opts) do
%__MODULE__{host: host, port: port} = opts
{:ok, server} = :gen_udp.open(0)
message =
Binary.serialize(
:message_begin,
{
:oneway,
:os.system_time(:microsecond),
"emitBatch"
}
)
:ok = :gen_udp.send(server, host, port, [message | data])
:gen_udp.close(server)
:ok
end
defp send_payload(data, %__MODULE__{endpoint_type: :collector} = opts) do
%__MODULE__{host: host, port: port, http_headers: http_headers} = opts
http_headers = [{"Content-Type", "application/x-thrift"} | http_headers]
url = "#{host}:#{port}/api/traces?format=jaeger.thrift"
request = Finch.build(:post, url, http_headers, data)
request
|> Finch.request(OpenTelemetryJaeger.Finch)
|> case do
{:ok, %Finch.Response{status: 202}} -> :ok
{:ok, %Finch.Response{status: status, body: body}} -> {:error, {status, body}}
{:error, _} = error -> error
end
:ok
end
@spec to_jaeger_spans([tuple()], t()) :: [Span.t()]
defp to_jaeger_spans(spans, opts), do: Enum.map(spans, &to_jaeger_span(&1, opts))
@spec to_jaeger_span(
{
:span,
:opentelemetry.trace_id() | :undefined,
:opentelemetry.span_id() | :undefined,
:opentelemetry.tracestate() | :undefined,
:opentelemetry.span_id() | :undefined,
String.t() | atom(),
:opentelemetry.span_kind() | :undefined,
:opentelemetry.timestamp(),
:opentelemetry.timestamp() | :undefined,
:opentelemetry.attributes() | :undefined,
:opentelemetry.events(),
:opentelemetry.links(),
:opentelemetry.status() | :undefined,
integer() | :undefined,
boolean() | :undefined,
tuple() | :undefined
},
t()
) :: Span.t()
# credo:disable-for-next-line Credo.Check.Refactor.CyclomaticComplexity
defp to_jaeger_span(
{:span, trace_id, span_id, trace_state, parent_span_id, name, kind, start_time, end_time,
attributes, events, links, status, flags, is_recording, instrumentation_library},
%__MODULE__{span_ref_type_mapper: span_ref_type_mapper}
)
when ((is_integer(trace_id) and trace_id > 0) or trace_id == :undefined) and
((is_integer(span_id) and span_id > 0) or span_id == :undefined) and
(is_list(trace_state) or trace_state == :undefined) and
((is_integer(parent_span_id) and parent_span_id > 0) or parent_span_id == :undefined) and
(is_binary(name) or is_atom(name)) and
(is_atom(kind) or kind == :undefined) and
is_integer(start_time) and
(is_integer(end_time) or end_time == :undefined) and
(is_list(attributes) or attributes == :undefined) and
is_list(events) and
is_list(links) and
(is_tuple(status) or status == :undefined) and
(is_integer(flags) or flags == :undefined) and
(is_boolean(is_recording) or is_recording == :undefined) and
(is_tuple(instrumentation_library) or instrumentation_library == :undefined) do
%Span{
Span.new()
| operation_name: to_string(name),
trace_id_low: to_jaeger_trace_id(:low, trace_id),
trace_id_high: to_jaeger_trace_id(:high, trace_id),
span_id: to_jaeger_span_id(span_id),
parent_span_id: to_jaeger_span_id(parent_span_id),
flags: to_jaeger_flags(flags),
start_time: :opentelemetry.convert_timestamp(start_time, :microsecond),
duration: to_duration(start_time, end_time),
tags: to_jaeger_tags(attributes),
logs: to_jaeger_logs(events),
references: to_jaeger_span_refs(links, span_ref_type_mapper)
}
|> add_jaeger_span_kind_tag(kind)
end
@spec to_jaeger_trace_id(:low | :high, :opentelemetry.trace_id() | :undefined) ::
non_neg_integer()
defp to_jaeger_trace_id(part, id)
defp to_jaeger_trace_id(_, :undefined), do: 0
defp to_jaeger_trace_id(part, id) when is_integer(id) and id > 0 do
case part do
:low ->
low = <<id::64>>
<<low::64-signed>> = low
low
:high ->
high = <<id::128>>
<<high::64-signed, _::binary>> = high
high
end
end
@spec to_jaeger_span_id(:opentelemetry.span_id() | :undefined) :: non_neg_integer()
defp to_jaeger_span_id(id)
defp to_jaeger_span_id(:undefined), do: 0
defp to_jaeger_span_id(id) when is_integer(id) and id > 0, do: id
@spec to_jaeger_flags(integer() | :undefined) :: non_neg_integer()
defp to_jaeger_flags(flags)
defp to_jaeger_flags(:undefined), do: 0
defp to_jaeger_flags(flags) when is_integer(flags), do: flags
@spec to_duration(:opentelemetry.timestamp(), :opentelemetry.timestamp() | :undefined) ::
non_neg_integer()
defp to_duration(start_time, end_time)
defp to_duration(_start_time, :undefined), do: 0
defp to_duration(start_time, end_time) when is_integer(start_time) and is_integer(end_time),
do: :erlang.convert_time_unit(end_time - start_time, :native, :microsecond)
@spec to_jaeger_tags(:opentelemetry.attributes()) :: [Tag.t()]
defp to_jaeger_tags(attributes)
defp to_jaeger_tags(:undefined), do: []
defp to_jaeger_tags(attributes) do
attributes
|> to_jaeger_tags([])
|> Enum.reverse()
end
@spec to_jaeger_tags(:opentelemetry.attributes(), [Tag.t()]) :: [Tag.t()]
defp to_jaeger_tags(attributes, tags)
defp to_jaeger_tags([], tags), do: tags
defp to_jaeger_tags([{key, value} | attributes], tags),
do: to_jaeger_tags(attributes, [to_jaeger_tag(key, value) | tags])
@spec to_jaeger_tag(:opentelemetry.attribute_key(), :opentelemetry.attribute_value()) :: Tag.t()
defp to_jaeger_tag(key, value)
defp to_jaeger_tag(key, value) when is_function(value), do: to_jaeger_tag(key, value.())
defp to_jaeger_tag(key, value) when is_list(value),
do: to_jaeger_tag(key, IO.iodata_to_binary(value))
defp to_jaeger_tag(key, value) when is_binary(key) or is_atom(key) do
%Tag{
Tag.new()
| v_type: to_jaeger_tag_value_type(value),
key: to_string(key)
}
|> Map.replace!(get_jaeger_tag_value_key_name(value), value)
end
@spec get_jaeger_tag_value_key_name(bitstring() | number() | boolean() | nil) ::
:v_str | :v_double | :v_long | :v_bool | :v_binary
defp get_jaeger_tag_value_key_name(value)
defp get_jaeger_tag_value_key_name(value)
when is_binary(value) or is_nil(value) or is_list(value),
do: :v_str
defp get_jaeger_tag_value_key_name(value) when is_float(value), do: :v_double
defp get_jaeger_tag_value_key_name(value) when is_number(value), do: :v_long
defp get_jaeger_tag_value_key_name(value) when is_boolean(value), do: :v_bool
defp get_jaeger_tag_value_key_name(value) when is_bitstring(value), do: :v_binary
@spec to_jaeger_tag_value_type(bitstring() | number() | boolean() | nil) ::
unquote(TagType.string())
| unquote(TagType.double())
| unquote(TagType.bool())
| unquote(TagType.long())
| unquote(TagType.binary())
defp to_jaeger_tag_value_type(nil), do: TagType.string()
defp to_jaeger_tag_value_type(value) when is_binary(value), do: TagType.string()
defp to_jaeger_tag_value_type(value) when is_float(value), do: TagType.double()
defp to_jaeger_tag_value_type(value) when is_number(value), do: TagType.long()
defp to_jaeger_tag_value_type(value) when is_boolean(value), do: TagType.bool()
defp to_jaeger_tag_value_type(value) when is_bitstring(value), do: TagType.binary()
@spec to_jaeger_logs(:opentelemetry.events()) :: [Log.t()]
defp to_jaeger_logs(events)
defp to_jaeger_logs(events), do: to_jaeger_logs(events, [])
@spec to_jaeger_logs(:opentelemetry.events(), [Log.t()]) :: [Log.t()]
defp to_jaeger_logs(events, logs)
defp to_jaeger_logs([], logs), do: logs
defp to_jaeger_logs([event | events], logs),
do: to_jaeger_logs(events, [to_jaeger_log(event) | logs])
@spec to_jaeger_log(:opentelemetry.event()) :: Log.t()
defp to_jaeger_log(event)
defp to_jaeger_log({:event, timestamp, key, attributes}) when is_binary(key) or is_atom(key) do
attributes = [{"event.name", to_string(key)} | attributes]
%Log{
Log.new()
| timestamp: :opentelemetry.convert_timestamp(timestamp, :microsecond),
fields: to_jaeger_tags(attributes)
}
end
@spec to_jaeger_span_refs(:opentelemetry.links(), struct()) :: [SpanRef.t()]
defp to_jaeger_span_refs(links, span_ref_type_mapper) do
links
|> to_jaeger_span_refs(span_ref_type_mapper, [])
|> Enum.reverse()
end
@spec to_jaeger_span_refs(:opentelemetry.links(), struct(), [SpanRef.t()]) :: [SpanRef.t()]
defp to_jaeger_span_refs(links, span_ref_type_mapper, span_refs)
defp to_jaeger_span_refs([], _span_ref_type_mapper, span_refs), do: span_refs
defp to_jaeger_span_refs([link | links], span_ref_type_mapper, span_refs) do
to_jaeger_span_refs(links, span_ref_type_mapper, [
to_jaeger_span_ref(link, span_ref_type_mapper) | span_refs
])
end
@spec to_jaeger_span_ref(:opentelemetry.link(), struct()) :: SpanRef.t()
defp to_jaeger_span_ref(
{:link, trace_id, span_id, attributes, _trace_state},
span_ref_type_mapper
) do
%SpanRef{
SpanRef.new()
| ref_type: SpanRefTypeMapper.resolve(span_ref_type_mapper, attributes),
trace_id_low: to_jaeger_trace_id(:low, trace_id),
trace_id_high: to_jaeger_trace_id(:high, trace_id),
span_id: to_jaeger_span_id(span_id)
}
end
@spec add_jaeger_span_kind_tag(Span.t(), atom()) :: Span.t()
defp add_jaeger_span_kind_tag(span, kind)
defp add_jaeger_span_kind_tag(span, :undefined), do: span
defp add_jaeger_span_kind_tag(%Span{tags: tags} = span, kind)
when kind in [:INTERNAL, :PRODUCER, :CONSUMER, :SERVER, :CLIENT] do
kind =
kind
|> to_string()
|> String.downcase()
kind_tag = %Tag{
Tag.new()
| key: "span.kind",
v_str: kind,
v_type: TagType.string()
}
tags = tags ++ [kind_tag]
%Span{span | tags: tags}
end
end
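# --- Usage sketch ---
# `to_hex_trace_id/1` renders a 128-bit trace id as a 32-character lowercase
# hex string; the literal below is a hypothetical id.
trace_id = 0x0102030405060708090A0B0C0D0E0F10
"0102030405060708090a0b0c0d0e0f10" = OpenTelemetryJaeger.to_hex_trace_id(trace_id)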
|
lib/opentelemetry_jaeger.ex
| 0.880013
| 0.862178
|
opentelemetry_jaeger.ex
|
starcoder
|
defmodule SSE.Chunk do
@moduledoc """
Structure and type for Chunk model
"""
@enforce_keys [:data]
defstruct [:comment, :event, :data, :id, :retry]
@typedoc """
Defines the Chunk struct.
Reference: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Fields
* :comment - The comment line can be used to prevent connections from timing
out; a server can send a comment periodically to keep the connection alive.
SSE package keeps connection alive, so you don't have to send the comment.
* :data - The data field for the message. When the EventSource receives
multiple consecutive lines that begin with data:, it will concatenate them,
inserting a newline character between each one. Trailing newlines are
removed.
* :event - A string identifying the type of event described. If this is
specified, an event will be dispatched on the browser to the listener for
the specified event name; the web site source code should use
addEventListener() to listen for named events. The onmessage handler is
called if no event name is specified for a message.
* :id - The event ID to set the EventSource object's last event ID value.
* :retry - The reconnection time to use when attempting to send the event.
This must be an integer, specifying the reconnection time in milliseconds.
If a non-integer value is specified the field is ignored.
"""
@type t :: %__MODULE__{
comment: String.t() | nil,
data: list(String.t()),
event: String.t() | nil,
id: String.t() | nil,
retry: integer() | nil
}
@spec build(t()) :: String.t()
def build(%__MODULE__{
comment: comment,
data: data,
event: event,
id: id,
retry: retry
}) do
build_field("", comment) <> build_field("id", id) <>
build_field("event", event) <> build_data(data) <>
build_field("retry", retry) <> "\n"
end
@spec build_data(nil) :: no_return()
defp build_data(nil) do
raise("Chunk data can't be blank!")
end
@spec build_data(list(String.t())) :: String.t()
defp build_data(data_list) when is_list(data_list) do
Enum.reduce(data_list, "", fn(data, acc) ->
acc <> "data: #{data}\n"
end)
end
@spec build_data(String.t()) :: String.t()
defp build_data(data) do
"data: #{data}\n"
end
@spec build_field(String.t(), nil) :: String.t()
defp build_field(_, nil) do
""
end
@spec build_field(String.t(), String.t() | integer()) :: String.t()
defp build_field(field, value) do
"#{field}: #{value}\n"
end
end
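# --- Usage sketch ---
# Building the wire format for a server-sent event; field order follows
# `build/1` above (comment, id, event, data lines, retry, then a blank line).
chunk = %SSE.Chunk{data: ["line one", "line two"], event: "update", id: "42", retry: 5000}
IO.puts(SSE.Chunk.build(chunk))
# id: 42
# event: update
# data: line one
# data: line two
# retry: 5000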
|
lib/sse/chunk.ex
| 0.821546
| 0.50238
|
chunk.ex
|
starcoder
|
defmodule Day11 do
use Bitwise
def part1(input) do
Parser.parse(input)
|> solve
end
def part2(input) do
Parser.parse(input)
|> add_to_first_floor
|> solve
end
defp add_to_first_floor([{1, things} | rest]) do
[{1, [{:generator, :dilithium},
{:generator, :elerium},
{:microchip, :dilithium},
{:microchip, :elerium} | things]} | rest]
end
defp solve(input) do
state = input
|> convert_to_bitmap
|> Map.new
|> Map.put(:current_floor, 1)
bfs([state], 0, MapSet.new(state))
end
defp convert_to_bitmap(contents) do
bitmap = contents
|> Enum.flat_map(fn {_, list} ->
for {_, type} <- list, do: type
end)
|> Enum.uniq
|> Enum.with_index
|> Map.new
Enum.map(contents, fn {floor, list} ->
generators = things_to_bitmask(list, :generator, bitmap)
microchips = things_to_bitmask(list, :microchip, bitmap)
{floor, {generators, microchips}}
end)
end
defp things_to_bitmask(list, tag, bitmap) do
Enum.reduce(list, 0, fn thing, acc ->
case thing do
{^tag, type} ->
acc ||| (1 <<< Map.fetch!(bitmap, type))
_ ->
acc
end
end)
end
defp bfs([], moves, _seen), do: {:failed, moves}
defp bfs(states, moves, seen) do
case Enum.any?(states, &is_done?/1) do
true ->
moves
false ->
{states, seen} = update_states(states, seen, [])
# IO.inspect {moves, length(states)}
bfs(states, moves + 1, seen)
end
end
defp is_done?(state) do
case state do
%{1 => {0,0}, 2 => {0,0}, 3 => {0,0}} -> true
%{} -> false
end
end
defp update_states([], seen, acc), do: {acc, seen}
defp update_states([state | states], seen, acc) do
floor = Map.fetch!(state, :current_floor)
things_here = Map.fetch!(state, floor)
movables = combinations(things_here)
|> Enum.filter(fn things_moved ->
move_possible?(remove(things_here, things_moved))
end)
moved = move_to(floor, floor + 1, movables, state, [])
moved = move_to(floor, floor - 1, movables, state, moved)
new_states = moved
|> Enum.map(&canonical_state/1)
|> Enum.reject(&MapSet.member?(seen, &1))
seen = Enum.reduce(new_states, seen, &MapSet.put(&2, &1))
update_states(states, seen, new_states ++ acc)
end
defp canonical_state(state) do
mapping = 1..4
|> Enum.flat_map(fn floor ->
{gen, chip} = Map.fetch!(state, floor)
bit_numbers(gen ||| chip, 0, [])
end)
|> Enum.uniq
|> Enum.with_index
|> Map.new
1..4
|> Enum.reduce(state, fn floor, state ->
{gen, chip} = Map.fetch!(state, floor)
gen = translate_bits(gen, mapping, 0, 0)
chip = translate_bits(chip, mapping, 0, 0)
Map.replace(state, floor, {gen, chip})
end)
end
defp bit_numbers(0, _, acc), do: acc
defp bit_numbers(bits, n, acc) do
case bits &&& 1 do
0 -> bit_numbers(bits >>> 1, n + 1, acc)
1 -> bit_numbers(bits >>> 1, n + 1, [n | acc])
end
end
defp translate_bits(0, _, _, acc), do: acc
defp translate_bits(bits, mapping, n, acc) do
case bits &&& 1 do
0 -> translate_bits(bits >>> 1, mapping, n + 1, acc)
1 -> translate_bits(bits >>> 1, mapping, n + 1, acc ||| (1 <<< Map.fetch!(mapping, n)))
end
end
defp remove({gens, chips}, {gens1, chips1}) do
{gens &&& bnot(gens1), chips &&& bnot(chips1)}
end
defp add({gens, chips}, {gens1, chips1}) do
{gens ||| gens1, chips ||| chips1}
end
defp move_to(from_floor, to_floor, combinations, state, acc) do
case state do
%{^from_floor => things_here,
^to_floor => already_there} ->
Enum.reduce(combinations, acc, fn moved_things, acc ->
all_things = add(moved_things, already_there)
case move_possible?(all_things) do
false ->
acc
true ->
state = %{state | from_floor => remove(things_here, moved_things),
to_floor => all_things,
:current_floor => to_floor}
[state | acc]
end
end)
%{} ->
acc
end
end
defp move_possible?({generators, microchips}) do
generators === 0 or microchips === 0 or (generators &&& microchips) === microchips
end
defp combinations({generators, microchips}) do
for gen <- bit_combinations(generators, 0),
chip <- bit_combinations(microchips, 0),
(count_ones(gen) + count_ones(chip) in 1..2) do
{gen, chip}
end
end
defp bit_combinations(0, _), do: [0]
defp bit_combinations(mask, n) do
case (mask >>> n) &&& 1 do
0 ->
bit_combinations(mask, n + 1)
1 ->
bit = 1 <<< n
mask = mask &&& bnot bit
[bit | Enum.map(bits(mask, n + 1), fn other_bits ->
bit ||| other_bits
end)] ++ bit_combinations(mask, n + 1)
end
end
defp bits(0, _), do: []
defp bits(mask, n) do
case (mask >>> n) &&& 1 do
0 ->
bits(mask, n + 1)
1 ->
bit = 1 <<< n
mask = mask &&& bnot bit
[1 <<< n | bits(mask, n + 1)]
end
end
def count_ones(n), do: count_ones(n, 0)
def count_ones(0, count), do: count
def count_ones(n, count), do: count_ones(n &&& (n - 1), count + 1)
end
defmodule Parser do
import NimbleParsec
defp map_number([word]) do
map = ~w(first second third fourth)
|> Enum.with_index(1)
|> Map.new
Map.fetch!(map, word)
end
defp to_atom([word]), do: String.to_atom(word)
defp pack_arrangement([floor | contents]) do
{floor, Enum.sort(contents)}
end
blank = ignore(optional(ascii_char([?\s])))
a_an = ignore(string("a") |> optional(string("n")))
atom = ascii_string([?a..?z], min: 1)
|> reduce({:to_atom, []})
generator = atom
|> concat(blank)
|> ignore(string("generator"))
|> unwrap_and_tag(:generator)
microchip = atom
|> ignore(string("-compatible microchip"))
|> unwrap_and_tag(:microchip)
nothing = ignore(string("nothing relevant"))
thing = optional(a_an)
|> concat(blank)
|> choice([microchip, generator, nothing])
conjunction = ignore(optional(string(",")) |> optional(string(" and")))
defcombinatorp :things,
thing
|> choice([conjunction |> concat(blank) |> parsec(:things),
ignore(string("."))])
ordinal_number = ascii_string([?a..?z], min: 1)
|> reduce({:map_number, []})
floor = ignore(string("The "))
|> concat(ordinal_number)
|> ignore(string(" floor"))
defparsec :arrangement, floor
|> ignore(string(" contains "))
|> parsec(:things)
|> reduce({:pack_arrangement, []})
|> eos
def parse(input) do
Enum.map(input, fn line ->
{:ok, [result], "", _, _, _} = arrangement(line)
result
end)
end
end
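# --- Usage sketch ---
# Parsing one puzzle line in the Advent of Code 2016 day 11 format. The
# contents come back as sorted keyword pairs, as produced by
# `pack_arrangement/1`.
line = "The first floor contains a hydrogen-compatible microchip and a lithium generator."
[{1, contents}] = Parser.parse([line])
# contents == [generator: :lithium, microchip: :hydrogen]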
|
day11/lib/day11.ex
| 0.624752
| 0.620277
|
day11.ex
|
starcoder
|
defmodule CSQuery do
@moduledoc """
A query builder for the AWS [CloudSearch][] [structured search syntax][sss].
This query builder is largely inspired by [csquery for Python][csquery.py].
The queries built with this library are raw input to the `q` parameter of a
CloudSearch request when `q.parser=structured`.
CSQuery provides two ways of building a query:
* A DSL-style approach like the Python implementation:
```
iex> and!([title: "star", actor: "<NAME>", boost: 2]) |> to_query()
"(and boost=2 title:'star' actor:'Harrison Ford')"
```
* A structured parser:
```
iex> parse(and: [title: "star", actor: "Harrison Ford", boost: 2]) |>
...> to_query()
"(and boost=2 title:'star' actor:'Harrison Ford')"
```
The structured parser feels like it fits better in the style of Elixir,
especially with complex queries (see below). Both are supported (and are
implemented the same way). The documentation for each operator is on the
DSL-like functions below (as with `and!/1`), but examples are given for both
forms.
### Complex Queries
A complex query can be built with sufficient nesting:
iex> and!([
...> not!(["test", field: "genres"]),
...> or!([
...> term!(["star", field: "title", boost: 2]),
...> term!(["star", field: "plot"])
...> ])
...> ]) |> to_query
"(and (not field=genres 'test') (or (term boost=2 field=title 'star') (term field=plot 'star')))"
iex> parse(and: [
...> not: ["test", field: "genres"],
...> or: [
...> term: ["star", field: "title", boost: 2],
...> term: ["star", field: "plot"]
...> ]
...> ]) |> to_query
"(and (not field=genres 'test') (or (term boost=2 field=title 'star') (term field=plot 'star')))"
It is also possible to mix and match the forms (but please avoid this):
iex> parse(and: [
...> not!(["test", field: "genres"]),
...> or: [
...> term: ["star", field: "title", boost: 2],
...> term: ["star", field: "plot"]
...> ]
...> ]) |> to_query
"(and (not field=genres 'test') (or (term boost=2 field=title 'star') (term field=plot 'star')))"
### Supported Field Value Data Types:
* Strings:
```
iex> term!(["STRING"]) |> to_query
"(term 'STRING')"
```
* Ranges:
```
iex> range!([1..2]) |> to_query
"(range [1,2])"
iex> range!([{nil, 10}]) |> to_query
"(range {,10])"
iex> range!([{10, nil}]) |> to_query
"(range [10,})"
iex> range!([CSQuery.Range.new(%{first?: 0, last?: 101})]) |> to_query
"(range {0,101})"
```
* Numbers:
```
iex> term!([10]) |> to_query
"(term 10)"
iex> term!([3.14159]) |> to_query
"(term 3.14159)"
```
* DateTime (`t:DateTime.t/0`):
```
iex> %DateTime{
...> year: 2018, month: 7, day: 21,
...> hour: 17, minute: 55, second: 0,
...> time_zone: "America/Toronto", zone_abbr: "EST",
...> utc_offset: -14_400, std_offset: 0
...> } |> List.wrap() |> term! |> to_query
"(term '2018-07-21T17:55:00-04:00')"
```
* Terms:
```
iex> or!(["(and 'star' 'wars')", "(and 'star' 'trek')"]) |> to_query
"(or (and 'star' 'wars') (and 'star' 'trek'))"
```
## ExAws.CloudSearch Support
The forthcoming ExAws.CloudSearch library will recognize CSQuery-generated
expressions and configure its query request so that the structured parser is
used.
[CloudSearch]: https://docs.aws.amazon.com/cloudsearch/
[sss]: https://docs.aws.amazon.com/cloudsearch/latest/developerguide/search-api.html#structured-search-syntax
[csquery.py]: https://github.com/tell-k/csquery
"""
@operators ~w(and near not or phrase prefix range term)a
@doc "Return the list of supported expression operators."
@spec operators :: list(atom)
def operators, do: @operators
alias CSQuery.{Expression, FieldValue, OperatorOption}
@doc """
Create an unnamed field value matcher.
iex> field(3)
%CSQuery.FieldValue{value: 3}
iex> CSQuery.FieldValue.to_value(field(3))
"3"
iex> field({1990, 2000})
%CSQuery.FieldValue{value: %CSQuery.Range{first: 1990, last: 2000}}
"""
@spec field(FieldValue.values()) :: FieldValue.t()
defdelegate field(value), to: FieldValue, as: :new
@doc """
Create an optionally-named field value matcher.
iex> field("title", 3)
%CSQuery.FieldValue{name: "title", value: 3}
iex> field(nil, 3)
%CSQuery.FieldValue{name: nil, value: 3}
iex> field(:year, {1990, 2000})
%CSQuery.FieldValue{name: :year, value: %CSQuery.Range{first: 1990, last: 2000}}
"""
@spec field(FieldValue.names(), FieldValue.values()) :: FieldValue.t()
defdelegate field(name, value), to: FieldValue, as: :new
@doc """
Create an operator option.
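A minimal illustration (the struct shape shown is an assumption, mirroring
`CSQuery.FieldValue`):

    option(:boost, 2)
    #=> %CSQuery.OperatorOption{name: :boost, value: 2}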
"""
@spec option(OperatorOption.names(), any) :: OperatorOption.t()
defdelegate option(name, value), to: OperatorOption, as: :new
@doc """
Creates an `and` expression.
(and boost=N EXPRESSION1 EXPRESSION2 ... EXPRESSIONn)
## Examples
Find any document that has both 'star' and 'space' in the title.
iex> and!(title: "star", title: "space") |> to_query
"(and title:'star' title:'space')"
iex> parse(and: [title: "star", title: "space"]) |> to_query
"(and title:'star' title:'space')"
Find any document that has 'star' in the title, 'Harrison Ford' in actors,
and the year is any time before 2000.
iex> parse(and: [title: "star", actors: "Harrison Ford", year: {nil, 2000}]) |> to_query
"(and title:'star' actors:'Harrison Ford' year:{,2000])"
iex> and!(title: "star", actors: "Harrison Ford", year: {nil, 2000}) |> to_query
"(and title:'star' actors:'Harrison Ford' year:{,2000])"
Find any document that has 'star' in the title, 'Harrison Ford' in actors,
and the year is any time after 2000. Note that the option name *must* be an
atom.
iex> and!([
...> option(:boost, 2),
...> field("title", "star"),
...> field("actors", "Harrison Ford"),
...> field("year", {2000, nil})
...> ]) |> to_query
"(and boost=2 title:'star' actors:'Harrison Ford' year:[2000,})"
iex> parse(and: [
...> option(:boost, 2),
...> field("title", "star"),
...> field("actors", "Harrison Ford"),
...> field("year", {2000, nil})
...> ]) |> to_query
"(and boost=2 title:'star' actors:'Harrison Ford' year:[2000,})"
Find any document that contains the words 'star' and 'trek' in any text or
text-array field.
iex> and!(["star", "trek"]) |> to_query
"(and 'star' 'trek')"
iex> parse(and: ["star", "trek"]) |> to_query
"(and 'star' 'trek')"
"""
@spec and!(keyword) :: Expression.t() | no_return
def and!(list), do: Expression.new(:and, list)
@doc """
Creates an `or` expression.
(or boost=N EXPRESSION1 EXPRESSION2 ... EXPRESSIONn)
## Examples
Find any document that has 'star' or 'space' in the title.
iex> or!(title: "star", title: "space") |> to_query
"(or title:'star' title:'space')"
iex> parse(or: [title: "star", title: "space"]) |> to_query
"(or title:'star' title:'space')"
Find any document that has 'star' in the title, 'Harrison Ford' in actors,
or the year is any time before 2000.
iex> parse(or: [title: "star", actors: "Harrison Ford", year: {nil, 2000}]) |> to_query
"(or title:'star' actors:'Harrison Ford' year:{,2000])"
iex> or!(title: "star", actors: "Harrison Ford", year: {nil, 2000}) |> to_query
"(or title:'star' actors:'Harrison Ford' year:{,2000])"
Find any document that has 'star' in the title, 'Harrison Ford' in actors,
or the year is any time after 2000. Note that the option name *must* be an
atom.
iex> or!([
...> option(:boost, 2),
...> field("title", "star"),
...> field("actors", "<NAME>"),
...> field("year", {2000, nil})
...> ]) |> to_query
"(or boost=2 title:'star' actors:'<NAME>' year:[2000,})"
iex> parse(or: [
...> option(:boost, 2),
...> field("title", "star"),
...> field("actors", "Harrison Ford"),
...> field("year", {2000, nil})
...> ]) |> to_query
"(or boost=2 title:'star' actors:'Harrison Ford' year:[2000,})"
Find any document that contains the words 'star' or 'trek' in any text or
text-array field.
iex> or!(["star", "trek"]) |> to_query
"(or 'star' 'trek')"
iex> parse(or: ["star", "trek"]) |> to_query
"(or 'star' 'trek')"
"""
@spec or!(keyword) :: Expression.t() | no_return
def or!(list), do: Expression.new(:or, list)
@doc """
Creates a `not` expression.
(not boost=N EXPRESSION)
## Examples
Find any document that does not have 'star' or 'space' in the title.
iex> not!([or!([title: "star", title: "space"])]) |> to_query
"(not (or title:'star' title:'space'))"
iex> [title: "star", title: "space"] |>
...> or!() |> List.wrap() |> not!() |> to_query
"(not (or title:'star' title:'space'))"
iex> parse(not: [or: [title: "star", title: "space"]]) |> to_query
"(not (or title:'star' title:'space'))"
Find any document that does not have both 'Harrison Ford' in actors and a
year before 2010.
iex> parse(not: [and: [actors: "H<NAME>", year: {nil, 2010}]]) |> to_query
"(not (and actors:'H<NAME>' year:{,2010]))"
iex> not!([and!(actors: "<NAME>", year: {nil, 2010})]) |> to_query
"(not (and actors:'Harrison Ford' year:{,2010]))"
Find any document that does not contain the words 'star' or 'trek' in any
text or text-array field.
iex> not!([or!(["star", "trek"])]) |> to_query
"(not (or 'star' 'trek'))"
iex> parse(not: [or: ["star", "trek"]]) |> to_query
"(not (or 'star' 'trek'))"
If more than one expression is provided, `CSQuery.TooManyFieldValuesError`
will be raised.
iex> not!(["star", "space", boost: 2]) |> to_query
** (CSQuery.TooManyFieldValuesError) Expression for operator `not` has 2 fields, but should only have one.
"""
@spec not!(keyword) :: Expression.t() | no_return
def not!(list), do: Expression.new(:not, list)
@doc """
Creates a `near` expression.
(near boost=N distance=N field=FIELD 'STRING')
## Examples
Find any document that contains the words 'teenage' and 'vampire' within two
words of each other in the plot field.
iex> near!(["teenage vampire", boost: 2, distance: 2, field: "plot"]) |> to_query
"(near boost=2 distance=2 field=plot 'teenage vampire')"
iex> parse(near: ["teenage vampire", boost: 2, distance: 2, field: "plot"]) |> to_query
"(near boost=2 distance=2 field=plot 'teenage vampire')"
Find any document that contains the words 'teenage' and 'vampire' within
three words in any text or text-array field.
iex> near!(["teenage vampire", distance: 3]) |> to_query
"(near distance=3 'teenage vampire')"
iex> parse(near: ["teenage vampire", distance: 3]) |> to_query
"(near distance=3 'teenage vampire')"
If the field value is a string but does not contain a space,
`CSQuery.MultipleWordsRequiredError` will be raised.
iex> near!(["word"]) |> to_query
** (CSQuery.MultipleWordsRequiredError) Expression field value for operator `near` requires multiple words.
If the field value is not a string,
`CSQuery.StringRequiredError` will be raised.
iex> near!([2000, boost: 2, distance: 2, field: "title"]) |> to_query
** (CSQuery.StringRequiredError) Expression field value for operator `near` must be a string value.
"""
@spec near!(keyword) :: Expression.t() | no_return
def near!(list), do: Expression.new(:near, list)
@doc """
Creates a `phrase` expression.
(phrase boost=N field=FIELD 'STRING')
## Examples
Find any document that contains the exact phrase 'teenage vampire' in the
plot field.
iex> phrase!(["teenage vampire", boost: 2, field: "plot"]) |> to_query
"(phrase boost=2 field=plot 'teenage vampire')"
iex> parse(phrase: ["teenage vampire", boost: 2, field: "plot"]) |> to_query
"(phrase boost=2 field=plot 'teenage vampire')"
Find any document that contains the exact phrase 'teenage vampire' in any
text or text-array field.
iex> phrase!(["teenage vampire"]) |> to_query
"(phrase 'teenage vampire')"
iex> parse(phrase: ["teenage vampire"]) |> to_query
"(phrase 'teenage vampire')"
If more than one field value is provided, `CSQuery.TooManyFieldValuesError`
will be raised.
iex> phrase!(["teenage", "vampire"]) |> to_query
** (CSQuery.TooManyFieldValuesError) Expression for operator `phrase` has 2 fields, but should only have one.
If the field value is not a string, `CSQuery.StringRequiredError` will be
raised.
iex> phrase!([2000, boost: 2, field: "title"]) |> to_query
** (CSQuery.StringRequiredError) Expression field value for operator `phrase` must be a string value.
"""
@spec phrase!(keyword) :: Expression.t() | no_return
def phrase!(list), do: Expression.new(:phrase, list)
@doc """
Creates a `prefix` expression.
(prefix boost=N field=FIELD 'STRING')
## Examples
Find any document that has a word starting with 'teen' in the title field.
iex> prefix!(["teen", boost: 2, field: "title"]) |> to_query
"(prefix boost=2 field=title 'teen')"
iex> parse(prefix: ["teen", boost: 2, field: "title"]) |> to_query
"(prefix boost=2 field=title 'teen')"
Find any document that contains a word starting with 'teen' in any text or
text-array field.
iex> prefix!(["teen"]) |> to_query
"(prefix 'teen')"
iex> parse(prefix: ["teen"]) |> to_query
"(prefix 'teen')"
If there is more than one field provided, `CSQuery.TooManyFieldValuesError`
will be raised.
iex> prefix!(["star", "value"]) |> to_query
** (CSQuery.TooManyFieldValuesError) Expression for operator `prefix` has 2 fields, but should only have one.
If the field value is not a string, `CSQuery.StringRequiredError` will be
raised.
iex> prefix!([2000]) |> to_query
** (CSQuery.StringRequiredError) Expression field value for operator `prefix` must be a string value.
"""
@spec prefix!(keyword) :: Expression.t() | no_return
def prefix!(list), do: Expression.new(:prefix, list)
@doc """
Creates a `range` expression.
(range boost=N field=FIELD RANGE)
## Examples
Find any document that has a number between 1990 and 2000 in any field.
iex> range!([{1990, 2000}]) |> to_query
"(range [1990,2000])"
iex> parse(range: [{1990, 2000}]) |> to_query
"(range [1990,2000])"
Find any document that has a number up to 2000 in any field.
iex> range!([{nil, 2000}]) |> to_query
"(range {,2000])"
iex> parse(range: [{nil, 2000}]) |> to_query
"(range {,2000])"
Find any document that has a number equal to or greater than 1990 in any
field.
iex> range!([{1990, nil}]) |> to_query
"(range [1990,})"
iex> parse(range: [{1990, nil}]) |> to_query
"(range [1990,})"
Find any document that has a number between 2004 and 2006 in the date field,
inclusive, converted from an Elixir `t:Range.t/0` type.
iex> range!([2004..2006, field: "date"]) |> to_query
"(range field=date [2004,2006])"
iex> parse(range: [2004..2006, field: "date"]) |> to_query
"(range field=date [2004,2006])"
Find any document that has a number between 1990 and 2000 in the date field,
but includes neither 1990 nor 2000.
iex> range!([
...> CSQuery.Range.new(%{first?: 1990, last?: 2000}),
...> field: "date",
...> boost: 2
...> ]) |> to_query
"(range boost=2 field=date {1990,2000})"
iex> parse(range: [
...> CSQuery.Range.new(%{first?: 1990, last?: 2000}),
...> field: "date",
...> boost: 2
...> ]) |> to_query
"(range boost=2 field=date {1990,2000})"
Ranges may also be specified as strings.
iex> range!(["[1990,2000]"]) |> to_query
"(range [1990,2000])"
iex> range!(["[1990,}"]) |> to_query
"(range [1990,})"
iex> range!(["{,2000]"]) |> to_query
"(range {,2000])"
iex> range!(["{1990,2000}"]) |> to_query
"(range {1990,2000})"
iex> parse(range: ["[1990,2000]"]) |> to_query
"(range [1990,2000])"
iex> parse(range: ["[1990,}"]) |> to_query
"(range [1990,})"
iex> parse(range: ["{,2000]"]) |> to_query
"(range {,2000])"
iex> parse(range: ["{1990,2000}"]) |> to_query
"(range {1990,2000})"
If there are multiple values provided, `CSQuery.TooManyFieldValuesError` will
be raised.
iex> range!(["one", "two"]) |> to_query
** (CSQuery.TooManyFieldValuesError) Expression for operator `range` has 2 fields, but should only have one.
If the value provided is not a range, `CSQuery.RangeRequiredError` will be
raised.
iex> range!([2000]) |> to_query
** (CSQuery.RangeRequiredError) Expression field value for operator `range` must be a range.
"""
@spec range!(keyword) :: Expression.t() | no_return
def range!(list), do: Expression.new(:range, list)
@doc """
Creates a `term` expression.
(term boost=N field=FIELD 'STRING'|VALUE)
> Warning: The parser does not currently enforce a single term value, so it
> is possible to create an invalid query. The following doctest documents the
> intended behaviour and will fail until that enforcement is added.
iex> term!(["star", "space", boost: 2]) |> to_query
** (CSQuery.TooManyFieldValuesError) Expression for operator `term` has 2 fields, but should only have one.
## Examples
Find any document with a term 2000 in the year field.
iex> term!([2000, field: "year", boost: 2]) |> to_query
"(term boost=2 field=year 2000)"
iex> parse(term: [2000, field: "year", boost: 2]) |> to_query
"(term boost=2 field=year 2000)"
Find any document with a term 'star' in any text or text-array field.
iex> term!(["star"]) |> to_query
"(term 'star')"
iex> parse(term: ["star"]) |> to_query
"(term 'star')"
"""
@spec term!(keyword) :: Expression.t() | no_return
def term!(list), do: Expression.new(:term, list)
@doc """
Parse a structured description of the query to build to produce an
expression. An exception will be raised if an invalid expression is
constructed.
An empty keyword list as a document returns `nil`.
iex> parse([])
nil
If an error occurs during parsing, an exception will be raised, as per the
operator documentation. If an unknown operator is provided,
`CSQuery.UnknownOperatorError` will be raised. If no field values are
provided for an operator, `CSQuery.NoFieldValuesError` will be raised.
iex> parse(foo: [])
** (CSQuery.UnknownOperatorError) Unknown operator `foo` provided.
iex> parse(and: [])
** (CSQuery.NoFieldValuesError) Expression for operator `and` has no field values.
If more than one condition is at the top level of the structured query
document, a list of queries will be returned.
iex> parse(and: ["star", "wars"], and: ["star", "trek"]) |> to_query
["(and 'star' 'wars')", "(and 'star' 'trek')"]
Detailed examples of `CSQuery.parse/1` are found in the documentation for
`CSQuery.and!/1`, `CSQuery.near!/1`, `CSQuery.not!/1`, `CSQuery.or!/1`,
`CSQuery.phrase!/1`, `CSQuery.prefix!/1`, `CSQuery.range!/1`, and
`CSQuery.term!/1`.
"""
@spec parse(keyword) :: nil | Expression.t() | list(Expression.t()) | no_return
def parse(query) do
case Expression.new(query) do
[] -> nil
[expr] -> expr
result -> result
end
end
@doc """
Convert a query expression (`t:CSQuery.Expression.t/0`) to a string, or a
list of query expressions to a list of strings.
"""
@spec to_query(list(Expression.t())) :: list(String.t())
@spec to_query(Expression.t()) :: String.t()
defdelegate to_query(expr), to: Expression
end
|
lib/csquery.ex
| 0.910132
| 0.86771
|
csquery.ex
|
starcoder
|
defmodule ExUnit.Diff do
@moduledoc false
@doc """
Formats the difference between `left` and `right`.
Returns `nil` if they are not the same data type,
or if the given data type is not supported.
"""
def format(left, right, formatter)
def format(left, right, formatter)
when is_binary(left) and is_binary(right) do
if String.printable?(left) and String.printable?(right) do
left = Inspect.BitString.escape(left, ?\")
right = Inspect.BitString.escape(right, ?\")
"\"" <> format_string(left, right, formatter) <> "\""
end
end
def format(%name{} = left, %name{} = right, formatter) do
left = Map.from_struct(left)
right = Map.from_struct(right)
format_map(left, right, inspect(name), formatter)
end
def format(%{} = left, %{} = right, formatter) do
if match?(%_{}, left) or match?(%_{}, right) do
nil
else
format_map(left, right, "", formatter)
end
end
def format(left, right, formatter) when is_list(left) and is_list(right) do
if Inspect.List.printable?(left) and Inspect.List.printable?(right) do
left = List.to_string(left) |> Inspect.BitString.escape(?')
right = List.to_string(right) |> Inspect.BitString.escape(?')
"'" <> format_string(left, right, formatter) <> "'"
else
keyword? = Inspect.List.keyword?(left) and Inspect.List.keyword?(right)
format_list(left, right, formatter, keyword?, [])
end
end
def format(left, right, formatter)
when is_integer(left) and is_integer(right)
when is_float(left) and is_float(right) do
{kind, skew} =
case to_string(right - left) do
"-" <> _ = result ->
{:diff_delete, result}
result ->
{:diff_insert, "+" <> result}
end
value_diff = formatter.(kind, "(off by " <> skew <> ")")
format_string(inspect(left), inspect(right), formatter) <> " " <> value_diff
end
def format(left, right, formatter)
when is_tuple(left) and is_tuple(right) do
left = {left, tuple_size(left) - 1}
right = {right, tuple_size(right) - 1}
format_tuple(left, right, formatter, [])
end
def format(_left, _right, _formatter), do: nil
defp format_string(string1, string2, formatter) do
string_difference(string1, string2)
|> Enum.map_join(&format_fragment(&1, formatter))
end
defp string_difference(string1, string2) do
length1 = String.length(string1)
length2 = String.length(string2)
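# Heuristic: when more than 60% of the graphemes differ (by bag distance),
# show a plain delete/insert rather than a character-level Myers diff.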
if bag_distance(string1, string2) / max(length1, length2) > 0.6 do
[del: string1, ins: string2]
else
String.myers_difference(string1, string2)
end
end
# The algorithm is outlined in the
# "String Matching with Metric Trees Using an Approximate Distance"
# paper by Ilaria Bartolini, Paolo Ciaccia, and Marco Patella.
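# Illustration: the "bag" of a string is the multiset of its graphemes. For
# bag_distance("star", "tart"): bag1 = %{"s" => 1, "t" => 1, "a" => 1, "r" => 1}
# and bag2 = %{"t" => 2, "a" => 1, "r" => 1}; diff1 = 1 (the "s"), diff2 = 1
# (the extra "t"), so the distance is max(1, 1) = 1.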
defp bag_distance(string1, string2) do
bag1 = string_to_bag(string1)
bag2 = string_to_bag(string2)
diff1 = bag_difference(bag1, bag2)
diff2 = bag_difference(bag2, bag1)
max(diff1, diff2)
end
defp string_to_bag(string) do
string_to_bag(string, %{}, &(&1 + 1))
end
defp string_to_bag(string, bag, fun) do
case String.next_grapheme(string) do
{char, rest} ->
bag = Map.update(bag, char, 1, fun)
string_to_bag(rest, bag, fun)
nil ->
bag
end
end
defp bag_difference(bag1, bag2) do
Enum.reduce(bag1, 0, fn {char, count1}, sum ->
case Map.fetch(bag2, char) do
{:ok, count2} ->
sum + max(count1 - count2, 0)
:error ->
sum + count1
end
end)
end
defp format_list([], [], _formatter, _keyword?, acc) do
result = with ", " <> rest <- Enum.join(Enum.reverse(acc)), do: rest
"[" <> result <> "]"
end
defp format_list([], [elem | rest], formatter, keyword?, acc) do
elem_diff = formatter.(:diff_insert, format_list_elem(elem, keyword?))
format_list([], rest, formatter, keyword?, [", " <> elem_diff | acc])
end
defp format_list([elem | rest], [], formatter, keyword?, acc) do
elem_diff = formatter.(:diff_delete, format_list_elem(elem, keyword?))
format_list(rest, [], formatter, keyword?, [", " <> elem_diff | acc])
end
defp format_list([elem | rest1], [elem | rest2], formatter, keyword?, acc) do
elem_diff = format_list_elem(elem, keyword?)
format_list(rest1, rest2, formatter, keyword?, [", " <> elem_diff | acc])
end
defp format_list([{key1, val1} | rest1], [{key2, val2} | rest2], formatter, true, acc) do
key_diff =
if key1 != key2 do
format_string(Atom.to_string(key1), Atom.to_string(key2), formatter)
else
Atom.to_string(key1)
end
value_diff = format_inner(val1, val2, formatter)
elem_diff = format_key_value(key_diff, value_diff, true)
format_list(rest1, rest2, formatter, true, [", " <> elem_diff | acc])
end
defp format_list([elem1 | rest1], [elem2 | rest2], formatter, false, acc) do
elem_diff = format_inner(elem1, elem2, formatter)
format_list(rest1, rest2, formatter, false, [", " <> elem_diff | acc])
end
defp format_list(last, [elem | rest], formatter, keyword?, acc) do
joiner_diff = format_plain_diff(" |", ",", formatter) <> " "
elem_diff = format_inner(last, elem, formatter)
new_acc = [joiner_diff <> elem_diff | acc]
format_list([], rest, formatter, keyword?, new_acc)
end
defp format_list([elem | rest], last, formatter, keyword?, acc) do
joiner_diff = format_plain_diff(",", " |", formatter) <> " "
elem_diff = format_inner(elem, last, formatter)
new_acc = [joiner_diff <> elem_diff | acc]
format_list(rest, [], formatter, keyword?, new_acc)
end
defp format_list(last1, last2, formatter, keyword?, acc) do
elem_diff =
cond do
last1 == [] ->
formatter.(:diff_insert, inspect(last2))
last2 == [] ->
formatter.(:diff_delete, inspect(last1))
true ->
format_inner(last1, last2, formatter)
end
new_acc = [" | " <> elem_diff | acc]
format_list([], [], formatter, keyword?, new_acc)
end
defp format_list_elem(elem, false), do: inspect(elem)
defp format_list_elem({key, val}, true) do
format_key_value(Atom.to_string(key), inspect(val), true)
end
defp format_tuple({_tuple1, -1}, {_tuple2, -1}, _formatter, acc) do
"{" <> Enum.join(acc, ", ") <> "}"
end
defp format_tuple({tuple1, index1}, {_, index2} = right, formatter, acc)
when index1 > index2 do
elem = elem(tuple1, index1)
elem_diff = formatter.(:diff_delete, inspect(elem))
format_tuple({tuple1, index1 - 1}, right, formatter, [elem_diff | acc])
end
defp format_tuple({_, index1} = left, {tuple2, index2}, formatter, acc)
when index1 < index2 do
elem = elem(tuple2, index2)
elem_diff = formatter.(:diff_insert, inspect(elem))
format_tuple(left, {tuple2, index2 - 1}, formatter, [elem_diff | acc])
end
defp format_tuple({tuple1, index}, {tuple2, index}, formatter, acc) do
elem1 = elem(tuple1, index)
elem2 = elem(tuple2, index)
elem_diff = format_inner(elem1, elem2, formatter)
format_tuple({tuple1, index - 1}, {tuple2, index - 1}, formatter, [elem_diff | acc])
end
defp format_map(left, right, name, formatter) do
{surplus, altered, missing} = map_difference(left, right)
keyword? =
Inspect.List.keyword?(surplus) and
Inspect.List.keyword?(altered) and
Inspect.List.keyword?(missing)
result =
if map_size(right) > length(altered) + length(missing),
do: ["..."],
else: []
result = Enum.reduce(missing, result, fn({key, val}, acc) ->
map_pair = format_key_value(inspect(key), inspect(val), keyword?)
[formatter.(:diff_insert, map_pair) | acc]
end)
result = Enum.reduce(surplus, result, fn({key, val}, acc) ->
map_pair = format_key_value(inspect(key), inspect(val), keyword?)
[formatter.(:diff_delete, map_pair) | acc]
end)
result = Enum.reduce(altered, result, fn({key, {val1, val2}}, acc) ->
value_diff = format_inner(val1, val2, formatter)
[format_key_value(inspect(key), value_diff, keyword?) | acc]
end)
"%" <> name <> "{" <> Enum.join(result, ", ") <> "}"
end
defp map_difference(map1, map2) do
{surplus, altered} =
Enum.reduce(map1, {[], []}, fn({key, val1}, {surplus, altered} = acc) ->
case Map.fetch(map2, key) do
{:ok, ^val1} ->
acc
{:ok, val2} ->
{surplus, [{key, {val1, val2}} | altered]}
:error ->
{[{key, val1} | surplus], altered}
end
end)
missing = Enum.reduce(map2, [], fn({key, _} = pair, acc) ->
if Map.has_key?(map1, key), do: acc, else: [pair | acc]
end)
{surplus, altered, missing}
end
defp format_key_value(key, value, false) do
key <> " => " <> value
end
defp format_key_value(":" <> rest, value, true) do
format_key_value(rest, value, true)
end
defp format_key_value(key, value, true) do
key <> ": " <> value
end
defp format_inner(term, term, _formatter), do: inspect(term)
defp format_inner(left, right, formatter) do
if result = format(left, right, formatter) do
result
else
format_plain_diff(inspect(left), inspect(right), formatter)
end
end
defp format_plain_diff(left, right, formatter) do
formatter.(:diff_delete, left) <>
formatter.(:diff_insert, right)
end
defp format_fragment({:eq, content}, _), do: content
defp format_fragment({:del, content}, formatter) do
formatter.(:diff_delete, content)
end
defp format_fragment({:ins, content}, formatter) do
formatter.(:diff_insert, content)
end
end
|
lib/ex_unit/lib/ex_unit/diff.ex
| 0.798972
| 0.62266
|
diff.ex
|
starcoder
|
defmodule XPlane.Cmd do
@moduledoc """
Send X-Plane commands.
"""
@listen_port 59001
use GenServer
# API
@doc """
Start GenServer controlling port used to send commands to a specific X-Plane instance.
## Parameters
- instance: X-Plane instance from list returned by `XPlane.Instance.list/0`
"""
@spec start(XPlane.Instance.t, list) :: {:ok, pid} | {:error, any} | :ignore
def start(instance, opts \\ []) do
GenServer.start(__MODULE__,
{:ok, instance},
[name: name(instance)] ++ opts)
end
@doc """
Start GenServer linked to current process controlling port used to send commands
to a specific X-Plane instance.
## Parameters
- instance: X-Plane instance from list returned by `XPlane.Instance.list/0`
"""
@spec start_link(XPlane.Instance.t, list) :: {:ok, pid} | {:error, any} | :ignore
def start_link(instance, opts \\ []) do
GenServer.start_link(__MODULE__,
{:ok, instance},
[name: name(instance)] ++ opts)
end
@doc """
Send a command to X-Plane
## Parameters
- instance: X-Plane instance from list returned by `XPlane.Instance.list/0`
- commands: List of command atoms - use `XPlane.CmdRef.describe()` to look
these up.
## Example
```
iex> XPlane.Cmd.send(master, [:engines_throttle_up])
:ok
```
"""
@spec send(XPlane.Instance.t, list(atom)) :: :ok | {:error, list}
def send(instance, command_ids) do
  GenServer.call(name(instance), {:send, command_ids})
end
@doc """
Stop the GenServer controlling the port used to send commands.
"""
@spec stop(XPlane.Instance.t) :: :ok | {:error, any}
def stop(instance) do
GenServer.cast(name(instance), :stop)
end
# GenServer Callbacks
@impl true
def init({:ok, instance}) do
{:ok, sock} = :gen_udp.open(@listen_port, [:binary, active: false])
{:ok, {XPlane.CmdRef.load_version(instance.version_number), instance, sock}}
end
@impl true
def handle_call({:send, command_ids}, _from, state={cmd_refs, instance, sock}) do
vetted_command_ids = for cmd_id <- command_ids do
if cmd_refs |> Map.has_key?(cmd_id) do
{:ok, cmd_refs[cmd_id].name}
else
{:error, cmd_id}
end
end
errors = vetted_command_ids |> Enum.filter(&(match?({:error, _}, &1)))
if Enum.empty?(errors) do
for {:ok, name} <- vetted_command_ids do
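# X-Plane's UDP command datagram is the 5-byte "CMND\0" prologue followed
# by the command's path string (e.g. "sim/engines/throttle_up").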
:ok = :gen_udp.send(
sock,
instance.ip,
instance.port,
<<"CMND\0",
name::binary>>
)
end
{:reply, :ok, state}
else
{:reply, {:error,
for {:error, invalid_cmd_id} <- errors do
"Invalid command id: #{Atom.to_string(invalid_cmd_id)}"
end
}, state}
end
end
@impl true
def handle_cast(:stop, {_, _, sock}) do
:gen_udp.close(sock)
{:stop, :normal, nil}
end
# Helpers
defp name(instance) do
String.to_atom("#{__MODULE__}_#{instance.addr}")
end
end
|
lib/xplane_cmd.ex
| 0.834238
| 0.440951
|
xplane_cmd.ex
|
starcoder
|
defmodule RobotSimulator do
defstruct direction: :north, position: {0, 0}
@bearings [:north, :east, :south, :west]
@instructions %{
"A" => :advance,
"L" => :turn_left,
"R" => :turn_right
}
alias __MODULE__, as: RobotSimulator
defguardp invalid_direction?(direction) when not (direction in @bearings)
defguardp invalid_position?(position)
when not is_tuple(position) or
tuple_size(position) != 2 or
not is_integer(elem(position, 0)) or
not is_integer(elem(position, 1))
@doc """
Create a Robot Simulator given an initial direction and position.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec create(direction :: atom, position :: {integer, integer}) :: any
def create(direction \\ nil, position \\ nil)
def create(nil, nil), do: %RobotSimulator{}
def create(direction, _position) when invalid_direction?(direction) do
{:error, "invalid direction"}
end
def create(_direction, position) when invalid_position?(position) do
{:error, "invalid position"}
end
def create(direction, position) do
%RobotSimulator{direction: direction, position: position}
end
@doc """
Simulate the robot's movement given a string of instructions.
Valid instructions are: "R" (turn right), "L", (turn left), and "A" (advance)
"""
@spec simulate(robot :: any, instructions :: String.t()) :: any
def simulate(robot, instructions) do
instructions
|> String.graphemes()
|> Enum.map(&parse_instruction/1)
|> Enum.reduce(robot, &apply(RobotSimulator, &1, [&2]))
catch
:invalid_instruction ->
{:error, "invalid instruction"}
end
@doc """
Return the robot's direction.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec direction(robot :: any) :: atom
def direction(%RobotSimulator{direction: direction}), do: direction
@doc """
Return the robot's position.
"""
@spec position(robot :: any) :: {integer, integer}
def position(%RobotSimulator{position: position}), do: position
defp parse_instruction(instruction) do
case Map.fetch(@instructions, instruction) do
{:ok, instruction} ->
instruction
:error ->
throw(:invalid_instruction)
end
end
def turn_left(robot) do
@bearings
|> rotate(-1)
|> turn(robot)
end
def turn_right(robot) do
@bearings
|> rotate(1)
|> turn(robot)
end
def advance(%RobotSimulator{position: {x, y}, direction: direction} = robot) do
new_position =
case direction do
:north ->
{x, y + 1}
:east ->
{x + 1, y}
:south ->
{x, y - 1}
:west ->
{x - 1, y}
end
%RobotSimulator{robot | position: new_position}
end
defp turn(new_bearings, %RobotSimulator{direction: direction} = robot) do
index = Enum.find_index(@bearings, &(&1 == direction))
new_direction = Enum.fetch!(new_bearings, index)
%RobotSimulator{robot | direction: new_direction}
end
defp rotate(list, 0), do: list
defp rotate([head | tail], count) when count > 0 do
rotate(tail ++ [head], count - 1)
end
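# Rotating left by N is a right rotation by N on the reversed list:
# rotate([:north, :east, :south, :west], -1) == [:west, :north, :east, :south]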
defp rotate(list, count) when count < 0 do
list
|> Enum.reverse()
|> rotate(abs(count))
|> Enum.reverse()
end
end
|
elixir/robot-simulator/lib/robot_simulator.ex
| 0.909299
| 0.785473
|
robot_simulator.ex
|
starcoder
|
defmodule GenNNTP do
@moduledoc ~S"""
The NNTP client and server library.
This module provides both the behaviour for an NNTP server, and the client
API to interact with a NNTP server.
All functionality is defined in `:gen_nntp`. This Elixir module is just a
wrapper for `:gen_nntp`.
## Example
The `GenNNTP` behaviour abstracts the common NNTP client-server interaction.
Developers are only required to implement the callbacks and functionality
they are interested in to respond to client's commands, per specs.
Let's start with a code example and then explore the available callbacks.
Imagine we want a NNTP server that keeps data in memory.
defmodule InMemoryNNTP do
@behaviour GenNNTP
# Callbacks
@impl true
def init(data) do
{:ok, data}
end
@impl true
def handle_CAPABILITIES(data) do
{:ok, ["READER", "POST"], data}
end
@impl true
def handle_HELP(data) do
{:ok, "This NNTP server only keeps data in memory.", data}
end
end
# Start the server
{:ok, pid} = GenNNTP.start(InMemoryNNTP, port: 6791)
# This is the client
{:ok, socket} = GenNNTP.connect("localhost", 6791)
{:ok, response} = GenNNTP.command(socket, "CAPABILITIES")
#=> {:ok, "101 Capability list:\r\nVERSION 2\r\nREADER\r\nPOST"}
We start our `InMemoryNNTP` server by calling `start/3`, passing the module
with the server implementation and its initial argument (the initial data to
serve). We primarily interact with the server by sending commands to the
socket returned after connecting to the server.
Every time you do a `GenNNTP.command/2`, the client will send a command that
must be handled by one of the callbacks defined in the GenNNTP, based on the
issued command. There are many callbacks to be implemented when you use a
`GenNNTP`. The required callbacks are `c:init/1`, `c:handle_CAPABILITIES/1`
and `c:handle_HELP/1`. Other callbacks are optional in the sense that they are
still required to be implemented when your server has the capability for them.
For example, if your server has "READER" capability, you MUST provide these
callbacks: `c:handle_GROUP/2`, `c:handle_LISTGROUP/2`, `c:handle_NEXT/2`,
`c:handle_LAST/2`, `c:handle_ARTICLE/2`, `c:handle_HEAD/2`, `c:handle_BODY/2`,
`c:handle_STAT/2`. Similarly, if your server has "POST" capability, you MUST
provide `c:handle_POST/2` callback.
## Handling commands
Our example advertises the "POST" capability, so we need to define a callback
to handle the "POST" command from client. This aptly named `c:handle_POST/2`
receives a map of type `t:article/0` and can decide to accept or reject it.
Do note that `GenNNTP` abstracts away all the command parsing and response
codes, so that the callback only needs to, well, "handle" the corresponding
argument, and return an ok-tuple to accept or an error-tuple to reject.
In our example NNTP server, we simply accept any article and store into our
internal in-memory database.
defmodule InMemoryNNTP do
@behaviour GenNNTP
# Callbacks
@impl true
def init(data) do
{:ok, data}
end
@impl true
def handle_CAPABILITIES(data) do
{:ok, ["READER", "POST"], data}
end
@impl true
def handle_POST(article, data) do
{:ok, [article | data]}
end
@impl true
def handle_HELP(data) do
{:ok, "This NNTP server only keeps data in memory.", data}
end
end
# Start the server
{:ok, pid} = GenNNTP.start(InMemoryNNTP, port: 6791)
# This is the client
{:ok, socket} = GenNNTP.connect("localhost", 6791)
# POST article
article = %{
id: "<test@post>",
headers: %{
"Message-ID" => "<test@post>",
"From" => "\"Demo User\" <<EMAIL>>",
"Newsgroups" => "misc.test",
"Subject" => "I am just a test article",
"Organization" => "An Example Net",
},
body: "This is a test article."
}
{:ok, response} = GenNNTP.command(socket, "POST", [article])
#=> {:ok, "240 Article received OK"}
Our NNTP server also advertises the "READER" capability, so we want to at
least let the clients fetch an article from our server. We do that by adding
a `c:handle_ARTICLE/2` callback to "handle" the "ARTICLE" command.
This callback is particularly interesting in which it can take 2 types of
arguments: either a message ID or a tuple of article number and its group.
For the sake of simplicity, we only handle the message ID for our example.
In actual implementation, we will also need to add `c:handle_GROUP/2` and/or
`c:handle_LISTGROUP/2` to let the user select a newsgroup. This is because
the second type of argument for `c:handle_ARTICLE/2` requires a newsgroup to
be selected first.
In our example here, we simply retrieve the article matching the ID from our
internal database, or return `false` when we can't find it. For a matching
article, we also return the article number. Because we don't implement
groups, we can return 0 here.
defmodule InMemoryNNTP do
@behaviour GenNNTP
# Callbacks
@impl true
def init(data) do
{:ok, data}
end
@impl true
def handle_CAPABILITIES(data) do
{:ok, ["READER", "POST"], data}
end
@impl true
def handle_POST(article, data) do
{:ok, [article | data]}
end
@impl true
def handle_ARTICLE(message_id, data) when is_binary(message_id) do
result = Enum.find(data, false, fn
(%{id: ^message_id}) -> true
(_) -> false
end)
case result do
false -> {:ok, false, data}
article -> {:ok, {0, article}, data}
end
end
@impl true
def handle_HELP(data) do
{:ok, "This NNTP server only keeps data in memory.", data}
end
end
# Start the server
{:ok, pid} = GenNNTP.start(InMemoryNNTP, port: 6791)
# This is the client
{:ok, socket} = GenNNTP.connect("localhost", 6791)
# POST article
article = %{
id: "<test@post>",
headers: %{
"Message-ID" => "<test@post>",
"From" => "\"Demo User\" <<EMAIL>>",
"Newsgroups" => "misc.test",
"Subject" => "I am just a test article",
"Organization" => "An Example Net",
},
body: "This is a test article."
}
{:ok, response} = GenNNTP.command(socket, "POST", [article])
# ARTICLE
{:ok, response} = GenNNTP.command(socket, "ARTICLE", ["<test@post>"])
#=> {:ok, "220 0 "<test@post>"\r\nMessage-ID: <test@post>\r\n...\r\n\r\nThis is a test article."}
"""
@type option :: :gen_nntp.option()
@type article :: :gen_nntp.article()
@typep state :: any
# Default port from "PORT" environment variable or 199.
@port String.to_integer(System.get_env("PORT", "119"))
@doc """
Starts a NNTP server with a callback module.
Similar to starting a `GenServer`.
"""
@spec start(module(), any, [option]) :: :gen_nntp.on_start()
defdelegate start(module, args, options), to: :gen_nntp
@doc """
Stops a NNTP server by its reference.
The reference is usually the callback module.
"""
@spec stop(module()) :: :ok
defdelegate stop(ref), to: :gen_nntp
@doc """
Connects to a NNTP server and receives the greeting.
## Examples:
iex> {:ok, socket, _greeting} = GenNNTP.connect()
iex> is_port(socket)
true
iex> {:ok, socket, _greeting} = GenNNTP.connect("localhost")
iex> is_port(socket)
true
iex> {:ok, socket, _greeting} = GenNNTP.connect(
...> "localhost",
...> String.to_integer(System.get_env("PORT", "119"))
...> )
iex> is_port(socket)
true
iex> {:ok, socket, _greeting} = GenNNTP.connect(
...> "localhost",
...> String.to_integer(System.get_env("PORT", "119")),
...> []
...> )
iex> is_port(socket)
true
iex> {:ok, _socket, "200 " <> _} = GenNNTP.connect(
...> "localhost",
...> String.to_integer(System.get_env("PORT", "119")),
...> []
...> )
"""
defdelegate connect(address \\ "localhost", port \\ @port, options \\ []), to: :gen_nntp
@doc ~S"""
Sends a command and receives server's response.
Both single-line and multi-line responses are handled. The terminating
line in a multi-line response is discarded, and the whole response
is trimmed of whitespace.
For commands that are followed by a multi-line data block, such as
"POST", place the data block as the argument to `command/3` call.
The arguments will be converted to binary when possible.
## Examples
iex> {:ok, socket, _greeting} = GenNNTP.connect()
iex> GenNNTP.command(socket, "HELP")
{:ok, "100 Help text follows\r\nThis is some help text."}
iex> {:ok, socket, _greeting} = GenNNTP.connect()
iex> GenNNTP.command(socket, "CAPABILITIES")
{:ok, "101 Capability list:\r\nVERSION 2\r\nREADER\r\n\POST"}
iex> {:ok, socket, _greeting} = GenNNTP.connect()
iex> article = %{
...> headers: %{
...> "Message-ID" => "<test@post>",
...> "From" => "\"Demo User\" <<EMAIL>>",
...> "Newsgroups" => "misc.test",
...> "Subject" => "I am just a test article",
...> "Organization" => "An Example Net",
...> },
...> body: "This is a test article for posting",
...> }
iex> GenNNTP.command(socket, "POST", [article])
{:ok, "240 Article received OK"}
"""
defdelegate command(socket, command, args \\ []), to: :gen_nntp
@doc """
Invoked when a client is connecting to the server.
`init_arg` is the argument term (second argument) passed to `start/3`.
Returning `{:ok, state}` will start the handshake to establish the socket.
Returning `{:ok, state, timeout}` is similar to `{:ok, state}`, except that
it also sets a delay before establishing the handshake.
Returning `:ignore` will make the process exit normally without entering the
loop, closing the socket.
Returning `{:stop, reason}` will cause the process to exit with reason
`reason` without entering the loop, also closing the socket.
"""
@callback init(init_arg :: term()) ::
{:ok, state} | {:ok, state, timeout} |
:ignore | {:stop, reason :: term}
@doc """
Invoked when a client asks for the server's capabilities.
`state` is the current state of the NNTP server.
The returned `capabilities` list is sent to the client, with "VERSION" always
first in the list. The server continues the loop with the new state.
Only standard capabilities are sent to the client; invalid ones in the
callback's return are ignored.
"""
@callback handle_CAPABILITIES(state) :: {:ok, capabilities :: [String.t()], state}
@doc """
Invoked when a client selects a newsgroup.
`group` is the name of the newsgroup to be selected (e.g., "news.software").
Returning `{:ok, group_summary, new_state}` sends the group summary to the
client and continues the loop with new state `new_state`.
Returning `{:ok, false, new_state}` sends a 411 response to tell the client
the group is unavailable and continues the loop with new state `new_state`.
Returning `{:error, reason}` responds with `reason` and closes the connection.
"""
@callback handle_GROUP(group, state) ::
{:ok, {
group,
number: non_neg_integer(),
low: non_neg_integer(),
high: non_neg_integer()
}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when group: String.t()
@doc """
Invoked when a client selects a newsgroup.
`group` is the name of the newsgroup to be selected (e.g., "news.software").
Returning `{:ok, group_summary, new_state}` sends the group summary to the
client and continues the loop with new state `new_state`. The group summary is
similar to the one returned by the "GROUP" command, but also includes a list
of article numbers in the newsgroup.
Returning `{:ok, false, new_state}` sends a 411 response to tell the client
the group is unavailable and continues the loop with new state `new_state`.
Returning `{:error, reason}` responds with `reason` and closes the connection.
"""
@callback handle_LISTGROUP(group, state) ::
{:ok, {
group,
number: non_neg_integer(),
low: non_neg_integer(),
high: non_neg_integer(),
numbers: [non_neg_integer()]
}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when group: String.t()
@doc """
Invoked when a client selects the next article in the current newsgroup.
The next article in that newsgroup is the lowest existing article number
greater than the current article number.
Returning `{:ok, { number, article }, new_state}` sends the new current
article number and the message-id of that article to the client and continues
the loop with new state `new_state`.
If `number` is the same as the current article number, a 421 response is sent
to tell the client there is no next article in this group.
Returning `{:ok, false, new_state}` sends a 421 response to tell the client
the article is unavailable and continues the loop with new state `new_state`.
Returning `{:error, reason}` responds with `reason` and closes the connection.
Note that this callback is not invoked when currently selected newsgroup is
invalid, or the current article number is invalid. GenNNTP handles those
cases with appropriate response code.
"""
@callback handle_NEXT(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client selects the previous article in the current newsgroup.
The previous article in that newsgroup is the highest existing article number
less than the current article number.
Returning `{:ok, { number, article }, new_state}` sends the new current
article number and the message-id of that article to the client and continues
the loop with new state `new_state`.
If `number` is the same as the current article number, a 422 response is sent
to tell the client there is no previous article in this group.
Returning `{:ok, false, new_state}` sends a 422 response to tell the client
the article is unavailable and continues the loop with new state `new_state`.
Returning `{:error, reason}` responds with `reason` and closes the connection.
Note that this callback is not invoked when currently selected newsgroup is
invalid, or the current article number is invalid. GenNNTP handles those
cases with appropriate response code.
"""
@callback handle_LAST(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client selects an article.
The `arg` is the argument used to specify the article to retrieve. It has 2
forms:
- A message-id.
- A 2-tuple containing the article number and the group name.
Returning `{:ok, { number, article }, new_state}` sends the new current
article number and the entire article to the client and continues
the loop with new state `new_state`. A full article has message-id, the
headers, and a body.
Returning `{:ok, false, new_state}` sends a 423 response to tell the client
the article is unavailable and continues the loop with new state `new_state`.
Returning `{:error, reason}` responds with `reason` and closes the connection.
Note that this callback is not invoked when currently selected newsgroup is
invalid, or the current article number is invalid. GenNNTP handles those
cases with appropriate response code. GenNNTP also handles the ARTICLE
command with no argument, by taking the current article number instead before
passing it to this callback.
"""
@callback handle_ARTICLE(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client selects headers of an article.
This callback behaves identically to the `c:handle_ARTICLE/2` except that
only the headers are returned in the article.
"""
@callback handle_HEAD(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client selects body of an article.
This callback behaves identically to the `c:handle_ARTICLE/2` except that
only the body is returned in the article.
"""
@callback handle_BODY(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client checks if an article exists.
This callback behaves identically to the `c:handle_ARTICLE/2` except that
only the message-id is returned in the article.
This callback allows the client to determine whether an article exists and
in the second forms, what its message-id is, without having to process an
arbitrary amount of text.
"""
@callback handle_STAT(arg, state) ::
{:ok, {number, :gen_nntp.article()}, state} |
{:ok, false, state} |
{:error, reason :: String.t(), state}
when number: non_neg_integer(),
arg: :gen_nntp.message_id() | {number, group :: String.t()}
@doc """
Invoked when a client sends an article to be posted.
The callback receives a map of type `article()` without the `id` field.
Return `{:ok, new_state}` to accept the article.
Return `{:error, reason}` to reject the article with a specific reason.
"""
@callback handle_POST(article :: :gen_nntp.article(), state) ::
{:ok, state} | {:error, reason :: String.t(), state}
@doc """
Invoked when a client asks for a summary of the server.
Returning `{:ok, help_text, new_state}` sends the `help_text` to the client
and continues the loop with new state `new_state`.
"""
@callback handle_HELP(state) :: {:ok, help_text :: String.t(), state}
@doc """
Invoked when an unknown command is sent by the client.
This optional callback can be used to handle commands not understood by the
current GenNNTP implementation. If not defined, GenNNTP responds with 500.
The `command` is the full command line sent by the client, minus the trailing
CRLF pair. The callback can choose to `reply` or `noreply` to the command. The
`response` does not need the trailing CRLF pair.
Returning `{:error, reason}` responds with `reason` and closes the connection.
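A sketch of a simple implementation (illustrative only; "PING" is a made-up
command, not part of the NNTP specs):

    @impl true
    def handle_command("PING", state), do: {:reply, "200 PONG", state}
    def handle_command(_command, state), do: {:reply, "500 Unknown command", state}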
"""
@callback handle_command(command :: String.t(), state) ::
{:reply, response :: any(), state} |
{:noreply, state} |
{:stop, reason :: any(), state} |
{:stop, reason :: any(), response :: any(), state}
@optional_callbacks [
handle_GROUP: 2,
handle_LISTGROUP: 2,
handle_NEXT: 2,
handle_LAST: 2,
handle_ARTICLE: 2,
handle_HEAD: 2,
handle_BODY: 2,
handle_STAT: 2,
handle_POST: 2,
handle_command: 2
]
end
|
lib/gen_nntp.ex
| 0.836688
| 0.744238
|
gen_nntp.ex
|
starcoder
|
defmodule AdyenCheckoutEx.Model.AdditionalDataRisk do
@moduledoc """
"""
@derive [Poison.Encoder]
defstruct [
:"riskdata.[customFieldName]",
:"riskdata.basket.item[itemNr].amountPerItem",
:"riskdata.basket.item[itemNr].brand",
:"riskdata.basket.item[itemNr].category",
:"riskdata.basket.item[itemNr].color",
:"riskdata.basket.item[itemNr].currency",
:"riskdata.basket.item[itemNr].itemID",
:"riskdata.basket.item[itemNr].manufacturer",
:"riskdata.basket.item[itemNr].productTitle",
:"riskdata.basket.item[itemNr].quantity",
:"riskdata.basket.item[itemNr].receiverEmail",
:"riskdata.basket.item[itemNr].size",
:"riskdata.basket.item[itemNr].sku",
:"riskdata.basket.item[itemNr].upc",
:"riskdata.promotions.promotion[itemNr].promotionCode",
:"riskdata.promotions.promotion[itemNr].promotionDiscountAmount",
:"riskdata.promotions.promotion[itemNr].promotionDiscountCurrency",
:"riskdata.promotions.promotion[itemNr].promotionDiscountPercentage",
:"riskdata.promotions.promotion[itemNr].promotionName",
:"riskdata.riskProfileReference"
]
@type t :: %__MODULE__{
:"riskdata.[customFieldName]" => String.t | nil,
:"riskdata.basket.item[itemNr].amountPerItem" => String.t | nil,
:"riskdata.basket.item[itemNr].brand" => String.t | nil,
:"riskdata.basket.item[itemNr].category" => String.t | nil,
:"riskdata.basket.item[itemNr].color" => String.t | nil,
:"riskdata.basket.item[itemNr].currency" => String.t | nil,
:"riskdata.basket.item[itemNr].itemID" => String.t | nil,
:"riskdata.basket.item[itemNr].manufacturer" => String.t | nil,
:"riskdata.basket.item[itemNr].productTitle" => String.t | nil,
:"riskdata.basket.item[itemNr].quantity" => String.t | nil,
:"riskdata.basket.item[itemNr].receiverEmail" => String.t | nil,
:"riskdata.basket.item[itemNr].size" => String.t | nil,
:"riskdata.basket.item[itemNr].sku" => String.t | nil,
:"riskdata.basket.item[itemNr].upc" => String.t | nil,
:"riskdata.promotions.promotion[itemNr].promotionCode" => String.t | nil,
:"riskdata.promotions.promotion[itemNr].promotionDiscountAmount" => String.t | nil,
:"riskdata.promotions.promotion[itemNr].promotionDiscountCurrency" => String.t | nil,
:"riskdata.promotions.promotion[itemNr].promotionDiscountPercentage" => String.t | nil,
:"riskdata.promotions.promotion[itemNr].promotionName" => String.t | nil,
:"riskdata.riskProfileReference" => String.t | nil
}
end
defimpl Poison.Decoder, for: AdyenCheckoutEx.Model.AdditionalDataRisk do
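# Decoding is a pass-through: every field is a plain string, so the struct
# Poison builds needs no post-processing.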
def decode(value, _options) do
value
end
end
|
lib/adyen_checkout_ex/model/additional_data_risk.ex
| 0.658966
| 0.403332
|
additional_data_risk.ex
|
starcoder
|
defmodule ExWire.Packet.Status do
@moduledoc """
Status messages establish a proper Eth Wire connection and verify that the two clients are compatible.
```
**Status** [`+0x00`: `P`, `protocolVersion`: `P`, `networkId`: `P`, `td`: `P`, `bestHash`: `B_32`, `genesisHash`: `B_32`]
Inform a peer of its current ethereum state. This message should be sent after the initial
handshake and prior to any ethereum related messages.
* `protocolVersion` is one of:
* `0x00` for PoC-1;
* `0x01` for PoC-2;
* `0x07` for PoC-3;
* `0x09` for PoC-4.
* `0x17` for PoC-5.
* `0x1c` for PoC-6.
* `61` for PV61
* `62` for PV62
* `63` for PV63
* `networkId`: 0=Olympic (disused), 1=Frontier (mainnet), 2=Morden (disused), 3=Ropsten (testnet), 4=Rinkeby
* `td`: Total Difficulty of the best chain. Integer, as found in block header.
* `bestHash`: The hash of the best (i.e. highest TD) known block.
* `genesisHash`: The hash of the Genesis block.
```
"""
require Logger
alias ExWire.Config
@behaviour ExWire.Packet
@type t :: %__MODULE__{
protocol_version: integer(),
network_id: integer(),
total_difficulty: integer(),
best_hash: binary(),
genesis_hash: binary(),
manifest_hash: binary(),
block_number: integer()
}
defstruct [
:protocol_version,
:network_id,
:total_difficulty,
:best_hash,
:genesis_hash,
:manifest_hash,
:block_number
]
@doc """
Given a Status packet, serializes for transport over Eth Wire Protocol.
## Examples
iex> %ExWire.Packet.Status{protocol_version: 0x63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
...> |> ExWire.Packet.Status.serialize
[0x63, 3, 10, <<5>>, <<4>>]
"""
@spec serialize(t) :: ExRLP.t()
def serialize(packet = %__MODULE__{}) do
[
packet.protocol_version,
packet.network_id,
packet.total_difficulty,
packet.best_hash,
packet.genesis_hash
]
end
@doc """
Given an RLP-encoded Status packet from Eth Wire Protocol, decodes into a Status packet.
Note: we will decode warp's `manifest_hash` and `block_number`, if given.
## Examples
iex> ExWire.Packet.Status.deserialize([<<0x63>>, <<3>>, <<10>>, <<5>>, <<4>>])
%ExWire.Packet.Status{protocol_version: 0x63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
iex> ExWire.Packet.Status.deserialize([<<0x63>>, <<3>>, <<10>>, <<5>>, <<4>>, <<11>>, <<11>>])
%ExWire.Packet.Status{protocol_version: 0x63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>, manifest_hash: <<11>>, block_number: 11}
"""
@spec deserialize(ExRLP.t()) :: t
def deserialize(rlp) do
[
  protocol_version,
  network_id,
  total_difficulty,
  best_hash,
  genesis_hash | rest
] = rlp
{manifest_hash, block_number} =
case rest do
[] ->
{nil, nil}
[manifest_hash, block_number] ->
{manifest_hash, block_number |> :binary.decode_unsigned()}
end
%__MODULE__{
protocol_version: protocol_version |> :binary.decode_unsigned(),
network_id: network_id |> :binary.decode_unsigned(),
total_difficulty: total_difficulty |> :binary.decode_unsigned(),
best_hash: best_hash,
genesis_hash: genesis_hash,
manifest_hash: manifest_hash,
block_number: block_number
}
end
@doc """
Handles a Status message.
We should decide whether or not we want to continue communicating with
this peer. E.g. do our network and protocol versions match?
## Examples
iex> %ExWire.Packet.Status{protocol_version: 63, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
...> |> ExWire.Packet.Status.handle()
:ok
# Test a peer with an incompatible version
iex> %ExWire.Packet.Status{protocol_version: 555, network_id: 3, total_difficulty: 10, best_hash: <<5>>, genesis_hash: <<4>>}
...> |> ExWire.Packet.Status.handle()
{:disconnect, :useless_peer}
"""
@spec handle(t) :: ExWire.Packet.handle_response()
def handle(packet = %__MODULE__{}) do
_ =
if System.get_env("TRACE"),
do: _ = Logger.debug(fn -> "[Packet] Got Status: #{inspect(packet)}" end)
protocol_version = packet.protocol_version
case Config.protocol_version() do
^protocol_version ->
:ok
_ ->
# TODO: We need to follow up on disconnection packets with disconnection ourselves
_ =
Logger.debug(fn ->
"[Packet] Disconnecting to due incompatible protocol version (them #{
packet.protocol_version
}, us: #{Config.protocol_version()})"
end)
{:disconnect, :useless_peer}
end
end
end
|
apps/ex_wire/lib/ex_wire/packet/status.ex
| 0.883663
| 0.840586
|
status.ex
|
starcoder
|
defmodule Cocktail.Validation.Interval do
@moduledoc false
import Integer, only: [mod: 2, floor_div: 2]
import Cocktail.Validation.Shift
@typep iso_week :: {Timex.Types.year(), Timex.Types.weeknum()}
@type t :: %__MODULE__{type: Cocktail.frequency(), interval: pos_integer}
@enforce_keys [:type, :interval]
defstruct type: nil,
interval: nil
@spec new(Cocktail.frequency(), pos_integer) :: t
def new(type, interval), do: %__MODULE__{type: type, interval: interval}
@spec next_time(t, Cocktail.time(), Cocktail.time()) :: Cocktail.Validation.Shift.result()
def next_time(%__MODULE__{type: _, interval: 1}, time, _), do: {:no_change, time}
def next_time(%__MODULE__{type: :monthly, interval: interval}, time, start_time) do
start_time
|> Timex.beginning_of_month()
|> Timex.diff(Timex.beginning_of_month(time), :months)
|> mod(interval)
|> shift_by(:months, time)
end
def next_time(%__MODULE__{type: :weekly, interval: interval}, time, start_time) do
week = Timex.iso_week(time)
start_week = Timex.iso_week(start_time)
diff = weeks_diff(start_week, week)
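# `diff` is how many ISO weeks `time` lies after `start_time`; the next
# aligned occurrence is mod(-diff, interval) weeks away.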
off_by = mod(diff * -1, interval)
shift_by(off_by * 7, :days, time)
end
def next_time(%__MODULE__{type: :daily, interval: interval}, time, start_time) do
date = Timex.to_date(time)
start_date = Timex.to_date(start_time)
start_date
|> Timex.diff(date, :days)
|> mod(interval)
|> shift_by(:days, time)
end
def next_time(%__MODULE__{type: type, interval: interval}, time, start_time) do
unit = unit_for_type(type)
start_time
|> Timex.diff(time, unit)
|> mod(interval)
|> shift_by(unit, time)
end
@spec weeks_diff(iso_week, iso_week) :: integer
defp weeks_diff({year, week1}, {year, week2}) when week2 >= week1, do: week2 - week1
defp weeks_diff({year1, week1}, {year2, week2}) when year2 > year1,
do: (year1..(year2 - 1) |> Enum.map(&iso_weeks_per_year/1) |> Enum.sum()) - week1 + week2
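# `year_cycle/1` is the classic (y + y/4 - y/100 + y/400) mod 7 formula for
# the weekday of 31 December (4 = Thursday). A year has 53 ISO weeks exactly
# when it ends on a Thursday, or starts on one (the previous year ended on a
# Wednesday, cycle 3); every other year has 52.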
@spec iso_weeks_per_year(Timex.Types.year()) :: 52 | 53
defp iso_weeks_per_year(year) do
if year_cycle(year) == 4 || year_cycle(year - 1) == 3 do
53
else
52
end
end
@spec year_cycle(Timex.Types.year()) :: integer
defp year_cycle(year) do
cycle = year + floor_div(year, 4) - floor_div(year, 100) + floor_div(year, 400)
mod(cycle, 7)
end
@spec unit_for_type(:hourly | :minutely | :secondly) :: :hours | :minutes | :seconds
defp unit_for_type(:hourly), do: :hours
defp unit_for_type(:minutely), do: :minutes
defp unit_for_type(:secondly), do: :seconds
end
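A minimal usage sketch of `next_time/3`, assuming Timex is available; the `{:change, time} | {:no_change, time}` return shape is taken from `Cocktail.Validation.Shift`, and the datetimes are illustrative.
```elixir
alias Cocktail.Validation.Interval

start_time = Timex.to_datetime({{2024, 1, 1}, {9, 0, 0}}, "Etc/UTC")
time = Timex.to_datetime({{2024, 1, 2}, {9, 0, 0}}, "Etc/UTC")

# An interval of 1 never shifts the candidate time.
Interval.new(:daily, 1) |> Interval.next_time(time, start_time)
#=> {:no_change, time}

# With a 2-day interval, Jan 2 is off-cycle relative to Jan 1, so the
# candidate is shifted forward one day onto the next occurrence.
Interval.new(:daily, 2) |> Interval.next_time(time, start_time)
#=> {:change, shifted} where shifted falls on 2024-01-03
```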
|
lib/cocktail/validation/interval.ex
| 0.852675
| 0.610802
|
interval.ex
|
starcoder
|
defmodule ExSieve do
@moduledoc """
`ExSieve` is meant to be `use`d by a module implementing `Ecto.Repo` behaviour.
When used, optional configuration parameters can be provided.
For details about configuration parameters see `t:ExSieve.Config.t/0`.
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
use ExSieve
end
defmodule MyApp.Repo do
use Ecto.Repo, otp_app: :my_app
use ExSieve, ignore_errors: true
end
When `use` is called, a `c:ExSieve.filter/3` function is defined in the Repo.
This function can then be used for filtering entries based on query parameters:
def index(conn, %{"q"=> params}) do
posts = MyApp.Repo.filter(MyApp.Post, params)
render conn, :index, posts: posts
end
Options can be overridden on a per-schema basis (see `ExSieve.Schema`)
or on single `c:ExSieve.filter/3` calls.
## Examples
In the following we assume these schemas are defined in your application:
defmodule MyApp.Post do
use Ecto.Schema
schema "posts" do
has_many :comments, MyApp.Comment
field :title
field :body
field :published, :boolean
timestamps()
end
end
defmodule MyApp.Comment do
use Ecto.Schema
schema "comments" do
belongs_to :post, MyApp.Post
field :body
timestamps()
end
end
### Simple query
Given this json representation of the query
```json
{
"m": "or",
"id_in": [1, 2],
"title_and_body_cont": "text",
"comments_body_eq": "body",
"s": ["title desc", "inserted_at asc"]
}
```
the following SQL query is sent to the database
```sql
SELECT posts.* FROM posts INNER JOIN comments ON posts.id = comments.post_id
WHERE posts.id IN (1, 2)
OR (posts.title ILIKE '%text%' AND posts.body ILIKE '%text%')
OR comments.body = 'body'
ORDER BY posts.title DESC, posts.inserted_at ASC;
```
### Grouping queries
Query fields can be nested for obtaining more advanced filters.
Given this json representation of the query
```json
{
"m": "and",
"id_in": [1, 2],
"g": [
{
"m": "or",
"c": {
"title_and_body_cont": "text",
"comments_body_eq": "body"
}
}
],
"s": ["title desc", "inserted_at asc"]
}
```
the following SQL query is sent to the database
```sql
SELECT posts.* FROM posts INNER JOIN comments ON posts.id = comments.post_id
WHERE posts.id IN (1, 2)
AND (
(posts.title ILIKE '%text%' AND posts.body ILIKE '%text%')
OR comments.body = 'body')
ORDER BY posts.title DESC, posts.inserted_at ASC;
```
## Supported predicates
### Base predicates
* `eq`
* `not_eq`
* `cont`
* `not_cont`
* `lt`
* `lteq`
* `gt`
* `gteq`
* `in`
* `not_in`
* `matches`
* `does_not_match`
* `start`
* `not_start`
* `end`
* `not_end`
* `true`
* `not_true`
* `false`
* `not_false`
* `present`
* `blank`
* `null`
* `not_null`
### Composite predicates
* `eq_any`
* `not_eq_all`
* `cont_all`
* `cont_any`
* `not_cont_all`
* `not_cont_any`
* `matches_all`
* `matches_any`
* `does_not_match_all`
* `does_not_match_any`
* `start_any`
* `not_start_all`
* `end_any`
* `not_end_all`
### Combinators
* `or`
* `and`
## Custom predicates
ExSieve allows defining user-specific predicates.
These predicates must be defined at compile time with the `:custom_predicates` key
of the `:ex_sieve` application environment. It should be a keyword list that maps
predicate names (atoms) to `Ecto.Query.API.fragment/1` strings.
config :ex_sieve,
custom_predicates: [
has_key: "? \\\\? ?",
less_than_6: "? < 6",
key_is: "(? ->> ?) = ?"
]
The first argument given to the fragment is the field, while the following ones are
the values given in the query string.
Given this json representation of the query
```json
{
"metadata_has_key": "tag",
"score_less_than_6": true,
"metadata_key_is: ["status", "approved"]
}
```
the following SQL query is sent to the database
```sql
SELECT posts.* FROM posts
WHERE posts.metadata ? 'tag'
AND posts.score < 6
AND (posts.metadata ->> 'status') = 'approved';
```
## Predicate aliases
Aliases for built-in and custom predicates can be configured using the
`:predicate_aliases` key in `:ex_sieve` application environment.
It should be a keyword list that maps an alias name (atom) to a predicate (atom).
Aliases for nonexistent predicates are silently discarded; multiple aliases
can be defined for the same predicate.
config :ex_sieve,
custom_predicates: [
has_key: "? \\\\? ?"
],
predicate_aliases: [
m: :matches,
hk: :has_key
]
## Notes
### LIKE injection
`LIKE` queries can suffer from [LIKE injection](https://github.blog/2015-11-03-like-injection/) attacks.
For this reason all predicates which result in a `LIKE` query (`cont`, `not_cont`, `start`, `not_start`, `end`, `not_end`
and their composite predicates) are properly escaped.
The exceptions are `matches`, `does_not_match`, and their composite predicates, which allow the `%`, `_` and `\\` characters in the value.
You should be very careful when allowing an external user to use these predicates.
"""
defmacro __using__(opts) do
quote do
@behaviour ExSieve
@ex_sieve_defaults unquote(opts)
def filter(queryable, params, options \\ %{}) do
ExSieve.Filter.filter(queryable, params, @ex_sieve_defaults, options)
end
end
end
@type result :: Ecto.Query.t() | error()
@type error ::
{:error, :invalid_query}
| {:error, {:too_deep, key :: String.t()}}
| {:error, {:predicate_not_found, key :: String.t()}}
| {:error, {:attribute_not_found, key :: String.t()}}
| {:error, {:direction_not_found, invalid_direction :: String.t()}}
| {:error, {:value_is_empty, key :: String.t()}}
| {:error, {:invalid_type, field :: String.t()}}
| {:error, {:invalid_value, {field :: String.t(), value :: any()}}}
| {:error, {:too_few_values, {key :: String.t(), arity :: non_neg_integer()}}}
@doc """
Filters the given query based on params.
Returns the query with the added filters or an error tuple.
For details about available options see `t:ExSieve.Config.t/0`.
To avoid duplicate joins being sent to the database, only named bindings should be used,
and each binding name should match that of the related table.
## Examples
Repo.filter(User, %{"name_cont" => "foo"})
Repo.filter(from(u in User), %{"name_cont" => "foo"})
User
|> join(:inner, [u], p in assoc(u, :posts), as: :posts)
|> preload(:posts)
|> Repo.filter(%{"name_cont" => "foo"})
# WARNING: this will result in a duplicated join
User
|> join(:inner, [u], p in assoc(u, :posts), as: :posts_dup)
|> Repo.filter(%{"posts_title_cont" => "foo"})
"""
@callback filter(Ecto.Queryable.t(), params :: %{(binary | atom) => term}, options :: %{atom => term}) :: result()
end
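As a usage note, `filter/3` returns either an `Ecto.Query` or one of the error tuples above, so callers typically branch on the result. A minimal sketch, assuming the `MyApp.Repo` and `MyApp.Post` modules from the examples in the moduledoc:
```elixir
case MyApp.Repo.filter(MyApp.Post, params) do
  # Success: an Ecto query with the filters applied, ready to execute.
  %Ecto.Query{} = query ->
    {:ok, MyApp.Repo.all(query)}

  # Failure: surface the error tuple to the caller.
  {:error, _reason} = error ->
    error
end
```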
|
lib/ex_sieve.ex
| 0.914355
| 0.759136
|
ex_sieve.ex
|
starcoder
|
defmodule Spell.Peer do
@moduledoc """
The `Spell.Peer` module implements the general WAMP peer behaviour.
From the WAMP protocol:
> A WAMP Session connects two Peers, a Client and a Router. Each WAMP
> Peer can implement one or more roles.
See `new` for documentation on starting new peers.
"""
use GenServer
alias Spell.Message
alias Spell.Role
require Logger
# Module Attributes
@supervisor_name __MODULE__.Supervisor
defstruct [:transport,
:serializer,
:owner,
:role,
:realm,
:retries,
:retry_interval]
# Type Specs
@type start_option ::
{:serializer, module}
| {:transport, {module, Keyword.t}}
@type t :: %__MODULE__{
transport: map,
serializer: map,
owner: pid,
role: map,
retry_interval: integer,
retries: integer}
# Public Functions
@doc """
Start `Spell.Peer.Supervisor`.
"""
def start_link() do
import Supervisor.Spec
child = worker(__MODULE__, [], [function: :new, restart: :transient])
options = [strategy: :simple_one_for_one, name: @supervisor_name]
Supervisor.start_link([child], options)
end
@doc """
Start a new peer with `options`. This function can be used to start a child
outside of the supervision tree.
## Options
* `:transport :: %{module: module, options: Keyword.t}` required
* `:serializer :: %{module: module, options: Keyword.t}` required
* `:realm :: Message.wamp_uri` required
* `:roles :: [{module, Keyword.t}]` required
* `:features :: map` defaults to result of role's `get_features/1` callback
* `:owner :: pid` defaults to self()
"""
@spec new(map | Keyword.t) :: {:ok, pid} | {:error, any}
def new(options) when is_map(options) do
GenServer.start_link(__MODULE__, options)
end
@doc """
Stop the `peer` process. Roles are responsible for notifying owners
of open commands that the command is being terminated via a message
like
{Spell.Peer, peer, {:closed, command}}
"""
def stop(peer) do
GenServer.cast(peer, :stop)
end
@doc """
Add a new child as part of the supervision tree.
## Options
See `new/1`.
"""
@spec add(map | Keyword.t) :: {:ok, pid} | {:error, any}
def add(options) do
options = Dict.update(options, :owner, self(), fn
nil -> self()
otherwise -> otherwise
end)
Supervisor.start_child(@supervisor_name, [options])
end
@doc """
Block until the process receives a message from `peer` of `type` or timeout.
"""
@spec await(pid, atom, integer) :: {:ok, t} | {:error, timeout}
def await(peer, type, timeout \\ 1000)
when is_pid(peer) and is_atom(type) do
receive do
{__MODULE__, ^peer, %Message{type: ^type} = message} ->
{:ok, message}
after
timeout ->
{:error, :timeout}
end
end
# Public Role Interface
@doc """
Synchronously send a message to the role.
"""
@spec call(pid, module, any) :: :ok
def call(peer, role, message) do
GenServer.call(peer, {:call_role, {role, message}})
end
@doc """
Send a WAMP message from the peer.
If a pid is provided as the peer, the message will be cast to and
sent from the peer process. If it is the peer state, the message
is sent directly.
"""
@spec send_message(pid | t, Message.t) :: :ok | {:error, any}
def send_message(peer, %Message{} = message) when is_pid(peer) do
GenServer.cast(peer, {:send_message, message})
end
def send_message(%__MODULE__{transport: transport, serializer: serializer},
%Message{} = message) do
case serializer.module.encode(message) do
{:ok, raw_message} ->
transport.module.send_message(transport.pid, raw_message)
{:error, reason} ->
{:error, {serializer.module, reason}}
end
end
@doc """
Send an Erlang message to the peer's owner.
"""
@spec send_to_owner(pid, any) :: :ok
def send_to_owner(peer, term) do
send(peer.owner, {__MODULE__, self(), term})
:ok
end
@spec notify(pid, any) :: :ok
def notify(pid, term) do
send(pid, {__MODULE__, self(), term})
:ok
end
# GenServer Callbacks
def init(options) do
case Enum.into(options, %{}) do
%{transport: %{module: transport_module,
options: transport_options},
serializer: %{module: serializer_module,
options: _serializer_options},
owner: owner,
realm: realm,
role: %{options: role_options, features: role_features},
retries: retries,
retry_interval: retry_interval} ->
send(self(), {:role_hook, :init})
{:ok, %__MODULE__{transport: %{module: transport_module,
options: transport_options,
pid: nil},
serializer: %{module: serializer_module},
owner: owner,
realm: realm,
role: %{options: role_options,
state: nil,
features: role_features},
retries: retries,
retry_interval: retry_interval}}
_ -> {:error, :badargs}
end
end
def handle_call({:call_role, {role, message}}, from, state) do
case Role.call(state.role.state, role, message, from, state) do
{:ok, reply, role_state} ->
{:reply, reply, put_in(state.role[:state], role_state)}
{:error, reason} ->
{:stop, {:cast_role, reason}, state}
end
end
def handle_cast({:send_message, %Message{} = message}, state) do
case send_message(state, message) do
:ok -> {:noreply, state}
{:error, reason} -> {:stop, {:send_message, reason}, state}
end
end
def handle_cast(:stop, state) do
{:stop, :normal, state}
end
def handle_info({:role_hook, :init},
%{role: %{state: nil}} = state) do
case Role.map_init(state.role.options, state) do
{:ok, role_state} ->
send(self(), {:transport, :reconnect})
{:noreply, put_in(state.role[:state], role_state)}
{:error, reason} ->
{:stop, {{:role_hook, :init}, reason}, state}
end
end
def handle_info({:transport, :reconnect},
%{transport: %{pid: nil} = transport,
serializer: serializer} = state) do
# WARNING: role states aren't reset. TBD if this is a good thing
case transport.module.connect(serializer.module, transport.options) do
{:ok, pid} ->
Logger.debug(fn -> "Connected using #{inspect(state)}" end)
send(self(), {:role_hook, :on_open})
{:noreply, put_in(state.transport[:pid], pid)}
{:error, reason} ->
Logger.debug(fn -> "Peer error on reconnect: #{inspect(reason)}" end)
case state.retries - 1 do
# TODO: store the retry timer to properly cancel it on
# the peer being shutdown, or a different reconnect message coming in
retries when retries > 0 ->
:erlang.send_after(state.retry_interval, self(),
{:transport, :reconnect})
{:noreply, %{state | retries: retries}}
retries when retries <= 0 ->
# The stop is `normal` in the sense that it wasn't caused by
# an internal error and the process shouldn't be restarted
send_to_owner(state, {:error, {:transport, reason}})
{:stop, :normal, state}
end
end
end
def handle_info({:role_hook, :on_open}, state) do
case Role.map_on_open(state.role.state, state) do
{:ok, role_state} ->
{:noreply, put_in(state.role[:state], role_state)}
{:error, reason} ->
{:stop, {{:role_hook, :on_open}, reason}, state}
end
end
def handle_info({module, pid, {:message, raw_message}},
%{transport: %{module: module, pid: pid}} = state) do
case state.serializer.module.decode(raw_message) do
{:ok, message} ->
Logger.debug(fn ->
"Peer #{inspect(pid)} received #{inspect(message)}" end)
case Role.map_handle_message(state.role.state, message, state) do
{:ok, role_state} ->
{:noreply, put_in(state.role[:state], role_state)}
{:close, _reasons, role_state} ->
# NOTE: if close == normal, are reasons necessary?
{:stop, :normal, put_in(state.role[:state], role_state)}
{:error, reason} ->
{:stop, {{:role_hook, :handle_message}, reason}, state}
end
{:error, reason} ->
{:stop, {:serializer, reason}, state}
end
end
def handle_info({module, pid, {:terminating, reason}},
%{transport: %{module: module, pid: pid}} = state) do
# NOTE: the transport closed
send_to_owner(state, {:error, {:transport, reason}})
{:stop, {:transport, reason}, state}
end
def terminate(reason, state) do
{:ok, _state} = Role.map_on_close(state.role.state, self())
Logger.debug(fn -> "Peer terminating due to: #{inspect(reason)}" end)
end
end
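A hedged sketch of starting a peer under the supervisor and waiting for the router's WELCOME message; the transport and serializer module names and all option values below are assumptions for illustration, not verified parts of the Spell API.
```elixir
# All module names and option values are illustrative.
{:ok, peer} =
  Spell.Peer.add(%{
    transport: %{module: Spell.Transport.WebSocket,
                 options: [host: "localhost", port: 8080]},
    serializer: %{module: Spell.Serializer.JSON, options: []},
    owner: self(),
    realm: "realm1",
    role: %{options: [], features: %{}},
    retries: 5,
    retry_interval: 1_000
  })

# Block for up to a second until a WELCOME message arrives.
{:ok, _welcome} = Spell.Peer.await(peer, :welcome)
```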
|
lib/spell/peer.ex
| 0.88452
| 0.403626
|
peer.ex
|
starcoder
|
defmodule TomlElixir.Mapper do
@moduledoc """
Module for transforming toml list to map format
"""
alias TomlElixir.Error
@doc """
Transform TOML list to map format
"""
@spec parse(list) :: map
def parse([]), do: %{}
def parse(toml) when is_list(toml), do: to_map(toml, {[], %{}})
@spec to_map(list, {list, map}) :: map
defp to_map([], {_to, acc}), do: acc
defp to_map([{:table, to} | _tail], {to, _acc}) do
throw Error.exception("Duplicate table #{Enum.join(to, ".")}")
end
defp to_map([{:table, to} | []], {_to, acc}) do
do_put_in(to, nil, %{}, acc)
end
defp to_map([{:table, to} | tail], {_to, acc}) do
to_map(tail, {to, acc})
end
defp to_map([{:array_table, to} | tail], {_to, acc}) do
to_map(tail, {to, do_put_in_new(to, acc)})
end
defp to_map([{{:key, key}, {:array, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:datetime, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:date, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:time, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:string, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:string_ml, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:literal, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:literal_ml, val}} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
defp to_map([{{:key, key}, {:inline_table, val}} | tail], {to, acc}) when is_list(val) do
to_map(tail, {to, do_put_in(to, key, parse(val), acc)})
end
defp to_map([{{:key, key}, val} | tail], {to, acc}) do
to_map(tail, {to, do_put_in(to, key, val, acc)})
end
@spec do_put_in(list, String.t | nil, any, list | map) :: map
defp do_put_in([], key, val, []) do
[Map.put(%{}, key, val)]
end
defp do_put_in([], key, val, acc) when is_list(acc) do
List.update_at(acc, -1, &Map.put(&1, key, val))
end
defp do_put_in([], key, val, acc) when is_map(acc) do
if Map.has_key?(acc, key) do
throw Error.exception("Duplicate key #{key}")
else
Map.put(acc, key, val)
end
end
defp do_put_in([key], nil, val, acc) when is_map(acc) do
Map.put(acc, key, val)
end
defp do_put_in(to, key, val, acc) when is_list(acc) do
List.update_at(acc, -1, &do_put_in(to, key, val, &1))
end
defp do_put_in([head | tail], key, val, acc) when is_map(acc) do
Map.put(acc, head, do_put_in(tail, key, val, Map.get(acc, head, %{})))
end
defp do_put_in(_to, _key, _val, acc) do
throw Error.exception("Invalid type #{inspect acc}, should be map")
end
@spec do_put_in_new(list, list | map) :: list | map
defp do_put_in_new([], acc) when is_list(acc) do
List.insert_at(acc, -1, %{})
end
defp do_put_in_new([], acc) when acc == %{} do
[%{}]
end
defp do_put_in_new([], acc) when is_map(acc) do
throw Error.exception("Should be empty, but #{inspect acc} was found")
end
defp do_put_in_new(to, acc) when is_list(acc) do
List.update_at(acc, -1, &do_put_in_new(to, &1))
end
defp do_put_in_new([head | tail], acc) when is_map(acc) do
Map.put(acc, head, do_put_in_new(tail, Map.get(acc, head, %{})))
end
end
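A small sketch of the tagged-tuple input `parse/1` consumes; the shape is inferred from the `to_map/2` clauses above rather than from TomlElixir's parser documentation.
```elixir
# Equivalent to the TOML document:
#
#   [owner]
#   name = "Tom"
#
toml = [
  {:table, ["owner"]},
  {{:key, "name"}, {:string, "Tom"}}
]

TomlElixir.Mapper.parse(toml)
#=> %{"owner" => %{"name" => "Tom"}}
```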
|
lib/toml_elixir/mapper.ex
| 0.657758
| 0.654267
|
mapper.ex
|
starcoder
|
defmodule Idconex do
@moduledoc """
Creates an identicon for a given username.
Use `render/1` to create a usual github like identicon.
Use `render/2` to create an extended identicon.
Use `encode64/1` to get the identicon as a base64 encoded png image.
Use `save/2` to save the identicon as a png image file.
## Examples
iex> Idconex.render("codebio") |> Idconex.save("./test/tmp/codebio.png")
:ok
iex> Idconex.render("cfranzl", alg: :md5, chunk_size: 3, block_size: 5) |> Idconex.encode64
"iVBORw0KG<KEY>
"""
defstruct [:name, :hashlist, :alg, :rgb, :grid, :chunk_size, :row_size, :block_size, :image_size, :image]
@doc """
Render a given username to an identicon.
Provide the hash algorithm to create different kinds of identicons.
Provide a chunk_size to change the number of columns (chunk_size*2-1 = number of rectangles per side).
Provide a block_size to define the number of pixels per side of a rectangle.
Typical types of identicons are:
* `alg: :md5, chunk_size: 3`: Image of 250x250 pixel with 5x5 grid
* `alg: :sha256, chunk_size: 4`: Image of 350x350 pixel with 7x7 grid
* `alg: :sha512, chunk_size: 5`: Image of 450x450 pixel with 9x9 grid
* `alg: :sha512, chunk_size: 5, block_size: 27`: Image of 243x243 pixel with 9x9 grid
Options:
* `alg`: md5 | sha | sha224 | sha256 | sha384 | sha512 (default: :md5)
* `chunk_size`: number of rectangles for one side including the axis (default: 3)
* `block_size`: side length of the rectangle in pixel (default: 50)
Returns: `%Idconex{}`
"""
def render(name, opts \\ []) do
alg = Keyword.get(opts, :alg, :md5)
chunk_size = Keyword.get(opts, :chunk_size, 3)
block_size = Keyword.get(opts, :block_size, 50)
name
|> hashlist(alg)
|> set_color
|> set_size(chunk_size, block_size)
|> build_grid
|> build_image
end
@doc """
Save the identicon as png image.
Provide only a path to create a file with the default name in the given path
or include a complete new filename to override the default filename.
Parameter:
* `%Idconex{}`: rendered image struct
* `path`: optional; default: `#name_#alg_#gridsize.png`
Returns: `:ok`
"""
def save(%Idconex{name: name, image: image, row_size: row_size, alg: alg}, path \\ nil) do
default_filename = "#{name}_#{alg}_#{row_size}x#{row_size}.png"
filename =
case path do
nil -> default_filename
_ -> path
end
filename =
case String.ends_with?(filename, "/") do
true -> "#{filename}#{default_filename}"
false -> filename
end
File.write filename, image
end
@doc """
Encode the identicon with base64.
Parameter:
* `%Idconex{}`: rendered image struct
Returns: base64 encoded png image
"""
def encode64(%Idconex{image: image}) do
Base.encode64 image
end
defp hashlist(name, alg) do
hashlist = :crypto.hash(alg, name) |> :binary.bin_to_list
%Idconex{name: name, hashlist: hashlist, alg: alg}
end
defp set_color(%Idconex{hashlist: [r, g, b | _tail]} = idc) do
%Idconex{idc | rgb: {r, g, b}}
end
defp set_size(idc, chunk_size, block_size) do
row_size = chunk_size * 2 - 1
%Idconex{idc | chunk_size: chunk_size, row_size: row_size, block_size: block_size, image_size: block_size * row_size}
end
defp build_grid(%Idconex{hashlist: hashlist, chunk_size: chunk_size} = idc) do
grid =
hashlist
|> Enum.chunk_every(chunk_size, chunk_size, :discard)
|> Enum.map(&mirror_row/1)
|> List.flatten
|> Enum.with_index
|> Enum.filter(fn({x, _index}) -> rem(x, 2) == 0 end)
%Idconex{idc | grid: grid}
end
defp mirror_row(row) do
[_axis | append] = Enum.reverse row
row ++ append
end
defp build_image(%Idconex{grid: grid, rgb: rgb, row_size: row_size, block_size: block_size, image_size: image_size} = idc) do
image = :egd.create(image_size, image_size)
color = :egd.color(rgb)
for {_point, index} <- grid do
x = rem(index, row_size) * block_size
y = div(index, row_size) * block_size
:egd.filledRectangle(image, {x, y}, {x + block_size, y + block_size}, color)
end
%Idconex{idc | image: :egd.render(image)}
end
end
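To make the grid arithmetic concrete, a short worked example; the byte values are illustrative.
```elixir
# mirror_row/1 reflects a chunk around its last element:
# Enum.reverse([146, 121, 208]) gives [208, 121, 146]; dropping the
# axis (208) leaves [121, 146], so the mirrored row is
# [146, 121, 208, 121, 146] -- a row_size of 5 for chunk_size 3.
#
# Even bytes become filled rectangles. With row_size = 5 and
# block_size = 50, grid index 7 paints the square anchored at:
x = rem(7, 5) * 50  #=> 100
y = div(7, 5) * 50  #=> 50
```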
|
lib/idconex.ex
| 0.892234
| 0.660087
|
idconex.ex
|
starcoder
|