| code (string, 114–1.05M chars) | path (string, 3–312 chars) | quality_prob (float64, 0.5–0.99) | learning_prob (float64, 0.2–1) | filename (string, 3–168 chars) | kind (1 class) |
|---|---|---|---|---|---|
defmodule Grizzly.Packet do
@moduledoc """
  Module for working with raw Z/IP packets.

  This is used to marshal a Z/IP packet of bytes
  into an Elixir data structure that is easier to work with.

  This data structure is a lower-level representation of
  the messaging between this library and Z-Wave. Most of the time
  you should probably be working with a `Grizzly.Message` instead.

  This structure is for internal byte-string parsing.
"""
# TODO: @mattludwigs - make a `to_message` function here or a `from_packet` function in
# Grizzly.Message to have no need to expose a Packet to the client.
require Logger
alias Grizzly.Packet.{Decode, BodyParser, HeaderExtension}
@type t :: %__MODULE__{
seq_number: non_neg_integer | nil,
body: any,
types: [type],
raw?: boolean,
header_extensions: HeaderExtension.t()
}
@type type :: :ack_response | :nack_response | :nack_waiting
defstruct seq_number: nil, body: nil, types: [], raw?: false, header_extensions: []
@spec new(options :: keyword) :: t
def new(opts \\ []) do
struct(__MODULE__, opts)
end
@doc """
Return Z/IP binary for the heart beat packet
"""
@spec heart_beat() :: <<_::24>>
def heart_beat() do
<<0x23, 0x03, 0x80>>
end
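  # A quick round-trip sketch: the gateway acks the heart beat above with
  # <<0x23, 0x03, 0x40>>, which `decode/1` (below) turns into:
  #
  #     %Grizzly.Packet{types: [:ack_response], body: :heart_beat}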
@spec decode(binary) :: t()
def decode(<<0x23, 0x03, 0x40>>) do
%__MODULE__{types: [:ack_response], body: :heart_beat}
end
def decode(zip_packet_binary) do
_ = Logger.debug("[GATEWAY]: received - #{inspect(zip_packet_binary)}")
if raw?(zip_packet_binary) do
body = Decode.raw(zip_packet_binary)
%__MODULE__{raw?: true, body: body}
else
types = Decode.get_packet_types(zip_packet_binary)
body =
zip_packet_binary
|> Decode.get_body()
|> BodyParser.parse()
seq_number = Decode.get_sequence_number(zip_packet_binary)
header_extensions = Decode.get_header_extensions(zip_packet_binary)
      %__MODULE__{
        types: types,
        body: body,
        seq_number: seq_number,
        header_extensions: header_extensions
      }
end
end
@spec sleeping_delay?(t()) :: boolean()
def sleeping_delay?(%__MODULE__{header_extensions: header_ext}) do
    # This function is kind of a hack for right now. Ideally we could add
    # some metadata to the packet about which node we are communicating
    # with, so we could handle wake-up nodes more intelligently.
case HeaderExtension.get_expected_delay(header_ext) do
{:ok, seconds} when seconds > 1 -> true
{:ok, _} -> false
nil -> false
end
end
@spec put_expected_delay(t(), seconds :: non_neg_integer()) :: t()
def put_expected_delay(%__MODULE__{header_extensions: hext} = packet, seconds) do
# TODO: right now we don't do any checking on which header extensions
# are currently part of the header extensions.
expected_delay = HeaderExtension.expected_delay_from_seconds(seconds)
    %{packet | header_extensions: hext ++ [expected_delay]}
end
@spec log(t) :: t | no_return
def log(packet) do
_ =
unless heart_beat_response(packet) do
_ = Logger.debug("Received Packet: #{inspect(packet)}")
end
packet
end
@spec header(seq_number :: non_neg_integer) :: binary
def header(seq_number) do
<<0x23, 0x02, 0x80, 0xD0, seq_number, 0x00, 0x00, 0x03, 0x02, 0x00>>
end
  @spec raw?(binary() | Grizzly.Packet.t()) :: boolean()
def raw?(<<0x23, _rest::binary>>), do: false
def raw?(%__MODULE__{raw?: raw}), do: raw
def raw?(bin) when is_binary(bin), do: true
@spec heart_beat_response(t()) :: boolean()
def heart_beat_response(%__MODULE__{body: :heart_beat, types: [:ack_response]}) do
true
end
def heart_beat_response(%__MODULE__{}), do: false
@spec ack_request?(t()) :: boolean
def ack_request?(%__MODULE__{types: [:ack_request]}), do: true
def ack_request?(%__MODULE__{}), do: false
@spec as_ack_response(Grizzly.seq_number()) :: binary()
def as_ack_response(seq_number) do
<<0x23, 0x02, 0x40, 0x10, seq_number, 0x00, 0x00>>
end
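  # For example, acking sequence number 0x05 yields (derived directly from
  # the binary above):
  #
  #     iex> Grizzly.Packet.as_ack_response(0x05)
  #     <<0x23, 0x02, 0x40, 0x10, 0x05, 0x00, 0x00>>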
end
|
lib/grizzly/packet.ex
| 0.536799
| 0.423339
|
packet.ex
|
starcoder
|
defmodule Statisaur.Bivariate do
@moduledoc """
Contains functions for computing bivariate statistics.
"""
@doc """
Finds the covariance between two lists
### Examples
iex> Statisaur.Bivariate.covariance([1,3,5,7,9],[10,20,30,40,50])
50.0
"""
def covariance(list1, list2)
when is_list(list1) and is_list(list2) and length(list1) == length(list2) do
n = length(list1)
mu_x = Statisaur.mean(list1)
mu_y = Statisaur.mean(list2)
numerator =
Enum.zip(list1, list2)
      |> Enum.map(fn {x, y} -> (x - mu_x) * (y - mu_y) end)
|> Enum.sum()
numerator / (n - 1)
end
@doc """
Calculates the coefficients of regression.
B1 = sum((x(i) - mean(x)) * (y(i) - mean(y))) / sum( (x(i) - mean(x))^2 )
B0 = mean(y) - B1 * mean(x)
### Examples
iex> Statisaur.Bivariate.simple_linear_regression([118,484,664,1004,1231,1372],[30,58,87,115,120,142])
{2, {21.244330227661493, 0.08711964265011925}}
"""
def simple_linear_regression(list1, list2) do
m1 = Statisaur.mean(list1)
m2 = Statisaur.mean(list2)
b1_num_p1 = Statisaur.powered_error(list1, m1, 1)
b1_num_p2 = Statisaur.powered_error(list2, m2, 1)
b1_num = Statisaur.sum(Enum.map(Enum.zip(b1_num_p1, b1_num_p2), fn {x, y} -> x * y end))
b1_denom = Statisaur.sum(Statisaur.powered_error(list1, m1, 2))
b1 = b1_num / b1_denom
    b0 = m2 - b1 * m1
{2, {b0, b1}}
end
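  # Reading the doctest above: b0 ≈ 21.244 is the intercept and b1 ≈ 0.0871
  # the slope; the leading 2 is presumably the number of coefficients (an
  # assumption, it is not documented here).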
@doc """
Finds the Pearson correlation of two lists, provided they are of equal length.
### Examples
iex> Statisaur.Bivariate.pearson_correlation( [1,2,3,4], [3,4,5,6] ) |> Float.round(4)
1.0000
iex> Statisaur.Bivariate.pearson_correlation( [4,3,2,1], [3,4,5,6] ) |> Float.round(4)
-1.0000
iex> Statisaur.Bivariate.pearson_correlation( [1,2,-1,-2], [1,2,3,4] ) |> Float.round(4)
-0.8485
iex> Statisaur.Bivariate.pearson_correlation( [],[] )
** (ArgumentError) arguments must be non-zero length lists
iex> Statisaur.Bivariate.pearson_correlation( [1,2,3], [4,5] )
** (ArgumentError) arguments must be identical length lists
iex> Statisaur.Bivariate.pearson_correlation( [1,1], [1,3] )
** (ArithmeticError) std. deviation of one or both inputs is 0
"""
def pearson_correlation(list1, list2)
when is_list(list1) and is_list(list2) and (length(list1) == 0 or length(list2) == 0) do
raise ArgumentError, "arguments must be non-zero length lists"
end
def pearson_correlation(list1, list2)
when is_list(list1) and is_list(list2) and length(list1) != length(list2) do
raise ArgumentError, "arguments must be identical length lists"
end
def pearson_correlation(list1, list2)
when is_list(list1) and is_list(list2) and length(list1) != 0 and length(list2) != 0 and
length(list1) == length(list2) do
    cov_xy = covariance(list1, list2)
    sigma_x = Statisaur.stddev(list1)
    sigma_y = Statisaur.stddev(list2)
    # check that the std. deviations are nonzero
    denom = sigma_x * sigma_y

    if denom < 1.0e-10 do
      raise ArithmeticError, "std. deviation of one or both inputs is 0"
    else
      cov_xy / denom
    end
end
@doc """
Finds the pooled (weighted) variance of two samples (despite the name, no square root is taken).
# Examples
iex> Statisaur.Bivariate.pooled_stddev(0.5, 4)
** (ArgumentError) arguments must both be lists
iex> Statisaur.Bivariate.pooled_stddev([], [])
** (ArgumentError) arguments must be non-empty lists
iex> Statisaur.Bivariate.pooled_stddev([2], [4])
** (ArgumentError) arguments have insufficient degrees of freedom
iex> Statisaur.Bivariate.pooled_stddev([2,3,12], [40,44,48,54,60,32])
80.0
"""
def pooled_stddev(list1, list2)
when is_list(list1) == false or is_list(list2) == false do
raise ArgumentError, "arguments must both be lists"
end
def pooled_stddev(list1, list2)
when is_list(list1) and is_list(list2) and
(length(list1) < 1 or length(list2) < 1) do
raise ArgumentError, "arguments must be non-empty lists"
end
def pooled_stddev(list1, list2)
when is_list(list1) and is_list(list2) and
length(list1) + length(list2) < 3 do
raise ArgumentError, "arguments have insufficient degrees of freedom"
end
def pooled_stddev(list1, list2)
when is_list(list1) and is_list(list2) and
length(list1) > 0 and length(list2) > 0 do
mu1 = Statisaur.mean(list1)
mu2 = Statisaur.mean(list2)
err_list1 = Statisaur.powered_error(list1, mu1, 2)
err_list2 = Statisaur.powered_error(list2, mu2, 2)
sum_err_1 = Statisaur.sum(err_list1)
sum_err_2 = Statisaur.sum(err_list2)
(sum_err_1 + sum_err_2) / (length(list1) + length(list2) - 2)
end
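  # Worked check against the doctest above: the squared errors of [2,3,12]
  # sum to ~60.67 and those of [40,44,48,54,60,32] to ~499.33, so
  # (60.67 + 499.33) / (3 + 6 - 2) = 560 / 7 = 80.0.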
@doc """
Finds the pooled (weighted) std. error of two samples.
# Examples
iex> Statisaur.Bivariate.pooled_stderr(0.5, 4)
** (ArgumentError) arguments must both be lists
iex> Statisaur.Bivariate.pooled_stderr([], [])
** (ArgumentError) arguments must be non-empty lists
iex> Statisaur.Bivariate.pooled_stderr([2], [4])
** (ArgumentError) arguments have insufficient degrees of freedom
iex> Statisaur.Bivariate.pooled_stderr([2,3,12], [40,44,48,54,60,32]) |> Float.round(6)
5.172577
"""
def pooled_stderr(list1, list2)
when is_list(list1) == false or is_list(list2) == false do
raise ArgumentError, "arguments must both be lists"
end
def pooled_stderr(list1, list2)
when is_list(list1) and is_list(list2) and
(length(list1) < 1 or length(list2) < 1) do
raise ArgumentError, "arguments must be non-empty lists"
end
def pooled_stderr(list1, list2)
when is_list(list1) and is_list(list2) and
length(list1) + length(list2) < 3 do
raise ArgumentError, "arguments have insufficient degrees of freedom"
end
def pooled_stderr(list1, list2)
when is_list(list1) and is_list(list2) and
length(list1) > 0 and length(list2) > 0 do
v1 = Statisaur.variance(list1)
v2 = Statisaur.variance(list2)
n1 = length(list1)
n2 = length(list2)
stderrx = :math.sqrt(v1 / n1)
stderry = :math.sqrt(v2 / n2)
:math.sqrt(:math.pow(stderrx, 2) + :math.pow(stderry, 2))
end
@doc """
Function to find the t-score of two samples.
# Examples
iex> Statisaur.Bivariate.t_score(0.5, 4)
** (ArgumentError) arguments must both be lists
iex> Statisaur.Bivariate.t_score([], [])
** (ArgumentError) arguments must be non-empty lists
iex> Statisaur.Bivariate.t_score([2], [4])
** (ArgumentError) arguments have insufficient degrees of freedom
iex> Statisaur.Bivariate.t_score([2,3,12], [40,44,48,54,60,32]) |> Float.round(3)
-7.862
"""
def t_score(list1, list2)
when is_list(list1) == false or is_list(list2) == false do
raise ArgumentError, "arguments must both be lists"
end
def t_score(list1, list2)
when is_list(list1) and is_list(list2) and
(length(list1) < 1 or length(list2) < 1) do
raise ArgumentError, "arguments must be non-empty lists"
end
def t_score(list1, list2)
when is_list(list1) and is_list(list2) and
length(list1) + length(list2) < 3 do
raise ArgumentError, "arguments have insufficient degrees of freedom"
end
def t_score(list1, list2) do
mu1 = Statisaur.mean(list1)
mu2 = Statisaur.mean(list2)
std_err = pooled_stderr(list1, list2)
(mu1 - mu2) / std_err
end
end
|
lib/statisaur/bivariate.ex
| 0.856392
| 0.698709
|
bivariate.ex
|
starcoder
|
defmodule Zaryn.BeaconChain do
@moduledoc """
  Manage the beacon chain by providing functions to add information to the
  subsets and to retrieve the beacon storage nodes involved.
"""
alias Zaryn.BeaconChain.Slot
alias Zaryn.BeaconChain.Slot.EndOfNodeSync
alias Zaryn.BeaconChain.Slot.TransactionSummary
alias Zaryn.BeaconChain.Slot.Validation, as: SlotValidation
alias Zaryn.BeaconChain.SlotTimer
alias Zaryn.BeaconChain.Subset
alias Zaryn.BeaconChain.Subset.P2PSampling
alias Zaryn.BeaconChain.SummaryTimer
alias Zaryn.Crypto
alias Zaryn.Election
alias Zaryn.P2P
alias Zaryn.P2P.Message.GetTransactionChain
alias Zaryn.P2P.Message.TransactionList
alias Zaryn.P2P.Node
alias Zaryn.PubSub
alias Zaryn.TransactionChain
alias Zaryn.TransactionChain.Transaction
alias Zaryn.TransactionChain.TransactionData
require Logger
@type pools ::
list({
subset :: binary(),
nodes_by_date :: list({DateTime.t(), list(Node.t())})
})
@doc """
Initialize the beacon subsets (from 0 to 255 for a byte capacity)
"""
@spec init_subsets() :: :ok
def init_subsets do
subsets = Enum.map(0..255, &:binary.encode_unsigned(&1))
:persistent_term.put(:beacon_subsets, subsets)
end
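  # For example, after init_subsets/0 has run:
  #
  #     iex> BeaconChain.list_subsets() |> length()
  #     256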
@doc """
  List of all transaction subsets (256 subsets for a byte capacity)
## Examples
BeaconChain.list_subsets()
  [<<0>>, <<1>>, <<2>>, <<3>>, ..., <<253>>, <<254>>, <<255>>]
"""
@spec list_subsets() :: list(binary())
def list_subsets do
:persistent_term.get(:beacon_subsets)
end
@doc """
Retrieve the beacon summaries storage nodes from a last synchronization date
"""
  @spec get_summary_pools(DateTime.t(), list(Node.t())) :: pools()
def get_summary_pools(
last_sync_date = %DateTime{},
node_list \\ P2P.authorized_nodes()
) do
summary_times = SummaryTimer.previous_summaries(last_sync_date)
Enum.reduce(list_subsets(), [], fn subset, acc ->
nodes_by_summary_time =
Enum.map(summary_times, fn time ->
filter_nodes =
Enum.filter(node_list, &(DateTime.compare(&1.authorization_date, time) == :lt))
{time, Election.beacon_storage_nodes(subset, time, filter_nodes)}
end)
[{subset, nodes_by_summary_time} | acc]
end)
end
@doc """
Get the next beacon summary time
"""
@spec next_summary_date(DateTime.t()) :: DateTime.t()
defdelegate next_summary_date(date), to: SummaryTimer, as: :next_summary
@doc """
Get the next beacon slot time from a given date
"""
@spec next_slot(last_sync_date :: DateTime.t()) :: DateTime.t()
defdelegate next_slot(last_sync_date), to: SlotTimer
@doc """
Retrieve the beacon slots storage nodes from a last synchronization date
"""
@spec get_slot_pools(DateTime.t(), list(Node.t())) :: pools()
def get_slot_pools(date = %DateTime{}, node_list \\ P2P.authorized_nodes()) do
slot_times = SlotTimer.previous_slots(date)
Enum.reduce(list_subsets(), [], fn subset, acc ->
nodes_by_slot_time =
Enum.map(slot_times, fn time ->
filter_nodes =
Enum.filter(node_list, &(DateTime.compare(&1.authorization_date, time) == :lt))
{time, Election.beacon_storage_nodes(subset, time, filter_nodes)}
end)
[{subset, nodes_by_slot_time} | acc]
end)
end
@doc """
Extract the beacon subset from an address
## Examples
iex> BeaconChain.subset_from_address(<<0, 44, 242, 77, 186, 95, 176, 163,
...> 14, 38, 232, 59, 42, 197, 185, 226, 158, 51, 98, 147, 139, 152, 36,
  ...> 27, 22, 30, 92, 31, 167, 66, 94, 115, 4>>)
<<44>>
"""
@spec subset_from_address(binary()) :: binary()
def subset_from_address(address) do
:binary.part(address, 1, 1)
end
@doc """
Add a transaction to the beacon chain
"""
@spec add_transaction_summary(Transaction.t()) :: :ok
def add_transaction_summary(tx = %Transaction{address: address}) do
address
|> subset_from_address()
|> Subset.add_transaction_summary(TransactionSummary.from_transaction(tx))
PubSub.notify_new_transaction(address)
end
@doc """
Add a node entry into the beacon chain subset
"""
@spec add_end_of_node_sync(Crypto.key(), DateTime.t()) :: :ok
def add_end_of_node_sync(node_public_key, timestamp = %DateTime{})
when is_binary(node_public_key) do
node_public_key
|> subset_from_address
|> Subset.add_end_of_node_sync(%EndOfNodeSync{
public_key: node_public_key,
timestamp: timestamp
})
end
@doc """
Get the transaction address for a beacon chain daily summary based from a subset and date
"""
@spec summary_transaction_address(binary(), DateTime.t()) :: binary()
def summary_transaction_address(subset, date = %DateTime{}) when is_binary(subset) do
{pub, _} =
Crypto.derive_keypair(
Crypto.storage_nonce(),
Crypto.hash([subset, <<DateTime.to_unix(date)::32>>])
)
Crypto.hash(pub)
end
@doc """
Return the previous summary time
"""
@spec previous_summary_time(DateTime.t()) :: DateTime.t()
defdelegate previous_summary_time(date_from), to: SummaryTimer, as: :previous_summary
@doc """
Load the transaction in the beacon chain context
"""
@spec load_transaction(Transaction.t()) :: :ok | :error
def load_transaction(
tx = %Transaction{
address: address,
type: :beacon,
data: %TransactionData{content: content}
}
) do
with {%Slot{} = slot, _} <- Slot.deserialize(content),
:ok <- validate_slot(tx, slot),
         involved_nodes <- Slot.involved_nodes(slot),
         {:ok, %TransactionList{transactions: transactions}} <-
           P2P.reply_atomic(involved_nodes, 3, %GetTransactionChain{address: address}) do
[tx]
|> Stream.concat(transactions)
|> TransactionChain.write()
:ok
else
_ ->
:error
end
end
def load_transaction(_), do: :ok
defp validate_slot(
%Transaction{address: address},
slot = %Slot{subset: subset, slot_time: slot_time}
) do
cond do
address != Crypto.derive_beacon_chain_address(subset, slot_time) ->
{:error, :invalid_address}
!SlotValidation.valid_transaction_summaries?(slot) ->
{:error, :invalid_transaction_summaries}
!SlotValidation.valid_end_of_node_sync?(slot) ->
{:error, :invalid_end_of_node_sync}
true ->
:ok
end
end
@doc """
List the nodes for the subset to sample the P2P availability
"""
@spec list_p2p_sampling_nodes(binary()) :: list(Node.t())
defdelegate list_p2p_sampling_nodes(subset), to: P2PSampling, as: :list_nodes_to_sample
def config_change(changed_conf) do
changed_conf
|> Keyword.get(SummaryTimer)
|> SummaryTimer.config_change()
changed_conf
|> Keyword.get(SlotTimer)
|> SlotTimer.config_change()
end
end
|
lib/zaryn/beacon_chain.ex
| 0.884763
| 0.541288
|
beacon_chain.ex
|
starcoder
|
defmodule BMFont do
@moduledoc """
Parses text and binary BMFont files in accordance with the
[AngelCode spec](http://www.angelcode.com/products/bmfont/doc/file_format.html).
Everything is kept pretty much as is, with the exception of some of the fields
being renamed to their longer forms.
"""
@type t :: %BMFont{ info: %BMFont.Info{}, common: %BMFont.Common{}, pages: [%BMFont.Page{}], chars: [%BMFont.Char{}], kernings: [%BMFont.Kerning{}] }
defstruct info: %BMFont.Info{}, common: %BMFont.Common{}, pages: [], chars: [], kernings: []
@doc """
Parse a BMFont file format, supports both text and binary versions.
The `parse/1` function can be passed either the binary data, the entire string, or
an enumerable with the separate lines of text.
"""
@spec parse(binary | [String.t]) :: t
def parse(data = <<"BMF", _ :: binary>>) do
{ { { :header, {:magic, "BMF"}, {:version, 3} }, { :block, data } }, <<>> } = Tonic.load(data, BMFont.Binary)
new_binary(data)
end
def parse(string) when is_binary(string), do: parse(String.split(string, "\n", trim: true))
def parse(lines) do
Enum.map(lines, fn line ->
create_tag(String.trim(line) |> String.split(" ", trim: true))
end) |> new
end
defp create_tag(["char"|args]), do: BMFont.Char.parse(args)
defp create_tag(["kerning"|args]), do: BMFont.Kerning.parse(args)
defp create_tag(["page"|args]), do: BMFont.Page.parse(args)
defp create_tag(["info"|args]), do: BMFont.Info.parse(args)
defp create_tag(["common"|args]), do: BMFont.Common.parse(args)
defp create_tag(["chars"|_]), do: nil #isn't needed
defp create_tag(["kernings"|_]), do: nil #isn't needed
  defp create_tag(line) do
    IO.puts("Unable to parse line: #{Enum.join(line, " ")}")
    # return nil so new/2 skips the unparsed line
    nil
  end
defp new(tags, font \\ %BMFont{})
defp new([], font), do: %{ font | pages: Enum.reverse(font.pages), chars: Enum.reverse(font.chars), kernings: Enum.reverse(font.kernings) }
defp new([tag = %BMFont.Char{}|tags], font), do: new(tags, %{ font | chars: [tag|font.chars] })
defp new([tag = %BMFont.Kerning{}|tags], font), do: new(tags, %{ font | kernings: [tag|font.kernings] })
defp new([tag = %BMFont.Page{}|tags], font), do: new(tags, %{ font | pages: [tag|font.pages] })
defp new([tag = %BMFont.Info{}|tags], font), do: new(tags, %{ font | info: tag })
defp new([tag = %BMFont.Common{}|tags], font), do: new(tags, %{ font | common: tag })
defp new([nil|tags], font), do: new(tags, font)
defp new_binary(tags, font \\ %BMFont{})
defp new_binary([], font), do: font
defp new_binary([{ _, _, { :info, { :size, size }, { :smooth, smooth }, { :unicode, unicode }, { :italic, italic }, { :bold, bold }, _, { :charset, charset }, { :stretch_height, stretch_height }, { :supersampling, supersampling }, { :padding, { :up, padding_up }, { :right, padding_right }, { :down, padding_down }, { :left, padding_left } }, { :spacing, { :horizontal, spacing_horizontal }, { :vertical, spacing_vertical } }, { :outline, outline }, { :face, face } } }|tags], font), do: new_binary(tags, %{ font | info: %BMFont.Info{ size: size, smooth: smooth, unicode: unicode, italic: italic, bold: bold, charset: charset, stretch_height: stretch_height, supersampling: supersampling, padding: %{ up: padding_up, right: padding_right, down: padding_down, left: padding_left }, spacing: %{ horizontal: spacing_horizontal, vertical: spacing_vertical }, outline: outline, face: face } })
defp new_binary([{ _, _, { :common, { :line_height, line_height }, { :base, base }, { :width, width }, { :height, height }, { :pages, pages }, { :packed, packed }, { :alpha_channel, alpha_channel }, { :red_channel, red_channel }, { :green_channel, green_channel }, { :blue_channel, blue_channel } } }|tags], font), do: new_binary(tags, %{ font | common: %BMFont.Common{ line_height: line_height, base: base, width: width, height: height, pages: pages, packed: packed, alpha_channel: alpha_channel, red_channel: red_channel, green_channel: green_channel, blue_channel: blue_channel } })
defp new_binary([{ _, _, { :pages, pages } }|tags], font), do: new_binary(tags, %{ font | pages: Enum.map(Enum.with_index(pages), fn { { { :file, file } }, index } -> %BMFont.Page{ id: index, file: file } end) })
defp new_binary([{ _, _, { :chars, chars } }|tags], font), do: new_binary(tags, %{ font | chars: Enum.map(chars, fn { { :id, id }, { :x, x }, { :y, y }, { :width, width }, { :height, height }, { :xoffset, xoffset }, { :yoffset, yoffset }, { :xadvance, xadvance }, { :page, page }, { :channel, channel } } -> %BMFont.Char{ id: id, x: x, y: y, width: width, height: height, xoffset: xoffset, yoffset: yoffset, xadvance: xadvance, page: page, channel: channel } end) })
defp new_binary([{ _, _, { :kernings, kernings } }|tags], font), do: new_binary(tags, %{ font | kernings: Enum.map(kernings, fn { { :first, first }, { :second, second }, { :amount, amount } } -> %BMFont.Kerning{ first: first, second: second, amount: amount } end) })
end
|
lib/bmfont.ex
| 0.829596
| 0.444444
|
bmfont.ex
|
starcoder
|
defmodule GitHubActions.Workflow do
@moduledoc """
  The `GitHubActions.Workflow` module is used to create a GitHub Actions workflow.
```elixir
defmodule Minimal do
use GitHubActions.Workflow
def workflow do
[
name: "CI"
]
end
end
```
  The workflow module must define the `workflow/0` function. This function
  returns a nested data structure that will be translated into a YAML file.
The line `use GitHubActions.Workflow` imports `GitHubActions.Workflow`,
`GitHubActions.Mix` and `GitHubActions.Sigils` and adds the aliases
`GitHubActions.Config`, `GitHubActions.Project` and `GitHubActions.Versions`.
  List entries with the value `:skip` are dropped. Key-value pairs with a
  value of `:skip` are also not part of the resulting data structure.
  With `:skip`, you can handle optional parts in a workflow script.
```elixir
defmodule Simple do
use GitHubActions.Workflow
def workflow do
[
name: "CI",
jobs: [
linux: linux(),
os2: os2()
]
]
end
defp linux do
job(:linux,
name: "Test on \#{Config.fetch!([:linux, :name])}",
runs_on: Config.fetch!([:linux, :runs_on])
)
end
defp os2 do
    job(:os2,
name: "Test on \#{Config.fetch!([:os2, :name])}",
runs_on: Config.fetch!([:os2, :runs_on])
)
end
  defp job(os, config) do
case :jobs |> Config.get([]) |> Enum.member?(os) do
true -> config
false -> :skip
end
end
end
```
It is also possible to add steps when a dependency is available in the current
project.
```elixir
defmodule Simple do
use GitHubActions.Workflow
def workflow do
[
name: "CI",
jobs: [linux: linux()]
]
end
  defp linux do
    [
      name: "Test on \#{Config.fetch!([:linux, :name])}",
      runs_on: Config.fetch!([:linux, :runs_on]),
      steps: [
        checkout(),
        check_code_format(),
        lint_code()
      ]
    ]
  end
defp checkout do
[
name: "Checkout",
uses: "actions/checkout@v2"
]
end
defp lint_code do
case Project.has_dep?(:credo) do
false ->
:skip
true ->
[
name: "Lint code",
run: mix(:credo, strict: true, env: :test)
]
end
end
defp check_code_format do
case Config.get(:check_code_format, true) do
false ->
:skip
true ->
[
name: "Check code format",
run: mix(:format, check_formatted: true, env: :test)
]
end
end
end
```
"""
alias GitHubActions.ConvCase
defmacro __using__(_opts) do
quote do
import GitHubActions.Workflow
import GitHubActions.Mix
import GitHubActions.Sigils
alias GitHubActions.Config
alias GitHubActions.Project
alias GitHubActions.Versions
end
end
@doc """
  Evaluates a workflow script and returns the workflow data structure.
"""
  @spec eval(Path.t()) :: {:ok, term()} | {:error, :workflow} | :error
def eval(file) do
with {:ok, module} <- compile(file),
{:ok, workflow} <- workflow(module) do
{:ok, map(workflow)}
end
end
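  # A minimal usage sketch (the script path is an assumption):
  #
  #     {:ok, workflow} = GitHubActions.Workflow.eval(".github/workflow.exs")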
defp workflow(module) do
case function_exported?(module, :workflow, 0) do
true -> {:ok, module.workflow()}
false -> {:error, :workflow}
end
end
defp compile(file) do
case file |> File.read!() |> Code.eval_string([], file: file) do
{{:module, module, _bin, _meta}, _bind} -> {:ok, module}
_else -> :error
end
end
defp map({_key, :skip}), do: :skip
defp map({key, value}) do
{ConvCase.to_kebab(key), map(value)}
end
defp map(workflow) when is_list(workflow) do
workflow
|> Enum.reduce([], fn item, acc ->
case map(item) do
:skip -> acc
value -> [value | acc]
end
end)
|> Enum.reverse()
end
defp map(:skip), do: :skip
defp map(value), do: to_string(value)
end
|
lib/git_hub_actions/workflow.ex
| 0.769817
| 0.897695
|
workflow.ex
|
starcoder
|
defmodule Neoscan.Assets do
@moduledoc """
The boundary for the Assets system.
"""
@page_size 15
import Ecto.Query, warn: false
alias Neoscan.Repo
alias Neoscan.Asset
alias Neoscan.Counter
@doc """
  Gets a single asset by its hash value.
  ## Examples
  iex> get(123)
  %{transaction_hash: 123, name: "...", ...}
  iex> get(456)
  nil
"""
def get(hash) do
query =
from(
a in Asset,
left_join: ca in Counter,
on: a.transaction_hash == ca.ref and ca.name == "addresses_by_asset",
left_join: ct in Counter,
on: a.transaction_hash == ct.ref and ct.name == "transactions_by_asset",
where: a.transaction_hash == ^hash,
select: %{
transaction_hash: a.transaction_hash,
name: a.name,
block_time: a.block_time,
type: a.type,
owner: a.owner,
admin: a.admin,
issued: a.issued,
symbol: a.symbol,
precision: a.precision,
amount: a.amount,
addr_count: ca.value,
tx_count: ct.value
}
)
Repo.one(query)
end
@doc """
Returns the list of paginated assets.
## Examples
iex> paginate(page)
[%Asset{}, ...]
"""
def paginate(page) do
assets_query =
from(
a in Asset,
left_join: ca in Counter,
on: a.transaction_hash == ca.ref and ca.name == "addresses_by_asset",
left_join: ct in Counter,
on: a.transaction_hash == ct.ref and ct.name == "transactions_by_asset",
order_by: [desc: fragment("coalesce(?, 0)", ct.value)],
limit: @page_size,
select: %{
transaction_hash: a.transaction_hash,
name: a.name,
block_time: a.block_time,
type: a.type,
owner: a.owner,
admin: a.admin,
issued: a.issued,
precision: a.precision,
amount: a.amount,
symbol: a.symbol,
addr_count: ca.value,
tx_count: ct.value
}
)
Repo.paginate(assets_query, page: page, page_size: @page_size)
end
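  # Usage sketch: `paginate(1)` returns the first 15 assets ordered by
  # transaction count (assuming `Repo.paginate/2` is Scrivener-style).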
end
|
apps/neoscan/lib/neoscan/assets/assets.ex
| 0.814053
| 0.453867
|
assets.ex
|
starcoder
|
defmodule Day5 do
@moduledoc """
--- Day 5: A Maze of Twisty Trampolines, All Alike ---
An urgent interrupt arrives from the CPU: it's trapped in a maze of jump instructions, and it would like assistance
from any programs with spare cycles to help find the exit.
The message includes a list of the offsets for each jump. Jumps are relative: -1 moves to the previous instruction,
and 2 skips the next one. Start at the first instruction in the list. The goal is to follow the jumps until one leads
outside the list.
In addition, these instructions are a little strange; after each jump, the offset of that instruction increases by 1.
So, if you come across an offset of 3, you would move three instructions forward, but change it to a 4 for the next
time it is encountered.
For example, consider the following list of jump offsets:
0
3
0
1
-3
Positive jumps ("forward") move downward; negative jumps move upward. For legibility in this example, these offset
values will be written all on one line, with the current instruction marked in parentheses. The following steps would
be taken before an exit is found:
(0) 3 0 1 -3 - before we have taken any steps.
(1) 3 0 1 -3 - jump with offset 0 (that is, don't jump at all). Fortunately, the instruction is then incremented
to 1.
2 (3) 0 1 -3 - step forward because of the instruction we just modified. The first instruction is incremented
again, now to 2.
2 4 0 1 (-3) - jump all the way to the end; leave a 4 behind.
2 (4) 0 1 -2 - go back to where we just were; increment -3 to -2.
2 5 0 1 -2 - jump 4 steps forward, escaping the maze.
In this example, the exit is reached in 5 steps.
How many steps does it take to reach the exit?
--- Part Two ---
Now, the jumps are even stranger: after each jump, if the offset was three or more, instead decrease it by 1. Otherwise, increase it by 1 as before.
Using this rule with the above example, the process now takes 10 steps, and the offset values after finding the exit are left as 2 3 2 3 -1.
How many steps does it now take to reach the exit?
Your puzzle answer was 31150702.
"""
@doc """
  Part A for Day 5
"""
def common_part(file) do
    instruction_list =
      File.read!(file)
      |> String.split("\n", trim: true)
      |> Enum.map(&String.to_integer/1)
instruction_length = length(instruction_list)
{instruction_length, 0..(instruction_length-1) |> Enum.zip(instruction_list) |> Map.new}
end
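  # A sketch of the returned shape, assuming res/day5_test.input holds the
  # example offsets from the moduledoc (0 3 0 1 -3):
  #
  #     iex> Day5.common_part("res/day5_test.input")
  #     {5, %{0 => 0, 1 => 3, 2 => 0, 3 => 1, 4 => -3}}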
def part_a do
{instruction_length, instruction_map} = common_part("res/day5.input")
process_instructions(instruction_map, {0,0}, 0, instruction_length, false)
end
@doc """
  Part B for Day 5
"""
def part_b do
{instruction_length, instruction_map} = common_part("res/day5.input")
process_instructions(instruction_map, {0,0}, 0, instruction_length, true)
end
def test_a do
{instruction_length, instruction_map} = common_part("res/day5_test.input")
process_instructions(instruction_map, {0,0}, 0, instruction_length, false)
end
def test_b do
{instruction_length, instruction_map} = common_part("res/day5_test.input")
process_instructions(instruction_map, {0,0}, 0, instruction_length, true)
end
def process_instructions(_, {cur_loc,val}, steps, max_size, _)
when cur_loc + val >= max_size or cur_loc + val < 0 do
steps + 1
end
def process_instructions(instruction_map, {cur_loc, val}, steps, max_size, true)
when val >= 3 do
newmap = Map.replace(instruction_map, cur_loc, val - 1)
Help.print_dot_every(steps+1000, 1000000)
process_instructions(newmap, {cur_loc+val, Map.fetch!(newmap, cur_loc+val)}, steps+1, max_size, true)
end
def process_instructions(instruction_map, {cur_loc, val}, steps, max_size, bool_part) do
newmap = Map.replace(instruction_map, cur_loc, val + 1)
Help.print_dot_every(steps+1000, 1000000)
process_instructions(newmap, {cur_loc+val, Map.fetch!(newmap, cur_loc+val)}, steps+1, max_size, bool_part)
end
end
|
lib/day5.ex
| 0.73848
| 0.883638
|
day5.ex
|
starcoder
|
defmodule Nostrum.Struct.Guild.Member do
@moduledoc ~S"""
Struct representing a Discord guild member.
A `Nostrum.Struct.Guild.Member` stores a `Nostrum.Struct.User`'s properties
pertaining to a specific `Nostrum.Struct.Guild`.
## Mentioning Members in Messages
A `Nostrum.Struct.Guild.Member` can be mentioned in message content using the `String.Chars`
protocol or `mention/1`.
```Elixir
member = %Nostrum.Struct.Guild.Member{user: Nostrum.Struct.User{id: 120571255635181568}}
Nostrum.Api.create_message!(184046599834435585, "#{member}")
%Nostrum.Struct.Message{content: "<@120571255635181568>"}
member = %Nostrum.Struct.Guild.Member{user: Nostrum.Struct.User{id: 89918932789497856}}
Nostrum.Api.create_message!(280085880452939778, "#{Nostrum.Struct.Guild.Member.mention(member)}")
%Nostrum.Struct.Message{content: "<@89918932789497856>"}
```
"""
alias Nostrum.Permission
alias Nostrum.Struct.{Channel, Guild, User}
alias Nostrum.Struct.Guild.Role
alias Nostrum.{Snowflake, Util}
defstruct [
:user,
:nick,
:roles,
:joined_at,
:deaf,
:mute
]
defimpl String.Chars do
def to_string(member), do: @for.mention(member)
end
@typedoc """
The user struct. This field can be `nil` if the Member struct came as a partial Member object included
in a message received from a guild channel.
"""
@type user :: User.t() | nil
@typedoc "The nickname of the user"
@type nick :: String.t() | nil
@typedoc "A list of role ids"
@type roles :: [Role.id()]
@typedoc """
Date the user joined the guild.
  If you don't request offline guild members, this field will be `nil` for any members that come online.
"""
@type joined_at :: String.t() | nil
@typedoc """
Whether the user is deafened.
  If you don't request offline guild members, this field will be `nil` for any members that come online.
"""
@type deaf :: boolean | nil
@typedoc """
Whether the user is muted.
  If you don't request offline guild members, this field will be `nil` for any members that come online.
"""
@type mute :: boolean | nil
@type t :: %__MODULE__{
user: user,
nick: nick,
roles: roles,
joined_at: joined_at,
deaf: deaf,
mute: mute
}
@doc ~S"""
Formats a `Nostrum.Struct.Guild.Member` into a mention.
## Examples
```Elixir
iex> member = %Nostrum.Struct.Guild.Member{user: %Nostrum.Struct.User{id: 177888205536886784}}
...> Nostrum.Struct.Guild.Member.mention(member)
"<@177888205536886784>"
```
"""
@spec mention(t) :: String.t()
def mention(%__MODULE__{user: user}), do: User.mention(user)
@doc """
Returns a member's guild permissions.
## Examples
```Elixir
guild = Nostrum.Cache.GuildCache.get!(279093381723062272)
member = Map.get(guild.members, 177888205536886784)
Nostrum.Struct.Guild.Member.guild_permissions(member, guild)
#=> [:administrator]
```
"""
@spec guild_permissions(t, Guild.t()) :: [Permission.t()]
def guild_permissions(member, guild)
def guild_permissions(%__MODULE__{user: %{id: user_id}}, %Guild{owner_id: owner_id})
when user_id === owner_id,
do: Permission.all()
def guild_permissions(%__MODULE__{} = member, %Guild{} = guild) do
use Bitwise
everyone_role_id = guild.id
member_role_ids = member.roles ++ [everyone_role_id]
member_permissions =
member_role_ids
|> Enum.map(&Map.get(guild.roles, &1))
|> Enum.filter(&(!match?(nil, &1)))
|> Enum.reduce(0, fn role, bitset_acc ->
bitset_acc ||| role.permissions
end)
|> Permission.from_bitset()
if Enum.member?(member_permissions, :administrator) do
Permission.all()
else
member_permissions
end
end
@doc """
Returns a member's permissions in a guild channel, based on its `Nostrum.Struct.Overwrite`s.
## Examples
```Elixir
guild = Nostrum.Cache.GuildCache.get!(279093381723062272)
member = Map.get(guild.members, 177888205536886784)
channel_id = 381889573426429952
Nostrum.Struct.Guild.Member.guild_channel_permissions(member, guild, channel_id)
#=> [:manage_messages]
```
"""
@spec guild_channel_permissions(t, Guild.t(), Channel.id()) :: [Permission.t()]
def guild_channel_permissions(%__MODULE__{} = member, guild, channel_id) do
use Bitwise
guild_perms = guild_permissions(member, guild)
if Enum.member?(guild_perms, :administrator) do
Permission.all()
else
channel = Map.get(guild.channels, channel_id)
everyone_role_id = guild.id
role_ids = [everyone_role_id | member.roles]
overwrite_ids = role_ids ++ [member.user.id]
{allow, deny} =
channel.permission_overwrites
|> Enum.filter(&(&1.id in overwrite_ids))
|> Enum.map(fn overwrite -> {overwrite.allow, overwrite.deny} end)
|> Enum.reduce({0, 0}, fn {allow, deny}, {allow_acc, deny_acc} ->
{allow_acc ||| allow, deny_acc ||| deny}
end)
allow_perms = allow |> Permission.from_bitset()
deny_perms = deny |> Permission.from_bitset()
guild_perms
|> Enum.reject(&(&1 in deny_perms))
|> Enum.concat(allow_perms)
|> Enum.dedup()
end
end
@doc """
Return the topmost role of the given member on the given guild.
The topmost role is determined via `t:Nostrum.Struct.Guild.Role.position`.
## Parameters
- `member`: The member whose top role to return.
- `guild`: The guild which the member belongs to.
## Return value
The topmost role of the member on the given guild, if the member has roles
assigned. Otherwise, `nil` is returned.
"""
@doc since: "0.5.0"
@spec top_role(__MODULE__.t(), Guild.t()) :: Role.t() | nil
def top_role(%__MODULE__{roles: member_roles}, %Guild{roles: guild_roles}) do
guild_roles
|> Stream.filter(fn {id, _role} -> id in member_roles end)
|> Stream.map(fn {_id, role} -> role end)
|> Enum.max_by(& &1.position, fn -> nil end)
end
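  # A hypothetical sketch (ids and positions invented for illustration): with
  # guild.roles %{1 => %Role{id: 1, position: 0}, 2 => %Role{id: 2, position: 5}}
  # and a member whose roles are [1, 2], top_role/2 returns the role at
  # position 5.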
@doc false
def p_encode do
%__MODULE__{
user: User.p_encode()
}
end
@doc false
def to_struct(map) do
new =
map
|> Map.new(fn {k, v} -> {Util.maybe_to_atom(k), v} end)
|> Map.update(:user, nil, &Util.cast(&1, {:struct, User}))
|> Map.update(:roles, nil, &Util.cast(&1, {:list, Snowflake}))
struct(__MODULE__, new)
end
end
|
lib/nostrum/struct/guild/member.ex
| 0.867204
| 0.729941
|
member.ex
|
starcoder
|
defmodule Akd.Generator.Task do
@moduledoc """
This module handles the generation of a custom task which use `Akd.Task`.
This can either directly be called, or called through a mix task,
`mix akd.gen.task`.
This class uses EEx and Mix.Generator to fetch file contents from an eex
template and populate the interpolated fields, writing it to the speficied
file.
## Usage:
The following call creates a file `run.ex` at location `path/to/file/run.ex`
```
Akd.Generator.Task.gen(["run.ex"], path: "path/to/file")
```
"""
require EEx
require Mix.Generator
@path "lib/"
  # Native hook types that can be added using this generator
@hooks ~w(fetch init build publish start stop)a
@doc """
This is the callback implementation for `gen/2`.
This function takes in a list of inputs and a list of options and generates
a module that uses `Akd.Task` at the specified path with the specified name.
The first element of the input is expected to be the name of the file.
The path can be sent to the `opts`.
If no path is sent, it defaults to #{@path}
## Examples:
```elixir
  Akd.Generator.Task.gen(["task.ex"], [path: "some/path"])
```
"""
@spec gen(list, Keyword.t()) :: :ok | {:error, String.t()}
def gen([name | _], opts) do
name
|> validate_and_format_opts(opts)
|> text_from_template()
|> write_to_file(name)
end
# This function validates the name and options sent to the generator
# and formats the options making it ready for the template to read from.
defp validate_and_format_opts(name, opts) do
opts =
@hooks
|> Enum.reduce(opts, &resolve_hook_opts/2)
|> Keyword.put_new(:path, @path)
|> Keyword.put_new(:with_phx, false)
[{:name, resolve_name(name)} | opts]
end
# This function adds the default_hook to a keyword, if the keyword
# doesn't have key corresponding to the `hook`. Else just returns the keyword
# itself.
defp resolve_hook_opts(hook, opts) do
Keyword.put_new(opts, hook, default_string(hook))
end
# This function gets default_hook from `Akd` module based on hook type
# and converts the module name to string
defp default_string(hook) do
Akd
|> apply(hook, [])
|> Macro.to_string()
end
# This function gets the name of file from the module name
defp resolve_name(name) do
Macro.camelize(name)
end
# This function gives the location for the template which will be used
# by the generator
defp template(), do: "#{__DIR__}/templates/task.ex.eex"
# This function takes formatted options and returns a tuple.
# First element of the tuple is the path to file and second element is
# the evaluated file string.
defp text_from_template(opts) do
{Keyword.get(opts, :path), EEx.eval_file(template(), assigns: opts)}
end
# This function writes contents to a file at a specific path
defp write_to_file({path, code}, name) do
path = path <> Macro.underscore(name) <> ".ex"
case File.exists?(path) do
true -> {:error, "File #{path} already exists."}
_ -> Mix.Generator.create_file(path, code)
end
end
end
|
lib/akd/generator/task.ex
| 0.88471
| 0.875095
|
task.ex
|
starcoder
|
defmodule Ockam.Session.Pluggable.Initiator do
@moduledoc """
  Simple routing session initiator.

  Upon starting, it uses Handshake.init to generate a handshake message
  and sends it to init_route.

  The initial stage is :handshake; in this stage the worker waits for a
  handshake response. After receiving one, it runs Handshake.handle_initiator,
  starts the data worker in the same process, and moves to the :data stage.

  The data worker is started with `worker_options` merged with
  the options returned from handle_initiator.

  In the :data stage, all messages are handled by the data worker module.
Options:
`init_route` - route to responder (or spawner)
`worker_mod` - data worker module
`worker_options` - data worker options
`handshake` - handshake module (defaults to `Ockam.Session.Handshake.Default`)
`handshake_options` - options for handshake module
"""
use Ockam.AsymmetricWorker
alias Ockam.Message
alias Ockam.Router
alias Ockam.Session.Pluggable, as: RoutingSession
require Logger
@dialyzer {:nowarn_function, handle_inner_message: 2, handle_outer_message: 2}
def get_stage(worker) do
Ockam.Worker.call(worker, :get_stage)
end
def wait_for_session(worker, interval \\ 100, timeout \\ 5000)
def wait_for_session(_worker, _interval, expire) when expire < 0 do
{:error, :timeout}
end
def wait_for_session(worker, interval, timeout) do
case get_stage(worker) do
:data ->
:ok
:handshake ->
:timer.sleep(interval)
wait_for_session(worker, interval, timeout - interval)
end
end
def create_and_wait(options, interval \\ 50, timeout \\ 5000) do
with {:ok, address} <- create(options),
:ok <- wait_for_session(address, interval, timeout) do
{:ok, address}
end
end
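  # A minimal sketch (the option values are assumptions for illustration):
  #
  #     {:ok, address} =
  #       Ockam.Session.Pluggable.Initiator.create_and_wait(
  #         init_route: ["example_responder"],
  #         worker_mod: MyDataWorker
  #       )
  #
  # This blocks until the handshake completes and the worker reaches :data.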
@impl true
def address_prefix(_options), do: "S_I_"
@impl true
def inner_setup(options, state) do
## TODO: should init_route be in the handshake options?
init_route = Keyword.fetch!(options, :init_route)
## rename to data_mod
worker_mod = Keyword.fetch!(options, :worker_mod)
worker_options = Keyword.get(options, :worker_options, [])
base_state = Map.put(state, :module, worker_mod)
handshake = Keyword.get(options, :handshake, Ockam.Session.Handshake.Default)
handshake_options = Keyword.get(options, :handshake_options, [])
handshake_state = %{
init_route: init_route,
worker_address: state.inner_address,
handshake_address: state.inner_address
}
state =
Map.merge(state, %{
worker_mod: worker_mod,
worker_options: worker_options,
base_state: base_state
})
handshake_state = send_handshake(handshake, handshake_options, handshake_state)
state =
Map.merge(state, %{
handshake: handshake,
handshake_options: handshake_options,
handshake_state: handshake_state,
stage: :handshake
})
{:ok, state}
end
def send_handshake(handshake, handshake_options, handshake_state) do
{:next, handshake_msg, handshake_state} = handshake.init(handshake_options, handshake_state)
send_message(handshake_msg)
handshake_state
end
@impl true
def handle_call(:get_stage, _from, state) do
{:reply, Map.get(state, :stage), state}
end
@impl true
def handle_message(message, %{stage: :handshake} = state) do
case message_type(message, state) do
:inner ->
handle_handshake_message(message, state)
_other ->
Logger.info("Ignoring non-inner message in handshake stage: #{inspect(message)}")
{:ok, state}
end
end
def handle_message(message, %{stage: :data, data_state: _, worker_mod: _} = state) do
RoutingSession.handle_data_message(message, state)
end
def handle_handshake_message(message, state) do
handshake = Map.fetch!(state, :handshake)
handshake_options = Map.fetch!(state, :handshake_options)
handshake_state = Map.fetch!(state, :handshake_state)
case handshake.handle_initiator(handshake_options, message, handshake_state) do
{:ready, options, handshake_state} ->
switch_to_data_stage(options, handshake_state, state)
{:ready, message, options, handshake_state} ->
switch_to_data_stage(message, options, handshake_state, state)
{:next, message, handshake_state} ->
send_message(message)
{:ok, Map.put(state, :handshake_state, handshake_state)}
{:next, handshake_state} ->
{:ok, Map.put(state, :handshake_state, handshake_state)}
{:error, err} ->
## TODO: error handling in Ockam.Worker
{:error, err}
end
end
def switch_to_data_stage(message \\ nil, handshake_options, handshake_state, state) do
base_state = Map.get(state, :base_state)
worker_mod = Map.fetch!(state, :worker_mod)
worker_options = Map.fetch!(state, :worker_options)
options = Keyword.merge(worker_options, handshake_options)
case worker_mod.setup(options, base_state) do
{:ok, data_state} ->
send_message(message)
{:ok,
Map.merge(state, %{
data_state: data_state,
handshake_state: handshake_state,
stage: :data
})}
{:error, err} ->
{:stop, {:cannot_start_data_worker, {:error, err}, options, handshake_state, base_state},
state}
end
end
def send_message(nil) do
:ok
end
def send_message(message) do
Logger.info("Sending handshake #{inspect(message)}")
Router.route(message)
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/session/pluggable/initiator.ex
| 0.691185
| 0.5083
|
initiator.ex
|
starcoder
|
defmodule Xpeg.Codegen do
@moduledoc false
defp emit_inst(ip, inst, options) do
case inst do
{:nop} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:any, n} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures, unquote(n))
end
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures, n) do
case {s, n} do
{[_|s2], 1} -> parse(unquote(ip+1), s2, si+1, ctx, back_stack, ret_stack, cap_stack, captures)
{[_|s2], m} -> parse(unquote(ip), s2, si+1, ctx, back_stack, ret_stack, cap_stack, captures, m-1)
{[], _} -> parse(:fail, [], si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
end
{:chr, cmatch} ->
quote location: :keep do
def parse(unquote(ip), s=[c|s2], si, ctx, back_stack, ret_stack, cap_stack, captures) when c == unquote(cmatch) do
parse(unquote(ip+1), s2, si+1, ctx, back_stack, ret_stack, cap_stack, captures)
end
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(:fail, s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:set, cs} ->
quote location: :keep do
def parse(unquote(ip), s=[c|s2], si, ctx, back_stack, ret_stack, cap_stack, captures) when c in unquote(cs) do
parse(unquote(ip+1), s2, si+1, ctx, back_stack, ret_stack, cap_stack, captures)
end
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(:fail, s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:span, cs} ->
quote location: :keep do
def parse(unquote(ip), s=[c|s2], si, ctx, back_stack, ret_stack, cap_stack, captures) when c in unquote(cs) do
parse(unquote(ip), s2, si+1, ctx, back_stack, ret_stack, cap_stack, captures)
end
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:return} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
case ret_stack do
[ip | ret_stack] ->
parse(ip, s, si, ctx, back_stack, ret_stack, cap_stack, captures)
[] ->
{ctx, s, si, :ok, cap_stack, captures}
end
end
end
{:choice, ip_back, ip_commit} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
frame = { unquote(ip_back), unquote(ip_commit), ret_stack, cap_stack, s, si }
back_stack = [frame | back_stack]
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:commit} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
[frame | back_stack] = back_stack
{ _, ip, _, _, _, _ } = frame
parse(ip, s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:call, addr} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
ret_stack = [unquote(ip+1) | ret_stack]
parse(unquote(addr), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:jump, addr} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
parse(unquote(addr), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:capopen} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
cap_stack = [{:open, s, si} | cap_stack]
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:capclose, type} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
cap_stack = [{:close, s, si, unquote(type)} | cap_stack]
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:code, code} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
{cap_stack, captures} = Xpeg.collect_captures(cap_stack, captures)
func = unquote(code)
{captures, ctx} = case unquote(options[:userdata]) do
true -> func.(captures, ctx)
_ -> {func.(captures), ctx}
end
parse(unquote(ip+1), s, si, ctx, back_stack, ret_stack, cap_stack, captures)
end
end
{:fail} ->
quote location: :keep do
def parse(unquote(ip), s, si, ctx, back_stack, ret_stack, cap_stack, captures) do
case back_stack do
[frame | back_stack] ->
{ ip, _, ret_stack, cap_stack, s, si } = frame
parse(ip, s, si, ctx, back_stack, ret_stack, cap_stack, captures)
[] ->
{ctx, s, si, :error, cap_stack, captures}
end
end
end
end
end
def add_trace(options, ast, ip, inst) do
if options[:trace] do
{ast, _} = Macro.prewalk(ast, false, fn
{:do, body}, false ->
body = quote do
Xpeg.trace(unquote(ip), unquote(inspect(inst)), s)
unquote(body)
end
{{:do, body}, true}
e, done -> {e, done}
end)
ast
else
ast
end
end
def emit(program, options \\ []) do
ast = Enum.reduce(program.instructions, [], fn {ip, inst}, defs ->
ast = emit_inst(ip, inst, options)
case ast do
{:__block__, _, subs} ->
Enum.map(subs, &add_trace(options, &1, ip, inst)) ++ defs
_ -> [add_trace(options, ast, ip, inst) | defs]
end
end)
ast = {
:__block__, [], [quote do
require Xpeg
end ] ++ ast
}
if options[:dump_code] do
IO.puts(Macro.to_string(ast))
end
Macro.escape(ast)
end
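  # Intended use, as an assumption about the surrounding API: the escaped AST
  # returned by emit/2 is unquoted into a module, producing one parse clause
  # per instruction pointer plus the :fail clause.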
end
# set ft=elixir
|
lib/codegen.ex
| 0.571647
| 0.598899
|
codegen.ex
|
starcoder
|
defmodule Holidays.Definitions.Us do
import Holidays.Define
alias Holidays.DateCalculator.DateMath
@moduledoc """
United States holiday definitions.
"""
def init() do
holiday "Good Friday",
%{regions: [:us],
function: {Holidays, :easter, [:year], -2},
type: :informal}
holiday "Easter Sunday",
%{regions: [:us],
function: {Holidays, :easter, [:year]},
type: :informal}
holiday "New Year's Day",
%{month: 1,
regions: [:us],
day: 1,
observed: {:to_weekday_if_weekend, [:date]}}
holiday "<NAME>, Jr. Day",
%{month: 1,
week: :third,
regions: [:us],
weekday: :monday}
holiday "Inauguration Day",
%{month: 1,
function: {Holidays.Definitions.Us, :inauguration_day, [:year]},
regions: [:us_dc]}
holiday "Presidents' Day",
%{month: 2,
week: :third,
regions: [:us],
weekday: :monday}
holiday "Cesar Chavez Day",
%{month: 3,
regions: [:us_ca],
day: 31}
holiday "Memorial Day",
%{month: 5,
week: :last,
regions: [:us],
weekday: :monday}
holiday "Independence Day",
%{month: 7,
regions: [:us],
day: 4,
observed: {:to_weekday_if_weekend, [:date]}}
holiday "Labor Day",
%{month: 9,
week: :first,
regions: [:us],
weekday: :monday}
holiday "Columbus Day",
%{month: 10,
week: :second,
regions: [:us],
weekday: :monday}
holiday "Veterans Day",
%{month: 11,
regions: [:us],
day: 11,
observed: {:to_weekday_if_weekend, [:date]}}
holiday "Thanksgiving",
%{month: 11,
week: :fourth,
regions: [:us],
weekday: :thursday}
holiday "Day after Thanksgiving",
%{month: 11,
function: {Holidays.Definitions.Us, :day_after_thanksgiving, [:year]},
regions: [:us]}
holiday "Christmas Day",
%{month: 12,
regions: [:us],
day: 25,
observed: {:to_weekday_if_weekend, [:date]}}
end
@doc """
January 20, every fourth year, following Presidential election.
## Examples
  iex> Holidays.Definitions.Us.inauguration_day(2016)
  :none
  iex> Holidays.Definitions.Us.inauguration_day(2017)
  {2017, 1, 20}
  iex> Holidays.Definitions.Us.inauguration_day(2018)
  :none
"""
  def inauguration_day(year) when rem(year, 4) == 1, do: {year, 1, 20}
def inauguration_day(_year), do: :none
def day_after_thanksgiving(year) do
DateMath.get_weekth_day(year, 11, :fourth, :thursday)
|> DateMath.add_days(1)
end
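  # For example, Thanksgiving 2017 fell on November 23rd, so (assuming
  # DateMath returns Erlang-style date tuples) day_after_thanksgiving(2017)
  # yields {2017, 11, 24}.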
end
|
lib/holidays/definitions/us.ex
| 0.793226
| 0.789477
|
us.ex
|
starcoder
|
defmodule Axon.Initializers do
@moduledoc """
Parameter initializers.
Parameter initializers are used to initialize the weights
and biases of a neural network. Because most deep learning
optimization algorithms are iterative, they require an initial
point to iterate from.
Sometimes the initialization of a model can determine whether
or not a model converges. In some cases, the initial point is
unstable, and therefore the model has no chance of converging
using common first-order optimization methods. In cases where
the model will converge, initialization can have a significant
impact on how quickly the model converges.
Most initialization strategies are built from intuition and
heuristics rather than theory. It's commonly accepted that
the parameters of different layers should be different -
motivating the use of random initialization for each layer's
parameters. Usually, only the weights of a layer are initialized
using a random distribution - while the biases are initialized
to a uniform constant (like 0).
Most initializers use Gaussian (normal) or uniform distributions
with variations on scale. The output scale of an initializer
should generally be large enough to avoid information loss but
small enough to avoid exploding values. The initializers in
this module have a default scale known to work well with
the initialization strategy.
All of the functions in this module are implemented as
numerical functions and can be JIT or AOT compiled with
any supported `Nx` compiler.
"""
# TODO: Add random keys
import Nx.Defn
import Axon.Shared
@doc """
Initializes parameters to 0.
## Examples
iex> Axon.Initializers.zeros(shape: {2, 2})
#Nx.Tensor<
f32[2][2]
[
[0.0, 0.0],
[0.0, 0.0]
]
>
"""
defn zeros(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.broadcast(Nx.tensor(0, type: opts[:type]), opts[:shape])
end
@doc """
Initializes parameters to 1.
## Examples
iex> Axon.Initializers.ones(shape: {2, 2})
#Nx.Tensor<
f32[2][2]
[
[1.0, 1.0],
[1.0, 1.0]
]
>
"""
defn ones(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.broadcast(Nx.tensor(1, type: opts[:type]), opts[:shape])
end
@doc """
Initializes parameters to value.
## Examples
iex> Axon.Initializers.full(1.00, shape: {2, 2})
#Nx.Tensor<
f32[2][2]
[
[1.0, 1.0],
[1.0, 1.0]
]
>
"""
defn full(value, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.as_type(Nx.broadcast(value, opts[:shape]), opts[:type])
end
@doc """
Initializes parameters to an identity matrix.
## Examples
iex> Axon.Initializers.identity(shape: {2, 2})
#Nx.Tensor<
f32[2][2]
[
[1.0, 0.0],
[0.0, 1.0]
]
>
"""
defn identity(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
Nx.eye(opts[:shape], type: opts[:type])
end
@doc """
Initializes parameters with a random uniform distribution.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0e-2`
## Examples
iex> t = Axon.Initializers.uniform(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.uniform(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
"""
defn uniform(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0e-2])
shape = Nx.shape(opts[:shape])
Nx.random_uniform(shape, Nx.negate(opts[:scale]), opts[:scale], type: opts[:type])
end
@doc """
Initializes parameters with a random normal distribution.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:mean` - mean of the output distribution. Defaults to `0.0`
* `:scale` - scale of the output distribution. Defaults to `1.0e-2`
## Examples
iex> t = Axon.Initializers.normal(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.normal(shape: {2, 2}, type: {:bf, 16}, mean: 1.0, scale: 1.0)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
"""
defn normal(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0e-2, mean: 0.0])
Nx.random_normal(opts[:shape], opts[:mean], opts[:scale], type: opts[:type])
end
@doc """
Initializes parameters with the Lecun uniform initializer.
The Lecun uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :uniform`.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> t = Axon.Initializers.lecun_uniform(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.lecun_uniform(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
defn lecun_uniform(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :uniform
)
end
@doc """
Initializes parameters with the Lecun normal initializer.
The Lecun normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :truncated_normal`.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> t = Axon.Initializers.lecun_normal(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.lecun_normal(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
defn lecun_normal(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with the Glorot uniform initializer.
The Glorot uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_avg`
and `distribution: :uniform`.
The Glorot uniform initializer is also called the Xavier
uniform initializer.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> t = Axon.Initializers.glorot_uniform(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.glorot_uniform(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)
"""
defn glorot_uniform(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_avg,
distribution: :uniform
)
end
@doc """
Initializes parameters with the Glorot normal initializer.
The Glorot normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_avg`
and `distribution: :truncated_normal`.
The Glorot normal initializer is also called the Xavier
normal initializer.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0`
## Examples
iex> t = Axon.Initializers.glorot_normal(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.glorot_normal(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)
"""
defn glorot_normal(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_avg,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with the He uniform initializer.
The He uniform initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :uniform`.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `2.0`
## Examples
iex> t = Axon.Initializers.he_uniform(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.he_uniform(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
"""
defn he_uniform(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 2.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :uniform
)
end
@doc """
Initializes parameters with the He normal initializer.
The He normal initializer is equivalent to calling
`Axon.Initializers.variance_scaling` with `mode: :fan_in`
and `distribution: :truncated_normal`.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `2.0`
## Examples
iex> t = Axon.Initializers.he_normal(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.he_normal(shape: {2, 2}, type: {:bf, 16}, scale: 1.0e-3)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
## References
* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
"""
defn he_normal(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, scale: 2.0])
variance_scaling(
shape: opts[:shape],
type: opts[:type],
scale: opts[:scale],
mode: :fan_in,
distribution: :truncated_normal
)
end
@doc """
Initializes parameters with variance scaling according to
the given distribution and mode.
Variance scaling adapts scale to the weights of the output
tensor.
## Options
* `:shape` - output shape
* `:type` - output type. Defaults to `{:f, 32}`
* `:scale` - scale of the output distribution. Defaults to `1.0`
* `:mode` - compute fan mode. One of `:fan_in`, `:fan_out`, or `:fan_avg`.
Defaults to `:fan_in`
* `:distribution` - output distribution. One of `:normal`, `:truncated_normal`,
or `:uniform`. Defaults to `:normal`
## Examples
iex> t = Axon.Initializers.variance_scaling(shape: {2, 2})
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:f, 32}
iex> t = Axon.Initializers.variance_scaling(shape: {2, 2}, type: {:bf, 16}, mode: :fan_out, distribution: :truncated_normal)
iex> Nx.shape(t)
{2, 2}
iex> Nx.type(t)
{:bf, 16}
iex> t = Axon.Initializers.variance_scaling(shape: {64, 3, 32, 32}, mode: :fan_out, distribution: :normal)
iex> Nx.shape(t)
{64, 3, 32, 32}
iex> Nx.type(t)
{:f, 32}
iex> Axon.Initializers.variance_scaling(shape: {2, 2}, mode: :not_a_mode)
** (ArgumentError) invalid mode :not_a_mode passed to variance_scaling/1
iex> Axon.Initializers.variance_scaling(shape: {2, 2}, distribution: :not_a_dist)
** (ArgumentError) invalid distribution :not_a_dist passed to variance_scaling/1
"""
defn variance_scaling(opts \\ []) do
opts =
keyword!(opts, [:shape, type: {:f, 32}, scale: 1.0, mode: :fan_in, distribution: :normal])
fans = transform(opts[:shape], &compute_fans/1)
denominator =
transform(
{fans, opts[:mode]},
fn
{{fan_in, _}, :fan_in} ->
fan_in
{{_, fan_out}, :fan_out} ->
fan_out
{{fan_in, fan_out}, :fan_avg} ->
(fan_in + fan_out) / 2.0
{{_, _}, mode} ->
raise ArgumentError, "invalid mode #{inspect(mode)} passed to variance_scaling/1"
end
)
variance = Nx.divide(Nx.tensor(opts[:scale], type: opts[:type]), Nx.max(denominator, 1.0))
var_opts = transform(opts, &Keyword.take(&1, [:shape, :type]))
transform(
{opts[:distribution], variance, var_opts},
fn
{:normal, variance, opts} ->
var_normal(variance, opts)
{:uniform, variance, opts} ->
var_uniform(variance, opts)
{:truncated_normal, variance, opts} ->
var_truncated(variance, opts)
{dist, _, _} ->
raise ArgumentError,
"invalid distribution #{inspect(dist)} passed to variance_scaling/1"
end
)
end
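# Worked example (values follow from compute_fans/1 below): for a conv
# kernel of shape {64, 3, 32, 32} the receptive field size is 32 * 32 = 1024,
# so fan_in = 64 * 1024 = 65536 and fan_out = 3 * 1024 = 3072. With the
# defaults (scale: 1.0, mode: :fan_in) the sampling variance is 1.0 / 65536,
# i.e. a standard deviation of 1 / 256 for the :normal distribution.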
@doc """
Initializes a tensor with an orthogonal distribution.
For 2-D tensors, the initialization is generated through the QR decomposition of a random distribution.
For tensors with more than 2 dimensions, a 2-D tensor with shape `{shape[0] * shape[1] * ... * shape[n-2], shape[n-1]}`
is initialized and then reshaped accordingly.
## Options
* `:shape` - output shape. Must be at least rank `2`
* `:type` - output type (also used for the random seed). Defaults to `{:f, 32}`
* `:distribution` - output distribution. One of [`:normal`, `:uniform`]. Defaults to `:normal`
## Examples
iex> t = Axon.Initializers.orthogonal(shape: {3, 3})
iex> Nx.type(t)
{:f, 32}
iex> Nx.shape(t)
{3, 3}
iex> t = Axon.Initializers.orthogonal(shape: {1, 2, 3, 4}, type: {:f, 64})
iex> Nx.type(t)
{:f, 64}
iex> Nx.shape(t)
{1, 2, 3, 4}
"""
defn orthogonal(opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}, distribution: :normal])
shape = opts[:shape]
distribution = opts[:distribution]
type = opts[:type]
assert_greater_equal_rank!(shape, 2)
{{m, n}, random_seed} =
transform({shape, distribution, type}, fn {shape, distribution, type} ->
flat_shape =
if tuple_size(shape) > 2 do
tuple_list = shape |> Tuple.to_list() |> Enum.reverse()
n = hd(tuple_list)
m = Enum.reduce(tl(tuple_list), 1, &(&1 * &2))
{m, n}
else
shape
end
random_seed =
case distribution do
:uniform ->
Nx.random_uniform(flat_shape, type: type, backend: Nx.Defn.Expr)
:normal ->
Nx.random_normal(flat_shape, type: type, backend: Nx.Defn.Expr)
dist ->
raise ArgumentError,
"invalid distribution #{inspect(dist)} passed to orthogonal/1"
end
{flat_shape, random_seed}
end)
{q, _r} = Nx.LinAlg.qr(random_seed, mode: :complete)
q
|> Nx.slice([0, 0], [m, n])
|> Nx.reshape(shape)
end
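# Sanity-check sketch (not part of the public API): for a square shape the
# result has orthonormal columns, so the product below should be close to
# the identity matrix up to floating point error:
#
#     t = Axon.Initializers.orthogonal(shape: {4, 4})
#     Nx.dot(Nx.transpose(t), t)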
# Variance scaling branches
defnp var_normal(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
sigma = Nx.sqrt(variance)
Nx.random_normal(shape, 0.0, sigma, type: type)
end
defnp var_uniform(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
limit = Nx.sqrt(3 * variance)
Nx.random_uniform(shape, -limit, limit, type: type)
end
defnp var_truncated(variance, opts \\ []) do
opts = keyword!(opts, [:shape, type: {:f, 32}])
shape = opts[:shape]
type = opts[:type]
sigma =
variance
|> Nx.sqrt()
|> Nx.divide(0.87962566103423978)
Nx.clip(Nx.random_normal(shape, 0.0, sigma, type: type), -2, 2)
end
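# The constant 0.87962566103423978 above is the standard deviation of a
# standard normal distribution truncated to [-2, 2]; dividing by it rescales
# sigma so the clipped samples keep approximately the requested variance.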
defp compute_fans(shape) do
rank = Nx.rank(shape)
{fan_in, fan_out} =
cond do
rank < 1 ->
{1, 1}
rank == 1 ->
{elem(shape, 0), elem(shape, 0)}
rank == 2 ->
{elem(shape, 0), elem(shape, 1)}
true ->
receptive_field_size = Nx.size(shape) / elem(shape, 0) / elem(shape, 1)
fan_in = elem(shape, 0) * receptive_field_size
fan_out = elem(shape, 1) * receptive_field_size
{fan_in, fan_out}
end
{fan_in, fan_out}
end
end
|
lib/axon/initializers.ex
| 0.894384
| 0.821188
|
initializers.ex
|
starcoder
|
require Record
defmodule Geef.Reference do
import Geef
alias Geef.Reference
defstruct repo: nil, name: nil, type: nil, target: nil
def create(repo, name, target, force \\ :false) do
# fixme: this needs to ask the repo itself
repo_handle = :geef_repo.handle(repo)
case :geef_nif.reference_create(repo_handle, name, :oid, target, force) do
:ok ->
{:ok, %Reference{repo: repo, name: name, type: :oid, target: target}}
error ->
error
end
end
def create!(repo, name, target, force \\ :false) do
create(repo, name, target, force) |> assert_ok
end
def create_symbolic(repo, name, target, force \\ :false) do
# fixme: this needs to ask the repo itself
repo_handle = :geef_repo.handle(repo)
case :geef_nif.reference_create(repo_handle, name, :symbolic, target, force) do
:ok ->
{:ok, %Reference{repo: repo, name: name, type: :symbolic, target: target}}
error ->
error
end
end
def create_symbolic!(repo, name, target, force \\ :false) do
create_symbolic(repo, name, target, force) |> assert_ok
end
def lookup(repo, name) do
case :geef_repo.reference_lookup(repo, name) do
{:ok, type, target} ->
{:ok, %Reference{repo: repo, name: name, type: type, target: target}}
error ->
error
end
end
def lookup!(repo, name), do: lookup(repo, name) |> assert_ok
# A direct reference only ever resolves to itself
def resolve(ref = %Reference{type: :oid}) do
{:ok, ref}
end
def resolve(%Reference{repo: repo, name: name}) do
case :geef_repo.reference_resolve(repo, name) do
{:ok, resolved_name, target} ->
{:ok, %Reference{repo: repo, name: resolved_name, type: :oid, target: target}}
error ->
error
end
end
def resolve!(ref = %Reference{}), do: resolve(ref) |> assert_ok
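# Usage sketch (assumes `repo` is an open repository handle as used by
# :geef_repo; the ref name is illustrative):
#
#     {:ok, ref} = Geef.Reference.lookup(repo, "refs/heads/master")
#     {:ok, direct} = Geef.Reference.resolve(ref)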
def dwim(repo, name) do
case :geef_repo.reference_dwim(repo, name) do
{:ok, real_name, type, target} ->
{:ok, %Reference{repo: repo, name: real_name, type: type, target: target}}
error ->
error
end
end
def dwim!(repo, name), do: dwim(repo, name) |> assert_ok
def shorthand(%Reference{name: name}) do
:geef_ref.shorthand(name)
end
def shorthand(name) do
:geef_ref.shorthand(name)
end
def iterator(repo, regexp \\ :undefined) do
case :geef_ref.iterator(repo, regexp) do
{:ok, iter} ->
{:ok, Geef.Iterator.from_erl iter}
other ->
other
end
end
end
|
lib/geef/reference.ex
| 0.522446
| 0.400134
|
reference.ex
|
starcoder
|
defmodule JTD.TypeError do
@type t :: %__MODULE__{message: String.t()}
defexception message: nil
def message(%{message: message}) do
message
end
end
defmodule JTD.Schema do
@moduledoc """
Module to convert Map to JSON Type Definition schema struct.
"""
alias JTD.TypeError
@keywords [
:metadata,
:nullable,
:definitions,
:ref,
:type,
:enum,
:elements,
:properties,
:optionalProperties,
:additionalProperties,
:values,
:discriminator,
:mapping
]
@types [
"boolean",
"int8",
"uint8",
"int16",
"uint16",
"int32",
"uint32",
"float32",
"float64",
"string",
"timestamp"
]
@forms [
:ref,
:type,
:enum,
:elements,
:properties,
:optional_properties,
:additional_properties,
:values,
:discriminator,
:mapping
]
@valid_forms [
# Empty form
[],
# Ref form
[:ref],
# Type form
[:type],
# Enum form
[:enum],
# Elements form
[:elements],
# Properties form -- properties or optional properties or both, and
# never additional properties on its own
[:properties],
[:optional_properties],
[:properties, :optional_properties],
[:properties, :additional_properties],
[:optional_properties, :additional_properties],
[:properties, :optional_properties, :additional_properties],
# Values form
[:values],
# Discriminator form
[:discriminator, :mapping]
]
defstruct [
:metadata,
:nullable,
:definitions,
:ref,
:type,
:enum,
:elements,
:properties,
:optional_properties,
:additional_properties,
:values,
:discriminator,
:mapping
]
@type t :: %__MODULE__{
metadata: any,
nullable: any,
definitions: any,
ref: any,
type: any,
enum: any,
elements: any,
properties: any,
optional_properties: any,
additional_properties: any,
values: any,
discriminator: any,
mapping: any
}
@doc """
Convert given map to JTD.Schema.
"""
@spec from_map(map) :: JTD.Schema.t()
def from_map(map) when is_map(map) do
map
|> check_keywords!
{map, %{}}
|> atomize_keys
|> parse_metadata
|> parse_nullable
|> parse_definitions
|> parse_ref
|> parse_type
|> parse_enum
|> parse_elements
|> parse_properties
|> parse_optional_properties
|> parse_additional_properties
|> parse_values
|> parse_discriminator
|> parse_mapping
|> to_schema
end
def from_map(others) do
raise TypeError, message: "expected map, got: #{inspect(others)}"
end
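# Example sketch: a simple type-form schema round-trips from a plain map
# with string keys:
#
#     JTD.Schema.from_map(%{"type" => "string", "nullable" => true})
#     #=> %JTD.Schema{type: "string", nullable: true}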
@doc """
Verify converted schema.
"""
@spec verify(JTD.Schema.t()) :: JTD.Schema.t()
def verify(schema), do: verify(schema, schema)
@doc false
def verify(schema, root) do
[
{:metadata, [:map]},
{:nullable, [:boolean]},
{:definitions, [:map]},
{:ref, [:atom, :binary]},
{:type, [:atom, :binary]},
{:enum, [:list]},
{:elements, [:schema]},
{:properties, [:map]},
{:optional_properties, [:map]},
{:additional_properties, [:boolean]},
{:values, [:schema]},
{:discriminator, [:atom, :binary]},
{:mapping, [:map]}
]
|> Enum.each(fn opt -> check_type!(schema, opt) end)
schema
|> form_signature
|> check_schema_form!(schema)
schema
|> Map.get(:definitions)
|> check_definitions_is_only_in_root!(schema, root)
|> check_ref_form!
schema |> check_type_form!
schema |> check_enum_form!
schema |> check_properties_intersection!
schema |> check_mapping_form!
schema |> check_definitions_values(root)
schema |> check_elements_value(root)
schema |> check_properties_values(root)
schema |> check_optional_properties_values(root)
schema |> check_values_value(root)
schema |> check_mapping_values(root)
schema
end
@doc false
def form(%{ref: ref}) when not is_nil(ref), do: :ref
def form(%{type: type}) when not is_nil(type), do: :type
def form(%{enum: enum}) when not is_nil(enum), do: :enum
def form(%{elements: elements}) when not is_nil(elements), do: :elements
def form(%{properties: properties, optional_properties: optional_properties}) when (not is_nil(properties)) or (not is_nil(optional_properties)), do: :properties
def form(%{values: values}) when not is_nil(values), do: :values
def form(%{discriminator: discriminator}) when not is_nil(discriminator), do: :discriminator
def form(_), do: :empty
defp convert_key(s) do
s |> String.to_atom()
rescue
ArgumentError -> s
end
defp check_keywords!(schema) do
illegal_keywords = Map.keys(schema) |> Enum.map(&convert_key/1) |> Kernel.--(@keywords)
unless Enum.empty?(illegal_keywords) do
raise TypeError, message: "illegal schema keywords: #{inspect(illegal_keywords)}"
end
schema
end
defp atomize_keys({schema, accum}) do
schema = for {key, val} <- schema, into: %{}, do: {convert_key(key), val}
{schema, accum}
end
defp parse_metadata({schema, accum}) do
schema |> Map.get(:metadata) |> if do
{schema, schema |> Map.take([:metadata]) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_nullable({schema, accum}) do
schema |> Map.get(:nullable) |> is_nil |> if do
{schema, accum}
else
{schema, schema |> Map.take([:nullable]) |> Map.merge(accum)}
end
end
defp underscore(key) when is_atom(key) do
key |> Atom.to_string |> Macro.underscore |> String.to_atom
end
defp underscore(key) do
key |> Macro.underscore
end
defp recursively_parse_enumerable_schema(schema, keyname) do
%{underscore(keyname) => schema |> Map.get(keyname) |> Enum.map(fn {k, v} -> {k, from_map(v)} end) |> Map.new}
end
defp recursively_parse_schema(schema, keyname) do
%{underscore(keyname) => schema |> Map.get(keyname) |> from_map}
end
defp parse_definitions({schema, accum}) do
schema |> Map.get(:definitions) |> if do
{schema, schema |> recursively_parse_enumerable_schema(:definitions) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_ref({schema, accum}) do
{schema, schema |> Map.take([:ref]) |> Map.merge(accum)}
end
defp parse_type({schema, accum}) do
{schema, schema |> Map.take([:type]) |> Map.merge(accum)}
end
defp parse_enum({schema, accum}) do
{schema, schema |> Map.take([:enum]) |> Map.merge(accum)}
end
defp parse_elements({schema, accum}) do
schema |> Map.get(:elements) |> if do
{schema, schema |> recursively_parse_schema(:elements) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_properties({schema, accum}) do
schema |> Map.get(:properties) |> if do
{schema, schema |> recursively_parse_enumerable_schema(:properties) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_optional_properties({schema, accum}) do
schema |> Map.get(:optionalProperties) |> if do
{schema, schema |> recursively_parse_enumerable_schema(:optionalProperties) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_additional_properties({schema, accum}) do
schema |> Map.get(:additionalProperties) |> is_nil |> if do
{schema, accum}
else
additional_properties = %{additional_properties: Map.get(schema, :additionalProperties)}
{schema, Map.merge(accum, additional_properties)}
end
end
defp parse_values({schema, accum}) do
schema |> Map.get(:values) |> if do
{schema, schema |> recursively_parse_schema(:values) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp parse_discriminator({schema, accum}) do
{schema, schema |> Map.take([:discriminator]) |> Map.merge(accum)}
end
defp parse_mapping({schema, accum}) do
schema |> Map.get(:mapping) |> if do
{schema, schema |> recursively_parse_enumerable_schema(:mapping) |> Map.merge(accum)}
else
{schema, accum}
end
end
defp to_schema({_, accum}) do
struct(JTD.Schema, accum)
end
@doc false
def is_schema(term) do
is_struct(term, JTD.Schema)
end
defp check_type!(schema, {keyname, types}) do
form = schema |> Map.get(keyname)
if form do
types
|> Enum.map(fn t -> String.to_atom("is_#{Atom.to_string(t)}") end)
|> Enum.all?(fn test_fn -> !apply_test_fn(test_fn, form) end)
|> if do
raise TypeError, message: "#{Atom.to_string(keyname)} must be one of #{inspect(types)}, got: #{inspect(form)}"
end
end
end
defp apply_test_fn(:is_schema, form), do: apply(JTD.Schema, :is_schema, [form])
defp apply_test_fn(test_fn, form), do: apply(Kernel, test_fn, [form])
defp form_signature(schema) do
schema
|> Map.take(@forms)
|> Enum.filter(fn {_, v} -> v != nil end)
|> Map.new
|> Map.keys
|> MapSet.new
end
defp check_schema_form!(form, schema) do
@valid_forms
|> Enum.map(&MapSet.new/1)
|> Enum.any?(fn valid_form -> MapSet.equal?(valid_form, form) end)
|> unless do
raise ArgumentError, message: "invalid schema form: #{inspect(schema)}"
end
end
defp check_definitions_is_only_in_root!(nil, schema, root) when schema != root, do: {schema, root}
defp check_definitions_is_only_in_root!(definitions, schema, root) when schema != root do
raise ArgumentError, message: "non-root definitions: #{inspect(definitions)}"
end
defp check_definitions_is_only_in_root!(_, schema, root), do: {schema, root}
defp check_ref_form!({%{ref: nil}, _}), do: true
defp check_ref_form!({%{ref: ref}, %{definitions: nil} }) do
raise ArgumentError, message: "ref to non-existent definition: #{ref}"
end
defp check_ref_form!({%{ref: ref}, %{definitions: definitions} }) do
unless is_map_key(definitions, ref) do
raise ArgumentError, message: "ref to non-existent definition: #{ref}"
end
end
defp check_type_form!(%{type: nil}), do: true
defp check_type_form!(%{type: type}) do
@types
|> Enum.member?(type)
|> unless do
raise ArgumentError, message: "invalid type: #{type}"
end
end
defp check_enum_form!(%{enum: nil}), do: true
defp check_enum_form!(schema) when schema.enum == [] do
raise ArgumentError, message: "enum must not be empty: #{inspect(schema)}"
end
defp check_enum_form!(%{enum: enum}) do
enum |> Enum.all?(&is_binary/1) |> unless do
raise ArgumentError, message: "enum must contain only strings: #{inspect(enum)}"
end
original_length = enum |> length
unique_length = enum |> Enum.uniq |> length
if original_length != unique_length do
raise ArgumentError, message: "enum must not contain duplicates: #{inspect(enum)}"
end
end
defp check_properties_intersection!(%{properties: nil}), do: true
defp check_properties_intersection!(%{optional_properties: nil}), do: true
defp check_properties_intersection!(%{properties: properties, optional_properties: optional_properties}) do
properties_keys = properties |> Map.keys |> MapSet.new
optional_properties_keys = optional_properties |> Map.keys |> MapSet.new
intersection = MapSet.intersection(properties_keys, optional_properties_keys)
intersection
|> MapSet.to_list
|> Enum.empty?
|> unless do
raise ArgumentError, message: "properties and optionalProperties share keys: #{inspect(intersection)}"
end
end
defp mapping_value_must_be_properties_form(s) do
if form(s) != :properties do
raise ArgumentError, message: "mapping values must be of properties form: #{inspect(s)}"
end
end
defp mapping_value_must_not_be_nullable(s) do
s |> Map.get(:nullable) |> if do
raise ArgumentError, message: "mapping values must not be nullable: #{inspect(s)}"
end
end
defp mapping_value_must_not_contain_discriminator(s, keyname, discriminator) do
case s |> Map.get(keyname) do
nil ->
true
s -> s
|> Map.keys
|> Enum.member?(discriminator)
|> if do
raise ArgumentError, message: "mapping values must not contain discriminator (#{discriminator}): #{inspect(s)}"
end
end
end
defp check_mapping_form!(%{mapping: nil}), do: true
defp check_mapping_form!(%{discriminator: discriminator, mapping: mapping}) do
values = mapping |> Map.values
values |> Enum.each(&mapping_value_must_be_properties_form/1)
values |> Enum.each(&mapping_value_must_not_be_nullable/1)
values |> Enum.each(&mapping_value_must_not_contain_discriminator(&1, :properties, discriminator))
values |> Enum.each(&mapping_value_must_not_contain_discriminator(&1, :optional_properties, discriminator))
end
defp check_definitions_values(%{definitions: nil}, _), do: true
defp check_definitions_values(%{definitions: definitions}, root) do
definitions |> Map.values |> Enum.each(&verify(&1, root))
end
defp check_elements_value(%{elements: nil}, _), do: true
defp check_elements_value(%{elements: elements}, root) do
elements |> verify(root)
end
defp check_properties_values(%{properties: nil}, _), do: true
defp check_properties_values(%{properties: properties}, root) do
properties |> Map.values |> Enum.each(&verify(&1, root))
end
defp check_optional_properties_values(%{optional_properties: nil}, _), do: true
defp check_optional_properties_values(%{optional_properties: optional_properties}, root) do
optional_properties |> Map.values |> Enum.each(&verify(&1, root))
end
defp check_values_value(%{values: nil}, _), do: true
defp check_values_value(%{values: values}, root) do
values |> verify(root)
end
defp check_mapping_values(%{mapping: nil}, _), do: true
defp check_mapping_values(%{mapping: mapping}, root) do
mapping |> Map.values |> Enum.each(&verify(&1, root))
end
end
|
lib/schema.ex
| 0.78016
| 0.625381
|
schema.ex
|
starcoder
|
defmodule Finch.Telemetry do
@moduledoc """
Telemetry integration.
Unless specified, all times are in `:native` units.
Finch executes the following events:
### Request Start
`[:finch, :request, :start]` - Executed when `Finch.request/3` or `Finch.stream/5` is called.
#### Measurements
* `:system_time` - The system time.
#### Metadata
* `:name` - The name of the Finch instance.
* `:request` - The request (`Finch.Request`).
### Request Stop
`[:finch, :request, :stop]` - Executed after `Finch.request/3` or `Finch.stream/5` ended.
#### Measurements
* `:duration` - Time taken from the request start event.
#### Metadata
* `:name` - The name of the Finch instance.
* `:request` - The request (`Finch.Request`).
* `:result` - The result of the operation. In case of `Finch.stream/5` this is
`{:ok, acc} | {:error, Exception.t()}`, where `acc` is the accumulator result of the
reducer passed in `Finch.stream/5`. In case of `Finch.request/3` this is
`{:ok, Finch.Response.t()} | {:error, Exception.t()}`.
### Request Exception
`[:finch, :request, :exception]` - Executed when an exception occurs while executing
`Finch.request/3` or `Finch.stream/5`.
#### Measurements
* `:duration` - The time it took since the request start event before raising the exception.
#### Metadata
* `:name` - The name of the Finch instance.
* `:request` - The request (`Finch.Request`).
* `:kind` - The type of exception.
* `:reason` - Error description or error data.
* `:stacktrace` - The stacktrace.
### Queue Start
`[:finch, :queue, :start]` - Executed before checking out a connection from the pool.
#### Measurements
* `:system_time` - The system time.
#### Metadata
* `:pool` - The pool's PID.
* `:request` - The request (`Finch.Request`).
### Queue Stop
`[:finch, :queue, :stop]` - Executed after a connection is retrieved from the pool.
#### Measurements
* `:duration` - Time taken to check out a pool connection.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:pool` - The pool's PID.
* `:request` - The request (`Finch.Request`).
### Queue Exception
`[:finch, :queue, :exception]` - Executed if checking out a connection throws an exception.
#### Measurements
* `:duration` - The time it took since the queue start event before raising an exception.
#### Metadata
* `:request` - The request (`Finch.Request`).
* `:kind` - The type of exception.
* `:reason` - Error description or error data.
* `:stacktrace` - The stacktrace.
### Connect Start
`[:finch, :connect, :start]` - Executed before opening a new connection.
If a connection is being re-used this event will *not* be executed.
#### Measurements
* `:system_time` - The system time.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
### Connect Stop
`[:finch, :connect, :stop]` - Executed after a connection is opened.
#### Measurements
* `:duration` - Time taken to connect to the host.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
* `:error` - This value is optional. It includes any errors that occurred while opening the connection.
### Send Start
`[:finch, :send, :start]` - Executed before sending a request.
#### Measurements
* `:system_time` - The system time.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:request` - The request (`Finch.Request`).
### Send Stop
`[:finch, :send, :stop]` - Executed after a request is finished.
#### Measurements
* `:duration` - Time taken to make the request.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:request` - The request (`Finch.Request`).
* `:error` - This value is optional. It includes any errors that occurred while making the request.
### Receive Start
`[:finch, :recv, :start]` - Executed before receiving the response.
#### Measurements
* `:system_time` - The system time.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:request` - The request (`Finch.Request`).
### Receive Stop
`[:finch, :recv, :stop]` - Executed after a response has been fully received.
#### Measurements
* `:duration` - Duration to receive the response.
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:request` - The request (`Finch.Request`).
* `:status` - The response status (`Mint.Types.status()`).
* `:headers` - The response headers (`Mint.Types.headers()`).
* `:error` - This value is optional. It includes any errors that occurred while receiving the response.
### Receive Exception
`[:finch, :recv, :exception]` - Executed if an exception is thrown before the response has
been fully received.
#### Measurements
* `:duration` - The time it took before raising an exception.
#### Metadata
* `:request` - The request (`Finch.Request`).
* `:kind` - The type of exception.
* `:reason` - Error description or error data.
* `:stacktrace` - The stacktrace.
### Reused Connection
`[:finch, :reused_connection]` - Executed if an existing connection is reused. There are no measurements provided with this event.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
### Conn Max Idle Time Exceeded
`[:finch, :conn_max_idle_time_exceeded]` - Executed if a connection was discarded because the `conn_max_idle_time` had been reached.
#### Measurements
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
### Pool Max Idle Time Exceeded
`[:finch, :pool_max_idle_time_exceeded]` - Executed if a pool was terminated because the `pool_max_idle_time` has been reached. There are no measurements provided with this event.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
### Max Idle Time Exceeded (Deprecated)
`[:finch, :max_idle_time_exceeded]` - Executed if a connection was discarded because the `max_idle_time` had been reached.
*Deprecated:* use `:conn_max_idle_time_exceeded` event instead.
#### Measurements
* `:idle_time` - Elapsed time since the connection was last checked in or initialized.
#### Metadata
* `:scheme` - The scheme used in the connection, either `http` or `https`.
* `:host` - The host address.
* `:port` - The port to connect on.
"""
@doc false
# Emits a `start` telemetry event and returns the start time
def start(event, meta \\ %{}, extra_measurements \\ %{}) do
start_time = System.monotonic_time()
:telemetry.execute(
[:finch, event, :start],
Map.merge(extra_measurements, %{system_time: System.system_time()}),
meta
)
start_time
end
@doc false
# Emits a stop event.
def stop(event, start_time, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
:telemetry.execute(
[:finch, event, :stop],
measurements,
meta
)
end
@doc false
def exception(event, start_time, kind, reason, stack, meta \\ %{}, extra_measurements \\ %{}) do
end_time = System.monotonic_time()
measurements = Map.merge(extra_measurements, %{duration: end_time - start_time})
meta =
meta
|> Map.put(:kind, kind)
|> Map.put(:reason, reason)
|> Map.put(:stacktrace, stack)
:telemetry.execute([:finch, event, :exception], measurements, meta)
end
@doc false
# Used for reporting generic events
def event(event, measurements, meta) do
:telemetry.execute([:finch, event], measurements, meta)
end
@doc false
# Used to easily create :start, :stop, :exception events.
def span(event, start_metadata, fun) do
:telemetry.span(
[:finch, event],
start_metadata,
fun
)
end
end
|
lib/finch/telemetry.ex
| 0.921012
| 0.734477
|
telemetry.ex
|
starcoder
|
defmodule Day15.Redo do
def part1(file_name \\ "test.txt") do
file_name
|> parse()
|> dijkstra()
end
def part2(file_name \\ "test.txt") do
file_name
|> parse()
|> add_mins()
|> add_direction()
|> add_final_step(24)
|> expand_grid()
|> dijkstra()
end
def pop(queue) do
element =
queue
|> MapSet.to_list()
|> Enum.min_by(fn {distance, _point} -> distance end)
{element |> elem(1), MapSet.delete(queue, element)}
end
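# Note: the MapSet doubles as a makeshift priority queue. pop/1 scans the
# whole set for the minimum-distance entry, so each extraction is O(n)
# rather than the O(log n) of a proper heap; fine for these input sizes.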
def dijkstra(%{grid: grid, costs: costs, queue: queue, visited: visited, target: target} = state) do
queue_empty? = MapSet.equal?(queue, MapSet.new())
if queue_empty? do
costs[target]
else
{current_point, queue} = pop(queue)
new_visited = MapSet.put(visited, current_point)
points_with_distances =
current_point
|> neighbors()
|> Enum.reduce(%{}, fn neighbor, acc ->
distance = Map.get(grid, neighbor)
if is_nil(distance) do
acc
else
Map.put(acc, neighbor, distance)
end
end)
{new_queue, new_costs} =
Enum.reduce(points_with_distances, {queue, costs}, fn {neighbor, distance}, {acc_queue, acc_costs} ->
if MapSet.member?(new_visited, neighbor) do
{acc_queue, acc_costs}
else
old_cost = costs[neighbor]
new_cost = costs[current_point] + distance
if new_cost < old_cost do
{MapSet.put(acc_queue, {new_cost, neighbor}), Map.put(acc_costs, neighbor, new_cost)}
else
{acc_queue, acc_costs}
end
end
end)
dijkstra(%{state | grid: grid, costs: new_costs, queue: new_queue, visited: new_visited})
end
end
def dijkstra(%{grid: grid, target: target}) do
origin = {0, 0}
costs = grid |> Map.keys |> Enum.reduce(%{}, fn key, acc -> Map.put(acc, key, :infinity) end) |> Map.put(origin, 0)
visited = MapSet.new([origin])
queue = MapSet.new([{0, origin}])
dijkstra(%{grid: grid, costs: costs, queue: queue, visited: visited, target: target})
end
def neighbors({x, y}) do
[{x, y - 1}, {x + 1, y}, {x, y + 1}, {x - 1, y}]
end
def add_mins(%{grid: grid, target: {max_x, max_y}}) do
%{grid: grid, mins: {0, 0}, maxs: {max_x, max_y}}
end
def add_direction(state) do
Map.put(state, :direction, :right)
end
def add_final_step(state, final_step) do
Map.put(state, :final_step, final_step)
end
def inc(9), do: 1
def inc(value), do: value + 1
def dec(1), do: 9
def dec(value), do: value - 1
def opposite_direction(:left), do: :right
def opposite_direction(:right), do: :left
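# expand_grid/2 builds the 5x5 tiled grid for part 2, one tile per step
# (24 steps after the original tile). Tiles are laid out in serpentine
# order: a row is filled left-to-right, then the next right-to-left, with
# every rem(step, 5) == 0 step starting a new row below. Copied cells are
# incremented (or decremented when moving left) with wrap-around at 9 via
# inc/1 and dec/1 above.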
def expand_grid(state, step \\ 1)
def expand_grid(%{grid: grid, maxs: maxs, final_step: final_step}, step) when step > final_step do
%{grid: grid, target: maxs}
end
def expand_grid(%{grid: grid, mins: {min_x, min_y}, maxs: {max_x, max_y}, direction: direction} = state, step) when rem(step, 5) == 0 do
last_tile = last_tile(state)
diff = max_y - min_y
new_grid =
Enum.reduce(last_tile, grid, fn {{x, y}, value}, acc ->
Map.put(acc, {x, y + diff + 1}, inc(value))
end)
new_min_y = min_y + diff + 1
new_max_y = new_min_y + diff
new_state = %{state | grid: new_grid, mins: {min_x, new_min_y}, maxs: {max_x, new_max_y}, direction: opposite_direction(direction)}
expand_grid(new_state, step + 1)
end
def expand_grid(%{grid: grid, mins: {min_x, min_y}, maxs: {max_x, max_y}, direction: :right} = state, step) do
last_tile = last_tile(state)
diff = max_x - min_x
new_grid =
Enum.reduce(last_tile, grid, fn {{x, y}, value}, acc ->
Map.put(acc, {x + diff + 1, y}, inc(value))
end)
new_min_x = max_x + 1
new_max_x = new_min_x + diff
new_state = %{state | grid: new_grid, mins: {new_min_x, min_y}, maxs: {new_max_x, max_y}, direction: :right}
expand_grid(new_state, step + 1)
end
def expand_grid(%{grid: grid, mins: {min_x, min_y}, maxs: {max_x, max_y}, direction: :left} = state, step) do
last_tile = last_tile(state)
diff = max_x - min_x
new_grid =
Enum.reduce(last_tile, grid, fn {{x, y}, value}, acc ->
Map.put(acc, {x - diff - 1, y}, dec(value))
end)
new_max_x = min_x - 1
new_min_x = new_max_x - diff
new_state = %{state | grid: new_grid, mins: {new_min_x, min_y}, maxs: {new_max_x, max_y}, direction: :left}
expand_grid(new_state, step + 1)
end
def last_tile(%{grid: grid, mins: {min_x, min_y}, maxs: {max_x, max_y}}) do
for y <- min_y..max_y,
x <- min_x..max_x,
do: {{x,y}, Map.get(grid, {x, y})},
into: Map.new()
end
def target(grid) do
grid
|> Enum.max_by(fn {{x, y}, _value} -> {x, y} end)
|> elem(0)
end
def parse(file_name) do
list =
"priv/" <> file_name
|> File.read!()
|> String.split("\n", trim: true)
|> Enum.map(&to_charlist/1)
grid =
for {lines, y} <- Enum.with_index(list),
{value, x} <- Enum.with_index(lines),
do: {{x, y}, value - ?0},
into: Map.new()
%{grid: grid, target: target(grid)}
end
end
|
jpcarver+elixir/day15/lib/day15.redo.ex
| 0.501953
| 0.450601
|
day15.redo.ex
|
starcoder
|
module List
module Behavior
% Returns a new list built by concatenating the given number of copies
% of the original list. If mult is a string, the behavior is the same as join.
%
% ## Examples
%
% [1] * 3 % => [1,1,1]
% [1,2] * 2 % => [1,2,1,2]
% [1,2,3] * "," % => "1,2,3"
%
def *(mult)
if mult.__module_name__ == 'String::Behavior
join(mult)
else
duplicate(mult, self, [])
end
end
% Count the number of times a given item occurs in the list.
%
% ## Examples
%
% [1,1,2,3,3].count(1) % => 2
def count(item)
count item, self, 0
end
% Returns true if all items in the list evaluate to true according to the given function.
%
% ## Examples
%
% [1,2,3].all? -> (i) i rem 2 == 0 % => false
% [2,4,6].all? -> (i) i rem 2 == 0 % => true
%
def all?(function)
Erlang.lists.all(function, self)
end
% Returns true if at least one item in the list evaluates to true according
% to the given function.
%
% ## Examples
%
% [4,5,6].any? -> (i) i == 2 % => false
% [1,2,3].any? -> (i) i == 2 % => true
%
def any?(function)
Erlang.lists.any(function, self)
end
% Push a new element to the list.
%
% ## Examples
%
% [1,2,3].push 4 % => [1,2,3,4]
%
def push(item)
Erlang.lists.append(self, [item])
end
% Searches the list for a tuple whose nth element compares equal to key.
% Returns the tuple if such a tuple is found, otherwise false. The list
% must contain only tuples, and n is 0..(tuple.size - 1).
%
% ## Examples
%
% ['foo/1, 'bar/2].keyfind('foo, 0) % => {'foo, 1}
% ['foo/1, 'bar/2].keyfind('baz, 0) % => false
%
def keyfind(key, n)
Erlang.lists.keyfind(key, n + 1, self)
end
% Returns true if the list is empty.
def empty?
self == []
end
% "Zips" two lists of equal length into one list of two-tuples, where the
% first element of each tuple is taken from the first list and the second
% element is taken from corresponding element in the second list.
%
% Raises an error if the list sizes do not match.
%
% ## Examples
%
% ['foo, 'bar].zip [1,2] % => [{'foo,1}, {'bar,2}]
%
def zip(list)
Erlang.lists.zip(self, list)
end
% Does the opposite of `zip`.
%
% ## Examples
%
% {['foo, 'bar], [1,2]} = [{'foo, 1}, {'bar, 2}].unzip
%
def unzip
Erlang.lists.unzip(self)
end
% Combine the elements of two lists of equal length into one list using
% a function.
%
% Raises an error if the list sizes do not match.
%
% ## Examples
%
% [5,7,9] = [1,2,3].zipwith([4,5,6], -> (x,y) x + y)
%
def zipwith(list, function)
Erlang.lists.zipwith(function, self, list)
end
% Returns a new list with the contents of the
% current list and the other list.
%
% ## Examples
%
% [1,2,3] + [4,5,6] % => [1,2,3,4,5,6]
% [1,2,3] + [1,2,3] % => [1,2,3,1,2,3]
%
def +(another)
Erlang.lists.append(self, another)
end
alias_local '+, 'append, 1
% Returns the sum of the elements in the list.
% Returns 0 if the list is empty.
%
% ## Examples
%
% [1,2,3].sum % => 6
%
def sum
Erlang.lists.sum(self)
end
% Combine all elements of the lists by applying the given function, starting
% with the given accumulator. The list is traversed from the left.
%
% ## Examples
%
% [1,2,3].foldl(0, -> (e, acc) e + acc) % => 6
% ["foo", "bar", "baz"].foldl("", -> (e, acc) e + acc) % => "bazbarfoo"
%
def foldl(acc, function)
Erlang.lists.foldl(function, acc, self)
end
% Combine all elements of the lists by applying the given function, starting
% with the given accumulator. The list is traversed from the right.
%
% ## Examples
%
% [1,2,3].foldl(0, -> (e, acc) e + acc) % => 6
% ["foo", "bar", "baz"].foldr("", -> (e, acc) e + acc) % => "foobarbaz"
%
def foldr(acc, function)
Erlang.lists.foldr(function, acc, self)
end
% Retrieves an item from the list. Negative indexes are allowed
% and retrieve elements counting from the end of the list.
%
% ## Examples
%
% [1,2,3][0] % => 1
% [1,2,3][1] % => 2
% [1,2,3][2] % => 3
% [1,2,3][3] % => nil
%
% [1,2,3][-1] % => 3
% [1,2,3][-2] % => 2
% [1,2,3][-3] % => 1
% [1,2,3][-43] % => nil
%
def [](number)
if number < 0
brackets(-1 * (1 + number), Erlang.lists.reverse(self))
else
brackets(number, self)
end
end
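% Note on the negative branch above: -1 * (1 + number) maps -1 to 0, -2 to 1,
% and so on, so indexing into the reversed list yields the element counted
% from the end. For example [1,2,3][-2] becomes brackets(1, [3,2,1]) % => 2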
% Returns a sublist starting at start; as the examples show, the result
% contains len + 1 elements. Negative start indices count backward from
% the end of the list.
%
% ## Examples
%
% [1,2,3,4,5][0,2] % => [1,2,3]
% [1,2,3,4,5][-3,3] % => [3,4,5]
%
def [](start, len)
if start < 0
Erlang.lists.sublist(self, start + 1 + length, len + 1)
else
Erlang.lists.sublist(self, start + 1, len + 1)
end
end
% Calls the function once for each element in the list.
%
% Returns a new list containing the values returned by the function.
%
% [1,2,3].map -> (x) x + 1 % => [2,3,4]
%
def map(function)
Erlang.lists.map(function, self)
end
alias_local 'map, 'collect, 1
% Calls function once for each element in the list, passing that
% element as a parameter.
%
% Returns self.
%
% ## Examples
%
% [1,2,3].each -> (x) do_something(x)
% [1,2,3].each do (x)
% do_something_else(x)
% end
%
def each(function)
Erlang.lists.foreach(function, self)
self
end
% Returns the head of the list. Raises 'badarg error if the list
% is empty.
%
% ## Examples
%
% [1,2].head % => 1
% [].head % => raises 'badarg
%
def head
Erlang.hd(self)
end
% Similar to `head`, but returns nil if the list is empty.
%
% ## Examples
%
% [1,2].first % => 1
% [].first % => nil
%
def first
case self
match []
nil
match [x|_]
x
end
end
% Returns the last element of the list.
%
% ## Examples
%
% [1,2,3].last % => 3
%
def last
case self
match []
nil
else
Erlang.lists.last(self)
end
end
% Flattens the given list. If the list being flattened is made of lists
% one level deep, use flatten_lists instead as it is optimized for such cases.
%
% ## Examples
%
% [[1],[[2]],3].flatten % => [1,2,3]
%
def flatten
Erlang.lists.flatten(self)
end
% Returns the first element that is lesser than or equal to all other
% elements. Raises 'function_clause error if the list is empty.
%
% ## Examples
%
% [2,1,5,3,4].min % => 1
% ["foo", "bar", "baz"].min % => "bar"
%
def min
Erlang.lists.min(self)
end
% Returns the first element that is greater than or equal to all other
% elements. Raises 'function_clause error if the list is empty.
%
% ## Examples
%
% [2,1,5,3,4].max % => 5
% ["foo", "bar", "baz"].max % => "foo"
%
def max
Erlang.lists.max(self)
end
% Receives a list of lists and flatten them one level deep. If one of the
% elements of the list is not a list, raises an error. This has much better
% performance than the original flatten.
%
% ## Examples
%
% [[1],[2],[3]].flatten_lists % => [1,2,3]
% [[1],[[2]],[3]].flatten_lists % => [1,[2],3]
%
def flatten_lists
Erlang.lists.append(self)
end
% Removes all duplicated elements from a list.
def uniq
uniq(self, [])
end
% Returns the tail of the list. Raises 'badarg error if the list
% is empty.
%
% ## Examples
%
% [1,2].tail % => [2]
%
def tail
Erlang.tl(self)
end
% Returns true if the given item exists in the list.
%
% ## Examples
%
% [1,2,3].member?(1) % => true
% [1,2,3].include?(4) % => false
%
def member?(item)
Erlang.lists.member(item, self)
end
alias_local 'member?, 'include?, 1
% Returns a list with items matching the given filter function
%
% ## Examples
%
% [1,2,3].filter -> (x) x / 2 == 1 % => [2]
% [1,2,3,4].filter -> (x) [3,4].include?(x) % => [3,4]
%
def filter(function)
Erlang.lists.filter(function, self)
end
alias_local 'filter, 'select, 1
% Returns a list with its elements in reverse order.
%
% ## Examples
%
% [1,2,3].reverse % => [3,2,1]
%
def reverse
Erlang.lists.reverse(self)
end
% Deletes an item from the list. If there is more than one
% occurence of the item in the list just the first one is deleted.
%
% ## Examples
%
% [1,2,3].delete(2) % => [1,3]
% [1,2,1,3].delete(1) % => [2,1,3]
%
def delete(item)
Erlang.lists.delete(item, self)
end
% Deletes all occurrences of the given item from the list. The
% implementation goes through a set, so element order is not preserved.
%
% ## Examples
%
% [1,2,1,3].delete_all(1) % => [3,2]
%
def delete_all(item)
Erlang.sets.to_list(Erlang.sets.del_element(item, Erlang.sets.from_list(self)))
end
% Returns a string created by converting each item of the list
% to a string, separated by the given string
%
% ## Examples
%
% [1,2,3].join(",") % => "1,2,3"
% ['foo, 'bar].join("_") % => "foo_bar"
%
def join(string)
strings = map -> (x) x.to_s.to_char_list
Erlang.string.join(strings, string.to_char_list).to_bin
end
% Returns the sorted list
%
% ## Examples
%
% [4,1,3,2,4].sort % => [1,2,3,4,4]
% ["foo", "bar", "baz"] % => ["bar", "baz", "foo"]
%
def sort
Erlang.lists.sort(self)
end
% Takes elements from the list while the function returns true.
%
% ## Examples
%
% [1,2,3,4,5].takewhile -> (x) x < 3 % => [1,2]
%
def takewhile(function)
Erlang.lists.takewhile(function, self)
end
% Drops elements from the list while the function returns true.
%
% ## Examples
%
% [1,2,3,4,5].dropwhile -> (x) x < 3 % => [3,4,5]
%
def dropwhile(function)
Erlang.lists.dropwhile(function, self)
end
% Split the list into two lists, where the first contains N elements
% and the second the rest.
%
% Raises an error if the position is out of bounds.
%
% ## Examples
%
% [1,2,3,4,5].split(3) % => {[1,2,3], [4,5]}
%
def split(n)
Erlang.lists.split(n, self)
end
% Returns a new list with item inserted at position n.
%
% Raises an error if the position is out of bounds.
%
% ## Examples
%
% [1,2,3,4,5].insert(0, 2) % => [1,2,0,3,4,5]
%
def insert(item, n)
{h,t} = split(n)
h + [item] + t
end
% Partition the list into two lists where the first list contains all
% elements for which the given function returns true, and the second
% when the function returns false.
%
% ## Examples
%
% [1,2,3,4,5,6].partition -> (x) x rem 2 == 0 % => {[2,4,6], [1,3,5]}
%
def partition(function)
Erlang.lists.partition(function, self)
end
% Partition the list into two lists according to the function. This
% is the same as doing {list.takewhile(function), list.dropwhile(function)}
%
% ## Examples
%
% [1,2,3,4,5,6].splitwith -> (x) x rem 2 == 1 % => {[1], [2,3,4,5,6]}
%
def splitwith(function)
Erlang.lists.splitwith(function, self)
end
def to_list
self
end
def to_bin
Erlang.list_to_binary(self)
end
def to_char_list
self
end
def inspect
tail = list_tail(self)
if tail == []
"[#{inspect_join(self)}]"
else
"[#{inspect_join(copy_without_tail(self, []))}|#{tail}]"
end
end
% Returns the list length. Also aliased to size.
%
% ## Examples
%
% [1,2,3].length % => 3
% [].size % => 0
%
def length
Erlang.length(self)
end
alias_local 'length, 'size, 0
% Equivalent to list.flatten.length but more efficient
%
% ## Examples
%
% [1, [2,3]].flatlength % => 3
% [[1,2,3]].flatlength % => 3
%
def flatlength
Erlang.lists.flatlength(self)
end
% Returns true if the list is proper.
%
% ## Examples
%
% [1,2].proper? % => true
% [1|[2]].proper? % => true
% [1|2].proper? % => false
%
def proper?
list_tail(self) == []
end
private
def count(item, [item|t], counter)
count item, t, 1 + counter
end
def count(item, [_|t], counter)
count item, t, counter
end
def count(_item, [], counter)
counter
end
def brackets(0, [h|_])
h
end
def brackets(_, [])
nil
end
def brackets(n, [_|t]) when n > 0
brackets(n - 1, t)
end
def uniq([h|t], acc)
case Erlang.lists.member(h, acc)
match true
uniq(t, acc)
match false
uniq(t, [h|acc])
end
end
def uniq([], acc)
Erlang.lists.reverse(acc)
end
def inspect_join(list)
strings = list.map -> (x) x.inspect.to_char_list
Erlang.string.join(strings, [$,]).to_bin
end
def list_tail([_|t]) list_tail(t); end
def list_tail([]) []; end
def list_tail(object) object; end
def copy_without_tail([h|t], acc) copy_without_tail(t, [h|acc]); end
def copy_without_tail(_, acc) acc.reverse; end
def duplicate(0, _, list)
list.flatten_lists
end
def duplicate(n, term, list) when n > 0
duplicate(n - 1, term, [term|list])
end
end
end
|
lib/list.ex
| 0.745954
| 0.666883
|
list.ex
|
starcoder
|
defmodule Steroids.Utils do
@spec boolMerge(list) :: map
def boolMerge([]), do: %{}
# A single clause with a non-:must condition still needs its bool context
def boolMerge([{:query, field, condition, query}]) when condition != :must, do:
context(:query, field, condition, query)
def boolMerge([{:filter, field, condition, query}]) when condition != :must, do:
context(:filter, field, condition, query)
def boolMerge(queries) when length(queries) == 1 do
item = List.last(queries)
%{ elem(item, 1) => elem(item, 3) }
end
def boolMerge(queries) do
Enum.reduce(queries, %{}, fn
({:query, field, condition, query}, acc) ->
Map.merge(acc, context(:query, field, condition, query), &customizer(&1, &2, &3))
({:filter, field, condition, query}, acc) ->
Map.merge(acc, context(:filter, field, condition, query), &customizer(&1, &2, &3))
({:minimum_should_match, :filter, count}, acc) ->
Map.merge(acc, %{bool: %{ filter: %{ bool: %{ minimum_should_match: count } } } })
({:minimum_should_match, :query, count}, acc) ->
Map.merge(acc, %{bool: %{ minimum_should_match: count } })
end)
end
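# Worked sketch (field and query values are illustrative): a single clause
# with a non-:must condition keeps its bool wrapper:
#
#   boolMerge([{:query, :match, :should, %{title: "elixir"}}])
#   #=> %{bool: %{should: [%{match: %{title: "elixir"}}]}}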
@spec buildClause(list) :: map
def buildClause(opts \\ []) do
buildClause(Keyword.get(opts, :field, %{}), Keyword.get(opts, :value), Keyword.get(opts, :args, %{}))
end
defp buildClause(%{} = field, nil, opts), do: Map.merge(field, opts)
defp buildClause(field, nil, opts), do: Map.merge(%{field: field }, opts)
defp buildClause(field, value, opts), do: Map.merge(%{field => value}, opts)
defp customizer(_k, l1, l2) when is_list(l1) and is_list(l2), do: l1 ++ l2
defp customizer(_k, l1, l2) when is_map(l1) and is_map(l2), do: Map.merge(l1, l2, &customizer(&1, &2, &3))
defp customizer(_k, l1, _l2), do: l1
# Map formatters for setting the context of the query body
defp context(:query, field, condition, query), do:
%{bool: %{condition => List.wrap(%{ field => query})}}
defp context(:filter, field, condition, query), do:
%{bool: %{ filter: %{ bool: %{ condition => List.wrap(%{ field => query})}}}}
end
|
lib/utils.ex
| 0.697712
| 0.501526
|
utils.ex
|
starcoder
|
defmodule BSV.MerkleProof do
@moduledoc """
The MerkleProof module implements the [BSV TCS Merkle proof standard](https://tsc.bitcoinassociation.net/standards/merkle-proof-standardised-format/).
Merkle proofs are fundamental to the Simplified Payment Verification (SPV)
model that underpins bitcoin scaling. Assuming we have stored block headers
from the blockchain, given a transaction and `t:BSV.MerkleProof.t/0`, we can
verify the transaction is contained in a block without downloading the entire
block.
The TSC Merkle proof standard describes a way of serialising a Merkle proof
in a binary or json format, so network participants can share the proofs in
a standardised format.
"""
use Bitwise
alias BSV.{BlockHeader, Hash, Serializable, Tx, VarInt}
import BSV.Util, only: [decode: 2, encode: 2]
defstruct flags: 0, index: nil, subject: nil, target: nil, nodes: []
@typedoc "Merkle proof struct"
@type t() :: %__MODULE__{
flags: integer(),
index: non_neg_integer(),
subject: Tx.t() | Tx.hash(),
target: BlockHeader.t() | binary(),
nodes: list(Tx.hash())
}
defguard is_txid?(flags) when (flags &&& 0x01) == 0
defguard is_tx?(flags) when (flags &&& 0x01) == 1
defguard targets_block_hash?(flags) when (flags &&& (0x04 ||| 0x02)) == 0
defguard targets_block_header?(flags) when (flags &&& (0x04 ||| 0x02)) == 2
defguard targets_merkle_root?(flags) when (flags &&& (0x04 ||| 0x02)) == 4
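# Flag bit layout encoded by the guards above: bit 0 selects whether the
# subject is a full transaction (1) or just a txid (0), while bits 1-2
# select the target kind: block hash (0), block header (2) or merkle
# root (4).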
@doc """
Parses the given binary into a `t:BSV.MerkleProof.t/0`.
Returns the result in an `:ok` / `:error` tuple pair.
## Options
The accepted options are:
* `:encoding` - Optionally decode the binary with either the `:base64` or `:hex` encoding scheme.
"""
@spec from_binary(binary(), keyword()) :: {:ok, t()} | {:error, term()}
def from_binary(data, opts \\ []) when is_binary(data) do
encoding = Keyword.get(opts, :encoding)
with {:ok, data} <- decode(data, encoding),
{:ok, merkle_proof, _rest} <- Serializable.parse(%__MODULE__{}, data)
do
{:ok, merkle_proof}
end
end
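# Usage sketch (`hex_proof` is an assumed hex-encoded proof, not real data):
#
#     {:ok, proof} = BSV.MerkleProof.from_binary(hex_proof, encoding: :hex)
#     root = BSV.MerkleProof.calc_merkle_root(proof)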
@doc """
Parses the given binary into a `t:BSV.MerkleProof.t/0`.
As `from_binary/2` but returns the result or raises an exception.
"""
@spec from_binary!(binary(), keyword()) :: t()
def from_binary!(data, opts \\ []) when is_binary(data) do
case from_binary(data, opts) do
{:ok, merkle_proof} ->
merkle_proof
{:error, error} ->
raise BSV.DecodeError, error
end
end
@doc """
Calculates and returns the result of hashing all of the transaction hashes
contained in the Merkle proof into a tree-like structure known as a Merkle tree.
"""
@spec calc_merkle_root(t()) :: binary()
def calc_merkle_root(%__MODULE__{index: index, subject: %Tx{} = tx, nodes: nodes}),
do: hash_nodes(Tx.get_hash(tx), index, nodes)
def calc_merkle_root(%__MODULE__{index: index, subject: tx_hash, nodes: nodes})
when is_binary(tx_hash),
do: hash_nodes(tx_hash, index, nodes)
# Iterates over and hashes the tx hashes
defp hash_nodes(hash, _index, []), do: hash
defp hash_nodes(hash, index, ["*" | rest]) when rem(index, 2) == 0,
do: hash_nodes(hash, index, [hash | rest])
defp hash_nodes(_hash, index, ["*" | _rest]) when rem(index, 2) == 1,
do: raise "invalid nodes"
defp hash_nodes(hash, index, [node | rest]) when rem(index, 2) == 0 do
Hash.sha256_sha256(hash <> node)
|> hash_nodes(floor(index / 2), rest)
end
defp hash_nodes(hash, index, [node | rest]) when rem(index, 2) == 1 do
Hash.sha256_sha256(node <> hash)
|> hash_nodes(floor(index / 2), rest)
end
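# Worked sketch: index parity picks the concatenation order at each level.
# With index 2 and nodes [a, b], index 2 is even so the subject hash goes on
# the left first, then floor(2 / 2) = 1 is odd so the intermediate hash goes
# on the right:
#
#   hash_nodes(h, 2, [a, b])
#   #=> sha256_sha256(b <> sha256_sha256(h <> a))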
@doc """
Serialises the given `t:BSV.MerkleProof.t/0` into a binary.
## Options
The accepted options are:
* `:encoding` - Optionally encode the binary with either the `:base64` or `:hex` encoding scheme.
"""
@spec to_binary(t()) :: binary()
def to_binary(%__MODULE__{} = merkle_proof, opts \\ []) do
encoding = Keyword.get(opts, :encoding)
merkle_proof
|> Serializable.serialize()
|> encode(encoding)
end
defimpl Serializable do
defguard is_txid?(flags) when (flags &&& 0x01) == 0
defguard is_tx?(flags) when (flags &&& 0x01) == 1
defguard targets_block_hash?(flags) when (flags &&& (0x04 ||| 0x02)) == 0
defguard targets_block_header?(flags) when (flags &&& (0x04 ||| 0x02)) == 2
defguard targets_merkle_root?(flags) when (flags &&& (0x04 ||| 0x02)) == 4
@impl true
def parse(merkle_proof, data) do
with <<flags::integer, data::binary>> <- data,
{:ok, index, data} <- VarInt.parse_int(data),
{:ok, subject, data} <- parse_subject(data, flags),
{:ok, target, data} <- parse_target(data, flags),
{:ok, nodes_num, data} <- VarInt.parse_int(data),
{:ok, nodes, rest} <- parse_nodes(data, nodes_num)
do
{:ok, struct(merkle_proof, [
flags: flags,
index: index,
subject: subject,
target: target,
nodes: nodes
]), rest}
else
{:error, error} ->
{:error, error}
_data ->
{:error, :invalid_merkle_proof}
end
end
@impl true
def serialize(%{flags: flags, nodes: nodes} = merkle_proof) do
index = VarInt.encode(merkle_proof.index)
tx_or_id = serialize_subject(merkle_proof.subject)
target = serialize_target(merkle_proof.target)
nodes_data = Enum.reduce(nodes, VarInt.encode(length(nodes)), &serialize_node/2)
<<
flags::integer,
index::binary,
tx_or_id::binary,
target::binary,
nodes_data::binary
>>
end
# Parses the tx or tx hash as per the given flags
defp parse_subject(data, flags) when is_tx?(flags) do
with {:ok, rawtx, data} <- VarInt.parse_data(data),
{:ok, tx} <- Tx.from_binary(rawtx)
do
{:ok, tx, data}
end
end
defp parse_subject(data, flags) when is_txid?(flags) do
with <<txid::binary-size(32), data::binary>> <- data do
{:ok, txid, data}
end
end
  # Parses the target as per the given flags
defp parse_target(data, flags) when targets_block_header?(flags) do
with {:ok, block_header, data} <- Serializable.parse(%BlockHeader{}, data) do
{:ok, block_header, data}
end
end
defp parse_target(data, _flags) do
<<hash::binary-size(32), data::binary>> = data
{:ok, hash, data}
end
# Parses the list of nodes
defp parse_nodes(data, num, nodes \\ [])
defp parse_nodes(data, num, nodes) when length(nodes) == num,
do: {:ok, Enum.reverse(nodes), data}
defp parse_nodes(<<0, hash::binary-size(32), data::binary>>, num, nodes),
do: parse_nodes(data, num, [hash | nodes])
defp parse_nodes(<<1, data::binary>>, num, nodes),
do: parse_nodes(data, num, ["*" | nodes])
  # Serialises the tx or tx hash
defp serialize_subject(%Tx{} = tx) do
tx
|> Tx.to_binary()
|> VarInt.encode_binary()
end
defp serialize_subject(tx_hash), do: tx_hash
# Serialise the target header or hash
defp serialize_target(%BlockHeader{} = header),
do: BlockHeader.to_binary(header)
defp serialize_target(target), do: target
  # Serialises the list of nodes
defp serialize_node("*", data), do: data <> <<1>>
defp serialize_node(<<hash::binary-size(32)>>, data),
do: data <> <<0, hash::binary>>
end
end
# source: lib/bsv/merkle_proof.ex
defmodule Appsignal.TransactionBehaviour do
@callback start(String.t(), atom) :: Appsignal.Transaction.t()
@callback start_event() :: Appsignal.Transaction.t()
@callback finish_event(Appsignal.Transaction.t() | nil, String.t(), String.t(), any, integer) ::
Appsignal.Transaction.t()
@callback finish() :: :sample | :no_sample
@callback finish(Appsignal.Transaction.t() | nil) :: :sample | :no_sample
@callback complete() :: :ok
@callback complete(Appsignal.Transaction.t() | nil) :: :ok
@callback set_error(Appsignal.Transaction.t() | nil, String.t(), String.t(), any) ::
Appsignal.Transaction.t()
@callback set_action(String.t()) :: Appsignal.Transaction.t()
@callback set_action(Appsignal.Transaction.t() | nil, String.t()) :: Appsignal.Transaction.t()
@callback set_sample_data(Appsignal.Transaction.t() | nil, String.t(), any) ::
Appsignal.Transaction.t()
if Appsignal.plug?() do
@callback set_request_metadata(Appsignal.Transaction.t() | nil, Plug.Conn.t()) ::
Appsignal.Transaction.t()
end
end
defmodule Appsignal.Transaction do
@behaviour Appsignal.TransactionBehaviour
use Appsignal.Config
@moduledoc """
Functions related to AppSignal transactions
This module contains functions for starting and stopping an
AppSignal transaction, recording events and collecting metrics
within a transaction, et cetera.
All functions take a `Transaction` as their first parameter. It is
possible to omit this parameter, in which case it is assumed that
the calling process already has an associated Transaction (the
"current" transaction). This is the case after `Transaction.start/2`
has been called from within the same process.
"""
defstruct [:resource, :id]
alias Appsignal.{Nif, Transaction, TransactionRegistry}
@typedoc """
Datatype which is used as a handle to the current AppSignal transaction.
"""
@type t :: %Transaction{}
@doc """
Create and register a transaction.
Call this when a transaction such as a http request or background job starts.
Parameters:
- `transaction_id` The unique identifier of this transaction.
- `namespace` The namespace of this transaction. Defaults to :background_job.
The function returns a `%Transaction{}` struct for use with the
other transaction functions in this module.
The returned transaction is also associated with the calling
process, so that processes / callbacks which don't get the
transaction passed in can still look it up through the
`Appsignal.TransactionRegistry`.
"""
@spec start(String.t(), atom) :: Transaction.t()
def start(transaction_id, namespace) when is_binary(transaction_id) do
transaction_id
|> create(namespace)
|> register
end
@doc """
Create a transaction with a transaction resource.
"""
@spec create(String.t(), atom) :: Transaction.t()
def create(transaction_id, namespace) when is_binary(transaction_id) and is_atom(namespace) do
{:ok, resource} = Nif.start_transaction(transaction_id, Atom.to_string(namespace))
%Transaction{resource: resource, id: transaction_id}
end
if Mix.env() in [:test, :test_phoenix] do
@spec to_map(Appsignal.Transaction.t()) :: map()
def to_map(transaction) do
{:ok, json} = Nif.transaction_to_json(transaction.resource)
Poison.decode!(json)
end
end
@spec register(Transaction.t()) :: Transaction.t()
defp register(transaction) do
TransactionRegistry.register(transaction)
transaction
end
@doc """
Start an event for the current transaction. See `start_event/1`
"""
@spec start_event() :: Transaction.t()
def start_event do
start_event(lookup())
end
@doc """
Start an event
Call this when an event within a transaction you want to measure starts, such as
an SQL query or http request.
- `transaction`: The pointer to the transaction this event occurred in.
"""
@spec start_event(Transaction.t() | nil) :: Transaction.t()
def start_event(nil), do: nil
def start_event(%Transaction{} = transaction) do
:ok = Nif.start_event(transaction.resource)
transaction
end
@doc """
Finish an event for the current transaction. See `finish_event/5`.
"""
@spec finish_event(String.t(), String.t(), String.t(), integer) :: Transaction.t()
def finish_event(name, title, body, body_format \\ 0) do
finish_event(lookup(), name, title, body, body_format)
end
@doc """
Finish an event
Call this when an event ends.
- `transaction`: The pointer to the transaction this event occurred in
- `name`: Name of the category of the event (sql.query, net.http)
- `title`: Title of the event ('User load', 'Http request to google.com')
- `body`: Body of the event, should not contain unique information per specific event (`select * from users where id=?`)
- `body_format` Format of the event's body which can be used for sanitization, 0 for general and 1 for sql currently.
"""
@spec finish_event(Transaction.t() | nil, String.t(), String.t(), any, integer) ::
Transaction.t()
def finish_event(nil, _name, _title, _body, _body_format), do: nil
def finish_event(%Transaction{} = transaction, name, title, body, body_format)
when is_binary(body) do
:ok = Nif.finish_event(transaction.resource, name, title, body, body_format)
transaction
end
def finish_event(%Transaction{} = transaction, name, title, body, body_format) do
encoded_body = Appsignal.Utils.DataEncoder.encode(body)
:ok = Nif.finish_event_data(transaction.resource, name, title, encoded_body, body_format)
transaction
end
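  # For example, measuring an SQL query (hypothetical values; body_format 1
  # marks the body as SQL for sanitization):
  #
  #   transaction
  #   |> Transaction.start_event()
  #   |> Transaction.finish_event("sql.query", "User load", "SELECT * FROM users WHERE id=?", 1)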
@doc """
Record a finished event for the current transaction. See `record_event/6`.
"""
@spec record_event(String.t(), String.t(), String.t(), integer, integer) :: Transaction.t()
def record_event(name, title, body, duration, body_format \\ 0) do
record_event(lookup(), name, title, body, duration, body_format)
end
@doc """
Record a finished event
Call this when an event which you cannot track the start for
ends. This function can only be used for events that do not have
children such as database queries. GC metrics and allocation counts
will be tracked in the parent of this event.
- `transaction`: The pointer to the transaction this event occurred in
- `name`: Name of the category of the event (sql.query, net.http)
- `title`: Title of the event ('User load', 'Http request to google.com')
- `body`: Body of the event, should not contain unique information per specific event (`select * from users where id=?`)
- `duration`: Duration of this event in nanoseconds
- `body_format` Format of the event's body which can be used for sanitization, 0 for general and 1 for sql currently.
"""
@spec record_event(Transaction.t() | nil, String.t(), String.t(), String.t(), integer, integer) ::
Transaction.t()
def record_event(nil, _name, _title, _body, _duration, _body_format), do: nil
def record_event(%Transaction{} = transaction, name, title, body, duration, body_format) do
:ok = Nif.record_event(transaction.resource, name, title, body, body_format, duration)
transaction
end
@doc """
  Set an error for the current transaction. See `set_error/4`.
"""
@spec set_error(String.t(), String.t(), any) :: Transaction.t()
def set_error(name, message, backtrace) do
set_error(lookup(), name, message, backtrace)
end
@max_name_size 120
@doc """
Set an error for a transaction
Call this when an error occurs within a transaction.
- `transaction`: The pointer to the transaction this event occurred in
- `name`: Name of the error (RuntimeError)
- `message`: Message of the error ('undefined method call for something')
- `backtrace`: Backtrace of the error; will be JSON encoded
"""
@spec set_error(Transaction.t() | nil, String.t(), String.t(), any) :: Transaction.t()
def set_error(nil, _name, _message, _backtrace), do: nil
def set_error(%Transaction{} = transaction, name, message, backtrace) do
name = name |> String.split_at(@max_name_size) |> elem(0)
backtrace_data =
backtrace
|> Appsignal.Backtrace.from_stacktrace()
|> Appsignal.Utils.DataEncoder.encode()
:ok = Nif.set_error(transaction.resource, name, message, backtrace_data)
transaction
end
@doc """
Set sample data for the current transaction. See `set_sample_data/3`.
"""
@spec set_sample_data(Transaction.t(), Enum.t()) :: Transaction.t()
def set_sample_data(%Transaction{} = transaction, values) do
values
|> Enum.each(fn {key, value} ->
Transaction.set_sample_data(transaction, key, value)
end)
transaction
end
@spec set_sample_data(String.t(), any) :: Transaction.t()
def set_sample_data(key, payload) do
set_sample_data(lookup(), key, payload)
end
@doc """
Set sample data for a transaction
  Use this to add sample data if `finish/1` returns `:sample`.
- `transaction`: The pointer to the transaction this event occurred in
- `key`: Key of this piece of metadata (params, session_data)
- `payload`: Metadata (e.g. `%{user_id: 1}`); will be JSON encoded
"""
@spec set_sample_data(Transaction.t() | nil, String.t(), any) :: Transaction.t()
def set_sample_data(nil, _key, _payload), do: nil
def set_sample_data(%Transaction{} = transaction, "params", payload) do
if config()[:send_params] do
do_set_sample_data(transaction, "params", payload)
else
transaction
end
end
def set_sample_data(%Transaction{} = transaction, key, payload) do
do_set_sample_data(transaction, key, payload)
end
def do_set_sample_data(%Transaction{} = transaction, key, payload) do
payload_data = Appsignal.Utils.DataEncoder.encode(payload)
:ok = Nif.set_sample_data(transaction.resource, key, payload_data)
transaction
end
@doc """
Set action of the current transaction. See `set_action/1`.
"""
@spec set_action(String.t()) :: Transaction.t()
def set_action(action) do
set_action(lookup(), action)
end
@doc """
Set action of a transaction
Call this when the identifying action of a transaction is known.
- `transaction`: The pointer to the transaction this event occurred in
- `action`: This transactions action (`"HomepageController.show"`)
"""
@spec set_action(Transaction.t() | nil, String.t()) :: Transaction.t()
def set_action(nil, _action), do: nil
def set_action(%Transaction{} = transaction, action) do
:ok = Nif.set_action(transaction.resource, action)
transaction
end
@doc """
Set namespace of the current transaction. See `set_namespace/1`.
"""
@spec set_namespace(atom()) :: Transaction.t()
def set_namespace(namespace) do
set_namespace(lookup(), namespace)
end
@doc """
Set namespace of a transaction
Call this to override the transaction's namespace.
- `transaction`: The pointer to the transaction this event occurred in
  - `namespace`: This transaction's namespace (e.g. `:background_job`)
"""
@spec set_namespace(Transaction.t() | nil, String.t() | atom()) :: Transaction.t()
def set_namespace(nil, _namespace), do: nil
def set_namespace(%Transaction{} = transaction, namespace) when is_atom(namespace) do
set_namespace(transaction, Atom.to_string(namespace))
end
def set_namespace(%Transaction{} = transaction, namespace) when is_binary(namespace) do
:ok = Nif.set_namespace(transaction.resource, namespace)
transaction
end
@doc """
Set queue start time of the current transaction. See `set_queue_start/2`.
"""
@spec set_queue_start(integer) :: Transaction.t()
def set_queue_start(start \\ -1) do
set_queue_start(lookup(), start)
end
@doc """
Set queue start time of a transaction
  Call this when the queue start time in milliseconds is known.
- `transaction`: The pointer to the transaction this event occurred in
- `queue_start`: Transaction queue start time in ms if known
"""
@spec set_queue_start(Transaction.t() | nil, integer) :: Transaction.t()
def set_queue_start(nil, _start), do: nil
def set_queue_start(%Transaction{} = transaction, start) do
:ok = Nif.set_queue_start(transaction.resource, start)
transaction
end
@doc """
Set metadata for the current transaction from an enumerable.
The enumerable needs to be a keyword list or a map.
"""
@spec set_meta_data(Enum.t()) :: Transaction.t()
def set_meta_data(values) do
transaction = lookup()
set_meta_data(transaction, values)
transaction
end
@spec set_meta_data(Transaction.t(), Enum.t()) :: Transaction.t()
def set_meta_data(%Transaction{} = transaction, values) do
values
|> Enum.each(fn {key, value} ->
Transaction.set_meta_data(transaction, key, value)
end)
transaction
end
@doc """
Set metadata for the current transaction. See `set_meta_data/3`.
"""
@spec set_meta_data(String.t(), String.t()) :: Transaction.t()
def set_meta_data(key, value) do
set_meta_data(lookup(), key, value)
end
@doc """
Set metadata for a transaction
Call this when an error occurs within a transaction to set more detailed data about the error
- `transaction`: The pointer to the transaction this event occurred in
- `key`: Key of this piece of metadata (`"email"`)
- `value`: Value of this piece of metadata (`"<EMAIL>"`)
"""
@spec set_meta_data(Transaction.t() | nil, String.t(), String.t()) :: Transaction.t()
def set_meta_data(nil, _key, _value), do: nil
def set_meta_data(%Transaction{} = transaction, key, value)
when is_binary(key) and is_binary(value) do
:ok = Nif.set_meta_data(transaction.resource, key, value)
transaction
end
def set_meta_data(%Transaction{} = transaction, key, value) do
set_meta_data(transaction, to_s(key), to_s(value))
end
@doc """
Finish the current transaction. See `finish/1`.
"""
@spec finish() :: :sample | :no_sample
def finish do
finish(lookup())
end
@doc """
Finish a transaction
Call this when a transaction such as a http request or background job ends.
- `transaction`: The pointer to the transaction this event occurred in
  Returns `:sample` when sample data for this transaction should be
  collected, or `:no_sample` when it should not.
"""
@spec finish(Transaction.t() | nil) :: :sample | :no_sample
def finish(nil), do: nil
def finish(%Transaction{} = transaction) do
Nif.finish(transaction.resource)
end
@doc """
Complete the current transaction. See `complete/1`.
"""
@spec complete() :: :ok
def complete do
complete(lookup())
end
@doc """
Complete a transaction
Call this after finishing a transaction (and adding sample data if necessary).
- `transaction`: The pointer to the transaction this event occurred in
"""
@spec complete(Transaction.t() | nil) :: :ok
def complete(nil), do: nil
def complete(%Transaction{} = transaction) do
TransactionRegistry.remove_transaction(transaction)
:ok = Nif.complete(transaction.resource)
end
@doc """
Generate a random id as a string to use as transaction identifier.
"""
@spec generate_id :: String.t()
def generate_id do
8
|> :crypto.strong_rand_bytes()
|> Base.hex_encode32(case: :lower, padding: false)
end
# Lookup the current AppSignal transaction in the transaction registry.
defp lookup do
TransactionRegistry.lookup(self())
end
defimpl Inspect do
def inspect(transaction, _opts) do
"AppSignal.Transaction{#{transaction.id}}"
end
end
if Appsignal.plug?() do
@doc """
Set the request metadata, given a Plug.Conn.t.
"""
@spec set_request_metadata(Transaction.t() | nil, Plug.Conn.t()) :: Transaction.t()
def set_request_metadata(%Transaction{} = transaction, %Plug.Conn{} = conn) do
# preprocess conn
conn =
conn
|> Plug.Conn.fetch_query_params()
# collect sample data
transaction
|> Transaction.set_sample_data(Appsignal.Plug.extract_sample_data(conn))
|> Transaction.set_meta_data(Appsignal.Plug.extract_meta_data(conn))
# Add session data
if !config()[:skip_session_data] and conn.private[:plug_session_fetch] == :done do
Transaction.set_sample_data(
transaction,
"session_data",
Appsignal.Utils.MapFilter.filter_values(
conn.private[:plug_session],
Appsignal.Utils.MapFilter.get_filter_session_data()
)
)
else
transaction
end
end
end
def set_request_metadata(transaction, %{}), do: transaction
if Appsignal.phoenix?() do
@doc """
Given the transaction and a %Plug.Conn{}, try to set the Phoenix controller module / action in the transaction.
"""
def try_set_action(conn) do
IO.warn(
"Appsignal.Transaction.try_set_action/1 is deprecated. Use Appsignal.Plug.extract_action/1 and Appsignal.Transaction.set_action/1 instead."
)
Transaction.set_action(lookup(), Appsignal.Plug.extract_action(conn))
end
def try_set_action(transaction, conn) do
IO.warn(
"Appsignal.Transaction.try_set_action/2 is deprecated. Use Appsignal.Plug.extract_action/1 and Appsignal.Transaction.set_action/2 instead."
)
Transaction.set_action(transaction, Appsignal.Plug.extract_action(conn))
end
end
defp to_s(value) when is_atom(value), do: Atom.to_string(value)
defp to_s(value) when is_integer(value), do: Integer.to_string(value)
defp to_s(value) when is_binary(value), do: value
@doc """
Return the transaction for the given process
Creates a new one when not found. Can also return `nil`; in that
case, we should not continue submitting the transaction.
"""
def lookup_or_create_transaction(origin \\ self(), namespace \\ :background_job) do
TransactionRegistry.lookup(origin) || Transaction.start("_" <> generate_id(), namespace)
end
end
# source: lib/appsignal/transaction.ex
defmodule Day07 do
@moduledoc """
Advent of Code 2019
Day 7: Amplification Circuit
"""
alias Day07.{Part1, Part2}
def get_program() do
Path.join(__DIR__, "inputs/day07.txt")
|> File.read!()
|> String.trim()
|> String.split(",")
|> Enum.map(&String.to_integer/1)
end
def execute() do
program = get_program()
IO.puts("Part 1: #{Part1.run(program)}")
IO.puts("Part 2: #{Part2.run(program)}")
end
end
defmodule Day07.Part1 do
alias Day07.Combinatorics
import Intcode, only: [run_computer: 2]
def run(program) do
Combinatorics.get_permutations([0, 1, 2, 3, 4])
|> Enum.map(&{&1, run_series(program, &1)})
|> Enum.max_by(&elem(&1, 1))
|> format_result()
end
def run_series(program, sequence) do
start_computers(program)
output =
run_computer(:Computer070, [Enum.at(sequence, 0), 0])
|> (&run_computer(:Computer071, [Enum.at(sequence, 1), &1])).()
|> (&run_computer(:Computer072, [Enum.at(sequence, 2), &1])).()
|> (&run_computer(:Computer073, [Enum.at(sequence, 3), &1])).()
|> (&run_computer(:Computer074, [Enum.at(sequence, 4), &1])).()
stop_computers()
output
end
def format_result({[a, b, c, d, e], signal}) do
"Max thruster signal #{signal} from sequence #{a}#{b}#{c}#{d}#{e}"
end
def start_computers(program) do
for i <- 0..4,
do: GenServer.start_link(Intcode, program, name: String.to_atom("Computer07#{i}"))
end
def stop_computers() do
for i <- 0..4, do: GenServer.stop(String.to_atom("Computer07#{i}"))
end
end
defmodule Day07.Part2 do
alias Day07.{Part1, Combinatorics}
def run(program) do
Combinatorics.get_permutations([5, 6, 7, 8, 9])
|> Enum.map(&{&1, run_feedback_loop(program, &1)})
|> Enum.max_by(&elem(&1, 1))
|> Part1.format_result()
end
def run_feedback_loop(program, sequence) do
Part1.start_computers(program)
output =
initialize_computers(sequence)
|> (&run_circuit_loop(program, &1)).()
Part1.stop_computers()
output
end
defp initialize_computers(sequence) do
sequence
|> Enum.with_index()
|> Enum.reduce(0, fn {setting, i}, input ->
GenServer.call(String.to_atom("Computer07#{i}"), {:run, [setting, input]})
|> (&elem(&1, 1)).()
end)
end
defp run_circuit_loop(program, input) do
output =
GenServer.call(:Computer070, {:run, [input]})
|> (&GenServer.call(:Computer071, {:run, [elem(&1, 1)]})).()
|> (&GenServer.call(:Computer072, {:run, [elem(&1, 1)]})).()
|> (&GenServer.call(:Computer073, {:run, [elem(&1, 1)]})).()
|> (&GenServer.call(:Computer074, {:run, [elem(&1, 1)]})).()
case output do
{:output, code} -> run_circuit_loop(program, code)
{:exit, code} -> code
end
end
end
defmodule Day07.Combinatorics do
def get_permutations([]), do: [[]]
def get_permutations(elements) do
for(e <- elements, body <- get_permutations(elements -- [e]), do: [e | body])
end
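  # For example, get_permutations([1, 2]) returns [[1, 2], [2, 1]].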
end
# source: lib/day07.ex
defmodule ComplexNumbers do
@typedoc """
In this module, complex numbers are represented as a tuple-pair containing the real and
imaginary parts.
For example, the real number `1` is `{1, 0}`, the imaginary number `i` is `{0, 1}` and
  the complex number `4+3i` is `{4, 3}`.
"""
@type complex :: {float, float}
@doc """
Return the real part of a complex number
"""
@spec real(a :: complex) :: float
def real({real, _im}), do: real
@doc """
Return the imaginary part of a complex number
"""
@spec imaginary(a :: complex) :: float
def imaginary({_real, im}), do: im
@doc """
Multiply two complex numbers, or a real and a complex number
"""
@spec mul(a :: complex | float, b :: complex | float) :: complex
def mul({a, b}, {c, d}), do: {a * c - b * d, b * c + a * d}
def mul(a, b), do: mul(to_complex(a), to_complex(b))
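  # For example, (1 + 2i) * (3 + 4i) = (1*3 - 2*4) + (2*3 + 1*4)i = -5 + 10i,
  # so mul({1, 2}, {3, 4}) returns {-5, 10}.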
@doc """
Add two complex numbers, or a real and a complex number
"""
@spec add(a :: complex | float, b :: complex | float) :: complex
def add({a, b}, {c, d}), do: {a + c, b + d}
def add(a, b), do: add(to_complex(a), to_complex(b))
@doc """
Subtract two complex numbers, or a real and a complex number
"""
@spec sub(a :: complex | float, b :: complex | float) :: complex
def sub({a, b}, {c, d}), do: {a - c, b - d}
def sub(a, b), do: sub(to_complex(a), to_complex(b))
@doc """
Divide two complex numbers, or a real and a complex number
"""
@spec div(a :: complex | float, b :: complex | float) :: complex
def div({a, b}, {c, d}) do
{(a * c + b * d) / (c * c + d * d), (b * c - a * d) / (c * c + d * d)}
end
def div(a, b), do: __MODULE__.div(to_complex(a), to_complex(b))
@doc """
Absolute value of a complex number
"""
@spec abs(a :: complex) :: float
def abs({a, b}), do: :math.sqrt(a * a + b * b)
@doc """
Conjugate of a complex number
"""
@spec conjugate(a :: complex) :: complex
def conjugate({a, b}), do: {a, -b}
@doc """
Exponential of a complex number
"""
@spec exp(a :: complex) :: complex
  def exp({a, b}), do: {:math.exp(a) * :math.cos(b), :math.exp(a) * :math.sin(b)}
defp to_complex({a, b}), do: {a, b}
defp to_complex(a), do: {a, 0}
end
# source: exercises/practice/complex-numbers/.meta/example.ex
defmodule Brando.Blueprint.Attributes do
@moduledoc """
### Attributes
#### Uniqueness
To create an unique index in the db as well as running
`unique_constraint` in the changeset:
attribute :email, unique: true
If you have fields that need to be unique together:
attribute :email, unique: [with: :other_field]
If you need uniqueness, but are fine with changing the attribute
attribute :slug, unique: [prevent_collision: true]
This is good for URL slugs. If it detects a collision it will add
`-{x}` to the value, where x is the next in sequence.
If you need uniqueness, but validated against another field - for
instance if you have a `slug` field, but also a `language` field:
attribute :slug, unique: [prevent_collision: :language]
This allows you to have `%{slug: "test", language: "en"}` and
`%{slug: "test", language: "dk"}` without erroring.
"""
alias Brando.Blueprint.Attribute
@valid_attributes [
{:array, :map},
{:array, :id},
{:array, :integer},
{:array, :string},
:array,
:boolean,
:date,
:datetime,
:enum,
:naive_datetime,
:decimal,
:file,
:float,
:id,
:integer,
:language,
:map,
:slug,
:status,
:string,
:text,
:time,
:timestamp,
:uuid,
:villain
]
def validate_attr!(type) when type in @valid_attributes, do: true
def validate_attr!({:__aliases__, _, _}), do: true
def validate_attr!({:array, {:__aliases__, _, _}}), do: true
def validate_attr!(type),
do: raise("Unknown type `#{inspect(type)}` given in blueprint")
def build_attr(name, type, opts \\ [])
def build_attr(name, :language, opts) do
default_languages =
case Keyword.get(opts, :languages) do
nil ->
Brando.config(:languages) ||
[
[value: "en", text: "English"],
[value: "no", text: "Norsk"]
]
supplied_langs ->
supplied_langs
end
languages =
Enum.map(default_languages, fn [value: lang_code, text: _] ->
String.to_atom(lang_code)
end)
%Attribute{
name: name,
type: :language,
opts: %{values: languages, required: true}
}
end
def build_attr(name, type, opts) do
%Attribute{
name: name,
type: type,
opts: Enum.into(opts, %{})
}
end
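  # For example, build_attr(:title, :string, required: true) returns
  # %Attribute{name: :title, type: :string, opts: %{required: true}}.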
defmacro attributes(do: block) do
attributes(__CALLER__, block)
end
defp attributes(_caller, block) do
quote generated: true, location: :keep do
Module.put_attribute(__MODULE__, :brando_macro_context, :attributes)
Module.register_attribute(__MODULE__, :attrs, accumulate: true)
unquote(block)
end
end
defmacro attribute(name, type, opts \\ []) do
validate_attr!(type)
attribute(__CALLER__, name, type, opts)
end
defp attribute(_caller, name, type, opts) do
quote location: :keep do
attr =
build_attr(
unquote(name),
unquote(type),
unquote(opts)
)
Module.put_attribute(__MODULE__, :attrs, attr)
end
end
def maybe_add_marked_as_deleted_attribute(true) do
[build_attr(:marked_as_deleted, :boolean, default: false, virtual: true)]
end
def maybe_add_marked_as_deleted_attribute(_), do: []
end
# source: lib/brando/blueprint/attributes.ex
defmodule Abbrev do
@moduledoc """
Calculates the set of unambiguous abbreviations for a given set of strings.
"""
@doc """
Given a set of strings, calculate the set of unambiguous abbreviations for those strings,
and return a map where the keys are all the possible abbreviations
and the values are the full strings.
## Parameters
* words - The set of strings from which to calculate the abbreviations.
## Examples
iex> Abbrev.abbrev(~w())
%{}
iex> Abbrev.abbrev(~w(a))
%{"a" => "a"}
iex> Abbrev.abbrev(~w(a b))
%{"a" => "a", "b" => "b"}
iex> Abbrev.abbrev(~w(aa ab))
%{"aa" => "aa", "ab" => "ab"}
iex> Abbrev.abbrev(~w(car cone))
%{"ca" => "car", "car" => "car", "co" => "cone", "con" => "cone", "cone" => "cone"}
"""
@spec abbrev([binary()]) :: %{required(binary()) => binary()}
def abbrev(words) do
Enum.reduce(words, %{abbreviations: %{}, seen: %{}}, fn word, state ->
Enum.reduce(all_prefixes_for_word(word, [word]), state, fn prefix, state -> update_state(word, prefix, state) end)
end)[:abbreviations]
end
@doc """
Given a set of strings and a pattern, calculate the set of unambiguous abbreviations
for only those strings matching the pattern, and return a map where
the keys are all the possible abbreviations and the values are the full strings.
## Parameters
* words - The set of strings from which to calculate the abbreviations.
* pattern - A regex or string; only input strings and abbreviations that match
the pattern or string will be included in the return value.
## Examples
iex> Abbrev.abbrev(~w(), ~r/^a/)
%{}
iex> Abbrev.abbrev(~w(a), ~r/^a/)
%{"a" => "a"}
iex> Abbrev.abbrev(~w(a b), ~r/^a/)
%{"a" => "a"}
iex> Abbrev.abbrev(~w(aa ab), ~r/b/)
%{"ab" => "ab"}
iex> Abbrev.abbrev(~w(car box cone crab), ~r/b/)
%{"b" => "box", "bo" => "box", "box" => "box", "crab" => "crab"}
iex> Abbrev.abbrev(~w(car box cone), "ca")
%{"ca" => "car", "car" => "car"}
"""
@spec abbrev([binary()], binary() | Regex.t()) :: %{required(binary()) => binary()}
def abbrev(words, pattern) when is_binary(pattern) do
abbrev(words, Regex.compile!(pattern))
end
def abbrev(words, pattern) do
words
|> Enum.filter(&Regex.match?(pattern, &1))
|> abbrev()
|> Enum.filter(fn {k, _} -> Regex.match?(pattern, k) end)
|> Enum.into(%{})
end
defp all_prefixes_for_word(word, accum) do
case Regex.run(~r/(.+).$/, word) do
[_, prefix] ->
all_prefixes_for_word(prefix, [prefix | accum])
nil ->
accum
end
end
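  # For example, all_prefixes_for_word("car", ["car"]) returns ["c", "ca", "car"].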
defp update_state(word, prefix, state) do
case get_and_update_in(state[:seen][prefix], &{&1, (&1 || 0) + 1}) do
{nil, state} ->
put_in(state[:abbreviations][prefix], word)
{1, state} ->
{_, new_state} = pop_in(state[:abbreviations][prefix])
new_state
{_, state} ->
state
end
end
end
# source: lib/abbrev.ex
defmodule LatLong do
@moduledoc """
A parser for all the ways that a longitude or latitude may be
specified. Comments and suggestions on how this might have been better
written are welcome. The following lat long formats are allowed ... all are
equivalent:
- 38.8977, -77.0365
- 38° 53' 51.635" N, 77° 2' 11.507" W
- 38 53 51.635 N, 77 2 11.507 W
- N 38° 53' 51.635", W 77° 2' 11.507"
- N 38 53 51.635, W 77 2 11.507
- 38 53 51.635, -77 2 11.507
And some other examples that are for different locations:
- -31.96, 115.84
- 90, 0 (North Pole)
- 41 25 01N, 120 58 57W
- 41°25'01"N, 120°58'57"W
- S17 33 08.352, W69 01 29.74
- 41 25N, 120 58W
- 41°25'N, 120°58'W
- N41 25.117, W120 58.292
- 41 N, 120 W
- 41°N, 120°W
- N41.092, W120.8362
- 90S, 0E (South Pole)
In addition, the latitude may be in one format and the longitude in another.
## Parsing
The strings are parsed with a state machine checking the next part of the
string, saving the various parts in a map. Each pass of the state machine
provides the next Float if available and the next grapheme. The state machine
lets the call fall to the appropriate state handler and then the next part is
examined until there isn't anything left in the string.
"""
@type latitude :: number
@type longitude :: number
@type message :: String.t
@doc """
Parses string representations of a latitude and longitude into decimals. The
latitude and longitude must be provided as single string argument with a
separating comma.
"""
@spec parse(String.t) :: { latitude, longitude } | { :error, message }
def parse latlong do
[ latitude, longitude ] = String.split latlong, ","
parse latitude, longitude
end
@doc """
Parses string representations of a latitude and longitude into decimals. The
latitude and longitude must be provided as string arguments.
"""
@spec parse(String.t, String.t) :: { latitude, longitude } | { :error, message }
def parse latitude, longitude do
latitude_value = part_to_decimal_position latitude, :latitude
longitude_value = part_to_decimal_position longitude, :longitude
cond do
latitude_value == :error -> {:error, "Error Parsing Latitude"}
latitude_value < -90.0 -> {:error, "Latitude < -90.0°"}
latitude_value > +90.0 -> {:error, "Latitude > +90.0°"}
      longitude_value == :error -> {:error, "Error Parsing Longitude"}
longitude_value < -180.0 -> {:error, "Longitude < -180.0°"}
longitude_value > +180.0 -> {:error, "Longitude > +180.0°"}
true -> {latitude_value, longitude_value}
end
end
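  # Both calls below yield approximately {38.8977, -77.0365}; the DMS form
  # works out to 38 + 53/60 + 51.635/3600 ≈ 38.897676 (and likewise for the
  # longitude):
  #
  #   LatLong.parse("38.8977, -77.0365")
  #   LatLong.parse(~s(38° 53' 51.635" N), ~s(77° 2' 11.507" W))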
# Called to convert latitude or longitude. 'type' is either :latitude or
# :longitude. Angle is the string of the latitude or longitude.
# Starts the state machine by calling next_part with the string and state.
defp part_to_decimal_position angle, type do
state = %{ sign: 1, degrees: 0, minutes: 0, seconds: 0, field: :degrees, type: type }
next_part angle, state
end
  # If the string is empty, calculate and return the parsed value.
defp next_part "", state do
(state[:degrees] + state[:minutes] / 60.0 + state[:seconds] / 3600.0) * state[:sign]
end
# Get possible value at next string position and next grapheme. Then drop
# it into the state machine.
defp next_part string, state do
next_state Float.parse(string), String.next_grapheme(string), state
end
# Just ignore spaces. They are delimiters for float parsing already used
# so toss.
defp next_state :error, {" ", tail}, state do
next_part tail, state
end
  # Check for degrees sign. Must be found when field has moved to minutes.
  defp next_state :error, {"°", tail}, %{field: :minutes} = state do
    next_part tail, state
  end
  # Check for minutes sign. Must be found when field has moved to seconds.
  defp next_state :error, {"\'", tail}, %{field: :seconds} = state do
    next_part tail, state
  end
  # Check for seconds sign. Must be found when field has moved to nil.
  defp next_state :error, {"\"", tail}, %{field: :nil} = state do
    next_part tail, state
  end
# Capture a minus sign but only if a valid float was found.
defp next_state { _, _ }, {"-", tail}, state do
next_part tail, Map.merge(state, %{sign: -1})
end
# Capture a plus sign but only if a valid float was found.
defp next_state { _, _ }, {"+", tail}, state do
next_part tail, Map.merge(state, %{sign: 1})
end
# North sets sign to + but only if type is latitude.
defp next_state :error, {"N", tail}, %{type: :latitude} = state do
next_part tail, Map.merge(state, %{sign: 1})
end
# South sets sign to - but only if type is latitude.
defp next_state :error, {"S", tail}, %{type: :latitude} = state do
next_part tail, Map.merge(state, %{sign: -1})
end
# East sets sign to + but only if type is longitude.
defp next_state :error, {"E", tail}, %{type: :longitude} = state do
next_part tail, Map.merge(state, %{sign: 1})
end
# West sets sign to - but only if type is longitude.
defp next_state :error, {"W", tail}, %{type: :longitude} = state do
next_part tail, Map.merge(state, %{sign: -1})
end
# Capture the value for degrees. Set field to minutes.
defp next_state({ value, tail }, _, %{field: :degrees} = state) do
next_part tail, Map.merge(state, %{degrees: value, field: :minutes})
end
# Capture the value for minutes. Set field to seconds.
defp next_state({ value, tail }, _, %{field: :minutes} = state) do
next_part tail, Map.merge(state, %{minutes: value, field: :seconds})
end
# Capture the value for seconds. Set field to nil.
defp next_state({ value, tail }, _, %{field: :seconds} = state) do
next_part tail, Map.merge(state, %{seconds: value, field: :nil})
end
# Return an error for this parse.
defp next_state(_,_,_) do
:error
end
end
# source: lib/latlong.ex
defmodule Blockchain.Chain do
@moduledoc """
Represents the information about a specific chain.
This will either be a current chain (such as homestead), or
a test chain (such as ropsten). Different chains have
different parameters, such as accounts with an initial
balance and when EIPs are implemented.
For compatibility, we'll use the configuration files from Parity:
https://github.com/paritytech/parity/tree/master/ethcore/res/ethereum
"""
require Integer
alias Blockchain.Genesis
alias EVM.Configuration
defstruct name: nil,
engine: %{},
params: %{},
genesis: %{},
nodes: [],
accounts: %{},
evm_config: nil
@type engine :: %{
minimum_difficulty: integer(),
difficulty_bound_divisor: integer(),
duration_limit: integer(),
block_rewards: [{integer(), integer()}],
homestead_transition: integer(),
eip649_reward: integer(),
eip100b_transition: integer(),
eip649_transition: integer(),
difficulty_bomb_delays: [{integer(), integer()}],
dao_hardfork_transition: integer() | nil,
dao_hardfork_accounts: [binary()] | nil,
dao_hardfork_beneficiary: binary() | nil
}
@type params :: %{
gas_limit_bound_divisor: integer(),
registrar: EVM.address(),
account_start_nonce: integer(),
maximum_extra_data_size: integer(),
min_gas_limit: integer(),
network_id: integer(),
fork_block: integer(),
fork_canon_hash: EVM.hash(),
max_code_size: integer(),
max_code_size_transition: integer(),
eip150_transition: integer(),
eip160_transition: integer(),
eip161abc_transition: integer(),
eip161d_transition: integer(),
eip155_transition: integer(),
eip98_transition: integer(),
eip86_transition: integer(),
eip140_transition: integer(),
eip211_transition: integer(),
eip214_transition: integer(),
eip658_transition: integer(),
eip145_transition: integer(),
eip1014_transition: integer(),
eip1052_transition: integer(),
eip1283_transition: integer()
}
@type account :: %{
balance: EVM.Wei.t(),
nonce: integer(),
storage: %{
binary() => binary()
}
}
@type builtin_account :: %{
name: String.t(),
balance: integer(),
nonce: integer(),
pricing: %{
linear: %{
base: integer(),
word: integer()
}
}
}
@type t :: %__MODULE__{
name: String.t(),
engine: %{String.t() => engine()},
params: params(),
genesis: Genesis.t(),
nodes: [String.t()],
accounts: %{EVM.address() => account() | builtin_account()},
evm_config: Configuration.t()
}
@dao_extra_range 9
@doc """
Loads a given blockchain, such as Homestead or Ropsten.
This chain is used to set the genesis block and tweak parameters
of the Blockchain and EVM.
See the `/chains` directory of this repo for supported
block chains.
## Examples
iex> Blockchain.Chain.load_chain(:ropsten).name
"Ropsten"
iex> Blockchain.Chain.load_chain(:ropsten).genesis.difficulty
0x100000
"""
@spec load_chain(atom(), EVM.Configuration.t() | nil) :: t
def load_chain(chain, evm_config \\ nil) do
chain_data = read_chain!(chain)
engine = Enum.into(chain_data["engine"], %{}, &get_engine/1)
accounts =
(chain_data["accounts"] || [])
|> get_accounts()
%__MODULE__{
name: chain_data["name"],
engine: engine,
params: get_params(chain_data["params"]),
genesis: get_genesis(chain_data["genesis"]),
nodes: chain_data["nodes"],
accounts: accounts,
evm_config: evm_config
}
end
@doc """
Gets a test chain configuration (along with the respective EVM configuration)
based on a hardfork.
## Examples
iex> Blockchain.Chain.test_config("Frontier").name
"Frontier (Test)"
"""
def test_config(hardfork) when is_binary(hardfork) do
config = EVM.Configuration.hardfork_config(hardfork)
test = get_test(hardfork)
test && load_chain(test, config)
end
defp get_test("Frontier"), do: :frontier_test
defp get_test("Homestead"), do: :homestead_test
defp get_test("TangerineWhistle"), do: :eip150_test
defp get_test("SpuriousDragon"), do: :eip161_test
defp get_test("Byzantium"), do: :byzantium_test
defp get_test("Constantinople"), do: :constantinople_test
defp get_test("ByzantiumToConstantinopleAt5"), do: :byzantium_to_constantinople_transition_test
defp get_test("EIP158ToByzantiumAt5"), do: :eip158_to_byzantium_transition_test
defp get_test("HomesteadToEIP150At5"), do: :homestead_to_eip150_transition_test
defp get_test("FrontierToHomesteadAt5"), do: :frontier_to_homestead_transition_test
defp get_test("HomesteadToDaoAt5"), do: :dao_hardfork_test
defp get_test(_), do: nil
@doc """
Get the EVM configuration based on the chain and block number
"""
def evm_config(chain = %__MODULE__{}, block_number \\ nil) do
if block_number do
cond do
block_number >= chain.params.eip1283_transition ->
EVM.Configuration.Constantinople.new()
block_number >= chain.params.eip658_transition ->
EVM.Configuration.Byzantium.new()
block_number >= chain.params.eip160_transition ->
EVM.Configuration.SpuriousDragon.new()
block_number >= chain.params.eip150_transition ->
EVM.Configuration.TangerineWhistle.new()
block_number >= chain.engine["Ethash"].homestead_transition ->
EVM.Configuration.Homestead.new()
true ->
EVM.Configuration.Frontier.new()
end
else
chain.evm_config
end
end
@doc """
Convenience function to determine whether a block number is after the
bomb delays introduced in Byzantium and Constantinople
"""
@spec after_bomb_delays?(t(), integer()) :: boolean()
def after_bomb_delays?(chain = %__MODULE__{}, block_number) do
bomb_delays = chain.engine["Ethash"][:difficulty_bomb_delays]
Enum.any?(bomb_delays, fn {hard_fork_number, _delay} ->
block_number >= hard_fork_number
end)
end
@doc """
Function to determine what the bomb delay is for a block number.
Note: This function should not be called on a block number that happens before
bomb delays. Before bomb delays were introduced, the difficulty calculation
was different and thus we do not expect a bomb delay at all.
"""
@spec bomb_delay_factor_for_block(t, integer()) :: integer()
def bomb_delay_factor_for_block(chain = %__MODULE__{}, block_number) do
bomb_delays = chain.engine["Ethash"][:difficulty_bomb_delays]
bomb_delays
|> Enum.sort(fn {k1, _}, {k2, _} -> k1 < k2 end)
|> Enum.take_while(fn {k, _} -> k <= block_number end)
|> Enum.reduce(0, fn {_k, v}, acc -> acc + v end)
end
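  # Illustrative walk-through (mainnet-style values, assumed for the example):
  # with difficulty_bomb_delays of [{4_370_000, 3_000_000}, {7_280_000, 2_000_000}],
  # a block at 7_500_000 is past both forks, so the delay factor is
  # 3_000_000 + 2_000_000 = 5_000_000.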
@doc """
Determines the base reward for a block number. The reward changed was lowered
in Byzantium and again in Constantinople
"""
@spec block_reward_for_block(t, integer()) :: integer()
def block_reward_for_block(chain = %__MODULE__{}, block_number) do
{_k, reward} =
chain.engine["Ethash"][:block_rewards]
|> Enum.sort(fn {k, _}, {k2, _} -> k < k2 end)
|> Enum.take_while(fn {k, _} -> k <= block_number end)
|> List.last()
reward
end
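  # With mainnet-style block_rewards of [{0, 5 ETH}, {4_370_000, 3 ETH},
  # {7_280_000, 2 ETH}] (values in wei; assumed for the example), a block at
  # 5_000_000 falls after Byzantium but before Constantinople, so it earns 3 ETH.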
def support_dao_fork?(chain) do
!is_nil(chain.engine["Ethash"][:dao_hardfork_transition])
end
def dao_fork?(chain, block_number) do
chain.engine["Ethash"][:dao_hardfork_transition] == block_number
end
def within_dao_fork_extra_range?(chain, block_number) do
dao_hardfork = chain.engine["Ethash"][:dao_hardfork_transition]
block_number >= dao_hardfork && block_number <= dao_hardfork + @dao_extra_range
end
@doc """
Helper function to determine if block number is after the homestead transition
based on the chain configuration.
"""
@spec after_homestead?(t, integer()) :: boolean()
def after_homestead?(chain, block_number) do
homestead_block = chain.engine["Ethash"][:homestead_transition]
block_number >= homestead_block
end
@doc """
Helper function to determine if block number is after the byzantium transition
based on the chain configuration.
"""
@spec after_byzantium?(t, integer()) :: boolean()
def after_byzantium?(chain, block_number) do
eip658_transition = chain.params[:eip658_transition]
block_number >= eip658_transition
end
@spec get_engine({String.t(), map}) :: {String.t(), engine()}
defp get_engine({engine, %{"params" => params}}) do
config = %{
minimum_difficulty: params["minimumDifficulty"] |> load_hex(),
difficulty_bound_divisor: params["difficultyBoundDivisor"] |> load_hex(),
duration_limit: params["durationLimit"] |> load_hex(),
block_rewards: (params["blockReward"] || "0x0") |> parse_reward(),
homestead_transition: params["homesteadTransition"] |> load_hex(),
eip649_reward: params["eip649Reward"] |> load_hex(),
eip100b_transition: params["eip100bTransition"] |> load_hex(),
eip649_transition: params["eip649Transition"] |> load_hex(),
difficulty_bomb_delays: params["difficultyBombDelays"] |> parse_bomb_delays(),
dao_hardfork_transition: params["daoHardforkTransition"] |> load_hex(),
dao_hardfork_accounts: params["daoHardforkAccounts"] |> parse_dao_accounts(),
dao_hardfork_beneficiary: params["daoHardforkBeneficiary"] |> load_raw_hex()
}
{engine, config}
end
defp parse_dao_accounts(nil), do: []
defp parse_dao_accounts(accounts) do
Enum.map(accounts, &load_raw_hex/1)
end
defp parse_reward(block_reward) when is_binary(block_reward) do
[{load_hex("0x00"), load_hex(block_reward)}]
end
defp parse_reward(block_rewards) do
Enum.map(block_rewards, fn {k, v} ->
{block_number, _} = Integer.parse(k)
{block_number, load_hex(v)}
end)
end
defp parse_bomb_delays(nil), do: []
defp parse_bomb_delays(bomb_delays) do
Enum.map(bomb_delays, fn {k, v} ->
{block_number, _} = Integer.parse(k)
{block_number, v}
end)
end
@spec get_params(map) :: params()
defp get_params(map) do
%{
gas_limit_bound_divisor: map["gasLimitBoundDivisor"] |> load_hex(),
registrar: map["registrar"] |> load_raw_hex(),
account_start_nonce: map["accountStartNonce"] |> load_hex(),
maximum_extra_data_size: map["maximumExtraDataSize"] |> load_hex(),
min_gas_limit: map["minGasLimit"] |> load_hex(),
network_id: map["networkID"] |> load_hex(),
fork_block: map["forkBlock"] |> load_hex(),
fork_canon_hash: map["forkCanonHash"] |> load_raw_hex(),
max_code_size: map["maxCodeSize"] |> load_hex(),
max_code_size_transition: map["maxCodeSizeTransition"] |> load_hex(),
eip150_transition: map["eip150Transition"] |> load_hex(),
eip160_transition: map["eip160Transition"] |> load_hex(),
eip161abc_transition: map["eip161abcTransition"] |> load_hex(),
eip161d_transition: map["eip161dTransition"] |> load_hex(),
eip155_transition: map["eip155Transition"] |> load_hex(),
eip98_transition: map["eip98Transition"] |> load_hex(),
eip86_transition: map["eip86Transition"] |> load_hex(),
eip140_transition: map["eip140Transition"] |> load_hex(),
eip211_transition: map["eip211Transition"] |> load_hex(),
eip214_transition: map["eip214Transition"] |> load_hex(),
eip658_transition: map["eip658Transition"] |> load_hex(),
eip145_transition: map["eip145Transition"] |> load_hex(),
eip1014_transition: map["eip1014Transition"] |> load_hex(),
eip1052_transition: map["eip1052Transition"] |> load_hex(),
eip1283_transition: map["eip1283Transition"] |> load_hex()
}
end
@spec get_genesis(map) :: Genesis.t()
defp get_genesis(map) do
%{
seal: get_genesis_seal(map["seal"]),
difficulty: map["difficulty"] |> load_hex(),
author: map["author"] |> load_raw_hex(),
timestamp: map["timestamp"] |> load_hex(),
parent_hash: map["parentHash"] |> load_raw_hex(),
extra_data: map["extraData"] |> load_raw_hex(),
gas_limit: map["gasLimit"] |> load_hex()
}
end
@spec get_genesis_seal(map | nil) :: Genesis.seal_config() | nil
defp get_genesis_seal(nil), do: nil
defp get_genesis_seal(map) do
%{
nonce: map["ethereum"]["nonce"] |> load_raw_hex(),
mix_hash: map["ethereum"]["mixHash"] |> load_raw_hex()
}
end
defp get_accounts(json_accounts) do
accounts =
Enum.reduce(json_accounts, [], fn json_account = {_address, info}, acc ->
account =
if is_nil(info["builtin"]) do
get_account(json_account)
else
get_builtin_account(json_account)
end
[account | acc]
end)
Enum.into(accounts, %{})
end
defp get_account({raw_address, info}) do
nonce =
if info["nonce"],
do: info["nonce"] |> load_hex(),
else: 0
address = load_raw_hex(raw_address)
account = %{
balance: (info["balance"] || "0x0") |> load_decimal(),
nonce: nonce
}
{address, account}
end
defp get_builtin_account({raw_address, info}) do
address = load_raw_hex(raw_address)
balance = if info["balance"], do: load_decimal(info["balance"])
nonce =
if info["nonce"],
do: load_hex(info["nonce"]),
else: 0
builtin_account = %{
name: info["builtin"]["name"],
pricing: %{
linear: %{
base: info["builtin"]["pricing"]["linear"]["base"],
word: info["builtin"]["pricing"]["linear"]["word"]
}
},
balance: balance,
nonce: nonce
}
{address, builtin_account}
end
@spec read_chain!(atom()) :: map()
defp read_chain!(chain) do
filename = chain_filename(chain)
{:ok, body} = File.read(filename)
Jason.decode!(body)
end
@spec chain_filename(atom()) :: String.t()
defp chain_filename(chain) do
Path.expand("../../../../chains/#{Atom.to_string(chain)}.json", __DIR__)
end
@doc """
Given a string (e.g. user input), returns either a valid atom
referencing a chain or `:not_found`.
## Examples
iex> Blockchain.Chain.id_from_string("ropsten")
{:ok, :ropsten}
iex> Blockchain.Chain.id_from_string("jungle")
:not_found
"""
@spec id_from_string(String.t()) ::
{:ok, :kovan | :tobalaba | :ropsten | :foundation | :expanse | :ellaism | :musicoin}
| :not_found
def id_from_string("ellaism"), do: {:ok, :ellaism}
def id_from_string("expanse"), do: {:ok, :expanse}
def id_from_string("foundation"), do: {:ok, :foundation}
def id_from_string("kovan"), do: {:ok, :kovan}
def id_from_string("musicoin"), do: {:ok, :musicoin}
def id_from_string("ropsten"), do: {:ok, :ropsten}
def id_from_string("tobalaba"), do: {:ok, :tobalaba}
def id_from_string(_), do: :not_found
@spec load_raw_hex(String.t() | nil) :: binary()
defp load_raw_hex(nil), do: nil
defp load_raw_hex("0x" <> hex_data), do: load_raw_hex(hex_data)
defp load_raw_hex(hex_data) when Integer.is_odd(byte_size(hex_data)),
do: load_raw_hex("0" <> hex_data)
defp load_raw_hex(hex_data) do
Base.decode16!(hex_data, case: :mixed)
end
@spec load_decimal(String.t()) :: integer()
defp load_decimal("0x" <> hex_data) do
Base.decode16!(hex_data)
end
defp load_decimal(dec_data) do
{res, ""} = Integer.parse(dec_data)
res
end
@spec load_hex(String.t() | integer()) :: integer()
defp load_hex(nil), do: nil
defp load_hex(x) when is_integer(x), do: x
defp load_hex(x), do: x |> load_raw_hex |> :binary.decode_unsigned()
end
# source: apps/blockchain/lib/blockchain/chain.ex
defmodule XDR.Struct do
@moduledoc """
This module manages the `Structure` type based on the RFC4506 XDR Standard.
"""
@behaviour XDR.Declaration
alias XDR.StructError
defstruct [:components]
@typedoc """
`XDR.Struct` structure type specification.
"""
@type t :: %XDR.Struct{components: keyword()}
@doc """
  Create a new `XDR.Struct` structure with the `components` passed.
"""
@spec new(components :: keyword()) :: t()
def new(components), do: %XDR.Struct{components: components}
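  # A minimal round-trip sketch, assuming an `XDR.Int` component module that
  # implements the `XDR.Declaration` behaviour (an assumption for this example):
  #
  #   struct = XDR.Struct.new(age: XDR.Int.new(30), id: XDR.Int.new(7))
  #   binary = XDR.Struct.encode_xdr!(struct)
  #   {decoded, ""} = XDR.Struct.decode_xdr!(binary, %{components: [age: XDR.Int, id: XDR.Int]})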
@doc """
Encode a `XDR.Struct` structure into a XDR format.
"""
@impl true
def encode_xdr(%{components: components}) when not is_list(components), do: {:error, :not_list}
def encode_xdr(%{components: []}), do: {:error, :empty_list}
def encode_xdr(%{components: components}), do: {:ok, encode_components(components)}
@doc """
Encode a `XDR.Struct` structure into a XDR format.
If the `struct` is not valid, an exception is raised.
"""
@impl true
def encode_xdr!(struct) do
case encode_xdr(struct) do
{:ok, binary} -> binary
{:error, reason} -> raise(StructError, reason)
end
end
@doc """
  Decode the structure in XDR format to an `XDR.Struct` structure.
"""
@impl true
def decode_xdr(bytes, _struct) when not is_binary(bytes), do: {:error, :not_binary}
def decode_xdr(_bytes, %{components: components}) when not is_list(components),
do: {:error, :not_list}
def decode_xdr(bytes, %{components: components}) do
{decoded_components, rest} = decode_components(bytes, components)
decoded_struct = decoded_components |> Enum.reverse() |> new()
{:ok, {decoded_struct, rest}}
end
@doc """
  Decode the structure in XDR format to an `XDR.Struct` structure.
If the binaries are not valid, an exception is raised.
"""
@impl true
def decode_xdr!(bytes, struct) do
case decode_xdr(bytes, struct) do
{:ok, result} -> result
{:error, reason} -> raise(StructError, reason)
end
end
@spec encode_components(components :: keyword()) :: binary()
defp encode_components(components) do
Enum.reduce(components, <<>>, fn {_key, component}, bytes ->
component_module = component.__struct__
bytes <> component_module.encode_xdr!(component)
end)
end
@spec decode_components(bytes :: binary(), components :: keyword()) :: {keyword(), binary()}
defp decode_components(bytes, components) do
components
|> Enum.reduce({[], bytes}, fn {key, component}, {decoded_components, rest_bytes} ->
{decoded_component, rest} = component.decode_xdr!(rest_bytes)
{[{key, decoded_component} | decoded_components], rest}
end)
end
end
# source: lib/xdr/struct.ex
defmodule Ada.Workflow do
@moduledoc """
The Ada.Workflow module specifies a behaviour which needs to be implemented
by all workflows.
## Core concepts
A workflow has a set of requirements which define the parameters required for
its correct execution (e.g. it may require a user id).
It separates the **fetch** phase (gathering data) from the **format** phase
(presenting it according to a transport, e.g. email).
## From idea to implementation
One may want to fetch the list of trains starting from a specific location
and receive them by email.
This translates to a workflow that requires:
- a `user_id` (to resolve the email address to send the email to)
- a `location_id` (to fetch relevant trainline information)
In the fetch phase, the workflow will find user and location in the local
repo, then interact with a data source (created separately under the
`Ada.Source` namespace) to retrieve the list of trains.
In the format phase, this list of trains, along with any other data coming
from the fetch phase, can be formatted according to the transport.
It's important that all side-effectful operations (db queries, http api
interactions, current time, etc.) are performed in the fetch phase. This way
the format phase can be completely pure, immutable and easy to test.
  All workflow module names need to start with `Ada.Workflow` to be correctly
resolved by the runtime.
## Examples
  Please see `Ada.Workflow.SendLastFmReport` or any other existing workflow
module.
"""
@type t :: module
@type raw_data :: term()
@type transport :: :email
@type ctx :: Keyword.t()
@type validation_errors :: [{atom(), Ecto.Changeset.error()}]
@type requirements :: %{optional(atom()) => term()}
@type run_result :: {:ok, Ada.Email.t()} | {:error, term()}
@type raw_data_result :: {:ok, raw_data} | {:error, term()}
@doc "Returns a human readable workflow name"
@callback human_name() :: String.t()
@doc """
A map representing the workflow data requirements, keyed
by the parameter name (e.g. `user_id`) and its type (`:string`).
Supports all types handled by Ecto, as under the hood it uses Ecto's
Changeset functions to cast and validate data. See
<https://hexdocs.pm/ecto/2.2.9/Ecto.Schema.html#module-primitive-types> for a
list of available types.
"""
@callback requirements() :: requirements()
@doc """
Given some starting params, return data ready to be formatted.
"""
@callback fetch(map(), ctx()) :: {:ok, raw_data()} | {:error, term()}
@doc """
Given some data resulting from a `fetch/2` call and a transport, return a
result compatible with such a transport.
For example, for a transport with value `:email`, a `{:ok, %Ada.Email{}}`
needs to be returned for the workflow to complete successfully.
"""
@callback format(raw_data(), transport(), ctx()) :: {:ok, term()} | {:error, term()}
import Ecto.Changeset
@doc """
Returns all available transports.
"""
@spec transports :: [transport]
def transports, do: [:email]
@doc """
Runs a workflow given its name, starting params, a choice of transport and
supporting context.
Params are validated and formatted data is checked for compatibility with the
chosen transport.
"""
@spec run(t, map, transport, Keyword.t()) :: run_result()
def run(workflow_name, params, transport, ctx) do
with {:ok, normalized_params} <- validate(workflow_name, params),
{:ok, raw_data} <- workflow_name.fetch(normalized_params, ctx),
{:ok, formatted_data} <- workflow_name.format(raw_data, transport, ctx),
:ok <- validate_result(formatted_data, transport) do
apply_transport(formatted_data, transport, ctx)
end
end
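  # A minimal workflow sketch (hypothetical module) showing the contract that
  # run/4 drives: validate params, fetch, format, then deliver via the transport:
  #
  #   defmodule Ada.Workflow.Hello do
  #     @behaviour Ada.Workflow
  #     def human_name, do: "Hello"
  #     def requirements, do: %{user_id: :integer}
  #     def fetch(%{user_id: id}, _ctx), do: {:ok, %{user_id: id}}
  #     def format(_data, :email, _ctx), do: {:ok, %Ada.Email{}}
  #   end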
@doc """
Executes a workflow's fetch phase, returning the resulting raw data.
"""
@spec raw_data(t, map, Keyword.t()) :: raw_data_result()
def raw_data(workflow_name, params, ctx) do
case validate(workflow_name, params) do
{:ok, normalized_params} ->
workflow_name.fetch(normalized_params, ctx)
error ->
error
end
end
@doc """
Validates that the passed module name is actually a workflow.
"""
@spec valid_name?(t) :: boolean
def valid_name?(workflow_name) do
Code.ensure_loaded?(workflow_name) and
function_exported?(workflow_name, :requirements, 0) and
function_exported?(workflow_name, :fetch, 2) and
function_exported?(workflow_name, :format, 3)
end
@doc """
Validates a map of params according to a workflows's requirements specification.
"""
@spec validate(t, map) :: {:ok, map} | {:error, :invalid_params, validation_errors}
def validate(workflow_name, params) do
changeset = validate_params(workflow_name, params)
if changeset.valid? do
{:ok, apply_changes(changeset)}
else
{:error, :invalid_params, changeset.errors}
end
end
@doc """
Normalizes a workflow name to string, avoiding issue in the conversion
between a module atom and a string.
"""
@spec normalize_name(t | String.t()) :: String.t()
def normalize_name(workflow_name) when is_atom(workflow_name) do
inspect(workflow_name)
end
def normalize_name("Elixir." <> workflow_name), do: workflow_name
def normalize_name(workflow_name), do: workflow_name
defp validate_params(workflow_name, params) do
types = workflow_name.requirements()
{params, types}
|> cast(params, Map.keys(types))
|> validate_required(Map.keys(types))
end
defp validate_result(%Ada.Email{}, :email), do: :ok
defp validate_result(_incompatible_result, :email), do: {:error, :incompatible_result}
defp apply_transport(email, :email, ctx) do
email_adapter = Keyword.fetch!(ctx, :email_adapter)
email_adapter.send_email(email)
end
end
# source: lib/ada/workflow.ex
defmodule Plaid.PaymentInitiation.Recipients do
@moduledoc """
Functions for Plaid `payment_initiation/recipient` endpoints.
"""
import Plaid, only: [make_request_with_cred: 4, validate_cred: 1]
alias Plaid.Utils
@derive Jason.Encoder
defstruct recipient_id: nil,
request_id: nil
@type t :: %__MODULE__{
recipient_id: String.t(),
request_id: String.t()
}
@type params :: %{required(atom) => String.t() | map}
@type config :: %{required(atom) => String.t()}
@endpoint :"payment_initiation/recipient"
defmodule Recipient do
@doc """
Plaid Recipient data structure.
"""
@derive Jason.Encoder
defstruct recipient_id: nil,
name: nil,
iban: nil,
address: nil,
request_id: nil
@type t :: %__MODULE__{
recipient_id: String.t(),
name: String.t(),
iban: String.t(),
address: Plaid.PaymentInitiation.Recipients.Recipient.Address.t(),
request_id: String.t()
}
defmodule Address do
@moduledoc """
Plaid Recipient Address data structure.
"""
@derive Jason.Encoder
defstruct street: nil,
city: nil,
postal_code: nil,
country: nil
@type t :: %__MODULE__{
street: [String.t()],
city: String.t(),
postal_code: String.t(),
country: String.t()
}
end
end
@doc """
Creates recipient.
Parameters
```
%{
}
```
"""
@spec create(params, config | nil) ::
{:ok, Plaid.PaymentInitiation.Recipients.t()} | {:error, Plaid.Error.t()}
def create(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/create"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
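  # Usage sketch (field values are illustrative; credentials come from config
  # or the `config` argument):
  #
  #   params = %{
  #     name: "Wonder Wallet",
  #     iban: "GB33BUKB20201555555555",
  #     address: %{street: ["1 Main St"], city: "London", postal_code: "N1 1AA", country: "GB"}
  #   }
  #   {:ok, %Plaid.PaymentInitiation.Recipients{recipient_id: _id}} =
  #     Plaid.PaymentInitiation.Recipients.create(params)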
@doc """
Gets recipient by recipient_id.
Parameters
```
%{
recipient_id: ""
}
```
"""
@spec get(params, config | nil) ::
{:ok, Plaid.PaymentInitiation.Recipients.Recipient.t()} | {:error, Plaid.Error.t()}
def get(params, config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/get"
make_request_with_cred(:post, endpoint, config, params)
|> Utils.handle_resp(@endpoint)
end
@doc """
Lists all recipients.
"""
@spec list(config | nil) ::
{:ok, [Plaid.PaymentInitiation.Recipients.Recipient.t()]} | {:error, Plaid.Error.t()}
def list(config \\ %{}) do
config = validate_cred(config)
endpoint = "#{@endpoint}/list"
make_request_with_cred(:post, endpoint, config, %{})
|> Utils.handle_resp(@endpoint)
end
end
|
lib/plaid/payment_initiation/recipients.ex
| 0.784402
| 0.628621
|
recipients.ex
|
starcoder
|
defmodule Ambry.Books do
@moduledoc """
Functions for dealing with Books.
"""
import Ambry.{FileUtils, SearchUtils}
import Ecto.Query
alias Ambry.Books.{Book, BookFlat}
alias Ambry.Media.Media
alias Ambry.{PubSub, Repo}
@book_direct_assoc_preloads [book_authors: [:author], series_books: [:series]]
@doc """
Returns a limited list of books and whether or not there are more.
By default, it will limit to the first 10 results. Supply `offset` and `limit`
to change this. Also can optionally filter by the given `filter` string.
## Examples
iex> list_books()
{[%BookFlat{}, ...], true}
"""
def list_books(offset \\ 0, limit \\ 10, filters \\ %{}, order \\ [asc: :title]) do
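    # Query one row past `limit`: if it comes back, more results exist and we
    # avoid issuing a separate count query.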
over_limit = limit + 1
books =
offset
|> BookFlat.paginate(over_limit)
|> BookFlat.filter(filters)
|> BookFlat.order(order)
|> Repo.all()
books_to_return = Enum.slice(books, 0, limit)
{books_to_return, books != books_to_return}
end
@doc """
Returns the number of books.
## Examples
iex> count_books()
1
"""
@spec count_books :: integer()
def count_books do
Repo.one(from b in Book, select: count(b.id))
end
@doc """
Gets a single book.
Raises `Ecto.NoResultsError` if the Book does not exist.
## Examples
iex> get_book!(123)
%Book{}
iex> get_book!(456)
** (Ecto.NoResultsError)
"""
def get_book!(id) do
Book
|> preload(^@book_direct_assoc_preloads)
|> Repo.get!(id)
end
@doc """
Creates a book.
## Examples
iex> create_book(%{field: value})
{:ok, %Book{}}
iex> create_book(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_book(attrs \\ %{}) do
%Book{}
|> change_book(attrs)
|> Repo.insert()
|> tap(&PubSub.broadcast_create/1)
end
@doc """
Updates a book.
## Examples
iex> update_book(book, %{field: new_value})
{:ok, %Book{}}
iex> update_book(book, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_book(%Book{} = book, attrs) do
book
|> Repo.preload(@book_direct_assoc_preloads)
|> change_book(attrs)
|> Repo.update()
|> tap(&PubSub.broadcast_update/1)
end
@doc """
Deletes a book.
## Examples
iex> delete_book(book)
:ok
iex> delete_book(book)
{:error, :has_media}
iex> delete_book(book)
{:error, changeset}
"""
def delete_book(%Book{} = book) do
case Repo.delete(change_book(book)) do
{:ok, book} ->
maybe_delete_image(book.image_path)
PubSub.broadcast_delete(book)
:ok
{:error, changeset} ->
if Keyword.has_key?(changeset.errors, :media) do
{:error, :has_media}
else
{:error, changeset}
end
end
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking book changes.
## Examples
iex> change_book(book)
%Ecto.Changeset{data: %Book{}}
"""
def change_book(%Book{} = book, attrs \\ %{}) do
Book.changeset(book, attrs)
end
@doc """
Gets a book and all of its media.
"""
def get_book_with_media!(book_id) do
media_query = from m in Media, where: [status: :ready]
Book
|> preload([:authors, media: ^{media_query, [:narrators]}, series_books: :series])
|> Repo.get!(book_id)
end
@doc """
Lists recent books.
"""
def get_recent_books(offset \\ 0, limit \\ 10) do
over_limit = limit + 1
query = from b in Book, order_by: [desc: b.inserted_at], offset: ^offset, limit: ^over_limit
books =
query
|> preload([:authors, series_books: :series])
|> Repo.all()
books_to_return = Enum.slice(books, 0, limit)
{books_to_return, books != books_to_return}
end
@doc """
Finds books that match a query string.
Returns a list of tuples of the form `{jaro_distance, book}`.
"""
def search(query_string, limit \\ 15) do
title_query = "%#{query_string}%"
query = from b in Book, where: ilike(b.title, ^title_query), limit: ^limit
query
|> preload([:authors, series_books: :series])
|> Repo.all()
|> sort_by_jaro(query_string, :title)
end
@doc """
Returns all books for use in `Select` components.
"""
def for_select do
query = from b in Book, select: {b.title, b.id}, order_by: b.title
Repo.all(query)
end
@doc """
Returns a description of a book containing its title and author names.
"""
def get_book_description(%Book{} = book) do
book = Repo.preload(book, :authors)
authors = Enum.map_join(book.authors, ", ", & &1.name)
"#{book.title} · by #{authors}"
end
end
|
lib/ambry/books.ex
| 0.85246
| 0.47792
|
books.ex
|
starcoder
|
defmodule TelemetryWrappers do
@moduledoc """
Documentation for TelemetryWrappers.
"""
defmacro __using__(_opts) do
quote do
import TelemetryWrappers,
only: [deftimed: 2, deftimed: 3, deftimed: 4, deftimedp: 2, deftimedp: 3, deftimedp: 4]
require TelemetryWrappers
end
end
@doc """
Defines a function that will have its execution time measured and sent as a telemetry event.
As an example you can define the following module
defmodule TelemetryWrappers.Support.TestModule do
use TelemetryWrappers
deftimed timed_function(a, b), [:a, :b] do
a + b
end
deftimed timed_function2(a, b) do
a + b
end
end
Then both functions will work as expected:
iex> TelemetryWrappers.Support.TestModule.timed_function(1, 2)
3
iex> TelemetryWrappers.Support.TestModule.timed_function2(1, 2)
3
but it will also emit a `:telemetry` event `[:a, :b]` with the measurement `%{call: timing}`
where `timing` is the time the function took to execute in microseconds (measured using :timer.tc/1).
The metric name is optional and will default to `[:timing, name]` where `name` is the name of the function (without arity).
Note that type specs can still be defined for function defined with `deftimed` just as you would normally, e.g.
@spec timed_function(number(), number()) :: number()
deftimed timed_function(a, b), [:a, :b] do
a + b
end
You can also add metadata to the metrics.
defmodule TelemetryWrappers.Support.TestModule do
use TelemetryWrappers
deftimed timed_function_with_meta(a, b), [:a, :b], %{a: a} do
a + b
end
end
  This will emit a `:telemetry` event `[:a, :b]` with the measurement `%{call: timing}` and the metadata `%{a: a}`
"""
defmacro deftimed(function_name, metric_name \\ [], metadata \\ quote(do: %{}), do: expr) do
{fname, _, _} = function_name
actual_name = get_actual_name(metric_name, fname)
quote do
def unquote(function_name) do
{timing, result} = :timer.tc(fn -> unquote(expr) end)
:telemetry.execute(
unquote(actual_name),
%{call: timing},
Map.merge(
%{
module: __MODULE__,
function: unquote(fname)
},
unquote(metadata)
)
)
result
end
end
end
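  # Observing the emitted events (a sketch; the handler id is arbitrary):
  #
  #   :telemetry.attach("log-timings", [:a, :b], fn _event, %{call: micros}, meta, _config ->
  #     IO.puts("#{inspect(meta.module)}.#{meta.function} took #{micros}µs")
  #   end, nil)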
@doc """
Defines a private function that will have its execution time measured and sent as a telemetry event.
In principle, same as `deftimed/3` but defines a private function instead.
As an example you can define the following module
defmodule TelemetryWrappers.Support.TestModule do
use TelemetryWrappers
def invoke_private(a) do
private_fun(a)
end
deftimedp(private_fun(a), [:something], do: a)
end
And then invoke the function:
iex> TelemetryWrappers.Support.TestModule.invoke_private(15)
15
which will also emit a `:telemetry` event `[:something]` with the measurement `%{call: timing}`
where `timing` is the time the function took to execute in microseconds (measured using :timer.tc/1).
The metric name is optional and will default to `[:timing, name]` where `name` is the name of the function (without arity),
just like in `deftimed/3`
You can also add metadata to private functions.
"""
defmacro deftimedp(function_name, metric_name \\ [], metadata \\ quote(do: %{}), do: expr) do
{fname, _, _} = function_name
actual_name = get_actual_name(metric_name, fname)
quote do
defp unquote(function_name) do
{timing, result} = :timer.tc(fn -> unquote(expr) end)
:telemetry.execute(
unquote(actual_name),
%{call: timing},
Map.merge(
%{
module: __MODULE__,
function: unquote(fname)
},
unquote(metadata)
)
)
result
end
end
end
defp get_actual_name([], name), do: [:timing, name]
defp get_actual_name(metric, _), do: metric
end
|
lib/telemetry_wrappers.ex
| 0.891693
| 0.594875
|
telemetry_wrappers.ex
|
starcoder
|
defmodule TypeCheck.Builtin.FixedMap do
@moduledoc """
  Checks whether the value is a map with the expected keys and value types.
  On failure returns a problem tuple with:
  - `:not_a_map` if the value is not a map
  - `:missing_keys` if the value does not have all of the expected keys. In this case the extra information contains `:keys`, a list of the keys that are missing.
  - `:value_error` if one of the values does not match. In this case the extra information contains `:problem` and `:key` to indicate what went wrong and where it occurred.
"""
defstruct [:keypairs]
use TypeCheck
@type! t :: %__MODULE__{keypairs: list({term(), TypeCheck.Type.t()})}
@type! problem_tuple ::
{t(), :not_a_map, %{}, any()}
| {t(), :missing_keys, %{keys: list(atom())}, map()}
| {t(), :value_error,
%{problem: lazy(TypeCheck.TypeError.Formatter.problem_tuple()), key: any()}, map()}
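  # Illustration (a sketch, assuming TypeCheck's `conforms?/2` macro):
  #
  #   iex> import TypeCheck
  #   iex> conforms?(%{name: "Ann"}, %{name: binary(), age: integer()})
  #   false  # fails with a :missing_keys problem for [:age]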
defimpl TypeCheck.Protocols.ToCheck do
# Optimization: If we have no expectations on keys -> value types, remove those useless checks.
def to_check(s = %TypeCheck.Builtin.FixedMap{keypairs: keypairs}, param)
when keypairs == [] do
map_check(param, s)
end
    def to_check(s, param) do
      quote generated: true, location: :keep do
        with {:ok, _, _} <- unquote(map_check(param, s)),
             {:ok, _, _} <- unquote(build_keys_presence_ast(s, param)),
             {:ok, bindings3, altered_param} <-
               unquote(build_keypairs_checks_ast(s.keypairs, param, s)) do
          {:ok, bindings3, altered_param}
        end
      end
    end
defp map_check(param, s) do
quote generated: true, location: :keep do
x = unquote(param)
if is_map(x) do
{:ok, [], x}
else
{:error, {unquote(Macro.escape(s)), :not_a_map, %{}, x}}
end
end
end
# TODO raise on superfluous keys (just like Elixir's built-in typespecs do not allow them)
defp build_keys_presence_ast(s, param) do
required_keys =
s.keypairs
|> Enum.into(%{})
|> Map.keys()
quote generated: true, location: :keep do
actual_keys = unquote(param) |> Map.keys()
case unquote(required_keys) -- actual_keys do
[] ->
{:ok, [], unquote(param)}
missing_keys ->
{:error,
{unquote(Macro.escape(s)), :missing_keys, %{keys: missing_keys}, unquote(param)}}
end
end
end
defp build_keypairs_checks_ast(keypairs, param, s) do
keypair_checks =
keypairs
|> Enum.flat_map(fn {key, value_type} ->
value_check =
TypeCheck.Protocols.ToCheck.to_check(
value_type,
quote generated: true, location: :keep do
Map.fetch!(unquote(param), unquote(key))
end
)
quote generated: true, location: :keep do
[
{{:ok, value_bindings, altered_element}, _key} <- {unquote(value_check), unquote(key)},
bindings = value_bindings ++ bindings,
altered_keypairs = [{unquote(key), altered_element} | altered_keypairs]
]
end
end)
quote generated: true, location: :keep do
bindings = []
altered_keypairs = []
with unquote_splicing(keypair_checks),
altered_param = :maps.from_list(altered_keypairs) do
{:ok, bindings, altered_param}
else
{{:error, error}, key} ->
{:error,
{unquote(Macro.escape(s)), :value_error, %{problem: error, key: key}, unquote(param)}}
end
end
end
end
defimpl TypeCheck.Protocols.Inspect do
def inspect(s, opts) do
      map = Enum.into(s.keypairs, %{})
case Map.get(map, :__struct__) do
%TypeCheck.Builtin.Literal{value: value} ->
# Make sure we render structs as structs
map = Map.put(map, :__struct__, value)
# Ensure that structs can override their normal inspect
# by implementing the TypeCheck Inspect protocol:
TypeCheck.Protocols.Inspect.inspect(map, %Inspect.Opts{
opts
| inspect_fun: &TypeCheck.Protocols.Inspect.inspect/2
})
_ ->
# Ensure that structs can override their normal inspect
# by implementing the TypeCheck Inspect protocol:
TypeCheck.Protocols.Inspect.inspect(map, %Inspect.Opts{
opts
| inspect_fun: &TypeCheck.Protocols.Inspect.inspect/2
})
end
end
end
if Code.ensure_loaded?(StreamData) do
defimpl TypeCheck.Protocols.ToStreamData do
def to_gen(s) do
s.keypairs
|> Enum.map(fn {key, value} ->
{key, TypeCheck.Protocols.ToStreamData.to_gen(value)}
end)
|> StreamData.fixed_map()
end
end
end
end
|
lib/type_check/builtin/fixed_map.ex
| 0.716318
| 0.688468
|
fixed_map.ex
|
starcoder
|
defmodule ElixirLeaderboard.Indexer do
@moduledoc """
Indexer walks the entire leaderboard and calculates the needed stats, such as
rank and percentile.
You can customize the stats by creating a custom indexer — a struct consisting
of 2 callback functions:
- `on_rank` is called when the indexer finishes scanning a set of equal
scores, and moves onto a lower score
- `on_entry` is called for every entry
It's important to avoid doing anything `on_entry` that can instead be done
`on_rank`, since we don't want to unnecessarily slow down the indexer.
  This library comes with a bunch of pre-made `on_rank` functions for different
  flavors of rank and percentile calculation. See `ElixirLeaderboard.Indexer.Stats`
documentation for what's available.
## The on_rank callback
Indexer walks through the sorted dataset of all entries from the highest to
the lowest score. Every time a score is different between entries, it runs the
`on_rank` callback for the rank it just finished scanning. The return value of
the function is added to every record in the rank it just walked.
The function receives the following tuple as the argument:
  {total_leaderboard_size, chunk_index, chunk_position, chunk_size}
- `total_leaderboard_size` - total number of entries in the leaderboard
- `chunk_index` - zero-based counter for how many different ranks we have
seen so far
- `chunk_position` - zero-based position where this rank started in the
leaderboard
- `chunk_size` - how many equal scores are in this rank
Based on these values the function can perform any kind of calculation and
return any term as a result.
Let's see an example of walking through a mini-leaderboard, and see what
numbers get passed into the `on_rank` function.
walking score total_size chunk_index chunk_position chunk_size
| 3 n/a n/a n/a n/a
| 3 6 0 0 2
| 2 n/a n/a n/a n/a
| 2 n/a n/a n/a n/a
| 2 6 1 2 3
V 1 6 2 5 1
As the indexer walks the leaderboard, it will only call the `on_rank` function
on the rows where score is about to change, therefore some of the rows are
marked n/a.
## The on_entry callback
An `on_entry` callback is similar to `on_rank` but it receives different
parameters, and it's called on every entry. Its result is added to the entry
for which it's called.
The function receives the following tuple as the argument:
{entry_index, entry_id, entry_key, rank_stats}
- `entry_index` - global position in the leaderboard (top is 0)
- `entry_id` - the id used for fetching records
- `entry_key` - either `{score, id}` or `{score, tiebreaker, id}` depending
on what was inserted
- `rank_stats` - the return value of the `on_rank` function
Due to `rank_stats` parameter it's possible to make more granular calculations
based on whatever was provided by the `on_rank` function.
"""
alias ElixirLeaderboard.Indexer.Stats
alias ElixirLeaderboard.Entry
defstruct on_rank: &Stats.offset_rank_1_99_less_or_equal_percentile/1,
on_entry: &Stats.global_index/1
@type t :: %__MODULE__{
          on_rank: on_rank(),
          on_entry: on_entry()
}
@type on_rank ::
({
non_neg_integer,
non_neg_integer,
non_neg_integer,
non_neg_integer
} ->
term)
@type on_entry :: ({non_neg_integer, term, Entry.key(), term} -> term)
@doc """
Create a custom indexer by supplying 2 functions: `on_rank` and `on_entry`.
See `ElixirLeaderboard.Indexer.Stats` for available functions, or implement custom
ones.
"""
@spec new(keyword()) :: t()
def new(kwargs) do
    # Fall back to the struct defaults rather than nil when a callback is omitted
    defaults = %__MODULE__{}
    on_rank = Keyword.get(kwargs, :on_rank, defaults.on_rank)
    on_entry = Keyword.get(kwargs, :on_entry, defaults.on_entry)
    %__MODULE__{on_rank: on_rank, on_entry: on_entry}
end
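  # Example (using the stats functions referenced in the struct defaults):
  #
  #   indexer =
  #     ElixirLeaderboard.Indexer.new(
  #       on_rank: &ElixirLeaderboard.Indexer.Stats.offset_rank_1_99_less_or_equal_percentile/1,
  #       on_entry: &ElixirLeaderboard.Indexer.Stats.global_index/1
  #     )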
@doc """
Same as `index/3` but counts the elements for you so that there is no need to
supply that number. This is inefficient if you already know the total count.
"""
def index(keys) do
index(keys, Enum.count(keys))
end
@doc """
Build leaderboard index from an enumerable containing `Entry.key()`-type
elements. Supply the total count for efficiency.
"""
def index(keys, cnt, indexer \\ %__MODULE__{})
def index(_, 0, _), do: []
def index(keys, cnt, indexer) do
keys
|> Stream.chunk_while(
{indexer, cnt},
&rank_split/2,
&rank_done/1
)
|> Stream.concat()
end
defp rank_split(key, {indexer, cnt}) do
{:cont, {indexer, cnt, 0, 0, 1, [{key, 0}]}}
end
defp rank_split(
key,
acc = {indexer, cnt, c_i, c_pos, c_size, buf = [{_, i} | _]}
) do
if score_changed?(key, buf) do
{:cont, flush(acc), {indexer, cnt, c_i + 1, i + 1, 1, [{key, i + 1}]}}
else
{:cont, {indexer, cnt, c_i, c_pos, c_size + 1, [{key, i + 1} | buf]}}
end
end
  defp rank_done({_indexer, _cnt, _c_i, _c_pos, _c_size, []}), do: {:cont, []}
defp rank_done(acc), do: {:cont, flush(acc), {}}
defp score_changed?({score, _, _}, [{{score, _, _}, _} | _]), do: false
defp score_changed?({score, _, _}, [{{score, _}, _} | _]), do: false
defp score_changed?({score, _}, [{{score, _, _}, _} | _]), do: false
defp score_changed?({score, _}, [{{score, _}, _} | _]), do: false
defp score_changed?(_, _), do: true
defp flush({indexer, cnt, c_i, c_pos, c_size, buf}) do
rank_stats = indexer.on_rank.({cnt, c_i, c_pos, c_size})
Stream.map(buf, fn
{key = {_, _, id}, i} ->
{id, key, {indexer.on_entry.({i, id, key, rank_stats}), rank_stats}}
{key = {_, id}, i} ->
{id, key, {indexer.on_entry.({i, id, key, rank_stats}), rank_stats}}
end)
end
end
|
lib/elixir_leaderboard/indexer.ex
| 0.886629
| 0.755502
|
indexer.ex
|
starcoder
|
defmodule Day15 do
def part1(input) do
map = parse(input)
solve(map)
end
def part2(input) do
grid = parse(input)
{max_x, max_y} = find_lower_right(grid)
width = max_x + 1
height = max_y + 1
grid = grid
|> Enum.reduce(grid, fn {{x, y}, risk}, grid ->
Enum.reduce(1..4, grid, fn index, grid ->
Map.put(grid, {x + index * width, y}, increment_risk(risk, index))
end)
end)
grid = grid
|> Enum.reduce(grid, fn {{x, y}, risk}, grid ->
Enum.reduce(1..4, grid, fn index, grid ->
Map.put(grid, {x, y + index * height}, increment_risk(risk, index))
end)
end)
solve(grid)
end
defp increment_risk(risk, increment) do
risk = risk + increment
if risk > 9, do: risk - 9, else: risk
end
defp solve(map) do
goal = find_lower_right(map)
start = {0, 0}
q = :gb_sets.singleton({0, start})
best = :infinity
seen = MapSet.new()
solve(q, map, best, goal, seen)
end
defp find_lower_right(grid) do
{{max_x, _}, _} = Enum.max_by(grid, fn {{x, _}, _} -> x end)
{{_, max_y}, _} = Enum.max_by(grid, fn {{_, y}, _} -> y end)
{max_x, max_y}
end
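  # Dijkstra-style search: the :gb_sets priority queue is ordered by
  # {accumulated_risk, position}, so we always expand the cheapest frontier
  # node first and can discard any entry whose risk already exceeds `best`.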
defp solve(q, grid, best, goal, seen) do
case :gb_sets.is_empty(q) do
true ->
best
false ->
{element, q} = :gb_sets.take_smallest(q)
case element do
{risk, _} when risk >= best ->
solve(q, grid, best, goal, seen)
{risk, position} ->
seen = MapSet.put(seen, position)
case position do
^goal ->
solve(q, grid, risk, goal, seen)
_ ->
q = neighbors(grid, position)
|> Enum.reduce(q, fn pos, q ->
case MapSet.member?(seen, pos) do
true ->
                      q
false ->
element = {risk + Map.fetch!(grid, pos), pos}
:gb_sets.add(element, q)
end
end)
solve(q, grid, best, goal, seen)
end
end
end
end
defp neighbors(map, {row, col}) do
[{row - 1, col},
{row, col - 1}, {row, col + 1},
{row + 1, col}]
|> Enum.filter(&(Map.has_key?(map, &1)))
end
def print_grid(grid) do
{max_x, max_y} = find_lower_right(grid)
Enum.map(0..max_y, fn row ->
[Enum.map(0..max_x, fn col ->
key = {row, col}
?0 + Map.fetch!(grid, key)
end), ?\n]
end)
|> IO.puts
end
defp parse(input) do
Enum.map(input, fn line ->
String.to_charlist(line)
|> Enum.map(&(&1 - ?0))
end)
|> Enum.with_index
|> Enum.flat_map(fn {list, row} ->
Enum.with_index(list)
|> Enum.map(fn {h, col} -> {{row, col}, h} end)
end)
|> Map.new
end
end
|
day15/lib/day15.ex
| 0.581778
| 0.643448
|
day15.ex
|
starcoder
|
defmodule PersistentList.Day01 do
alias PersistentList.Day01, as: List
defstruct head: nil, tail: nil
defimpl String.Chars, for: List do
def to_string(list), do:
"[" <> string_from(list) <> "]"
defp string_from(%List{head: nil, tail: nil}), do: ""
defp string_from(
%List{
head: head,
tail: %List{
head: nil,
tail: nil
}
}
), do: "#{head}"
defp string_from(%List{head: head, tail: tail}), do: "#{head}, " <> string_from(tail)
end
def new(), do: %List{}
def append(list, item), do: %List{head: item, tail: list}
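  # Note: `append/2` above conses onto the *front* in O(1), while `prepend/2`
  # below walks the list to attach the item at the far end; the names are the
  # reverse of the usual convention but are used consistently in this module.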
def prepend(%List{head: nil, tail: nil} = list, item), do: %List{head: item, tail: list}
def prepend(%List{head: head, tail: tail}, item),
do: tail
|> prepend(item)
|> append(head)
def concat(%List{head: nil, tail: nil}, second), do: second
def concat(first, %List{head: nil, tail: nil}), do: first
def concat(%List{head: head, tail: rest}, second),
do: rest
|> concat(second)
|> append(head)
def drop(%List{head: nil, tail: nil} = empty, _), do: empty
def drop(list, num) when num == 0, do: list
def drop(%List{head: _, tail: tail}, num), do: drop(tail, num - 1)
def drop_while(%List{head: nil, tail: nil} = empty, _), do: empty
  def drop_while(%List{head: head, tail: tail} = list, predicate) do
    # Conventional drop_while: discard leading items while the predicate holds
    # and stop at the first item that fails it (mirroring take_while/2 below).
    if predicate.(head),
      do: drop_while(tail, predicate),
      else: list
  end
def take(%List{head: nil, tail: nil} = empty, _), do: empty
def take(_, num) when num == 0, do: %List{}
def take(%List{head: head, tail: tail}, num), do:
tail
|> take(num - 1)
|> append(head)
def take_while(%List{head: nil, tail: nil} = empty, _), do: empty
def take_while(%List{head: head, tail: tail}, predicate), do:
if predicate.(head),
do: tail
|> take_while(predicate)
|> append(head),
else: %List{}
end
|
persistent_list/lib/persistent_list/day01.ex
| 0.595022
| 0.442335
|
day01.ex
|
starcoder
|
defmodule RemoteIp.Debugger do
require Logger
@moduledoc """
Compile-time debugging facilities.
`RemoteIp` uses the `debug/3` macro to instrument its implementation with
*debug events* at compile time. If an event is enabled, the macro will expand
into a `Logger.debug/2` call with a specific message. If an event is
disabled, the logging will be purged, thus generating no extra code and
having no impact on run time.
## Basic usage
Events are fired on every call to `RemoteIp.call/2` or `RemoteIp.from/2`. To
enable or disable all debug events at once, you can set a boolean in your
`Config` file:
```elixir
config :remote_ip, debug: true
```
By default, the debugger is turned off (i.e., `debug: false`).
Because `RemoteIp.Debugger` works at compile time, you must make sure to
recompile the `:remote_ip` dependency whenever you change the configuration:
```console
$ mix deps.clean --build remote_ip
```
## Advanced usage
You may also pass a list of atoms into the `:debug` configuration naming
which events to log.
These are all the possible events:
* `:options` - the keyword options *after* any runtime configuration has been
evaluated (see `RemoteIp.Options`)
* `:headers` - all incoming headers, either from the `Plug.Conn`'s
`req_headers` or the list passed directly into `RemoteIp.from/2`; useful
for seeing if you're even getting the forwarding headers you expect in the
first place
* `:forwarding` - the subset of headers (as configured by `RemoteIp.Options`)
that contain forwarding information
* `:ips` - the entire sequence of IP addresses parsed from the forwarding
headers, in order
* `:type` - for each IP (until we find the client), classifies the address
either as a known client, a known proxy, a reserved address, or none of the
above (and thus presumably a client)
* `:ip` - the final result of the remote IP processing; when rewriting the
`Plug.Conn`'s `remote_ip`, the message will tell you the original IP that
is being replaced
Therefore, `debug: true` is equivalent to passing in all of the above:
```elixir
config :remote_ip, debug: [:options, :headers, :forwarding, :ips, :type, :ip]
```
But you could disable certain events by removing them from the list. For
example, to log only the incoming headers and resulting IP:
```elixir
config :remote_ip, debug: [:headers, :ip]
```
## Interactions with `Logger`
Since they both work at compile time, your configuration of `:logger` will
also affect the operation of `RemoteIp.Debugger`. For example, it's possible
to enable debugging but still purge all the resulting logs:
```elixir
# All events *would* be logged...
config :remote_ip, debug: true
# ...But :debug logs will actually get purged at compile time
config :logger, compile_time_purge_matching: [[level_lower_than: :info]]
```
"""
@doc """
An internal macro for generating debug logs.
There is no reason for you to call this directly. It's used to instrument the
`RemoteIp` module at compilation time.
"""
@spec debug(atom(), [any()], do: any()) :: any()
defmacro debug(id, inputs \\ [], do: output) do
if debug?(id) do
quote do
inputs = unquote(inputs)
output = unquote(output)
unquote(__MODULE__).__log__(unquote(id), inputs, output)
output
end
else
output
end
end
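  # Expansion sketch: with :ips enabled, `debug(:ips, [], do: parse(headers))`
  # logs the parsed result via __log__/3 and returns it; with :ips disabled it
  # compiles down to just `parse(headers)`.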
if Version.match?(System.version(), "~> 1.10") do
@debug Application.compile_env(:remote_ip, :debug, false)
else
@debug Application.get_env(:remote_ip, :debug, false)
end
cond do
is_list(@debug) ->
defp debug?(id), do: Enum.member?(@debug, id)
is_boolean(@debug) ->
defp debug?(_), do: @debug
end
def __log__(id, inputs, output) do
Logger.debug(__message__(id, inputs, output))
end
def __message__(:options, [], options) do
headers = inspect(options[:headers])
parsers = inspect(options[:parsers])
proxies = inspect(options[:proxies] |> Enum.map(&to_string/1))
clients = inspect(options[:clients] |> Enum.map(&to_string/1))
[
"Processing remote IP\n",
" headers: #{headers}\n",
" parsers: #{parsers}\n",
" proxies: #{proxies}\n",
" clients: #{clients}"
]
end
def __message__(:headers, [], headers) do
"Taking forwarding headers from #{inspect(headers)}"
end
def __message__(:forwarding, [], headers) do
"Parsing IPs from forwarding headers: #{inspect(headers)}"
end
def __message__(:ips, [], ips) do
"Parsed IPs from forwarding headers: #{inspect(ips)}"
end
def __message__(:type, [ip], type) do
case type do
:client -> "#{inspect(ip)} is a known client IP"
:proxy -> "#{inspect(ip)} is a known proxy IP"
:reserved -> "#{inspect(ip)} is a reserved IP"
:unknown -> "#{inspect(ip)} is an unknown IP, assuming it's the client"
end
end
def __message__(:ip, [old_conn], new_conn) do
origin = inspect(old_conn.remote_ip)
client = inspect(new_conn.remote_ip)
if client != origin do
"Processed remote IP, found client #{client} to replace #{origin}"
else
"Processed remote IP, no client found to replace #{origin}"
end
end
def __message__(:ip, [], ip) do
if ip == nil do
"Processed remote IP, no client found"
else
"Processed remote IP, found client #{inspect(ip)}"
end
end
end
|
lib/remote_ip/debugger.ex
| 0.882687
| 0.845942
|
debugger.ex
|
starcoder
|
defmodule Joken do
@moduledoc """
Joken is a library for working with standard JSON Web Tokens.
It provides 4 basic operations:
- Verify: the act of confirming the signature of the JWT;
- Validate: processing validation logic on the set of claims;
- Claim generation: generate dynamic value at token creation time;
- Signature creation: encoding header and claims and generate a signature of their value.
## Architecture
The core of Joken is `JOSE`, a library which provides all facilities to sign and verify tokens.
Joken brings an easier Elixir API with some added functionality:
- Validating claims. JOSE does not provide validation other than signature verification.
- `config.exs` friendly. You can optionally define your signer configuration straight in your
`config.exs`.
- Portable configuration. All your token logic can be encapsulated in a module with behaviours.
- Enhanced errors. Joken strives to be as informative as it can when errors happen be it at
compilation or at validation time.
  - Debug friendly. When a token fails validation, a `Logger` debug message will show which claim
  failed validation and with which value. For security reasons, though, the return value does not
  contain this information.
  - Performance. We have a benchmark suite for identifying where we can improve performance.
  From this analysis came: a Jason adapter for JOSE, a redefinition of the :base64url module and
  other minor tweaks.
## Usage
Joken has 3 basic concepts:
- Portable token configuration
- Signer configuration
- Hooks
The portable token configuration is a map of binary keys to `Joken.Claim` structs and is used to
dynamically generate and validate tokens.
A signer is an instance of `Joken.Signer` that encapsulates the algorithm and the key configuration
used to sign and verify a token.
A hook is an implementation of the behaviour `Joken.Hooks` for easy plugging into the lifecycle of
Joken operations.
There are 2 forms of using Joken:
1. Pure data structures. You can create your token configuration and signer and use them with this
module for all 4 operations: verify, validate, generate and sign.
```
iex> token_config = %{} # empty config
iex> token_config = Map.put(token_config, "scope", %Joken.Claim{
...> generate_function: fn -> "user" end,
...> validate_function: fn val, _claims, _context -> val in ["user", "admin"] end
...> })
iex> signer = Joken.Signer.create("HS256", "my secret")
  iex> {:ok, claims} = Joken.generate_claims(token_config, %{"extra" => "claim"})
iex> {:ok, jwt, claims} = Joken.encode_and_sign(claims, signer)
```
2. With the encapsulated module approach using `Joken.Config`. See the docs for `Joken.Config` for
more details.
```
iex> defmodule MyAppToken do
...> use Joken.Config, default_signer: :pem_rs256
...>
...> @impl Joken.Config
...> def token_config do
...> default_claims()
...> |> add_claim("role", fn -> "USER" end, &(&1 in ["ADMIN", "USER"]))
...> end
...> end
iex> {:ok, token, _claims} = MyAppToken.generate_and_sign(%{"user_id" => "1234567890"})
iex> {:ok, _claim_map} = MyAppToken.verify_and_validate(token)
```
"""
alias Joken.{Signer, Claim}
require Logger
@typedoc """
A signer argument that can be a key in the configuration or an instance of `Joken.Signer`.
"""
@type signer_arg :: atom | Joken.Signer.t()
@typedoc "A binary representing a bearer token."
@type bearer_token :: binary
@typedoc "A map with binary keys that represents a claim set."
@type claims :: %{binary => term}
@typedoc "A portable configuration of claims for generation and validation."
@type token_config :: %{binary => Joken.Claim.t()}
@typedoc "Error reason which might contain dynamic data for helping understand the cause"
@type error_reason :: atom | Keyword.t()
# This ensures we provide an easy to setup test environment
@current_time_adapter Application.get_env(:joken, :current_time_adapter, Joken.CurrentTime.OS)
@doc """
Retrieves current time in seconds.
This implementation uses an adapter so that you can replace it on your tests. The adapter is
set through `config.exs`. Example:
config :joken,
current_time_adapter: Joken.CurrentTime.OS
See Joken's own tests for an example of how to override this with a customizable time mock.
"""
@spec current_time() :: pos_integer
def current_time(), do: @current_time_adapter.current_time()
@doc """
Decodes the header of a token without validation.
**Use this with care!** This DOES NOT validate the token signature and therefore the token might
be invalid. The common use case for this function is when you need info to decide on which signer
will be used. Even though there is a use case for this, be extra careful to handle data without
validation.
"""
@spec peek_header(bearer_token) :: claims
def peek_header(token) when is_binary(token) do
%JOSE.JWS{alg: {_, alg}, fields: fields} = JOSE.JWT.peek_protected(token)
Map.put(fields, "alg", Atom.to_string(alg))
end
@doc """
Decodes the claim set of a token without validation.
**Use this with care!** This DOES NOT validate the token signature and therefore the token might
be invalid. The common use case for this function is when you need info to decide on which signer
will be used. Even though there is a use case for this, be extra careful to handle data without
validation.
"""
@spec peek_claims(bearer_token) :: claims
def peek_claims(token) when is_binary(token) do
%JOSE.JWT{fields: fields} = JOSE.JWT.peek_payload(token)
fields
end
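  # Sketch (token value and claims are illustrative):
  #
  #   Joken.peek_claims("eyJhbGciOi...")
  #   #=> %{"exp" => 1600000000, "iss" => "Joken"}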
@doc """
Default function for generating `jti` claims. This was inspired by the `Plug.RequestId` generation.
It avoids using `strong_rand_bytes` as it is known to have some contention when running with many
schedulers.
"""
@spec generate_jti() :: binary
def generate_jti do
binary = <<
      System.system_time(:nanosecond)::64,
:erlang.phash2({node(), self()}, 16_777_216)::24,
:erlang.unique_integer()::32
>>
Base.hex_encode32(binary, case: :lower)
end
@doc "Combines generate with encode_and_sign"
@spec generate_and_sign(token_config, claims, signer_arg, [module]) ::
{:ok, bearer_token, claims} | {:error, error_reason}
def generate_and_sign(
token_config,
extra_claims \\ %{},
signer_arg \\ :default_signer,
hooks \\ []
) do
with {:ok, claims} <- generate_claims(token_config, extra_claims, hooks),
{:ok, token, claims} <- encode_and_sign(claims, signer_arg, hooks) do
{:ok, token, claims}
end
end
@doc "Same as generate_and_sign/4 but raises if result is an error"
@spec generate_and_sign!(token_config, claims, signer_arg, [module]) :: binary
def generate_and_sign!(
token_config,
extra_claims \\ %{},
signer_arg \\ :default_signer,
hooks \\ []
) do
result = generate_and_sign(token_config, extra_claims, signer_arg, hooks)
case result do
{:ok, token, _claims} ->
token
{:error, reason} ->
raise Joken.Error, [:bad_generate_and_sign, reason: reason]
end
end
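  # Example (a sketch; `token_config` as built in the moduledoc above):
  #
  #   signer = Joken.Signer.create("HS256", "my secret")
  #   token = Joken.generate_and_sign!(token_config, %{"extra" => "claim"}, signer)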
@doc """
Verifies a bearer_token using the given signer and executes hooks if any are given.
"""
@spec verify(bearer_token, signer_arg, [module]) :: {:ok, claims} | {:error, error_reason}
def verify(bearer_token, signer, hooks \\ [])
def verify(bearer_token, nil, hooks) when is_binary(bearer_token) and is_list(hooks),
do: verify(bearer_token, %Signer{}, hooks)
def verify(bearer_token, signer, hooks) when is_binary(bearer_token) and is_atom(signer),
do: verify(bearer_token, parse_signer(signer), hooks)
def verify(bearer_token, signer = %Signer{}, hooks) when is_binary(bearer_token) do
with {:ok, bearer_token, signer} <- before_verify(bearer_token, signer, hooks),
:ok <- check_signer_not_empty(signer),
result = {:ok, claim_map} <- Signer.verify(bearer_token, signer),
status <- parse_status(result),
{:ok, claims_map} <- after_verify(status, bearer_token, claim_map, signer, hooks) do
{:ok, claims_map}
end
end
defp check_signer_not_empty(%Signer{alg: nil}), do: {:error, :empty_signer}
defp check_signer_not_empty(%Signer{}), do: :ok
@doc """
Validates the claim map with the given token configuration and the context.
Context can by any term. It is always passed as the second argument to the validate
function.
It also executes hooks if any are given.
"""
@spec validate(token_config, claims, term, [module]) :: {:ok, claims} | {:error, error_reason}
def validate(token_config, claims_map, context \\ nil, hooks \\ []) do
with {:ok, claims_map, config} <- before_validate(claims_map, token_config, hooks),
status <- reduce_validations(token_config, claims_map, context),
{:ok, claims} <- after_validate(status, claims_map, config, hooks) do
{:ok, claims}
end
end
@doc "Combines verify and validate operations"
@spec verify_and_validate(token_config, bearer_token, signer_arg, term, [module]) ::
{:ok, claims} | {:error, error_reason}
def verify_and_validate(
token_config,
bearer_token,
signer \\ :default_signer,
context \\ nil,
hooks \\ []
) do
with {:ok, claims} <- verify(bearer_token, signer, hooks),
{:ok, claims} <- validate(token_config, claims, context, hooks) do
{:ok, claims}
end
end
@doc "Same as verify_and_validate/4 but raises on error"
@spec verify_and_validate!(token_config, bearer_token, term, [module]) :: claims
def verify_and_validate!(
token_config,
bearer_token,
signer \\ :default_signer,
context \\ nil,
hooks \\ []
) do
result = verify_and_validate(token_config, bearer_token, signer, context, hooks)
case result do
{:ok, claims} ->
claims
{:error, reason} ->
raise Joken.Error, [:bad_verify_and_validate, reason: reason]
end
end
@doc """
Generates claims with the given token configuration and merges them with the given extra claims.
It also executes hooks if any are given.
"""
@spec generate_claims(token_config, claims | nil, [module]) ::
{:ok, claims} | {:error, error_reason}
def generate_claims(token_config, extra \\ %{}, hooks \\ [])
def generate_claims(token_config, nil, hooks), do: generate_claims(token_config, %{}, hooks)
def generate_claims(token_config, extra_claims, hooks) do
with {:ok, extra_claims, token_config} <- before_generate(extra_claims, token_config, hooks),
claims <- Enum.reduce(token_config, extra_claims, &Claim.__generate_claim__/2),
{:ok, claims} <- after_generate(claims, hooks) do
{:ok, claims}
end
end
@doc """
Encodes and generates a token from the given claim map and signs the result with the given signer.
It also executes hooks if any are given.
"""
  @spec encode_and_sign(claims, signer_arg, [module]) ::
          {:ok, bearer_token, claims} | {:error, error_reason}
def encode_and_sign(claims, signer, hooks \\ [])
def encode_and_sign(claims, nil, hooks),
do: encode_and_sign(claims, %Signer{}, hooks)
def encode_and_sign(claims, signer, hooks) when is_atom(signer),
do: encode_and_sign(claims, parse_signer(signer), hooks)
def encode_and_sign(claims, %Signer{} = signer, hooks) do
with {:ok, claims, signer} <- before_sign(claims, signer, hooks),
:ok <- check_signer_not_empty(signer),
result = {_, token} <- Signer.sign(claims, signer),
status <- parse_status(result),
{:ok, token, claims} <- after_sign(status, token, claims, signer, hooks) do
{:ok, token, claims}
end
end
defp parse_status(:ok), do: :ok
defp parse_status({:ok, _}), do: :ok
defp parse_status({:error, _} = res), do: res
defp parse_signer(signer_key) do
signer = Signer.parse_config(signer_key)
if is_nil(signer),
do: raise(Joken.Error, :no_default_signer),
else: signer
end
defp reduce_validations(_config, %{} = claims, _context) when map_size(claims) == 0 do
:ok
end
defp reduce_validations(config, claim_map, context) do
Enum.reduce_while(claim_map, nil, fn {key, claim_val}, _acc ->
# When there is a function for validating the token
with %Claim{validate: val_func} when not is_nil(val_func) <- config[key],
true <- val_func.(claim_val, claim_map, context) do
{:cont, :ok}
else
# When there is no configuration for the claim
nil ->
{:cont, :ok}
# When there is a configuration but no validation function
%Claim{validate: nil} ->
{:cont, :ok}
# When it fails validation
false ->
Logger.debug(fn ->
"""
Claim %{"#{key}" => #{inspect(claim_val)}} did not pass validation.
Current time: #{inspect(Joken.current_time())}
"""
end)
{:halt, {:error, message: "Invalid token", claim: key, claim_val: claim_val}}
end
end)
end
defp before_verify(bearer_token, signer, hooks) do
run_hooks(
hooks,
{:ok, bearer_token, signer},
fn hook, options, {status, bearer_token, signer} ->
hook.before_verify(options, status, bearer_token, signer)
end
)
end
defp before_validate(claims_map, token_config, hooks) do
run_hooks(
hooks,
{:ok, claims_map, token_config},
fn hook, options, {status, claims_map, token_config} ->
hook.before_validate(options, status, claims_map, token_config)
end
)
end
defp before_generate(extra_claims, token_config, hooks) do
run_hooks(
hooks,
{:ok, extra_claims, token_config},
fn hook, options, {status, extra_claims, token_config} ->
hook.before_generate(options, status, extra_claims, token_config)
end
)
end
defp before_sign(claims, signer, hooks) do
run_hooks(
hooks,
{:ok, claims, signer},
fn hook, options, {status, claims, signer} ->
hook.before_sign(options, status, claims, signer)
end
)
end
defp after_verify(status, bearer_token, claims_map, signer, hooks) do
result =
run_hooks(
hooks,
{status, bearer_token, claims_map, signer},
fn hook, options, {status, bearer_token, claims_map, signer} ->
hook.after_verify(options, status, bearer_token, claims_map, signer)
end
)
with {:ok, _bearer_token, claims_map, _signer} <- result do
{:ok, claims_map}
end
end
defp after_validate(status, claims_map, config, hooks) do
result =
run_hooks(
hooks,
{status, claims_map, config},
fn hook, options, {status, claims_map, config} ->
hook.after_validate(options, status, claims_map, config)
end
)
with {:ok, claims, _config} <- result do
{:ok, claims}
end
end
defp after_generate(claims, hooks) do
run_hooks(
hooks,
{:ok, claims},
fn hook, options, {status, claims} ->
hook.after_generate(options, status, claims)
end
)
end
defp after_sign(status, bearer_token, claims, signer, hooks) do
result =
run_hooks(
hooks,
{status, bearer_token, claims, signer},
fn hook, options, {status, bearer_token, claims, signer} ->
hook.after_sign(options, status, bearer_token, claims, signer)
end
)
with {:ok, bearer_token, claims, _signer} <- result do
{:ok, bearer_token, claims}
end
end
defp run_hooks([], args, _fun), do: args |> check_status()
defp run_hooks(hooks, args, fun) do
hooks
|> Enum.reduce_while(args, fn hook, args ->
{hook, options} = unwrap_hook(hook)
result = fun.(hook, options, args)
case result do
{:cont, result} ->
{:cont, result}
{:halt, result} ->
{:halt, result}
_ ->
{:halt, {:error, :wrong_hook_callback}}
end
end)
|> check_status()
end
defp check_status(result) when is_tuple(result) do
case elem(result, 0) do
:ok ->
result
:error ->
{:error, elem(result, 1)}
# When, for example, validation fails and hooks don't change status
{:error, _reason} = err ->
err
_ ->
{:error, :wrong_hook_status}
end
end
defp unwrap_hook({_hook_module, _opts} = hook), do: hook
defp unwrap_hook(hook) when is_atom(hook), do: {hook, []}
end
|
lib/joken.ex
| 0.913857
| 0.897201
|
joken.ex
|
starcoder
|
defmodule Parser do
@moduledoc """
Entrypoint to parse SPARQL queries and the W3C EBNF syntax.
"""
@type syntax :: %{optional(atom) => any}
@type unparsed_query :: String.t()
@type query :: %InterpreterTerms.SymbolMatch{} | %InterpreterTerms.WordMatch{}
@spec parse_sparql() :: syntax
def parse_sparql() do
EbnfParser.Sparql.syntax()
end
@spec parse_query(unparsed_query, atom) :: query() | {:fail}
def parse_query(string, rule \\ :Sparql) do
EbnfInterpreter.match_sparql_rule(rule, string)
end
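  # Sketch (a WordMatch result is also possible, per the query() type):
  #
  #   case Parser.parse_query("SELECT * WHERE { ?s ?p ?o }") do
  #     {:fail} -> :no_parse
  #     %InterpreterTerms.SymbolMatch{} = match -> match
  #   end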
def parse_query_all(string, rule_name \\ :Sparql) do
rule = {:symbol, rule_name}
state = %Generator.State{chars: String.graphemes(string), syntax: Parser.parse_sparql()}
EbnfParser.GeneratorConstructor.dispatch_generation(rule, state)
|> EbnfInterpreter.generate_all_options()
end
def parse_query_full(query, rule_name \\ :Sparql, syntax \\ Parser.parse_sparql()) do
case Interpreter.Diff.Store.parse(query, rule_name) do
{:fail} ->
Interpreter.CachedInterpreter.parse_query_full(query, rule_name, syntax)
|> Interpreter.Diff.Store.maybe_push_solution(0.2)
result ->
result
end
end
def parse_query_full_local(query, rule_name, template_local_store) do
%{sparql_syntax: sparql_syntax} = template_local_store
case Interpreter.Diff.Store.parse_with_local_store(query, rule_name, template_local_store) do
{:fail} ->
Logging.EnvLog.log(:log_template_matcher_performance, "Template: no")
result = Interpreter.CachedInterpreter.parse_query_full(query, rule_name, sparql_syntax)
new_template_local_store =
Interpreter.Diff.Store.maybe_push_solution_sync(
result,
0.2,
rule_name,
template_local_store
)
{result, new_template_local_store}
response ->
Logging.EnvLog.log(:log_template_matcher_performance, "Template: yes")
response
end
end
@doc """
Parses the query and yields the first (possibly non-complete) match.
"""
@spec parse_query_first(String.t(), atom) :: {unparsed_query, query()} | {:fail}
def parse_query_first(query, rule_name \\ :Sparql, syntax \\ parse_sparql()) do
rule = {:symbol, rule_name}
state = %Generator.State{chars: String.graphemes(query), syntax: syntax}
generator = EbnfParser.GeneratorConstructor.dispatch_generation(rule, state)
case EbnfParser.Generator.emit(generator) do
{:ok, _, %Generator.Result{matched_string: matched_string, match_construct: [construct]}} ->
{matched_string, construct}
{:fail} ->
{:fail}
end
end
defp test_full_solution_for_generator(generator) do
case EbnfParser.Generator.emit(generator) do
{:ok, new_state, answer} ->
if Generator.Result.full_match?(answer) do
true
else
test_full_solution_for_generator(new_state)
end
{:fail} ->
false
end
end
@doc """
  Similar to parse_query_full, but handier when you only want to test whether
  a solution exists. Note that this is not cheaper to execute than actually
  finding a solution.
"""
@spec test_full_solution(unparsed_query, atom) :: true | false
def test_full_solution(query, rule_name \\ :Sparql) do
rule = {:symbol, rule_name}
state = %Generator.State{chars: String.graphemes(query), syntax: Parser.parse_sparql()}
EbnfParser.GeneratorConstructor.dispatch_generation(rule, state)
|> test_full_solution_for_generator
end
@doc """
## Examples
iex> Parser.full_parse( "FOO" )
[{ :symbol, :FOO }]
iex> Parser.full_parse( "FOO BAR" )
[symbol: :FOO, symbol: :BAR]
iex> Parser.full_parse( "( FOO BAR )" )
[paren_group: [ symbol: :FOO, symbol: :BAR]]
iex> Parser.full_parse( "( FOO BAR )*" )
[maybe_many: [paren_group: [ symbol: :FOO, symbol: :BAR]]]
iex> Parser.full_parse( "( FOO BAR* (FOO|BAR) )+" )
[one_or_more: [ paren_group: [ symbol: :FOO, maybe_many: [ symbol: :BAR ], paren_group: [ one_of: [ symbol: :FOO, symbol: :BAR ] ] ] ]]
"""
def full_parse(string) do
EbnfParser.Parser.tokenize_and_parse(string)
end
def parse_and_match(rule, str, prev \\ []) do
rule = Parser.full_parse(rule)
chars = String.codepoints(str)
EbnfInterpreter.eagerly_match_rule(chars, %{}, rule, prev)
end
end
|
lib/parser.ex
| 0.761627
| 0.463141
|
parser.ex
|
starcoder
|
defmodule SanbaseWeb.Graphql.CustomTypes.DateTime do
use Absinthe.Schema.Notation
scalar :datetime, name: "DateTime" do
description("""
The `DateTime` scalar type represents a date and time in the UTC
timezone. The DateTime appears in a JSON response as an ISO8601 formatted
string, including UTC timezone ("Z"). The parsed date and time string will
be converted to UTC and any UTC offset other than 0 will be rejected.
""")
serialize(fn
%NaiveDateTime{} = ndt ->
DateTime.from_naive!(ndt, "Etc/UTC")
|> DateTime.truncate(:second)
|> DateTime.to_iso8601()
%DateTime{} = dt ->
dt
|> DateTime.truncate(:second)
|> DateTime.to_iso8601()
end)
parse(&parse_datetime/1)
end
scalar :naive_datetime, name: "NaiveDateTime" do
description("""
The `Naive DateTime` scalar type represents a naive date and time without
timezone. The DateTime appears in a JSON response as an ISO8601 formatted
string.
""")
serialize(&NaiveDateTime.to_iso8601/1)
parse(&parse_naive_datetime/1)
end
scalar :time do
description("""
The `Time` scalar type represents a time. The Time appears in a JSON
response as an ISO8601 formatted string, without a date component.
""")
serialize(&Time.to_iso8601/1)
parse(&parse_time/1)
end
@spec parse_datetime(Absinthe.Blueprint.Input.String.t()) :: {:ok, DateTime.t()} | :error
@spec parse_datetime(Absinthe.Blueprint.Input.Null.t()) :: {:ok, nil}
defp parse_datetime(%Absinthe.Blueprint.Input.String{value: "utc_now" <> _rest = value}) do
case String.split(value, ~r/\s*-\s*/) do
["utc_now"] ->
{:ok, DateTime.utc_now()}
["utc_now", interval] ->
case Sanbase.DateTimeUtils.valid_compound_duration?(interval) do
true ->
dt =
DateTime.utc_now()
|> Timex.shift(seconds: -Sanbase.DateTimeUtils.str_to_sec(interval))
{:ok, dt}
false ->
:error
end
_ ->
:error
end
end
defp parse_datetime(%Absinthe.Blueprint.Input.String{value: value}) do
case DateTime.from_iso8601(value) do
{:ok, datetime, 0} -> {:ok, datetime}
{:ok, _datetime, _offset} -> :error
_error -> :error
end
end
defp parse_datetime(%Absinthe.Blueprint.Input.Null{}) do
{:ok, nil}
end
defp parse_datetime(_) do
:error
end
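  # Accepted inputs (a sketch; "7d" is an illustrative compound duration):
  #   "2019-01-01T00:00:00Z"      -> {:ok, ~U[2019-01-01 00:00:00Z]}
  #   "utc_now"                   -> {:ok, DateTime.utc_now()}
  #   "utc_now - 7d"              -> {:ok, utc_now shifted back 7 days}
  #   "2019-01-01T00:00:00+02:00" -> :error (non-zero UTC offsets are rejected)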
@spec parse_naive_datetime(Absinthe.Blueprint.Input.String.t()) ::
{:ok, NaiveDateTime.t()} | :error
@spec parse_naive_datetime(Absinthe.Blueprint.Input.Null.t()) :: {:ok, nil}
defp parse_naive_datetime(%Absinthe.Blueprint.Input.String{value: value}) do
case NaiveDateTime.from_iso8601(value) do
{:ok, naive_datetime} -> {:ok, naive_datetime}
_error -> :error
end
end
defp parse_naive_datetime(%Absinthe.Blueprint.Input.Null{}) do
{:ok, nil}
end
defp parse_naive_datetime(_) do
:error
end
@spec parse_time(Absinthe.Blueprint.Input.String.t()) :: {:ok, Time.t()} | :error
@spec parse_time(Absinthe.Blueprint.Input.Null.t()) :: {:ok, nil}
defp parse_time(%Absinthe.Blueprint.Input.String{value: value}) do
case Time.from_iso8601(value) do
{:ok, time} -> {:ok, time}
_error -> :error
end
end
defp parse_time(%Absinthe.Blueprint.Input.Null{}) do
{:ok, nil}
end
defp parse_time(_) do
:error
end
end
|
lib/sanbase_web/graphql/schema/custom_types/datetime.ex
| 0.913068
| 0.482368
|
datetime.ex
|
starcoder
|
defmodule Andy.MockRover.InfraredSensor do
@moduledoc "A mock infrared sensor"
@behaviour Andy.Sensing
alias Andy.Device
import Andy.Utils
require Logger
@max_distance 70
@max_heading 25
def new(port) do
%Device{
mod: __MODULE__,
class: :sensor,
port: port,
path: "/mock/infrared_sensor",
type: :infrared,
mock: true
}
end
### Sensing
def senses(_) do
# TODO - get ready from andy_world
beacon_senses =
Enum.reduce(
1..max_beacon_channels(),
[],
fn channel, acc ->
acc ++
[
beacon_sense(:beacon_heading, channel),
beacon_sense(:beacon_distance, channel),
beacon_sense(:beacon_on, channel),
beacon_sense(:remote_buttons, channel)
]
end
)
[:beacon_proximity | beacon_senses]
end
def beacon_senses_for(channel) do
[
beacon_sense(:beacon_heading, channel),
beacon_sense(:beacon_distance, channel),
beacon_sense(:beacon_on, channel)
]
end
def read(sensor, sense) do
expanded_sense = expand_sense(sense)
{_, updated_sensor} = do_read(sensor, expanded_sense)
# double read seems necessary after a mode change
do_read(updated_sensor, expanded_sense)
end
def sensitivity(_sensor, _sense) do
nil
end
### Private
defp beacon_sense(kind, channel) do
"#{kind}/#{channel}" |> String.to_atom()
end
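  # e.g. beacon_sense(:beacon_heading, 1) => :"beacon_heading/1"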
defp expand_sense(sense) do
case String.split("#{sense}", "/") do
[kind] ->
String.to_atom(kind)
[kind, channel_s] ->
{channel, _} = Integer.parse(channel_s)
{String.to_atom(kind), channel}
end
end
defp do_read(sensor, :beacon_proximity) do
proximity(sensor)
end
defp do_read(sensor, {:remote_buttons, channel}) do
remote_buttons(sensor, channel)
end
defp do_read(sensor, {beacon_sense, channel}) do
case beacon_sense do
:beacon_heading -> seek_heading(sensor, channel)
:beacon_distance -> seek_distance(sensor, channel)
:beacon_on -> seek_beacon_on?(sensor, channel)
end
end
defp proximity(sensor) do
value = :rand.uniform(20)
{value, sensor}
end
defp seek_heading(sensor, _channel) do
double_max_heading = 2 * @max_heading
    value = @max_heading - Enum.random(0..double_max_heading)
{value, sensor}
end
defp seek_distance(sensor, _channel) do
value = Enum.random(0..@max_distance)
{value, sensor}
end
defp seek_beacon_on?(sensor, _channel) do
value = :rand.uniform(2) == 2
{value, sensor}
end
defp remote_buttons(sensor, _channel) do
value =
case :rand.uniform(12) - 1 do
1 -> %{red: :up, blue: nil}
2 -> %{red: :down, blue: nil}
3 -> %{red: nil, blue: :up}
4 -> %{red: nil, blue: :down}
5 -> %{red: :up, blue: :up}
6 -> %{red: :up, blue: :down}
7 -> %{red: :down, blue: :up}
8 -> %{red: :down, blue: :down}
10 -> %{red: :up_down, blue: nil}
11 -> %{red: nil, blue: :up_down}
# 0 or 9
_ -> %{red: nil, blue: nil}
end
{value, sensor}
end
end
|
lib/andy/platforms/mock_rover/infrared_sensor.ex
| 0.594787
| 0.45042
|
infrared_sensor.ex
|
starcoder
|
defmodule Relexe.Steps.Build.PackAndBuild.Commands do
defmodule Command do
@enforce_keys [:name, :help]
defstruct [:name, :help, args: []]
@type t :: %__MODULE__{
name: String.t(),
help: String.t(),
args: [String.t()]
}
end
defmodule CompoundCommand do
@enforce_keys [:name, :help]
defstruct [:name, :help, args: [], commands: []]
@type t :: %__MODULE__{
name: String.t(),
help: String.t(),
            commands: [Relexe.Steps.Build.PackAndBuild.Commands.t()]
}
end
defmodule EvalCommand do
@enforce_keys [:name, :help, :expr]
defstruct [:name, :help, :expr]
@type t :: %__MODULE__{
name: String.t(),
help: String.t(),
            expr: String.t() | Relexe.Steps.Build.PackAndBuild.Commands.mod_fn_args()
}
end
defmodule RpcCommand do
@enforce_keys [:name, :help, :expr]
defstruct [:name, :help, :expr]
@type t :: %__MODULE__{
name: String.t(),
help: String.t(),
            expr: String.t() | Relexe.Steps.Build.PackAndBuild.Commands.mod_fn_args()
}
end
alias __MODULE__.{Command, CompoundCommand, EvalCommand, RpcCommand}
alias Burrito.Builder.Log
@type t :: Command.t() | CompoundCommand.t() | EvalCommand.t() | RpcCommand.t()
@type arg_type :: :string | :integer | :float
@type mod_fn_args :: {module(), atom(), [{arg_name :: atom(), arg_type}]}
  @type custom_command_option ::
          {:name, String.t()}
          | {:help, String.t()}
          | {:eval, String.t() | mod_fn_args()}
          | {:rpc, String.t() | mod_fn_args()}
          | {:commands, [custom_command]}
@type custom_command :: [custom_command_option]
@type command_option ::
:start
| :start_iex
| :stop
| :restart
| :rpc
| :eval
| :remote
| :pid
| :version
| :service
| :daemon
| custom_command
def default, do: ~w(start start_iex service eval rpc remote restart stop pid version)a
@spec parse([command_option], release_name :: String.t(), os) :: [t()]
when os: :windows | :darwin | :linux
def parse(commands, release_name, os) when is_list(commands) do
Log.info(:step, "Parsing CLI commands")
do_parse(commands, [], release_name, os)
end
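  # Sketch (release name and custom command are illustrative; MyApp.Release is
  # a hypothetical module):
  #
  #   Commands.parse(
  #     [:start, :stop, [name: "migrate", help: "Run migrations",
  #       rpc: {MyApp.Release, :migrate, []}]],
  #     "my_app",
  #     :linux
  #   )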
defp do_parse([], parsed, _release_name, _os), do: parsed |> List.flatten() |> Enum.reverse()
defp do_parse([command | commands], parsed, release_name, os) do
parsed_command = parse_command(command, release_name, os)
do_parse(commands, [parsed_command | parsed], release_name, os)
end
defp parse_command(command, release_name, os) when is_list(command) do
name = Keyword.fetch!(command, :name)
help = Keyword.fetch!(command, :help)
cond do
Keyword.has_key?(command, :rpc) ->
validate_rpc_or_eval_command!(command[:rpc], :rpc)
# TODO: commands with args
%RpcCommand{
name: name,
help: help,
expr: command[:rpc]
}
Keyword.has_key?(command, :eval) ->
validate_rpc_or_eval_command!(command[:eval], :eval)
# TODO: commands with args
%EvalCommand{
name: name,
help: help,
expr: command[:eval]
}
Keyword.has_key?(command, :commands) ->
nested_compound_commands? =
Enum.any?(command[:commands], &Keyword.has_key?(&1, :commands))
if nested_compound_commands? do
raise ArgumentError, message: "compound commands cannot be nested"
end
%CompoundCommand{
name: name,
help: help,
commands: parse(command[:commands], release_name, os)
}
true ->
raise ArgumentError, message: "custom commands must contain an :rpc or :eval option"
end
end
defp parse_command(:start, release_name, _os) do
%Command{
name: "start",
help: "Start #{release_name}"
}
end
defp parse_command(:start_iex, release_name, _os) do
%Command{
name: "start_iex",
help: "Start #{release_name} with IEx attached"
}
end
defp parse_command(:stop, release_name, _os) do
%Command{
name: "stop",
help: "Stop #{release_name}"
}
end
defp parse_command(:restart, release_name, _os) do
%Command{
name: "restart",
help: "Restart #{release_name}"
}
end
defp parse_command(:pid, _release_name, _os) do
%Command{
name: "pid",
help: "Prints the operating system PID of the running system"
}
end
defp parse_command(:version, _release_name, _os) do
%Command{
name: "version",
help: "Print the application version"
}
end
defp parse_command(:eval, _release_name, _os) do
%Command{
name: "eval",
help: "Executes an expression on a new, non-booted system",
args: ["expr"]
}
end
defp parse_command(:rpc, _release_name, _os) do
%Command{
name: "rpc",
help: "Executes an expression remotely on the running system",
args: ["expr"]
}
end
defp parse_command(:remote, _release_name, _os) do
%Command{
name: "remote",
help: "Connects to the running system via a remote shell"
}
end
defp parse_command(service_or_daemon, release_name, :windows)
when service_or_daemon in [:daemon, :service] do
%CompoundCommand{
name: "service",
help: "Add, remove, start or stop the #{release_name} Windows Service",
commands: [
%Command{name: "add", help: "Add Windows Service"},
%Command{name: "remove", help: "Remove the service"},
%Command{name: "start", help: "Start the service"},
%Command{name: "stop", help: "Stop the service"},
%Command{name: "list", help: "List installed services"},
%Command{name: "help", help: "Show service controller help"}
]
}
end
defp parse_command(service_or_daemon, _release_name, _os)
when service_or_daemon in [:daemon, :service] do
raise("not implemented")
end
defp validate_rpc_or_eval_command!(fn_string, _rpc_or_eval)
when is_binary(fn_string) do
:ok
end
defp validate_rpc_or_eval_command!({m, f, a}, rpc_or_eval)
when is_atom(m) and is_atom(f) and is_list(a) do
if Enum.all?(a, fn arg -> is_binary(arg) or is_atom(arg) end) do
:ok
else
raise ArgumentError,
message: "#{rpc_or_eval} argument names must be strings or atoms"
end
end
defp validate_rpc_or_eval_command!(_, rpc_or_eval) do
raise ArgumentError,
message:
"#{rpc_or_eval} commands must be a string or {Module, :function, [arg_names]} tuple"
end
end
# --- end of lib/steps/build/pack_and_build/commands.ex ---
if Cldr.Code.ensure_compiled?(Cldr.Unit) do
defmodule Cldr.HTML.Unit do
@moduledoc """
Implements `Phoenix.HTML.Form.select/4` specifically for
localised unit display.
"""
@type select_options :: [
{:units, [atom() | binary(), ...]}
| {:locale, Cldr.Locale.locale_name() | Cldr.LanguageTag.t()}
| {:mapper, function()}
| {:backend, module()}
| {:selected, atom() | binary()}
]
@doc """
Generate an HTML select tag for a unit list
that can be used with a `Phoenix.HTML.Form.t`.
## Arguments
* A `t:Phoenix.HTML.Form` form
* A `t:Phoenix.HTML.Form.field` field
* A `t:Keyword` list of options
## Options
For select options see `Phoenix.HTML.Form.select/4`
* `:units` is a list of units to be displayed in the
select. See `Cldr.Unit.known_units/0` and
`Cldr.Unit.known_units_for_category/1`
* `:style` is the style of unit name to be displayed and
must be one of the styles returned by `Cldr.Unit.known_styles/0`.
The current styles are :long, :short and :narrow.
The default is style: :long.
* `:locale` defines the locale to be used to localise the
description of the units. The default is the locale
returned by `Cldr.get_locale/0`
* `:backend` is any backend module. The default is
`Cldr.default_backend!/0`
* `:mapper` is a function that creates the text to be
  displayed in the select tag for each unit. It is
  passed the unit name and the validated options. The default
  function is `&{Cldr.Unit.display_name(&1, &2), &1}`
* `:selected` identifies the unit that is to be selected
by default in the `select` tag. The default is `nil`. This
is passed unmodified to `Phoenix.HTML.Form.select/4`
* `:prompt` is a prompt displayed at the top of the select
box. This is passed unmodified to `Phoenix.HTML.Form.select/4`
## Examples

    iex> Cldr.HTML.Unit.select(:my_form, :unit, selected: :foot)

    iex> Cldr.HTML.Unit.select(:my_form, :unit,
    ...>   units: [:foot, :inch], mapper: &{Cldr.Unit.display_name(&1, &2), &1})
"""
@spec select(
form :: Phoenix.HTML.Form.t(),
field :: Phoenix.HTML.Form.field(),
select_options
) ::
Phoenix.HTML.safe()
| {:error, {Cldr.UnknownUnitError, binary()}}
| {:error, {Cldr.UnknownLocaleError, binary()}}
def select(form, field, options \\ [])
def select(form, field, options) when is_list(options) do
select(form, field, validate_options(options), options[:selected])
end
# Invalid options
defp select(_form, _field, {:error, reason}, _selected) do
{:error, reason}
end
# Selected unit
defp select(form, field, options, _selected) do
select_options =
options
|> Map.take([:selected, :prompt])
|> Map.to_list
options =
options
|> maybe_include_selected_unit
|> unit_options
Phoenix.HTML.Form.select(form, field, options, select_options)
end
defp validate_options(options) do
with options <- Map.merge(default_options(), Map.new(options)),
{:ok, options} <- validate_locale(options),
{:ok, options} <- validate_selected(options),
{:ok, options} <- validate_units(options),
{:ok, options} <- validate_style(options) do
options
end
end
defp default_options do
Map.new(
units: default_unit_list(),
backend: nil,
locale: Cldr.get_locale(),
mapper: &{Cldr.Unit.display_name(&1, &2), &1},
style: :long,
selected: nil
)
end
defp validate_selected(%{selected: nil} = options) do
{:ok, options}
end
defp validate_selected(%{selected: selected} = options) do
with {:ok, unit, _conversion} <- Cldr.Unit.validate_unit(selected) do
{:ok, Map.put(options, :selected, unit)}
end
end
# Return a list of validated units or an error
defp validate_units(%{units: units} = options) do
validate_units(units, options)
end
defp validate_units(units) when is_list(units) do
Enum.reduce_while(units, [], fn unit, acc ->
case Cldr.Unit.validate_unit(unit) do
{:ok, unit, _conversion} -> {:cont, [unit | acc]}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end
defp validate_units(units, options) do
case validate_units(units) do
{:error, reason} -> {:error, reason}
units -> {:ok, Map.put(options, :units, Enum.reverse(units))}
end
end
defp validate_style(options) do
with {:ok, style} <- Cldr.Unit.validate_style(options[:style]) do
{:ok, Map.put(options, :style, style)}
end
end
defp validate_locale(options) do
{locale, backend} = Cldr.locale_and_backend_from(options[:locale], options[:backend])
with {:ok, locale} <- Cldr.validate_locale(locale, backend) do
options
|> Map.put(:locale, locale)
|> Map.put(:backend, locale.backend)
|> wrap(:ok)
end
end
defp wrap(term, atom) do
{atom, term}
end
defp maybe_include_selected_unit(%{selected: nil} = options) do
options
end
defp maybe_include_selected_unit(%{units: units, selected: selected} = options) do
if Enum.any?(units, &(&1 == selected)) do
options
else
Map.put(options, :units, [selected | units])
end
end
defp unit_options(options) do
options = Map.to_list(options)
options[:units]
|> Enum.map(fn unit -> options[:mapper].(unit, options) end)
|> Enum.sort()
end
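# Hedged illustration (not part of the original source): the :mapper option can
# be overridden; it receives the unit and the validated options keyword list:
#
#     Cldr.HTML.Unit.select(form, :unit,
#       units: [:meter, :foot],
#       mapper: fn unit, options ->
#         {Cldr.Unit.display_name(unit, style: options[:style]), unit}
#       end
#     )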
defp default_unit_list() do
Cldr.Unit.known_units()
end
end
end
# --- end of lib/cldr_html_units.ex ---
defmodule GrovePi.Analog do
alias GrovePi.Board
@moduledoc ~S"""
Perform analog I/O using the GrovePi.
Analog reads return 10-bit values (0-1023) from analog to digital converters on
the GrovePi. These values map to voltages between 0 and 5 volts. Analog writes
generate a steady square wave on supported pins (also called PWM). The connectors
and pins on the GrovePi and GrovePiZero boards differ in their support for analog
reads and writes.
When in doubt, consult the following diagram or the corresponding one for
the GrovePiZero:

Analog reads can be performed on pins A0, A1, A2, and A3. For most Grove
analog sensors, the proper pin to use is the one labeled on the port.
Analog writes only work on the PWM pins. E.g., pins 3, 5, 6, and 9. Just
like the reads, for most Grove sensors, the proper pin to use is the same
as the one labeled on the port.
Example use:
```
iex> pin = 3
iex> GrovePi.Analog.read(pin)
971
iex> GrovePi.Analog.write(pin, 200)
:ok
```
"""
@type adc_level :: 0..1023
@type pwm :: 0..255
@doc """
Read the value from the specified analog pin. This returns a value from
0-1023 that maps to 0 to 5 volts.
"""
@spec read(atom, GrovePi.pin) :: adc_level | {:error, term}
def read(prefix, pin) do
with :ok <- Board.send_request(prefix, <<3, pin, 0, 0>>),
<<_, value::size(16)>> <- Board.get_response(prefix, 3),
do: value
end
def read(pin) do
read(Default, pin)
end
@doc """
Write an analog value to a pin. The GrovePi maps the specified value
(0-255) to a duty cycle for a 1.024 ms square wave (~976 Hz). This
can be used to dim an LED, for example, by turning the output on only
a fraction of the time.
"""
@spec write(atom, GrovePi.pin, pwm) :: :ok | {:error, term}
def write(prefix, pin, value) do
Board.send_request(prefix, <<4, pin, value, 0>>)
end
def write(pin, value) do
write(Default, pin, value)
end
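# Worked example (hedged, not from the original source): a value of 128 gives a
# duty cycle of 128/255, roughly 50% of the 1.024 ms square wave, so an LED on
# PWM pin 5 appears at about half brightness:
#
#     GrovePi.Analog.write(5, 128)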
end
# --- end of lib/grovepi/analog.ex ---
defmodule Bolt.Sips.Internals.BoltProtocolV1 do
@moduledoc false
alias Bolt.Sips.Internals.BoltProtocolHelper
alias Bolt.Sips.Internals.BoltVersionHelper
alias Bolt.Sips.Internals.Error
@hs_magic <<0x60, 0x60, 0xB0, 0x17>>
@doc """
Initiates the handshake between the client and the server.
See [http://boltprotocol.org/v1/#handshake](http://boltprotocol.org/v1/#handshake)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.handshake(:gen_tcp, port, [])
{:ok, bolt_version}
"""
@spec handshake(atom(), port(), Keyword.t()) ::
{:ok, integer()} | {:error, Bolt.Sips.Internals.Error.t()}
def handshake(transport, port, options) do
recv_timeout = BoltProtocolHelper.get_recv_timeout(options)
max_version = BoltVersionHelper.last()
# Define version list. Should be a 4 integer list
# Example: [1, 0, 0, 0]
versions =
((max_version..0
|> Enum.into([])) ++ [0, 0, 0])
|> Enum.take(4)
Bolt.Sips.Internals.Logger.log_message(
:client,
:handshake,
"#{inspect(@hs_magic, base: :hex)} #{inspect(versions)}"
)
data = @hs_magic <> Enum.into(versions, <<>>, fn version_ -> <<version_::32>> end)
transport.send(port, data)
case transport.recv(port, 4, recv_timeout) do
{:ok, <<version::32>> = packet} when version <= max_version ->
Bolt.Sips.Internals.Logger.log_message(:server, :handshake, packet, :hex)
Bolt.Sips.Internals.Logger.log_message(:server, :handshake, version)
{:ok, version}
{:ok, other} ->
{:error, Error.exception(other, port, :handshake)}
other ->
{:error, Error.exception(other, port, :handshake)}
end
end
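# Worked example (hedged): if BoltVersionHelper.last/0 returned 3, the proposed
# version list would be [3, 2, 1, 0] and the handshake payload would be
#
#     <<0x60, 0x60, 0xB0, 0x17, 3::32, 2::32, 1::32, 0::32>>
#
# The server then answers with its chosen version as a single 32-bit integer.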
@doc """
Initialises the connection.
Expects a transport module (e.g. `:gen_tcp`) and a `Port`. Accepts
authorisation params in the form of {username, password}.
See [https://boltprotocol.org/v1/#message-init](https://boltprotocol.org/v1/#message-init)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Examples
iex> Bolt.Sips.Internals.BoltProtocol.init(:gen_tcp, port, 1, {}, [])
{:ok, info}
iex> Bolt.Sips.Internals.BoltProtocol.init(:gen_tcp, port, 1, {"username", "password"}, [])
{:ok, info}
"""
@spec init(atom(), port(), integer(), tuple(), Keyword.t()) ::
{:ok, any()} | {:error, Bolt.Sips.Internals.Error.t()}
def init(transport, port, bolt_version, auth, options) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:init, [auth]})
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, info} ->
{:ok, info}
{:failure, response} ->
{:error, Error.exception(response, port, :init)}
other ->
{:error, Error.exception(other, port, :init)}
end
end
@doc """
Implementation of Bolt's RUN. It passes a statement for execution on the server.
Note that this message doesn't return the statement result. For this purpose, use PULL_ALL.
See [https://boltprotocol.org/v1/#message-run](https://boltprotocol.org/v1/#message-run)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.run(:gen_tcp, port, 1, "RETURN {num} AS num", %{num: 5}, [])
{:ok, {:success, %{"fields" => ["num"]}}}
"""
@spec run(atom(), port(), integer(), String.t(), map(), Keyword.t()) ::
{:ok, any()} | {:error, Bolt.Sips.Internals.Error.t()}
def run(transport, port, bolt_version, statement, params, options) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:run, [statement, params]})
case BoltProtocolHelper.receive_data(transport, port, bolt_version, options) do
{:success, _} = result ->
{:ok, result}
{:failure, response} ->
{:error, Error.exception(response, port, :run)}
%Error{} = error ->
{:error, error}
other ->
{:error, Error.exception(other, port, :run)}
end
end
@doc """
Implementation of Bolt's PULL_ALL. It retrieves all remaining items from the active result
stream.
See [https://boltprotocol.org/v1/#message-pull-all](https://boltprotocol.org/v1/#message-pull-all)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.run(:gen_tcp, port, 1, "RETURN {num} AS num", %{num: 5}, [])
{:ok, {:success, %{"fields" => ["num"]}}}
iex> BoltProtocolV1.pull_all(:gen_tcp, port, 1, [])
{:ok,
[
record: [5],
success: %{"type" => "r"}
]}
"""
@spec pull_all(atom(), port(), integer(), Keyword.t()) ::
        {:ok, list()}
        | {:failure, Bolt.Sips.Internals.Error.t()}
        | {:error, Bolt.Sips.Internals.Error.t()}
def pull_all(transport, port, bolt_version, options) do
BoltProtocolHelper.send_message(transport, port, bolt_version, {:pull_all, []})
with data <- BoltProtocolHelper.receive_data(transport, port, bolt_version, options),
data <- List.wrap(data),
{:success, _} <- List.last(data) do
{:ok, data}
else
{:failure, response} ->
{:failure, Error.exception(response, port, :pull_all)}
other ->
{:error, Error.exception(other, port, :pull_all)}
end
end
@doc """
Runs a statement (most likely Cypher statement) and returns a list of the
records and a summary (acts as a RUN + PULL_ALL).
Records are represented using PackStream's record data type. Their Elixir
representation is a Keyword list with the keys `:sig` and `:fields`.
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Examples
iex> Bolt.Sips.Internals.BoltProtocol.run_statement(:gen_tcp, port, 1, "MATCH (n) RETURN n")
[
{:success, %{"fields" => ["n"]}},
{:record, [sig: 1, fields: [1, "Example", "Labels", %{"some_attribute" => "some_value"}]]},
{:success, %{"type" => "r"}}
]
"""
@spec run_statement(atom(), port(), integer(), String.t(), map(), Keyword.t()) ::
[
Bolt.Sips.Internals.PackStream.Message.decoded()
]
| Bolt.Sips.Internals.Error.t()
def run_statement(transport, port, bolt_version, statement, params, options) do
with {:ok, run_data} <- run(transport, port, bolt_version, statement, params, options),
{:ok, result} <- pull_all(transport, port, bolt_version, options) do
[run_data | result]
else
{:error, %Error{} = error} ->
error
other ->
Error.exception(other, port, :run_statement)
end
end
@doc """
Implementation of Bolt's DISCARD_ALL. It discards all remaining items from the active result
stream.
See [https://boltprotocol.org/v1/#message-discard-all](https://boltprotocol.org/v1/#message-discard-all)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.discard_all(:gen_tcp, port, 1, [])
:ok
"""
@spec discard_all(atom(), port(), integer(), Keyword.t()) :: :ok | Bolt.Sips.Internals.Error.t()
def discard_all(transport, port, bolt_version, options) do
BoltProtocolHelper.treat_simple_message(:discard_all, transport, port, bolt_version, options)
end
@doc """
Implementation of Bolt's ACK_FAILURE. It acknowledges a failure while keeping
transactions alive.
See [http://boltprotocol.org/v1/#message-ack-failure](http://boltprotocol.org/v1/#message-ack-failure)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.ack_failure(:gen_tcp, port, 1, [])
:ok
"""
@spec ack_failure(atom(), port(), integer(), Keyword.t()) :: :ok | Bolt.Sips.Internals.Error.t()
def ack_failure(transport, port, bolt_version, options) do
BoltProtocolHelper.treat_simple_message(:ack_failure, transport, port, bolt_version, options)
end
@doc """
Implementation of Bolt's RESET message. It resets a session to a "clean"
state.
See [http://boltprotocol.org/v1/#message-reset](http://boltprotocol.org/v1/#message-reset)
## Options
See "Shared options" in `Bolt.Sips.Internals.BoltProtocolHelper` documentation.
## Example
iex> BoltProtocolV1.reset(:gen_tcp, port, 1, [])
:ok
"""
@spec reset(atom(), port(), integer(), Keyword.t()) :: :ok | Bolt.Sips.Internals.Error.t()
def reset(transport, port, bolt_version, options) do
BoltProtocolHelper.treat_simple_message(:reset, transport, port, bolt_version, options)
end
end
# --- end of lib/bolt_sips/internals/bolt_protocol_v1.ex ---
defmodule AWS.SecretsManager do
@moduledoc """
AWS Secrets Manager API Reference
AWS Secrets Manager provides a service to enable you to store, manage, and
retrieve secrets.
This guide provides descriptions of the Secrets Manager API. For more
information about using this service, see the [AWS Secrets Manager User
Guide](https://docs.aws.amazon.com/secretsmanager/latest/userguide/introduction.html).
**API Version**
This version of the Secrets Manager API Reference documents the Secrets
Manager API version 2017-10-17.
<note> As an alternative to using the API, you can use one of the AWS SDKs,
which consist of libraries and sample code for various programming
languages and platforms such as Java, Ruby, .NET, iOS, and Android. The
SDKs provide a convenient way to create programmatic access to AWS Secrets
Manager. For example, the SDKs provide cryptographically signing requests,
managing errors, and retrying requests automatically. For more information
about the AWS SDKs, including downloading and installing them, see [Tools
for Amazon Web Services](http://aws.amazon.com/tools/).
</note> We recommend you use the AWS SDKs to make programmatic API calls to
Secrets Manager. However, you also can use the Secrets Manager HTTP Query
API to make direct calls to the Secrets Manager web service. To learn more
about the Secrets Manager HTTP Query API, see [Making Query
Requests](https://docs.aws.amazon.com/secretsmanager/latest/userguide/query-requests.html)
in the *AWS Secrets Manager User Guide*.
Secrets Manager API supports GET and POST requests for all actions, and
doesn't require you to use GET for some actions and POST for others.
However, GET requests are subject to the size limitation of a URL.
Therefore, for operations that require larger sizes, use a POST request.
**Support and Feedback for AWS Secrets Manager**
We welcome your feedback. Send your comments to
[<EMAIL>](mailto:<EMAIL>),
or post your feedback and questions in the [AWS Secrets Manager Discussion
Forum](http://forums.aws.amazon.com/forum.jspa?forumID=296). For more
information about the AWS Discussion Forums, see [Forums
Help](http://forums.aws.amazon.com/help.jspa).
**How examples are presented**
The JSON that AWS Secrets Manager expects as your request parameters and
the service returns as a response to HTTP query requests contain single,
long strings without line breaks or white space formatting. The JSON shown
in the examples displays the code formatted with both line breaks and white
space to improve readability. When example input parameters can also cause
long strings extending beyond the screen, you can insert line breaks to
enhance readability. You should always submit the input as a single JSON
text string.
**Logging API Requests**
AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API
calls for your AWS account and delivers log files to an Amazon S3 bucket.
By using information that's collected by AWS CloudTrail, you can determine
the requests successfully made to Secrets Manager, who made the request,
when it was made, and so on. For more about AWS Secrets Manager and support
for AWS CloudTrail, see [Logging AWS Secrets Manager Events with AWS
CloudTrail](http://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail)
in the *AWS Secrets Manager User Guide*. To learn more about CloudTrail,
including enabling it and finding your log files, see the [AWS CloudTrail User
Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
"""
@doc """
Disables automatic scheduled rotation and cancels the rotation of a secret
if currently in progress.
To re-enable scheduled rotation, call `RotateSecret` with
`AutomaticallyRotateAfterDays` set to a value greater than 0. This
immediately rotates your secret and then enables the automatic schedule.
<note> If you cancel a rotation while in progress, it can leave the
`VersionStage` labels in an unexpected state. Depending on the step of the
rotation in progress, you might need to remove the staging label
`AWSPENDING` from the partially created version, specified by the
`VersionId` response value. You should also evaluate the partially rotated
new version to see if it should be deleted, which you can do by removing
all staging labels from the new version `VersionStage` field.
</note> To successfully start a rotation, the staging label `AWSPENDING`
must be in one of the following states:
<ul> <li> Not attached to any version at all
</li> <li> Attached to the same version as the staging label `AWSCURRENT`
</li> </ul> If the staging label `AWSPENDING` is attached to a different
version than the one with `AWSCURRENT`, then the attempt to rotate
fails.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:CancelRotateSecret
</li> </ul> **Related operations**
<ul> <li> To configure rotation for a secret or to manually trigger a
rotation, use `RotateSecret`.
</li> <li> To get the rotation configuration details for a secret, use
`DescribeSecret`.
</li> <li> To list all of the currently available secrets, use
`ListSecrets`.
</li> <li> To list all of the versions currently associated with a secret,
use `ListSecretVersionIds`.
</li> </ul>
"""
def cancel_rotate_secret(client, input, options \\ []) do
request(client, "CancelRotateSecret", input, options)
end
@doc """
Creates a new secret. A secret in Secrets Manager consists of both the
protected secret data and the important information needed to manage the
secret.
Secrets Manager stores the encrypted secret data in one of a collection of
"versions" associated with the secret. Each version contains a copy of the
encrypted secret data. Each version is associated with one or more "staging
labels" that identify where the version is in the rotation cycle. The
`SecretVersionsToStages` field of the secret contains the mapping of
staging labels to the active versions of the secret. Versions without a
staging label are considered deprecated and not included in the list.
You provide the secret data to be encrypted by putting text in either the
`SecretString` parameter or binary data in the `SecretBinary` parameter,
but not both. If you include `SecretString` or `SecretBinary` then Secrets
Manager also creates an initial secret version and automatically attaches
the staging label `AWSCURRENT` to the new version.
<note> <ul> <li> If you call an operation to encrypt or decrypt the
`SecretString` or `SecretBinary` for a secret in the same account as the
calling user and that secret doesn't specify an AWS KMS encryption key,
Secrets Manager uses the account's default AWS managed customer master key
(CMK) with the alias `aws/secretsmanager`. If this key doesn't already
exist in your account then Secrets Manager creates it for you
automatically. All users and roles in the same AWS account automatically
have access to use the default CMK. Note that if a Secrets Manager API
call results in AWS creating the account's AWS-managed CMK, it can result
in a one-time significant delay in returning the result.
</li> <li> If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the
secret value then you must create and use a custom AWS KMS CMK because you
can't access the default CMK for the account using credentials from a
different AWS account. Store the ARN of the CMK in the secret when you
create the secret or when you update it by including it in the `KMSKeyId`.
If you call an API that must encrypt or decrypt `SecretString` or
`SecretBinary` using credentials from a different account then the AWS KMS
key policy must grant cross-account access to that other account's user or
role for both the kms:GenerateDataKey and kms:Decrypt operations.
</li> </ul> </note>
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:CreateSecret
</li> <li> kms:GenerateDataKey - needed only if you use a customer-managed
AWS KMS key to encrypt the secret. You do not need this permission to use
the account default AWS managed CMK for Secrets Manager.
</li> <li> kms:Decrypt - needed only if you use a customer-managed AWS KMS
key to encrypt the secret. You do not need this permission to use the
account default AWS managed CMK for Secrets Manager.
</li> <li> secretsmanager:TagResource - needed only if you include the
`Tags` parameter.
</li> </ul> **Related operations**
<ul> <li> To delete a secret, use `DeleteSecret`.
</li> <li> To modify an existing secret, use `UpdateSecret`.
</li> <li> To create a new version of a secret, use `PutSecretValue`.
</li> <li> To retrieve the encrypted secure string and secure binary
values, use `GetSecretValue`.
</li> <li> To retrieve all other details for a secret, use
`DescribeSecret`. This does not include the encrypted secure string and
secure binary values.
</li> <li> To retrieve the list of secret versions associated with the
current secret, use `DescribeSecret` and examine the
`SecretVersionsToStages` response value.
</li> </ul>
"""
def create_secret(client, input, options \\ []) do
request(client, "CreateSecret", input, options)
end
@doc """
Deletes the resource-based permission policy attached to the secret.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:DeleteResourcePolicy
</li> </ul> **Related operations**
<ul> <li> To attach a resource policy to a secret, use `PutResourcePolicy`.
</li> <li> To retrieve the current resource-based policy that's attached to
a secret, use `GetResourcePolicy`.
</li> <li> To list all of the currently available secrets, use
`ListSecrets`.
</li> </ul>
"""
def delete_resource_policy(client, input, options \\ []) do
request(client, "DeleteResourcePolicy", input, options)
end
@doc """
Deletes an entire secret and all of its versions. You can optionally
include a recovery window during which you can restore the secret. If you
don't specify a recovery window value, the operation defaults to 30 days.
Secrets Manager attaches a `DeletionDate` stamp to the secret that
specifies the end of the recovery window. At the end of the recovery
window, Secrets Manager deletes the secret permanently.
At any time before the recovery window ends, you can use `RestoreSecret` to
remove the `DeletionDate` and cancel the deletion of the secret.
You cannot access the encrypted secret information in any secret that is
scheduled for deletion. If you need to access that information, you must
cancel the deletion with `RestoreSecret` and then retrieve the information.
<note> <ul> <li> There is no explicit operation to delete a version of a
secret. Instead, remove all staging labels from the `VersionStage` field of
a version. That marks the version as deprecated and allows Secrets Manager
to delete it as needed. Versions that do not have any staging labels do not
show up in `ListSecretVersionIds` unless you specify `IncludeDeprecated`.
</li> <li> The permanent secret deletion at the end of the waiting period
is performed as a background task with low priority. There is no guarantee
of a specific time after the recovery window for the actual delete
operation to occur.
</li> </ul> </note> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:DeleteSecret
</li> </ul> **Related operations**
<ul> <li> To create a secret, use `CreateSecret`.
</li> <li> To cancel deletion of a version of a secret before the recovery
window has expired, use `RestoreSecret`.
</li> </ul>
"""
def delete_secret(client, input, options \\ []) do
request(client, "DeleteSecret", input, options)
end
@doc """
Retrieves the details of a secret. It does not include the encrypted
fields. Secrets Manager only returns fields populated with a value in the
response.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:DescribeSecret
</li> </ul> **Related operations**
<ul> <li> To create a secret, use `CreateSecret`.
</li> <li> To modify a secret, use `UpdateSecret`.
</li> <li> To retrieve the encrypted secret information in a version of the
secret, use `GetSecretValue`.
</li> <li> To list all of the secrets in the AWS account, use
`ListSecrets`.
</li> </ul>
"""
def describe_secret(client, input, options \\ []) do
request(client, "DescribeSecret", input, options)
end
@doc """
Generates a random password of the specified complexity. This operation is
intended for use in the Lambda rotation function. Per best practice, we
recommend that you specify the maximum length and include every character
type that the system you are generating a password for can support.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:GetRandomPassword
</li> </ul>
"""
def get_random_password(client, input, options \\ []) do
request(client, "GetRandomPassword", input, options)
end
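# Hedged example (not from the original source; the field names follow the
# public Secrets Manager API):
#
#     AWS.SecretsManager.get_random_password(client, %{
#       "PasswordLength" => 32,
#       "ExcludePunctuation" => true
#     })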
@doc """
Retrieves the JSON text of the resource-based policy document attached to
the specified secret. The JSON request string input and response output
displays formatted code with white space and line breaks for better
readability. Submit your input as a single line JSON string.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:GetResourcePolicy
</li> </ul> **Related operations**
<ul> <li> To attach a resource policy to a secret, use `PutResourcePolicy`.
</li> <li> To delete the resource-based policy attached to a secret, use
`DeleteResourcePolicy`.
</li> <li> To list all of the currently available secrets, use
`ListSecrets`.
</li> </ul>
"""
def get_resource_policy(client, input, options \\ []) do
request(client, "GetResourcePolicy", input, options)
end
@doc """
Retrieves the contents of the encrypted fields `SecretString` or
`SecretBinary` from the specified version of a secret, whichever contains
content.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:GetSecretValue
</li> <li> kms:Decrypt - required only if you use a customer-managed AWS
KMS key to encrypt the secret. You do not need this permission to use the
account's default AWS managed CMK for Secrets Manager.
</li> </ul> **Related operations**
<ul> <li> To create a new version of the secret with different encrypted
information, use `PutSecretValue`.
</li> <li> To retrieve the non-encrypted details for the secret, use
`DescribeSecret`.
</li> </ul>
"""
def get_secret_value(client, input, options \\ []) do
request(client, "GetSecretValue", input, options)
end
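# Hedged usage sketch (not from the original source): given a configured
# %AWS.Client{} with credentials, region, endpoint, proto and port set as the
# private helpers below expect, fetching a secret looks like
#
#     {:ok, %{"SecretString" => secret}, _response} =
#       AWS.SecretsManager.get_secret_value(client, %{"SecretId" => "prod/db/password"})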
@doc """
Lists all of the versions attached to the specified secret. The output does
not include the `SecretString` or `SecretBinary` fields. By default, the
list includes only versions that have at least one staging label in
`VersionStage` attached.
<note> Always check the `NextToken` response parameter when calling any of
the `List*` operations. These operations can occasionally return an empty
or shorter than expected list of results even when more results are
available. When this happens, the `NextToken` response parameter
contains a value to pass to the next call to the same API to request the
next part of the list.
</note> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:ListSecretVersionIds
</li> </ul> **Related operations**
<ul> <li> To list the secrets in an account, use `ListSecrets`.
</li> </ul>
"""
def list_secret_version_ids(client, input, options \\ []) do
request(client, "ListSecretVersionIds", input, options)
end
@doc """
Lists all of the secrets that are stored by Secrets Manager in the AWS
account. To list the versions currently stored for a specific secret, use
`ListSecretVersionIds`. The encrypted fields `SecretString` and
`SecretBinary` are not included in the output. To get that information,
call the `GetSecretValue` operation.
<note> Always check the `NextToken` response parameter when calling any of
the `List*` operations. These operations can occasionally return an empty
or shorter than expected list of results even when more results are
available. When this happens, the `NextToken` response parameter
contains a value to pass to the next call to the same API to request the
next part of the list.
</note> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:ListSecrets
</li> </ul> **Related operations**
<ul> <li> To list the versions attached to a secret, use
`ListSecretVersionIds`.
</li> </ul>
"""
def list_secrets(client, input, options \\ []) do
request(client, "ListSecrets", input, options)
end
@doc """
Attaches the contents of the specified resource-based permission policy to
a secret. A resource-based policy is optional. Alternatively, you can use
IAM identity-based policies that specify the secret's Amazon Resource Name
(ARN) in the policy statement's `Resources` element. You can also use a
combination of both identity-based and resource-based policies. The
affected users and roles receive the permissions that are permitted by all
of the relevant policies. For more information, see [Using Resource-Based
Policies for AWS Secrets
Manager](http://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html).
For the complete description of the AWS policy syntax and grammar, see [IAM
JSON Policy
Reference](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
in the *IAM User Guide*.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:PutResourcePolicy
</li> </ul> **Related operations**
<ul> <li> To retrieve the resource policy attached to a secret, use
`GetResourcePolicy`.
</li> <li> To delete the resource-based policy that's attached to a secret,
use `DeleteResourcePolicy`.
</li> <li> To list all of the currently available secrets, use
`ListSecrets`.
</li> </ul>
"""
def put_resource_policy(client, input, options \\ []) do
request(client, "PutResourcePolicy", input, options)
end
@doc """
Stores a new encrypted secret value in the specified secret. To do this,
the operation creates a new version and attaches it to the secret. The
version can contain a new `SecretString` value or a new `SecretBinary`
value. You can also specify the staging labels that are initially attached
to the new version.
<note> The Secrets Manager console uses only the `SecretString` field. To
add binary data to a secret with the `SecretBinary` field you must use the
AWS CLI or one of the AWS SDKs.
</note> <ul> <li> If this operation creates the first version for the
secret then Secrets Manager automatically attaches the staging label
`AWSCURRENT` to the new version.
</li> <li> If another version of this secret already exists, then this
operation does not automatically move any staging labels other than those
that you explicitly specify in the `VersionStages` parameter.
</li> <li> If this operation moves the staging label `AWSCURRENT` from
another version to this version (because you included it in the
`VersionStages` parameter) then Secrets Manager also automatically moves
the staging label `AWSPREVIOUS` to the version that `AWSCURRENT` was
removed from.
</li> <li> This operation is idempotent. If a version with a `VersionId`
with the same value as the `ClientRequestToken` parameter already exists
and you specify the same secret data, the operation succeeds but does
nothing. However, if the secret data is different, then the operation fails
because you cannot modify an existing version; you can only create new
ones.
</li> </ul> <note> <ul> <li> If you call an operation to encrypt or decrypt
the `SecretString` or `SecretBinary` for a secret in the same account as
the calling user and that secret doesn't specify an AWS KMS encryption key,
Secrets Manager uses the account's default AWS managed customer master key
(CMK) with the alias `aws/secretsmanager`. If this key doesn't already
exist in your account then Secrets Manager creates it for you
automatically. All users and roles in the same AWS account automatically
have access to use the default CMK. Note that if a Secrets Manager API
call results in AWS creating the account's AWS-managed CMK, it can result
in a one-time significant delay in returning the result.
</li> <li> If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the
secret value then you must create and use a custom AWS KMS CMK because you
can't access the default CMK for the account using credentials from a
different AWS account. Store the ARN of the CMK in the secret when you
create the secret or when you update it by including it in the `KMSKeyId`.
If you call an API that must encrypt or decrypt `SecretString` or
`SecretBinary` using credentials from a different account then the AWS KMS
key policy must grant cross-account access to that other account's user or
role for both the kms:GenerateDataKey and kms:Decrypt operations.
</li> </ul> </note> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:PutSecretValue
</li> <li> kms:GenerateDataKey - needed only if you use a customer-managed
AWS KMS key to encrypt the secret. You do not need this permission to use
the account's default AWS managed CMK for Secrets Manager.
</li> </ul> **Related operations**
<ul> <li> To retrieve the encrypted value you store in the version of a
secret, use `GetSecretValue`.
</li> <li> To create a secret, use `CreateSecret`.
</li> <li> To get the details for a secret, use `DescribeSecret`.
</li> <li> To list the versions attached to a secret, use
`ListSecretVersionIds`.
</li> </ul>
"""
def put_secret_value(client, input, options \\ []) do
request(client, "PutSecretValue", input, options)
end
@doc """
Cancels the scheduled deletion of a secret by removing the `DeletedDate`
time stamp. This makes the secret accessible to query once again.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:RestoreSecret
</li> </ul> **Related operations**
<ul> <li> To delete a secret, use `DeleteSecret`.
</li> </ul>
"""
def restore_secret(client, input, options \\ []) do
request(client, "RestoreSecret", input, options)
end
@doc """
Configures and starts the asynchronous process of rotating this secret. If
you include the configuration parameters, the operation sets those values
for the secret and then immediately starts a rotation. If you do not
include the configuration parameters, the operation starts a rotation with
the values already stored in the secret. After the rotation completes, the
protected service and its clients all use the new version of the secret.
This required configuration information includes the ARN of an AWS Lambda
function and the time between scheduled rotations. The Lambda rotation
function creates a new version of the secret and creates or updates the
credentials on the protected service to match. After testing the new
credentials, the function marks the new secret with the staging label
`AWSCURRENT` so that your clients all immediately begin to use the new
version. For more information about rotating secrets and how to configure a
Lambda function to rotate the secrets for your protected service, see
[Rotating Secrets in AWS Secrets
Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html)
in the *AWS Secrets Manager User Guide*.
Secrets Manager schedules the next rotation when the previous one
completes. Secrets Manager schedules the date by adding the rotation
interval (number of days) to the actual date of the last rotation. The
service chooses the hour within that 24-hour date window randomly. The
minute is also chosen somewhat randomly, but weighted towards the top of
the hour and influenced by a variety of factors that help distribute load.
The rotation function must end with the versions of the secret in one of
two states:
<ul> <li> The `AWSPENDING` and `AWSCURRENT` staging labels are attached to
the same version of the secret, or
</li> <li> The `AWSPENDING` staging label is not attached to any version of
the secret.
</li> </ul> If the `AWSPENDING` staging label is present but not attached
to the same version as `AWSCURRENT` then any later invocation of
`RotateSecret` assumes that a previous rotation request is still in
progress and returns an error.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:RotateSecret
</li> <li> lambda:InvokeFunction (on the function specified in the secret's
metadata)
</li> </ul> **Related operations**
<ul> <li> To list the secrets in your account, use `ListSecrets`.
</li> <li> To get the details for a version of a secret, use
`DescribeSecret`.
</li> <li> To create a new version of a secret, use `CreateSecret`.
</li> <li> To attach staging labels to or remove staging labels from a
version of a secret, use `UpdateSecretVersionStage`.
</li> </ul>
"""
def rotate_secret(client, input, options \\ []) do
request(client, "RotateSecret", input, options)
end
@doc """
Attaches one or more tags, each consisting of a key name and a value, to
the specified secret. Tags are part of the secret's overall metadata, and
are not associated with any specific version of the secret. This operation
only appends tags to the existing list of tags. To remove tags, you must
use `UntagResource`.
The following basic restrictions apply to tags:
<ul> <li> Maximum number of tags per secret—50
</li> <li> Maximum key length—127 Unicode characters in UTF-8
</li> <li> Maximum value length—255 Unicode characters in UTF-8
</li> <li> Tag keys and values are case sensitive.
</li> <li> Do not use the `aws:` prefix in your tag names or values because
AWS reserves it for AWS use. You can't edit or delete tag names or values
with this prefix. Tags with this prefix do not count against your tags per
secret limit.
</li> <li> If you use your tagging schema across multiple services and
resources, remember other services might have restrictions on allowed
characters. Generally allowed characters: letters, spaces, and numbers
representable in UTF-8, plus the following special characters: + - = . _ :
/ @.
</li> </ul> <important> If you use tags as part of your security strategy,
then adding or removing a tag can change permissions. If successfully
completing this operation would result in you losing your permissions for
this secret, then the operation is blocked and returns an Access Denied
error.
</important> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:TagResource
</li> </ul> **Related operations**
<ul> <li> To remove one or more tags from the collection attached to a
secret, use `UntagResource`.
</li> <li> To view the list of tags attached to a secret, use
`DescribeSecret`.
</li> </ul>
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes one or more tags from the specified secret.
This operation is idempotent. If a requested tag is not attached to the
secret, no error is returned and the secret metadata is unchanged.
<important> If you use tags as part of your security strategy, then
removing a tag can change permissions. If successfully completing this
operation would result in you losing your permissions for this secret, then
the operation is blocked and returns an Access Denied error.
</important> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:UntagResource
</li> </ul> **Related operations**
<ul> <li> To add one or more tags to the collection attached to a secret,
use `TagResource`.
</li> <li> To view the list of tags attached to a secret, use
`DescribeSecret`.
</li> </ul>
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Modifies many of the details of the specified secret. If you include a
`ClientRequestToken` and *either* `SecretString` or `SecretBinary` then it
also creates a new version attached to the secret.
To modify the rotation configuration of a secret, use `RotateSecret`
instead.
<note> The Secrets Manager console uses only the `SecretString` parameter
and therefore limits you to encrypting and storing only a text string. To
encrypt and store binary data as part of the version of a secret, you must
use either the AWS CLI or one of the AWS SDKs.
</note> <ul> <li> If a version with a `VersionId` with the same value as
the `ClientRequestToken` parameter already exists, the operation results in
an error. You cannot modify an existing version; you can only create a new
version.
</li> <li> If you include `SecretString` or `SecretBinary` to create a new
secret version, Secrets Manager automatically attaches the staging label
`AWSCURRENT` to the new version.
</li> </ul> <note> <ul> <li> If you call an operation to encrypt or decrypt
the `SecretString` or `SecretBinary` for a secret in the same account as
the calling user and that secret doesn't specify an AWS KMS encryption key,
Secrets Manager uses the account's default AWS managed customer master key
(CMK) with the alias `aws/secretsmanager`. If this key doesn't already
exist in your account then Secrets Manager creates it for you
automatically. All users and roles in the same AWS account automatically
have access to use the default CMK. Note that if a Secrets Manager API
call results in AWS creating the account's AWS-managed CMK, it can result
in a one-time significant delay in returning the result.
</li> <li> If the secret resides in a different AWS account from the
credentials calling an API that requires encryption or decryption of the
secret value then you must create and use a custom AWS KMS CMK because you
can't access the default CMK for the account using credentials from a
different AWS account. Store the ARN of the CMK in the secret when you
create the secret or when you update it by including it in the `KMSKeyId`.
If you call an API that must encrypt or decrypt `SecretString` or
`SecretBinary` using credentials from a different account then the AWS KMS
key policy must grant cross-account access to that other account's user or
role for both the kms:GenerateDataKey and kms:Decrypt operations.
</li> </ul> </note> **Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:UpdateSecret
</li> <li> kms:GenerateDataKey - needed only if you use a custom AWS KMS
key to encrypt the secret. You do not need this permission to use the
account's AWS managed CMK for Secrets Manager.
</li> <li> kms:Decrypt - needed only if you use a custom AWS KMS key to
encrypt the secret. You do not need this permission to use the account's
AWS managed CMK for Secrets Manager.
</li> </ul> **Related operations**
<ul> <li> To create a new secret, use `CreateSecret`.
</li> <li> To add only a new version to an existing secret, use
`PutSecretValue`.
</li> <li> To get the details for a secret, use `DescribeSecret`.
</li> <li> To list the versions contained in a secret, use
`ListSecretVersionIds`.
</li> </ul>
"""
def update_secret(client, input, options \\ []) do
request(client, "UpdateSecret", input, options)
end
@doc """
Modifies the staging labels attached to a version of a secret. Staging
labels are used to track a version as it progresses through the secret
rotation process. You can attach a staging label to only one version of a
secret at a time. If a staging label to be added is already attached to
another version, then it is moved: removed from the other version first and
then attached to this one. For more information about staging labels, see
[Staging
Labels](https://docs.aws.amazon.com/secretsmanager/latest/userguide/terms-concepts.html#term_staging-label)
in the *AWS Secrets Manager User Guide*.
The staging labels that you specify in the `VersionStage` parameter are
added to the existing list of staging labels; they don't replace it.
You can move the `AWSCURRENT` staging label to this version by including it
in this call.
<note> Whenever you move `AWSCURRENT`, Secrets Manager automatically moves
the label `AWSPREVIOUS` to the version that `AWSCURRENT` was removed from.
</note> If this action results in the last label being removed from a
version, then the version is considered to be 'deprecated' and can be
deleted by Secrets Manager.
**Minimum permissions**
To run this command, you must have the following permissions:
<ul> <li> secretsmanager:UpdateSecretVersionStage
</li> </ul> **Related operations**
<ul> <li> To get the list of staging labels that are currently associated
with a version of a secret, use `DescribeSecret` and examine the
`SecretVersionsToStages` response value.
</li> </ul>
"""
def update_secret_version_stage(client, input, options \\ []) do
request(client, "UpdateSecretVersionStage", input, options)
end
@doc """
Validates the JSON text of the resource-based policy document attached to
the specified secret. The JSON request string input and response output
displays formatted code with white space and line breaks for better
readability. Submit your input as a single line JSON string. A
resource-based policy is optional.
"""
def validate_resource_policy(client, input, options \\ []) do
request(client, "ValidateResourcePolicy", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "secretsmanager"}
host = build_host("secretsmanager", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "secretsmanager.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# --- end of lib/aws/generated/secrets_manager.ex ---
defmodule ThousandIsland.Handler do
@moduledoc """
`ThousandIsland.Handler` defines the behaviour required of the application layer of a Thousand Island server. When starting a
Thousand Island server, you must pass the name of a module implementing this behaviour as the `handler_module` parameter.
Thousand Island will then use the specified module to handle each connection that is made to the server.
The lifecycle of a Handler instance is as follows:
1. After a client connection to a Thousand Island server is made, Thousand Island will complete the initial setup of the
connection (performing a TLS handshake, for example), and then call `c:handle_connection/2`.
2. A handler implementation may choose to process a client connection within the `c:handle_connection/2` callback by
calling functions against the passed `ThousandIsland.Socket`. In many cases, this may be all that is required of
an implementation & the value `{:close, state}` can be returned which will cause Thousand Island to close the connection
to the client.
3. In cases where the server wishes to keep the connection open and wait for subsequent requests from the client on the
same socket, it may elect to return `{:continue, state}`. This will cause Thousand Island to wait for client data
asynchronously; `c:handle_data/3` will be invoked when the client sends more data.
4. In the meantime, the process which is hosting the connection is idle & able to receive messages sent from elsewhere in your
application as needed. The implementation included in the `use ThousandIsland.Handler` macro uses a `GenServer` structure,
so you may implement such behaviour via standard `GenServer` patterns. Note that in these cases state is provided (and
must be returned) in a `{socket, state}` format, where the second tuple element is the same state value that is passed to the various `handle_*` callbacks
defined on this behaviour. Note also that any `GenServer` `handle_*` calls which are processed directly by an implementing module
will cancel any async read timeout values which may have been set. Such calls are able to reset the timeout by returning a four element
tuple with `timeout` as the fourth argument as specified in the `GenServer` documentation.
It is fully supported to intermix synchronous `ThousandIsland.Socket.recv` calls with async return values from `c:handle_connection/2`
and `c:handle_data/3` callbacks.
# Example
A simple example of a Hello World server is as follows:
```elixir
defmodule HelloWorld do
use ThousandIsland.Handler
@impl ThousandIsland.Handler
def handle_connection(socket, state) do
ThousandIsland.Socket.send(socket, "Hello, World")
{:close, state}
end
end
```
Another example of a server that echoes back all data sent to it is as follows:
```elixir
defmodule Echo do
use ThousandIsland.Handler
@impl ThousandIsland.Handler
def handle_data(data, socket, state) do
ThousandIsland.Socket.send(socket, data)
{:continue, state}
end
end
```
Note that in this example there is no `c:handle_connection/2` callback defined. The default implementation of this
callback will simply return `{:continue, state}`, which is appropriate for cases where the client is the first
party to communicate.
Another example of a server which can send and receive messages asynchronously is as follows:
```elixir
defmodule Messenger do
use ThousandIsland.Handler
@impl ThousandIsland.Handler
def handle_data(msg, _socket, state) do
IO.puts(msg)
{:continue, state}
end
def handle_info({:send, msg}, {socket, state}) do
ThousandIsland.Socket.send(socket, msg)
{:noreply, {socket, state}}
end
end
```
Note that in this example we make use of the fact that the handler process is really just a GenServer to send it messages
which are able to make use of the underlying socket. This allows for bidirectional sending and receiving of messages in
an asynchronous manner.
# When Handler Isn't Enough
The `use ThousandIsland.Handler` implementation should be flexible enough to power just about any handler, however if
this should not be the case for you, there is an escape hatch available. If you require more flexibility than the
`ThousandIsland.Handler` behaviour provides, you are free to specify any module which implements `start_link/1` as the
`handler_module` parameter. The process of getting from this new process to a ready-to-use socket is somewhat
delicate, however. The steps required are as follows:
1. Thousand Island calls `start_link/1` on the configured `handler_module`, passing in the configured
`handler_options` as the sole argument. This function is expected to return a conventional `GenServer.on_start()`
style tuple. Note that this newly created process is not passed the connection socket immediately.
2. The socket will be passed to the new process via a message of the form `{:thousand_island_ready, socket}`.
3. Once the process receives the socket, it must call `ThousandIsland.Socket.handshake/1` with the socket as the sole
argument in order to finalize the setup of the socket.
4. The socket is now ready to use.
In addition to this process, there are several other considerations to be aware of:
* The underlying socket is closed automatically when the handler process ends.
* Handler processes should have a restart strategy of `:temporary` to ensure that Thousand Island does not attempt to
restart crashed handlers.
* Handler processes should trap exit if possible so that existing connections can be given a chance to cleanly shut
down when shutting down a Thousand Island server instance.
* The `:handler` family of telemetry events are emitted by the `ThousandIsland.Handler` implementation. If you use your
own implementation in its place you will not see any such telemetry events.
"""
@typedoc """
The value returned by `c:handle_connection/2` and `c:handle_data/3`
"""
@type handler_result ::
{:continue, state :: term()}
| {:continue, state :: term(), timeout()}
| {:close, state :: term()}
| {:error, String.t(), state :: term()}
@doc """
This callback is called shortly after a client connection has been made, immediately after the socket handshake process has
completed. It is called with the server's configured `handler_options` value as initial state. Handlers may choose to
interact synchronously with the socket in this callback via calls to various `ThousandIsland.Socket` functions.
The value returned by this callback causes Thousand Island to proceed in one of several ways:
* Returning `{:close, state}` will cause Thousand Island to close the socket & call the `c:handle_close/2` callback to
allow final cleanup to be done.
* Returning `{:continue, state}` will cause Thousand Island to switch the socket to an asynchronous mode. When the
client subsequently sends data (or if there is already unread data waiting from the client), Thousand Island will call
`c:handle_data/3` to allow this data to be processed.
* Returning `{:continue, state, timeout}` is identical to the previous case with the
addition of a timeout. If `timeout` milliseconds passes with no data being received, the socket
will be closed and `c:handle_timeout/2` will be called.
* Returning `{:error, reason, state}` will cause Thousand Island to close the socket & call the `c:handle_error/3` callback to
allow final cleanup to be done.
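For example, a sketch which requires the client to send its first packet within 30 seconds:
```elixir
@impl ThousandIsland.Handler
def handle_connection(_socket, state) do
  {:continue, state, 30_000}
end
```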
"""
@callback handle_connection(socket :: ThousandIsland.Socket.t(), state :: term()) ::
handler_result()
@doc """
This callback is called whenever client data is received after `c:handle_connection/2` or `c:handle_data/3` have returned an
`{:continue, state}` tuple. The data received is passed as the first argument, and handlers may choose to interact
synchronously with the socket in this callback via calls to various `ThousandIsland.Socket` functions.
The value returned by this callback causes Thousand Island to proceed in one of several ways:
* Returning `{:close, state}` will cause Thousand Island to close the socket & call the `c:handle_close/2` callback to
allow final cleanup to be done.
* Returning `{:continue, state}` will cause Thousand Island to switch the socket to an asynchronous mode. When the
client subsequently sends data (or if there is already unread data waiting from the client), Thousand Island will call
`c:handle_data/3` to allow this data to be processed.
* Returning `{:continue, state, timeout}` is identical to the previous case with the
addition of a timeout. If `timeout` milliseconds passes with no data being received, the socket
will be closed and `c:handle_timeout/2` will be called.
* Returning `{:error, reason, state}` will cause Thousand Island to close the socket & call the `c:handle_error/3` callback to
allow final cleanup to be done.
"""
@callback handle_data(data :: binary(), socket :: ThousandIsland.Socket.t(), state :: term()) ::
handler_result()
@doc """
This callback is called when the underlying socket is closed by the remote end; it should perform any cleanup required
as it is the last callback called before the process backing this connection is terminated. The underlying socket
has already been closed by the time this callback is called. The return value is ignored.
This callback is not called if the connection is explicitly closed via `ThousandIsland.Socket.close/1`, however it
will be called in cases where `handle_connection/2` or `handle_data/3` return a `{:close, state}` tuple.
"""
@callback handle_close(socket :: ThousandIsland.Socket.t(), state :: term()) :: term()
@doc """
This callback is called when the underlying socket encounters an error; it should perform any cleanup required
as it is the last callback called before the process backing this connection is terminated. The underlying socket
has already been closed by the time this callback is called. The return value is ignored.
In addition to socket level errors, this callback is also called in cases where `handle_connection/2` or `handle_data/3`
return a `{:error, reason, state}` tuple.
"""
@callback handle_error(
reason :: String.t(),
socket :: ThousandIsland.Socket.t(),
state :: term()
) ::
term()
@doc """
This callback is called when the server process itself is being shut down; it should perform any cleanup required
as it is the last callback called before the process backing this connection is terminated. The underlying socket
has NOT been closed by the time this callback is called. The return value is ignored.
This callback is only called when the shutdown reason is `:normal`, and is subject to the same caveats described
in `c:GenServer.terminate/2`.
"""
@callback handle_shutdown(
socket :: ThousandIsland.Socket.t(),
state :: term()
) ::
term()
@doc """
This callback is called when an async read call times out (i.e. when a tuple of the form `{:continue, state, timeout}`
is returned by `c:handle_connection/2` or `c:handle_data/3` and `timeout` milliseconds have passed). Note that it is NOT called
on explicit `ThousandIsland.Socket.recv/3` calls as they have their own timeout semantics. The underlying socket
has NOT been closed by the time this callback is called. The return value is ignored.
"""
@callback handle_timeout(
socket :: ThousandIsland.Socket.t(),
state :: term()
) ::
term()
@optional_callbacks handle_connection: 2,
handle_data: 3,
handle_close: 2,
handle_error: 3,
handle_shutdown: 2,
handle_timeout: 2
defmacro __using__(_opts) do
quote location: :keep do
@behaviour ThousandIsland.Handler
use GenServer, restart: :temporary
# Dialyzer gets confused by handle_continuation being a defp and not a def
@dialyzer {:no_match, handle_continuation: 2}
def handle_connection(_socket, state), do: {:continue, state}
def handle_data(_data, _socket, state), do: {:continue, state}
def handle_close(_socket, _state), do: :ok
def handle_error(_error, _socket, _state), do: :ok
def handle_shutdown(_socket, _state), do: :ok
def handle_timeout(_socket, _state), do: :ok
defoverridable ThousandIsland.Handler
def start_link(arg) do
GenServer.start_link(__MODULE__, arg)
end
@impl GenServer
def init(handler_options) do
Process.flag(:trap_exit, true)
{:ok, {nil, handler_options}}
end
@impl GenServer
def handle_info({:thousand_island_ready, socket}, {_, state}) do
%{address: address, port: port} = ThousandIsland.Socket.peer_info(socket)
:telemetry.execute([:handler, :start], %{}, %{
remote_address: address,
remote_port: port,
connection_id: socket.connection_id,
acceptor_id: socket.acceptor_id
})
ThousandIsland.Socket.handshake(socket)
{:noreply, {socket, state}, {:continue, :handle_connection}}
end
# Use a continue pattern here so that we have committed the socket
# to state in case the `c:handle_connection/2` callback raises an error.
# This ensures that the `c:terminate/2` calls below are able to properly
# close down the process
@impl GenServer
def handle_continue(:handle_connection, {socket, state}) do
__MODULE__.handle_connection(socket, state)
|> handle_continuation(socket)
end
def handle_info({msg, _, data}, {socket, state}) when msg in [:tcp, :ssl] do
:telemetry.execute([:handler, :async_recv], %{data: data}, %{
connection_id: socket.connection_id
})
__MODULE__.handle_data(data, socket, state)
|> handle_continuation(socket)
end
def handle_info({msg, _}, {socket, state}) when msg in [:tcp_closed, :ssl_closed] do
{:stop, {:shutdown, :peer_closed}, {socket, state}}
end
def handle_info({msg, _, reason}, {socket, state}) when msg in [:tcp_error, :ssl_error] do
{:stop, reason, {socket, state}}
end
def handle_info(:timeout, {socket, state}) do
{:stop, :timeout, {socket, state}}
end
@impl GenServer
def terminate(:shutdown, {socket, state}) do
:telemetry.execute([:handler, :shutdown], %{reason: :shutdown}, %{
connection_id: socket.connection_id
})
__MODULE__.handle_shutdown(socket, state)
end
@impl GenServer
def terminate({:shutdown, reason}, {socket, state}) do
ThousandIsland.Socket.close(socket)
:telemetry.execute([:handler, :shutdown], %{reason: reason}, %{
connection_id: socket.connection_id
})
__MODULE__.handle_close(socket, state)
end
@impl GenServer
def terminate(:timeout, {socket, state}) do
:telemetry.execute([:handler, :shutdown], %{reason: :timeout}, %{
connection_id: socket.connection_id
})
__MODULE__.handle_timeout(socket, state)
end
def terminate(reason, {socket, state}) do
ThousandIsland.Socket.close(socket)
:telemetry.execute([:handler, :error], %{error: reason}, %{
connection_id: socket.connection_id
})
__MODULE__.handle_error(reason, socket, state)
end
defp handle_continuation(continuation, socket) do
case continuation do
{:continue, state} ->
ThousandIsland.Socket.setopts(socket, active: :once)
{:noreply, {socket, state}}
{:continue, state, timeout} ->
ThousandIsland.Socket.setopts(socket, active: :once)
{:noreply, {socket, state}, timeout}
{:close, state} ->
{:stop, {:shutdown, :local_closed}, {socket, state}}
{:error, reason, state} ->
{:stop, reason, {socket, state}}
end
end
end
end
end
lib/thousand_island/handler.ex
defmodule Sanbase.Clickhouse.HistoricalBalance do
@moduledoc ~s"""
Module providing functions for historical balances and balance changes.
This module dispatches to the underlying modules and serves as a common interface
for many different database tables and schemas.
"""
use AsyncWith
@async_with_timeout 29_000
alias Sanbase.Model.Project
alias Sanbase.Clickhouse.HistoricalBalance.{
BchBalance,
BnbBalance,
BtcBalance,
Erc20Balance,
EthBalance,
LtcBalance,
XrpBalance
}
@infrastructure_to_module %{
"BCH" => BchBalance,
"BNB" => BnbBalance,
"BEP2" => BnbBalance,
"BTC" => BtcBalance,
"LTC" => LtcBalance,
"XRP" => XrpBalance,
"ETH" => [EthBalance, Erc20Balance]
}
@supported_infrastructures Map.keys(@infrastructure_to_module)
def supported_infrastructures(), do: @supported_infrastructures
@type selector :: %{
required(:infrastructure) => String.t(),
optional(:currency) => String.t(),
optional(:slug) => String.t()
}
@type slug :: String.t()
@type address :: String.t() | list(String.t())
@typedoc ~s"""
An interval represented as string. It has the format of number followed by one of:
ns, ms, s, m, h, d or w - each representing some time unit
"""
@type interval :: String.t()
@typedoc ~s"""
The type returned by the historical_balance/5 function
"""
@type historical_balance_return ::
{:ok, []}
| {:ok, list(%{datetime: DateTime.t(), balance: number()})}
| {:error, String.t()}
@doc ~s"""
Return a list of the assets that a given address currently holds or
has held in the past.
This can be combined with the historical balance query to see the historical
balance of all currently owned assets.
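For example (illustrative; the exact fields in the returned maps depend on
the underlying balance modules):
    assets_held_by_address(%{infrastructure: "ETH", address: "0x..."})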
"""
@spec assets_held_by_address(map()) :: {:ok, list(map())} | {:error, String.t()}
def assets_held_by_address(%{infrastructure: "ETH", address: address}) do
async with {:ok, erc20_assets} <- Erc20Balance.assets_held_by_address(address),
{:ok, ethereum} <- EthBalance.assets_held_by_address(address) do
{:ok, ethereum ++ erc20_assets}
end
end
def assets_held_by_address(%{infrastructure: infr, address: address}) do
case Map.get(@infrastructure_to_module, infr) do
nil -> {:error, "Infrastructure #{infr} is not supported."}
module -> module.assets_held_by_address(address)
end
end
@doc ~s"""
For a given address or list of addresses returns the `slug` balance change for the
from-to period. The returned list indicates the address, the balance before, the balance after,
and the balance change.
"""
@spec balance_change(selector, address, from :: DateTime.t(), to :: DateTime.t()) ::
__MODULE__.Behaviour.balance_change_result()
def balance_change(selector, address, from, to) do
infrastructure = Map.fetch!(selector, :infrastructure)
slug = Map.get(selector, :slug)
case {infrastructure, slug} do
{"ETH", "ethereum"} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("ethereum"),
do: EthBalance.balance_change(address, contract, decimals, from, to)
{"ETH", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug(slug || "ethereum"),
do: Erc20Balance.balance_change(address, contract, decimals, from, to)
{"XRP", _} ->
currency = Map.get(selector, :currency, "XRP")
XrpBalance.balance_change(address, currency, 0, from, to)
{"BTC", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("bitcoin"),
do: BtcBalance.balance_change(address, contract, decimals, from, to)
{"BCH", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("bitcoin-cash"),
do: BchBalance.balance_change(address, contract, decimals, from, to)
{"LTC", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("litecoin"),
do: LtcBalance.balance_change(address, contract, decimals, from, to)
{"BNB", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug(slug || "binance-coin"),
do: BnbBalance.balance_change(address, contract, decimals, from, to)
end
end
@doc ~s"""
For a given address or list of addresses returns the combined `slug` balance for each bucket
of size `interval` in the from-to time period
"""
@spec historical_balance(selector, address, from :: DateTime.t(), to :: DateTime.t(), interval) ::
__MODULE__.Behaviour.historical_balance_result()
def historical_balance(selector, address, from, to, interval) do
infrastructure = Map.fetch!(selector, :infrastructure)
slug = Map.get(selector, :slug)
case {infrastructure, slug} do
{"ETH", ethereum} when ethereum in [nil, "ethereum"] ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("ethereum"),
do: EthBalance.historical_balance(address, contract, decimals, from, to, interval)
{"ETH", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug(slug),
do: Erc20Balance.historical_balance(address, contract, decimals, from, to, interval)
{"XRP", _} ->
currency = Map.get(selector, :currency, "XRP")
XrpBalance.historical_balance(address, currency, 0, from, to, interval)
{"BTC", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("bitcoin"),
do: BtcBalance.historical_balance(address, contract, decimals, from, to, interval)
{"BCH", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("bitcoin-cash"),
do: BchBalance.historical_balance(address, contract, decimals, from, to, interval)
{"LTC", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug("litecoin"),
do: LtcBalance.historical_balance(address, contract, decimals, from, to, interval)
{"BNB", _} ->
with {:ok, contract, decimals} <- Project.contract_info_by_slug(slug || "binance-coin"),
do: BnbBalance.historical_balance(address, contract, decimals, from, to, interval)
end
end
end
lib/sanbase/clickhouse/historical_balance/historical_balance.ex
defmodule Dynamo do
@moduledoc """
Run, Dynamo, Run!
This is the main module in the Dynamo repository. It allows
users to configure the Dynamo framework and define their
own Dynamos.
A very simple Dynamo can be defined as follows:
defmodule MyDynamo do
use Dynamo
endpoint SomeRouter
end
A Dynamo can be used on top of a `Dynamo.Router` in case
you want to extend a single router into a single-file Dynamo.
## Configuration
A Dynamo comes with a configuration API that allows a
developer to customize how Dynamo works and to provide
custom extensions.
For example, here is a snippet that configures Dynamo
to serve public assets from the :myapp application
every time we have a request at `/static`:
config :dynamo,
env: "prod",
otp_app: :myapp,
static_root: "priv/static",
static_route: "/static"
The available `:dynamo` configurations are:
* `:compile_on_demand` - Compiles modules as they are needed
* `:env` - The environment this Dynamo runs on
* `:endpoint` - The endpoint to dispatch requests to
* `:exceptions_editor` - Some exception handlers show editors information
to help debugging (defaults to the DYNAMO_EDITOR environment variable)
* `:exceptions_handler` - How to handle and display exceptions (defaults to `Exceptions.Public`)
* `:reload_modules` - Reload modules after they are changed
* `:session_store` - The session store to be used, such as `CookieStore` or `ETSStore`
* `:session_options` - The session options to be used
* `:source_paths` - The paths to search when compiling modules on demand
* `:static_route` - The route to serve static assets
* `:static_root` - The location where static assets are stored, given as a path relative to the otp_app root
* `:supervisor` - The supervisor local node name
* `:templates_paths` - The paths to find templates
Check `Dynamo.Base` for more information on `config` and
other initialization configuration.
## Filters
A Dynamo also contains a set of filters that are meant
to be used on all requests. Some of these filters are added
based on the configuration options above. Others are included by
default under the following conditions:
* `Dynamo.Filters.Static` - when a static_route and static_root are set,
this filter is added to serve static assets;
* `Dynamo.Filters.Head` - converts HEAD requests to GET, added by default;
* `Dynamo.Filters.Loader` - when `:compile_on_demand` or `:reload_modules`
configs are set to true, this filter is added to compile and reload
code on demand;
* `Dynamo.Filters.Session` - when a `:session_store` is configured, it adds
session functionality to the Dynamo;
* `Dynamo.Filters.Exceptions` - responsible for logging and handling
exceptions, added by default;
Filters can be added and removed using `filter` and `remove_filter`
macros. You can get the list of all of a Dynamo's filters using:
`mix dynamo.filters`.
For more information, check `Dynamo.Router.Filters` docs.
## Initialization
A Dynamo allows you to register initializers which are
invoked when the dynamo starts. A Dynamo is initialized
in three steps:
* The `:dynamos` registered in your project are compiled
* The dynamos supervision trees are started via `DYNAMO.start_link`
* A dynamo is hooked into a web server via `DYNAMO.run`
Step 2 can be extended via initializers. For example:
defmodule MyDynamo do
use Dynamo
initializer :some_config do
# Connect to the database
end
end
"""
@doc """
Gets the Dynamo used by default under test.
"""
def under_test() do
{ :ok, mod } = :application.get_env(:dynamo, :under_test)
mod
end
@doc """
Sets the Dynamo to be used under test.
"""
def under_test(mod) do
:application.set_env(:dynamo, :under_test, mod)
end
@doc false
defmacro __using__(_) do
Dynamo.App.start
setup =
quote do
if Module.get_attribute(__MODULE__, :dynamo_router) do
raise "Dynamo needs to be used before Dynamo.Router"
end
@before_compile { unquote(__MODULE__), :load_env_file }
@before_compile { unquote(__MODULE__), :define_endpoint }
@before_compile { unquote(__MODULE__), :define_filters }
@before_compile { unquote(__MODULE__), :define_templates_paths }
@before_compile { unquote(__MODULE__), :define_static }
@before_compile { unquote(__MODULE__), :define_root }
use Dynamo.Utils.Once
alias Dynamo.Filters.Session, as: Session
alias Dynamo.Filters.Exceptions, as: Exceptions
use_once Dynamo.Base
use_once Dynamo.Router.Filters
config :dynamo, unquote(default_dynamo_config(__CALLER__))
config :server, [handler: Dynamo.Cowboy, port: 4000]
end
definitions =
quote location: :keep do
@doc """
Starts the Dynamo supervisor and run all
registered initializers.
"""
def start_link(opts // []) do
info = Dynamo.Supervisor.start_link(config[:dynamo][:supervisor], opts)
run_initializers
info
end
@doc """
Runs the Dynamo in the configured web server.
"""
def run(options // []) do
dynamo = config[:dynamo]
options = Keyword.put(options, :ssl, config[:ssl])
options = Keyword.put(options, :env, dynamo[:env])
options = Keyword.put(options, :otp_app, dynamo[:otp_app])
options = Keyword.merge(config[:server], options)
options[:handler].run(__MODULE__, options)
end
initializer :start_dynamo_reloader do
dynamo = config[:dynamo]
if dynamo[:compile_on_demand] do
callback = fn
path, acc when is_binary(path) ->
(path |> Path.expand(root) |> Path.wildcard) ++ acc
_, acc ->
acc
end
source = Enum.reduce dynamo[:source_paths], [], callback
templates = Enum.reduce dynamo[:templates_paths], [], callback
Dynamo.Loader.append_paths(source -- templates)
Dynamo.Loader.enable
if Code.ensure_loaded?(IEx) and IEx.started? do
IEx.after_spawn(fn -> Dynamo.Loader.enable end)
end
end
end
initializer :start_dynamo_renderer do
precompiled = Enum.all?(templates_paths, Dynamo.Templates.Finder.requires_precompilation?(&1))
unless precompiled do
supervisor = config[:dynamo][:supervisor]
renderer = templates_server()
Dynamo.Supervisor.start_child(supervisor, Dynamo.Templates.Renderer, [renderer])
if config[:dynamo][:compile_on_demand] do
Dynamo.Loader.on_purge(fn -> Dynamo.Templates.Renderer.clear(renderer) end)
end
end
end
end
quote do
unquote(setup)
unquote(definitions)
end
end
## Helpers
defp default_dynamo_config(env) do
[ cache_static: true,
compile_on_demand: true,
compiled_templates: env.module.CompiledTemplates,
env: "prod",
environments_path: Path.expand("../environments", env.file),
exceptions_editor: System.get_env("DYNAMO_EDITOR"),
exceptions_handler: Dynamo.Filters.Exceptions.Public,
reload_modules: false,
session_options: [],
source_paths: ["web/*"],
static_root: "priv/static",
supervisor: env.module.Supervisor,
templates_paths: ["web/templates"] ]
end
## __before_compile__ callbacks
@doc false
defmacro load_env_file(_) do
dynamo = Module.get_attribute(__CALLER__.module, :config)[:dynamo]
dir = dynamo[:environments_path]
env = dynamo[:env]
if dir && File.dir?(dir) do
file = "#{dir}/#{env}.exs"
Code.string_to_quoted! File.read!(file), file: file
end
end
@doc false
defmacro define_filters(_) do
quote location: :keep do
Enum.each Dynamo.define_filters(__MODULE__, []), prepend_filter(&1)
end
end
@doc false
def define_filters(mod, filters) do
dynamo = Module.get_attribute(mod, :config)[:dynamo]
if dynamo[:static_route] do
static = Dynamo.Filters.Static.new(dynamo[:static_route], dynamo[:static_root])
filters = [static|filters]
end
if dynamo[:compile_on_demand] || dynamo[:reload_modules] do
reloader = Dynamo.Filters.Loader.new(dynamo[:compile_on_demand], dynamo[:reload_modules])
filters = [reloader|filters]
end
if dynamo[:reload_modules] && !dynamo[:compile_on_demand] do
raise "Cannot have reload_modules set to true and compile_on_demand set to false"
end
if dynamo[:session_store] && dynamo[:session_options] do
session = Dynamo.Filters.Session.new(dynamo[:session_store], dynamo[:session_options])
filters = [session|filters]
end
if dynamo[:exceptions_handler] do
exceptions = Dynamo.Filters.Exceptions.new(dynamo[:exceptions_handler])
filters = [exceptions|filters]
end
filters = [Dynamo.Filters.Head|filters]
filters
end
@doc false
defmacro define_endpoint(env) do
endpoint = Module.get_attribute(env.module, :config)[:dynamo][:endpoint] |> Macro.escape
if endpoint do
quote location: :keep do
@doc """
Receives a connection and dispatches it to #{inspect unquote(endpoint)}
"""
def service(conn) do
unquote(endpoint).service(conn)
end
end
end
end
@doc false
defmacro define_templates_paths(env) do
module = env.module
dynamo = Module.get_attribute(module, :config)[:dynamo]
templates_paths = dynamo[:templates_paths]
{ runtime, to_compile } =
if dynamo[:compile_on_demand] do
{ templates_paths, [] }
else
Enum.partition(templates_paths, Dynamo.Templates.Finder.requires_precompilation?(&1))
end
if to_compile != [] do
module = dynamo[:compiled_templates]
templates_paths = [module|runtime]
end
templates_server = dynamo[:supervisor].TemplatesServer
templates_paths = lc path inlist templates_paths do
if is_binary(path) do
quote do: Path.expand(unquote(path), root)
else
Macro.escape(path)
end
end
quote location: :keep do
@doc """
Returns templates paths after being processed.
If compilation on demand is disabled, templates paths
that can be precompiled will be precompiled and stored
into a given module for performance.
"""
def templates_paths, do: unquote(templates_paths)
@doc """
The worker responsible for rendering templates.
"""
def templates_server, do: unquote(templates_server)
end
end
@doc false
defmacro define_static(env) do
module = env.module
dynamo = Module.get_attribute(module, :config)[:dynamo]
supervisor = dynamo[:supervisor]
if dynamo[:static_route] do
quote location: :keep do
@doc """
Returns the static ets table and server name
used by this Dynamo.
"""
def static_cache do
{ unquote(supervisor.StaticTable), unquote(supervisor.StaticServer) }
end
initializer :start_dynamo_static do
Dynamo.Supervisor.start_child(config[:dynamo][:supervisor], Dynamo.Static, [__MODULE__])
end
end
end
end
@doc false
defmacro define_root(env) do
module = env.module
dynamo = Module.get_attribute(module, :config)[:dynamo]
root =
cond do
dynamo[:root] -> dynamo[:root]
nil?(dynamo[:otp_app]) -> File.cwd!
true -> nil
end
if root do
quote location: :keep do
@doc """
Returns the root path for this Dynamo.
"""
def root, do: unquote(root)
end
else
app = dynamo[:otp_app]
tmp = "#{app}/tmp/#{dynamo[:env]}/#{app}"
quote location: :keep do
@doc """
Returns the root path for this Dynamo
based on the OTP app directory.
"""
def root do
case :code.lib_dir(unquote(app)) do
list when is_list(list) ->
bin = String.from_char_list!(list)
size = size(bin)
if size > unquote(size(tmp)) do
:binary.replace bin, unquote(tmp),
unquote(atom_to_binary(app)), scope: { size, unquote(-size(tmp)) }
else
bin
end
_ ->
raise "could not find OTP app #{unquote(dynamo[:otp_app])} for #{inspect __MODULE__}. " <>
"This may happen if the directory name is different than the application name."
end
end
end
end
end
end
lib/dynamo.ex
defmodule Nx.Defn.Tree do
@moduledoc """
Helper functions to traverse expressions.
"""
alias Nx.Defn.Expr
alias Nx.Tensor, as: T
@doc """
Helper to traverse the arguments of a tensor expression.
Note the arguments of function nodes are never traversed, as it is
not always desired to recursively modify them. If you want to modify
a function, you will need to build a new function node by wrapping
the function node `fun` with the new desired logic.
"""
def traverse_args(expr, acc, fun)
def traverse_args(%T{data: %Expr{op: :fun, args: args}}, acc, _fun) do
{args, acc}
end
def traverse_args(%T{data: %Expr{op: :cond, args: [clauses, last]}}, acc, fun) do
{clauses, acc} =
Enum.map_reduce(clauses, acc, fn {condition, expr}, acc ->
{condition, acc} = fun.(condition, acc)
{expr, acc} = composite(expr, acc, fun)
{{condition, expr}, acc}
end)
{last, acc} = composite(last, acc, fun)
{[clauses, last], acc}
end
def traverse_args(%T{data: %Expr{op: :concatenate, args: [list | args]}}, acc, fun) do
{list, acc} = Enum.map_reduce(list, acc, fun)
{[list | args], acc}
end
def traverse_args(%T{data: %Expr{args: args}}, acc, fun) do
Enum.map_reduce(args, acc, fn
%T{data: %Expr{}} = arg, acc -> fun.(arg, acc)
arg, acc -> {arg, acc}
end)
end
@doc """
Traverses the given composite type of tensor expressions with `fun`.
This function exists to handle composite types that may
have multiple tensor expressions inside.
If composite tensor expressions are given, such as a tuple,
the composite type is recursively traversed and returned.
If a non-composite tensor expression is given, the function
is invoked for it but not for its arguments (see `traverse_args/3`
for that).
"""
def composite(expr, fun) when is_function(fun, 1) do
{result, []} = composite(expr, [], fn expr, [] -> {fun.(expr), []} end)
result
end
@doc """
Traverses the given composite type of tensor expressions with `acc` and `fun`.
This function exists to handle composite types that may
have multiple tensor expressions inside.
If composite tensor expressions are given, such as a tuple,
the composite type is recursively traversed and returned.
If a non-composite tensor expression is given, the function
is invoked for it but not for its arguments (see `traverse_args/3`
for that).
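For example, a sketch that counts the tensor expressions inside a (possibly
tuple-shaped) composite:
    {_expr, count} =
      Nx.Defn.Tree.composite(expr, 0, fn %Nx.Tensor{} = t, acc -> {t, acc + 1} end)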
"""
def composite(tuple, acc, fun) when is_tuple(tuple) and is_function(fun, 2) do
{list, acc} = Enum.map_reduce(Tuple.to_list(tuple), acc, &composite(&1, &2, fun))
{List.to_tuple(list), acc}
end
def composite(%T{} = expr, acc, fun) when is_function(fun, 2) do
fun.(expr, acc)
end
def composite(other, _acc, _fun) do
raise ArgumentError,
"expected a tensor expression or a tuple of tensor expressions, got: #{inspect(other)}"
end
## Type helpers
@doc """
Rewrites the types of the given tensor expressions according to
the given options.
## Options
* `:max_float_type` - set the max float type
* `:max_signed_type` - set the max signed integer type
* `:max_unsigned_type` - set the max unsigned integer type
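For example, a sketch that caps every float in the expression at 32 bits:
    Nx.Defn.Tree.rewrite_types(tensor_expr, max_float_type: {:f, 32})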
"""
def rewrite_types(tensor_expr, opts \\ []) when is_list(opts) do
{_, max_float_size} = max_float_type = opts[:max_float_type] || {:f, 64}
{_, max_signed_size} = max_signed_type = opts[:max_signed_type] || {:s, 64}
{_, max_unsigned_size} = max_unsigned_type = opts[:max_unsigned_type] || {:u, 64}
if not Nx.Type.float?(max_float_type) do
raise ArgumentError, ":max_float_type must be float type, got: #{inspect(max_float_type)}"
end
if max_float_type != {:f, 64} or max_signed_type != {:s, 64} or max_unsigned_type != {:u, 64} do
rewrite_type(tensor_expr, fn
{:u, size} when size >= max_unsigned_size -> max_unsigned_type
{:s, size} when size >= max_signed_size -> max_signed_type
{:f, size} when size >= max_float_size -> max_float_type
{:bf, size} when size >= max_float_size -> max_float_type
type -> type
end)
else
tensor_expr
end
end
defp rewrite_type(expr, fun) do
{res, _} = rewrite_type(expr, %{}, fun)
res
end
defp rewrite_type(expr, cache, fun) do
composite(expr, cache, fn %T{data: %Expr{id: id, op: op}} = t, cache ->
case cache do
%{^id => res} ->
{res, cache}
%{} ->
{args, cache} = traverse_args(t, cache, &rewrite_type(&1, &2, fun))
res = rewrite_type(op, args, t, fun)
{res, Map.put(cache, id, res)}
end
end)
end
defp rewrite_type(:parameter, _args, t, type_fun) do
Nx.as_type(t, type_fun.(t.type))
end
defp rewrite_type(:fun, [params, _expr, fun], _t, type_fun) do
{:arity, arity} = Function.info(fun, :arity)
params = Enum.map(params, &%{&1 | type: type_fun.(&1.type)})
Expr.fun(params, rewrite_type_fun(arity, fun, type_fun))
end
defp rewrite_type(:tensor, [arg], t, type_fun) do
type = type_fun.(t.type)
rewrite_type_args(t, type, [Nx.as_type(arg, type)])
end
defp rewrite_type(_op, args, t, type_fun) do
rewrite_type_args(t, type_fun.(t.type), args)
end
for arity <- 0..15 do
args = Macro.generate_arguments(arity, __MODULE__)
defp rewrite_type_fun(unquote(arity), op_fun, type_fun) do
fn unquote_splicing(args) -> rewrite_type(op_fun.(unquote_splicing(args)), type_fun) end
end
end
defp rewrite_type_args(%{data: data} = t, type, args) do
%{t | data: %{data | id: Expr.id(), args: args}, type: type}
end
## Nx.Defn callbacks
@doc false
# Returns tensors from flat args.
def from_flat_args(vars) do
for var <- vars do
case var do
%T{} = head ->
head
number when is_number(number) ->
Nx.tensor(number)
tuple when is_tuple(tuple) ->
raise ArgumentError,
"defn functions expects either numbers or tensors as arguments. " <>
"If you want to pass a tuple, you must explicitly pattern match on the tuple in the signature" <>
"Got: #{inspect(tuple)}"
other ->
raise ArgumentError,
"defn functions expects either numbers or tensors as arguments. " <>
"If you want to pass Elixir values, they need to be sent as options and " <>
"tagged as default arguments. Got: #{inspect(other)}"
end
end
end
@doc false
# Returns tensors from nested args.
def from_nested_args(args) do
args
|> Enum.reduce([], &from_nested_args/2)
|> Enum.reverse()
end
defp from_nested_args(tuple, acc) when is_tuple(tuple),
do: tuple |> Tuple.to_list() |> Enum.reduce(acc, &from_nested_args/2)
defp from_nested_args(other, acc),
do: [from_arg(other) | acc]
@doc false
# Returns tensor from a single arg.
def from_arg(%T{} = t), do: t
def from_arg(number) when is_number(number), do: Nx.tensor(number)
def from_arg(other) do
raise(
ArgumentError,
"arguments to defn functions must numbers, tensors, or tuples, got: #{inspect(other)}"
)
end
@doc false
# Converts nested args to nested params.
def to_nested_params(args, params) do
{args, {[], _}} =
to_nested_args(args, {params, 0}, fn _arg, {[param | params], i} ->
{Expr.parameter(param, :root, i), {params, i + 1}}
end)
args
end
@doc false
# Converts flat args to flat params.
# TODO: Use Enum.with_index/2 on Elixir v1.12+
def to_flat_params(vars),
do: to_flat_params(vars, 0)
defp to_flat_params([head | tail], i),
do: [Expr.parameter(head, :root, i) | to_flat_params(tail, i + 1)]
defp to_flat_params([], _i),
do: []
@doc false
# Converts nested args to nested templates.
def to_nested_templates(args, params) do
{args, []} =
to_nested_args(args, params, fn _arg, [param | params] ->
{Nx.template(param, param.type), params}
end)
args
end
@doc false
def to_result(tuple) when is_tuple(tuple),
do: tuple |> Tuple.to_list() |> Enum.map(&to_result/1) |> List.to_tuple()
def to_result(%T{data: %Expr{}} = t),
do: t
def to_result(other) do
raise ArgumentError,
"defn must return a tensor expression or a tuple, got: #{inspect(other)}"
end
defp to_nested_args(args, acc, fun) when is_list(args) do
Enum.map_reduce(args, acc, &to_nested_each(&1, &2, fun))
end
defp to_nested_each(arg, acc, fun) when is_tuple(arg) do
{list, acc} =
arg
|> Tuple.to_list()
|> Enum.map_reduce(acc, &to_nested_each(&1, &2, fun))
{List.to_tuple(list), acc}
end
defp to_nested_each(arg, acc, fun) do
fun.(arg, acc)
end
end
lib/nx/defn/tree.ex
defmodule Grizzly.Trace do
@moduledoc """
Module that tracks the commands that are sent and received by Grizzly
The trace will hold in memory the last 300 messages. If you want to generate
a log file of the trace records, you can use `Grizzly.Trace.dump/1`.
The log format is:
```
timestamp source destination sequence_number command_name command_parameters
```
If you want to list the records that are currently being held in memory you
can use `Grizzly.Trace.list/0`.
If you want to start traces from a fresh start you can call
`Grizzly.Trace.clear/0`.
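A typical session (the file path is illustrative):
```
Grizzly.Trace.list()
Grizzly.Trace.dump("/tmp/grizzly_trace.log")
Grizzly.Trace.clear()
```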
"""
use GenServer
alias Grizzly.Trace.{Record, RecordQueue}
@type src() :: String.t()
@type dest() :: String.t()
@type log_opt() :: {:src, src()} | {:dest, dest()}
@doc """
Start the trace server
"""
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(args) do
GenServer.start_link(__MODULE__, args, name: __MODULE__)
end
@doc """
Log the trace information
"""
@spec log(binary(), [log_opt()]) :: :ok
def log(binary, opts \\ []) do
GenServer.cast(__MODULE__, {:log, binary, opts})
end
@doc """
Dump the trace records into a file
"""
@spec dump(Path.t()) :: :ok | {:error, File.posix()}
def dump(file) do
GenServer.call(__MODULE__, {:dump, file})
end
@doc """
Force clear the records from the trace
"""
@spec clear() :: :ok
def clear() do
GenServer.call(__MODULE__, :clear)
end
@doc """
List all the records currently being traced
"""
@spec list() :: [Record.t()]
def list() do
GenServer.call(__MODULE__, :list)
end
@impl GenServer
def init(_args) do
{:ok, RecordQueue.new()}
end
@impl GenServer
def handle_cast({:log, binary, opts}, records) do
record = Record.new(binary, opts)
{:noreply, RecordQueue.add_record(records, record)}
end
@impl GenServer
def handle_call({:dump, file}, _from, records) do
records_list = RecordQueue.to_list(records)
file_contents = records_to_contents(records_list)
case File.write(file, file_contents) do
:ok ->
{:reply, :ok, records}
{:error, _reason} = error ->
{:reply, error, records}
end
end
def handle_call(:clear, _from, _records) do
{:reply, :ok, RecordQueue.new()}
end
def handle_call(:list, _from, records) do
{:reply, RecordQueue.to_list(records), records}
end
defp records_to_contents(records) do
Enum.reduce(records, "", fn record, str ->
str <> Record.to_string(record) <> "\n"
end)
end
end
lib/grizzly/trace.ex
defmodule Benchmark do
@moduledoc """
Benchmarks the CPU and memory consumption of struct operations
with type checking, compared to native ones.
"""
alias Benchmark.{Inputs, Samples, Tweet}
@warmup_time_s 2
@cpu_time_s 8
@memory_time_s 2
def run do
count = 3_000
puts_title("Generating #{count} inputs, may take a while.")
{tweet_maps, user_maps} =
[Samples.tweet_map(), Samples.user_map()]
|> Stream.zip()
|> Enum.take(count)
|> Enum.unzip()
count = length(tweet_maps)
tweets_approx_size_kb = :erlang.term_to_binary(tweet_maps) |> byte_size() |> Kernel.*(2) |> div(3) |> div(1024)
users_approx_size_kb = :erlang.term_to_binary(user_maps) |> byte_size() |> Kernel.*(2) |> div(3) |> div(1024)
tweet_maps_input1 = init_input(:tweet_maps1, tweet_maps, count)
tweet_maps_input2 = init_input(:tweet_maps2, tweet_maps, count)
tweets = Enum.map(tweet_maps, &Tweet.new!/1)
tweets_input1 = init_input(:tweet_maps1, tweets, count)
tweets_input2 = init_input(:tweet_maps2, tweets, count)
users = Enum.map(user_maps, &Tweet.User.new!/1)
users_input1 = init_input(:users1, users, count)
users_input2 = init_input(:users2, users, count)
puts_title("""
Generated #{count} tweet inputs with summary approx. size of #{tweets_approx_size_kb}KB.
Generated #{count} user inputs with summary approx. size of #{users_approx_size_kb}KB.\
""")
for {title, fun} <- [
{"struct's construction",
fn ->
benchee(%{
"__MODULE__.new!(map)" => fn -> loop(fn -> Tweet.new!(next_random_value(tweet_maps_input1, count)) end) end,
"struct!(__MODULE__, map)" => fn -> loop(fn -> struct!(Tweet, next_random_value(tweet_maps_input2, count)) end) end
})
end},
{"struct's field modification",
fn ->
benchee(%{
"struct!(tweet, user: user) |> __MODULE__.ensure_type!()" => fn ->
loop(fn ->
struct!(next_random_value(tweets_input1, count), user: next_random_value(users_input1, count)) |> Tweet.ensure_type!()
end)
end,
"struct!(tweet, user: user)" => fn ->
loop(fn ->
struct!(next_random_value(tweets_input1, count), user: next_random_value(users_input1, count))
end)
end
})
end}
] do
puts_title("Benchmark #{title}")
fun.()
end
end
def puts_title(title) do
IO.puts("")
IO.puts(title)
IO.puts("=========================================")
end
def init_input(name, values, max_position) do
input_table = :ets.new(name, [:set, :public])
values_tuple =
values
|> List.to_tuple()
|> Tuple.insert_at(0, :values)
:ets.insert(input_table, values_tuple)
:ets.insert(input_table, {:position, max_position + 1})
input_table
end
def next_random_value(input_table, max_position) do
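# Atomically decrements the shared :position counter, wrapping back to
# max_position once it would drop below 1, so concurrent callers cycle
# through the stored values without extra coordination.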
position = :ets.update_counter(input_table, :position, {2, -1, 1, max_position})
:ets.lookup_element(input_table, :values, position + 1)
end
def benchee(plan) do
Benchee.run(plan, warmup: @warmup_time_s, time: @cpu_time_s, memory_time: @memory_time_s)
end
def loop(fun) do
# this makes the execution time close to 1ms
Enum.each(1..2000, fn _ -> fun.() end)
end
end
benchmark/lib/benchmark.ex
defmodule Fastfwd.Modules do
alias Fastfwd.Namespace
@moduledoc """
Interact with Fastfwd-compatible modules - find, filter, build maps.
"""
@doc """
Lists *all* modules, whether or not they are using Fastfwd.
Returns a list of module names, including both Elixir style and Erlang atoms.
## Examples
iex> Fastfwd.Modules.all |> List.first()
:io
"""
@spec all() :: [module]
def all() do
:code.all_loaded()
|> Enum.map(&elem(&1, 0))
end
@doc """
Lists all modules in a module namespace (with names under the module name)
Returns a list of module names
## Examples
iex> Fastfwd.Modules.in_namespace(Icecream)
[Icecream.Pistachio, Icecream.Spoon, Icecream.Chocolate, Icecream.ShavedIce, Icecream.Strawberry, Icecream.DoubleChocolate]
"""
@spec in_namespace(module) :: [module]
def in_namespace(namespace) do
all()
|> in_namespace(namespace)
end
@doc """
Filters a list of modules to only include those under a particular namespace
Returns filtered list of modules
## Examples
iex> module_list = [Icecream.Pistachio, FrozenYogurt.FullCellphoneBattery, Icecream.Chocolate]
iex> Fastfwd.Modules.in_namespace(module_list, Icecream)
[Icecream.Pistachio, Icecream.Chocolate]
"""
@spec in_namespace([module], module) :: [module]
def in_namespace(modules, namespace) do
namespace = Namespace.normalize(namespace)
modules
|> Enum.filter(&String.starts_with?(Atom.to_string(&1), "#{namespace}."))
end
@doc """
Lists all modules with the specified behaviour.
Returns a list of module names
## Examples
iex> Fastfwd.Modules.with_behaviour(Fastfwd.Behaviours.Sender)
[Icecream]
"""
@spec with_behaviour(module) :: [module]
def with_behaviour(behaviour) do
all()
|> with_behaviour(behaviour)
end
@doc """
Filters a list of modules to only include those with the specified behaviour
Returns filtered list of modules
## Examples
iex> module_list = [Icecream.Pistachio, Icecream.Spoon, Icecream.Chocolate, Icecream.ShavedIce, Icecream.Strawberry, Icecream.DoubleChocolate]
iex> Fastfwd.Modules.with_behaviour(module_list, Fastfwd.Behaviours.Receiver)
[Icecream.Pistachio, Icecream.Chocolate, Icecream.ShavedIce, Icecream.Strawberry, Icecream.DoubleChocolate]
"""
@spec with_behaviour([module], module) :: [module]
def with_behaviour(modules, nil), do: modules
def with_behaviour(modules, behaviour) do
modules
|> Enum.filter(&Fastfwd.Module.has_behaviour?(&1, behaviour))
end
@doc """
Find modules that have tags (any tags at all)
Returns a filtered list of modules
## Examples
iex> module_list = [Icecream.Pistachio, Icecream.Spoon]
iex> Fastfwd.Modules.with_tags(module_list)
[Icecream.Pistachio]
"""
@spec with_tags([module]) :: [module]
def with_tags(modules) do
modules
|> Enum.filter(fn (module) -> Fastfwd.Module.tagged?(module) end)
end
@doc """
Find all modules that have the specified tag.
Tags are not necessarily unique - more than one module may have the same tag.
Returns a filtered list of modules
## Examples
iex> module_list = [Icecream.Pistachio, Icecream.Spoon, Icecream.Chocolate, Icecream.ShavedIce, Icecream.Strawberry, Icecream.DoubleChocolate]
iex> Fastfwd.Modules.with_tag(module_list, :chocolate)
[Icecream.Chocolate, Icecream.DoubleChocolate]
"""
@spec with_tag([module(), ...], atom()) :: [module(), ...]
def with_tag(modules, tag) do
modules
|> with_tags()
|> Enum.filter(fn (module) -> Fastfwd.Module.has_tag?(module, tag) end)
end
@doc """
Find the first module that has the specified tag.
Returns a single module name.
## Examples
iex> modules_list = Fastfwd.modules(Icecream, Fastfwd.Behaviours.Receiver)
iex> Fastfwd.Modules.find(modules_list, :chocolate)
Icecream.Chocolate
"""
@spec find([module(), ...], atom, module() | nil) :: module()
def find(modules, tag, default \\ nil) do
modules
|> Enum.find(default, fn (module) -> Fastfwd.Module.has_tag?(module, tag) end)
end
@doc """
List all tags found in a collection of modules
Returns a list of atoms
## Examples
iex> modules_list = [Icecream.Pistachio, Icecream.Spoon, Icecream.Chocolate]
iex> Fastfwd.Modules.tags(modules_list)
[:pistachio, :chocolate]
"""
@spec tags([module]) :: [atom]
def tags(modules) do
modules
|> Enum.map(fn (module) -> Fastfwd.Module.tags(module) end)
|> List.flatten
end
@doc """
Build a map of tags to modules, *without duplicated tags*.
Returns a map of atoms to module names.
## Examples
iex> modules_list = [Icecream.Pistachio, Icecream.Spoon, Icecream.Chocolate, Icecream.DoubleChocolate]
iex> Fastfwd.Modules.routes(modules_list)
%{
pistachio: Icecream.Pistachio,
chocolate: Icecream.DoubleChocolate,
double_chocolate: Icecream.DoubleChocolate,
}
"""
@spec routes([module]) :: map
def routes(modules) do
for module <- modules,
tag <- Fastfwd.Module.tags(module),
into: Map.new(),
do: {tag, module}
end
end
lib/fastfwd/modules.ex
defmodule Trunk.Storage do
@moduledoc """
This is a behaviour that can be implemented by any storage module to make sure it supports all required functions.
"""
@type opts :: Keyword.t()
@doc ~S"""
Save the file at `source_path` to the storage system on path `Path.join(directory, filename)`
- `directory` - The directory
- `filename` - The name of the file
- `source_path` - The full path to the file to be stored. This is a path to the uploaded file or a temporary file that has undergone transformation
- `opts` - The options for the storage system, set in Trunk option `:storage_opts`
## Example:
The file should be saved to <storage system>/my-bucket/path/to/file.ext
```
MyStorage.save("path/to/", "file.ext", "/tmp/uploaded_file.ext", some: :opt)
```
"""
@callback save(directory :: String.t(), filename :: String.t(), source_path :: String.t(), opts) ::
:ok | {:error, any}
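@doc ~S"""
Retrieves the file stored at `Path.join(directory, filename)` and writes it to `destination_path`
- `directory` - The directory
- `filename` - The name of the file
- `destination_path` - The full path the retrieved file should be written to
- `opts` - The options for the storage system, set in Trunk option `:storage_opts`
"""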
@callback retrieve(
directory :: String.t(),
filename :: String.t(),
destination_path :: String.t(),
opts
) :: :ok | {:error, any}
@doc ~S"""
Deletes the version file from the storage system at path `Path.join(directory, filename)`
- `directory` - The directory
- `filename` - The name of the file
- `opts` - The options for the storage system, set in Trunk option `:storage_opts`
## Example:
The file should be removed from <storage system>/my-bucket/path/to/file.ext
```
MyStorage.delete("path/to/", "file.ext", some: :opt)
```
"""
@callback delete(directory :: String.t(), filename :: String.t(), opts) :: :ok | {:error, any}
@doc ~S"""
Generates a URL to the stored file
- `directory` - The directory
- `filename` - The name of the file
- `opts` - The options for the storage system, set in Trunk option `:storage_opts`
## Example:
```
MyStorage.build_uri("path/to", "file.ext", some: :opt)
#=> "https://my-storage/my-bucket/path/to/file.ext"
```
"""
@callback build_uri(directory :: String.t(), filename :: String.t(), opts) :: String.t()
end
lib/trunk/storage.ex
defmodule Clickhousex.Codec.Binary.Extractor do
@moduledoc """
Allows modules that `use` this module to create efficient extractor functions that speak clickhouse's binary protocol.
To define extractors, annotate a function with the `extract` attribute like this:
@extract length: :varint
def extract_length(<<data::binary>>, length, other_param) do
do_something_with_length(data, length, other_param)
end
def do_something_with_length(_data, length, other_param) do
{other_param, length}
end
In the above example, a function named `extract_length/2` will be created, which, when passed a binary, will
extract the length varint from it, and call the function above, passing the unparsed part of the binary and the extracted
length varint to it.
Usage looks like this
{:ok, binary_from_network} = :gen_tcp.recv(conn, 0)
{:this_is_passed_along, length} = extract_length(binary_from_network, :this_is_passed_along)
If there isn't enough data to parse, a resume tuple is returned. The second element of the tuple is a function that, when
called with more data, picks up the parse operation where it left off.
{:resume, resume_fn} = extract_length(<<>>, :this_is_passed_along)
{:ok, data} = :gen_tcp.recv(conn, 0)
{:this_is_passed_along, length} = resume_fn.(data)
# Performance
All functions generated by this module take advantage of binary optimizations, reuse match contexts, and won't create sub-binaries.
# Completeness
The following extractors are implemented:
1. Variable length integers `:varint`
1. Signed integers: `:i8`, `:i16`, `:i32`, `i64`
1. Unsigned integers: `:u8`, `:u16`, `:u32`, `:u64`
1. Floats: `:f32`, `:f64`
1. Strings: `:string`
1. Booleans: `:boolean`
1. Dates: `:date`, `:datetime`
1. Lists of the above scalar types `{:list, scalar}`
1. Nullable instances of all the above `{:nullable, scalar}` or `{:list, {:nullable, scalar}}`
"""
defmacro __using__(_) do
quote do
use Bitwise
Module.register_attribute(__MODULE__, :extract, accumulate: true)
Module.register_attribute(__MODULE__, :extractors, accumulate: true)
@on_definition {unquote(__MODULE__), :on_definition}
@before_compile unquote(__MODULE__)
end
end
@doc false
defmacro __before_compile__(env) do
for {name, visibility, args, [extractors]} <- Module.get_attribute(env.module, :extractors),
{arg_name, arg_type} <- extractors do
[_ | non_binary_args] = args
extractor_args = reject_argument(non_binary_args, arg_name)
landing_call =
quote do
unquote(name)(rest, unquote_splicing(non_binary_args))
end
extractor_fn_name = unique_name(name)
jump_functions =
build_jump_fn(name, extractor_fn_name, extractor_args)
|> rewrite_visibility(visibility)
|> collapse_blocks()
extractors =
arg_type
|> build_extractor(arg_name, extractor_fn_name, landing_call, args)
|> rewrite_visibility(visibility)
quote do
unquote_splicing(jump_functions)
unquote(extractors)
end
end
|> collapse_blocks()
end
@doc false
def on_definition(env, visibility, name, args, _guards, _body) do
extractors = Module.get_attribute(env.module, :extract)
Module.delete_attribute(env.module, :extract)
Module.put_attribute(env.module, :extractors, {name, visibility, args, extractors})
end
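# Builds the public entry-point clauses for an extractor: an empty binary
# returns a resume tuple, while anything else jumps straight into the
# generated extractor function, threading the remaining arguments through.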
defp build_jump_fn(base_fn_name, extractor_fn_name, extractor_args) do
quote do
def unquote(base_fn_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_fn_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(base_fn_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
unquote(extractor_fn_name)(rest, unquote_splicing(extractor_args))
end
end
end
defp build_extractor(:varint, arg_name, extractor_name, landing_call, [_ | non_binary_args]) do
extractor_args = reject_argument(non_binary_args, arg_name)
int_variable = Macro.var(arg_name, nil)
vars = quote do: [a, b, c, d, e, f, g, h, i, j]
# ZigZag encoding is defined for arbitrary sized integers, but for
# our purposes up to 10 parts are enough. Let's unroll the decoding loop.
extractor_clauses =
for parts_count <- 1..10 do
vars_for_clause = Enum.take(vars, parts_count)
pattern = varint_pattern(vars_for_clause)
decoding = varint_decoding(vars_for_clause)
quote do
def unquote(extractor_name)(unquote(pattern), unquote_splicing(extractor_args)) do
unquote(int_variable) = unquote(decoding)
unquote(landing_call)
end
end
end
quote do
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
unquote_splicing(extractor_clauses)
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
{:resume, fn more_data -> unquote(extractor_name)(rest <> more_data, unquote_splicing(extractor_args)) end}
end
end
end
# `vars` are variables for binding varint parts, from high to low
defp varint_pattern([_ | _] = vars) do
[last | rest] = Enum.reverse(vars)
tag = quote do: 1 :: size(1)
init = quote do: [0 :: size(1), unquote(last) :: size(7), rest :: binary]
patterns = Enum.reduce(rest, init, &[tag, quote(do: unquote(&1) :: size(7)) | &2])
{:<<>>, [], patterns}
end
# `vars` are varint parts, from high to low
defp varint_decoding([_ | _] = vars) do
vars
|> Enum.reverse()
|> Enum.with_index()
|> Enum.map(fn
{var, 0} -> var
{var, index} -> {:<<<, [], [var, index * 7]}
end)
|> Enum.reduce(&{:|||, [], [&2, &1]})
end
@int_extractors [
{:i64, :signed, 64},
{:u64, :unsigned, 64},
{:i32, :signed, 32},
{:u32, :unsigned, 32},
{:i16, :signed, 16},
{:u16, :unsigned, 16},
{:i8, :signed, 8},
{:u8, :unsigned, 8}
]
for {type_name, signed, width} <- @int_extractors do
defp build_extractor(unquote(type_name), arg_name, extractor_name, landing_call, [_ | args]) do
extractor_args = reject_argument(args, arg_name)
value_variable = Macro.var(arg_name, nil)
width = unquote(width)
signedness = Macro.var(unquote(signed), nil)
match =
quote do
<<unquote(value_variable)::little-unquote(signedness)-size(unquote(width)), rest::binary>>
end
quote do
def unquote(extractor_name)(unquote(match), unquote_splicing(extractor_args)) do
unquote(landing_call)
end
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(extractor_name)(<<data::binary>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(data <> &1, unquote_splicing(extractor_args))}
end
end
end
end
# Float extractors
for width <- [32, 64],
type_name = :"f#{width}" do
defp build_extractor(unquote(type_name), arg_name, extractor_name, landing_call, [_ | args]) do
extractor_args = reject_argument(args, arg_name)
value_variable = Macro.var(arg_name, nil)
width = unquote(width)
quote do
def unquote(extractor_name)(
<<unquote(value_variable)::little-signed-float-size(unquote(width)), rest::binary>>,
unquote_splicing(extractor_args)
) do
unquote(landing_call)
end
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(rest <> &1, unquote_splicing(extractor_args))}
end
end
end
end
defp build_extractor(:boolean, arg_name, extractor_name, landing_call, [_ | args]) do
extractor_args = reject_argument(args, arg_name)
value_variable = Macro.var(arg_name, nil)
quote do
def unquote(extractor_name)(<<1, rest::binary>>, unquote_splicing(extractor_args)) do
unquote(value_variable) = true
unquote(landing_call)
end
def unquote(extractor_name)(<<0, rest::binary>>, unquote_splicing(extractor_args)) do
unquote(value_variable) = false
unquote(landing_call)
end
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
end
end
defp build_extractor(:date, arg_name, extractor_name, landing_call, [_ | args]) do
extractor_args = reject_argument(args, arg_name)
value_variable = Macro.var(arg_name, nil)
quote do
def unquote(extractor_name)(
<<days_since_epoch::little-unsigned-size(16), rest::binary>>,
unquote_splicing(extractor_args)
) do
{:ok, date} = Date.new(1970, 01, 01)
unquote(value_variable) = Date.add(date, days_since_epoch)
unquote(landing_call)
end
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(rest <> &1, unquote_splicing(extractor_args))}
end
end
end
defp build_extractor(:datetime, arg_name, extractor_name, landing_call, [_ | args]) do
extractor_args = reject_argument(args, arg_name)
value_variable = Macro.var(arg_name, nil)
quote do
def unquote(extractor_name)(
<<seconds_since_epoch::little-unsigned-size(32), rest::binary>>,
unquote_splicing(extractor_args)
) do
{:ok, date_time} = NaiveDateTime.new(1970, 1, 1, 0, 0, 0)
unquote(value_variable) = NaiveDateTime.add(date_time, seconds_since_epoch)
unquote(landing_call)
end
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(rest <> &1, unquote_splicing(extractor_args))}
end
end
end
defp build_extractor({:nullable, type}, arg_name, extractor_name, landing_call, [_ | non_binary_args] = args) do
extractor_args = reject_argument(non_binary_args, arg_name)
value_variable = Macro.var(arg_name, nil)
value_extractor_name = :"#{extractor_name}_value"
value_extractors =
type
|> build_extractor(arg_name, value_extractor_name, landing_call, args)
|> collapse_blocks()
quote do
unquote_splicing(value_extractors)
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
def unquote(extractor_name)(<<0, rest::binary>>, unquote_splicing(extractor_args)) do
unquote(value_extractor_name)(rest, unquote_splicing(extractor_args))
end
def unquote(extractor_name)(<<1, rest::binary>>, unquote_splicing(extractor_args)) do
unquote(value_variable) = nil
unquote(landing_call)
end
end
end
defp build_extractor(:string, arg_name, extractor_name, landing_call, [binary_arg | non_binary_args]) do
extractor_args = reject_argument(non_binary_args, arg_name)
length_variable_name = unique_name("string_length")
length_variable = Macro.var(length_variable_name, nil)
length_extractor_name = :"#{extractor_name}_length"
length_extractor_args = extractor_args
length_landing_call =
quote do
unquote(extractor_name)(rest, unquote_splicing(extractor_args), unquote(length_variable))
end
length_extractors =
build_extractor(
:varint,
length_variable_name,
length_extractor_name,
length_landing_call,
[binary_arg | length_extractor_args] ++ [length_variable]
)
|> collapse_blocks()
value_arg = Macro.var(arg_name, nil)
# The string extractor call chain looks like this:
# top_level function -> length_extractor -> value_extractor
quote do
# Size extractors
unquote_splicing(length_extractors)
# Value extractors
# Empty-input optimization: resuming directly avoids concatenating the next
# (possibly large) chunk onto an empty binary and reallocating it
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args), unquote(length_variable)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args), unquote(length_variable))}
end
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args), unquote(length_variable)) do
case rest do
<<unquote(value_arg)::binary-size(unquote(length_variable)), rest::binary>> ->
unquote(landing_call)
_ ->
{:resume, &unquote(extractor_name)(rest <> &1, unquote_splicing(extractor_args), unquote(length_variable))}
end
end
# Starts the size extractor chain
def unquote(extractor_name)(<<b::binary>>, unquote_splicing(extractor_args)) do
unquote(length_extractor_name)(b, unquote_splicing(extractor_args))
end
end
end
defp build_extractor({:array, item_type}, arg_name, extractor_name, landing_call, args) do
build_extractor({:list, item_type}, arg_name, extractor_name, landing_call, args)
end
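# Lists are decoded in three stages: a varint length extractor reads the item
# count, an item extractor decodes one element at a time, and the list
# extractor below counts down while accumulating items until the count hits 0.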
defp build_extractor({:list, item_type}, arg_name, extractor_name, landing_call, [binary_arg | non_binary_args]) do
extractor_args = reject_argument(non_binary_args, arg_name)
length_extractor_name = :"#{extractor_name}_list_length"
length_name = :length |> unique_name()
length_variable = length_name |> Macro.var(nil)
length_extractor_args = [binary_arg | extractor_args] ++ [length_variable]
list_extractor_name = unique_name("#{extractor_name}_list")
item_name = :item |> unique_name()
item_variable = Macro.var(item_name, nil)
item_accumulator_variable = Macro.var(arg_name, nil)
count_variable = Macro.var(:"#{extractor_name}_count", nil)
item_extractor_name = unique_name("#{extractor_name}_item")
item_extractor_call_args = extractor_args ++ [count_variable, item_accumulator_variable]
item_extractor_args = [binary_arg] ++ item_extractor_call_args
list_extractor_args = extractor_args
length_landing_call =
quote do
unquote(item_extractor_name)(rest, unquote_splicing(extractor_args), unquote(length_variable), [])
end
list_landing_call =
quote do
unquote(list_extractor_name)(
rest,
unquote_splicing(list_extractor_args),
unquote(count_variable) - 1,
unquote(item_variable),
unquote(item_accumulator_variable)
)
end
item_extractors =
item_type
|> build_extractor(item_name, item_extractor_name, list_landing_call, item_extractor_args)
|> collapse_blocks()
length_extractors =
:varint
|> build_extractor(length_name, length_extractor_name, length_landing_call, length_extractor_args)
|> collapse_blocks()
quote do
def unquote(extractor_name)(<<>>, unquote_splicing(extractor_args)) do
{:resume, &unquote(extractor_name)(&1, unquote_splicing(extractor_args))}
end
# Starts the chain by calling the length extractor
def unquote(extractor_name)(<<rest::binary>>, unquote_splicing(extractor_args)) do
unquote(length_extractor_name)(rest, unquote_splicing(extractor_args))
end
unquote_splicing(length_extractors)
unquote_splicing(item_extractors)
# This clause matches when we've extracted all items (remaining count is 0)
def unquote(list_extractor_name)(
<<rest::binary>>,
unquote_splicing(list_extractor_args),
0,
unquote(item_variable),
unquote(item_accumulator_variable)
) do
unquote(item_accumulator_variable) = Enum.reverse([unquote(item_variable) | unquote(item_accumulator_variable)])
unquote(landing_call)
end
# This matches when there's more work to do. It accumulates the extracted item
# and calls the item extractor again
def unquote(list_extractor_name)(
<<rest::binary>>,
unquote_splicing(list_extractor_args),
unquote(count_variable),
unquote(item_variable),
unquote(item_accumulator_variable)
) do
unquote(item_accumulator_variable) = [unquote(item_variable) | unquote(item_accumulator_variable)]
unquote(item_extractor_name)(rest, unquote_splicing(item_extractor_call_args))
end
end
end
# Helper functions
defp rewrite_visibility(ast, :def) do
ast
end
defp rewrite_visibility(ast, :defp) do
Macro.prewalk(ast, fn
{:def, context, rest} -> {:defp, context, rest}
other -> other
end)
end
defp collapse_blocks({:__block__, _, defs}) do
defs
end
defp collapse_blocks(ast) when is_list(ast) do
Enum.reduce(ast, [], fn
{:__block__, _context, clauses}, acc ->
acc ++ clauses
_, acc ->
acc
end)
|> Enum.reverse()
end
defp collapse_blocks(ast) do
[ast]
end
defp reject_argument(args, arg_name) do
Enum.reject(args, fn
{^arg_name, _, _} -> true
_ -> false
end)
end
defp unique_name(base_name) do
unique = System.unique_integer([:positive, :monotonic])
:"#{base_name}_#{unique}"
end
end
|
lib/clickhousex/codec/binary/extractor.ex
| 0.80502
| 0.514034
|
extractor.ex
|
starcoder
|
defmodule Day13.Paper do
defstruct dots: MapSet.new(), folds_left: []
@doc """
Get a Paper struct from a string representation (a.k.a. puzzle input)
"""
def parse(string) do
[dots_input, folds_input] = String.split(string, "\n\n")
%__MODULE__{
dots: parse_dots(dots_input),
folds_left: parse_folds(folds_input)
}
end
defp parse_dots(string) do
string
|> String.split("\n")
|> Enum.map(fn line ->
String.split(line, ",")
|> Enum.map(&String.to_integer/1)
|> then(fn [x, y] -> %{"x" => x, "y" => y} end)
end)
|> MapSet.new()
end
defp parse_folds(string) do
string
|> String.split("\n")
|> Enum.map(fn line ->
Regex.run(~r/([xy])=(\d+)/, line, capture: :all_but_first)
|> List.to_tuple
|> then(fn {d, num} -> {d, String.to_integer(num)} end)
end)
end
@doc """
Fold paper according to the next instruction
"""
def fold(%{folds_left: []} = paper), do: paper
def fold(%{dots: dots, folds_left: folds_left}) do
[next_fold | folds_left] = folds_left
%__MODULE__{dots: fold(next_fold, dots), folds_left: folds_left}
end
defp fold({axis, place}, dots) do
{before_fold, after_fold} = Enum.split_with(dots, fn %{^axis => pos} -> pos < place end)
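# A dot past the fold line (pos > place) is reflected to place - (pos - place),
# i.e. 2*place - pos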
fold_dot = fn %{^axis => pos} = loc -> %{loc | axis => 2*place - pos} end
folded = Enum.map(after_fold, fold_dot)
MapSet.union(MapSet.new(before_fold), MapSet.new(folded))
end
@doc """
Fold the paper until all instructions are consumed.
"""
def fold_all(%{dots: dots, folds_left: folds_left}) do
%__MODULE__{dots: Enum.reduce(folds_left, dots, &fold/2), folds_left: []}
end
@doc """
Generate a visual representation of the paper as a string.
"""
def render(%{dots: dots}) do
{min_x, max_x} = Enum.min_max(Enum.map(dots, fn %{"x" => x} -> x end))
{min_y, max_y} = Enum.min_max(Enum.map(dots, fn %{"y" => y} -> y end))
for y <- min_y..max_y do
for x <- min_x..max_x do
if %{"x" => x, "y" => y} in dots, do: ?\#, else: ?\s
end
|> to_string()
end
|> Enum.join("\n")
|> then(&(&1 <> "\n"))
end
@doc """
Count the number of visible dots in the paper.
"""
def visible_dots(%{dots: dots}), do: Enum.count(dots)
end
|
day13/solver.ex
| 0.823967
| 0.513607
|
solver.ex
|
starcoder
|
defmodule DiscUnion do
@moduledoc """
Discriminated unions for Elixir - for building algebraic data types.
Allows for building data structures with a closed set of representations/cases
as an alternative to tuple+atom combos. Elixir already has a product type:
tuples. With the DiscUnion library, sum types (types with a fixed set of
values, also called discriminated unions or disjoint unions) can be created.
Provides macros and functions for creating and matching on data structures
which throw compile-time and run-time exceptions if an unknown case was used
or not all cases were covered in a match. It's inspired by the ML/OCaml/F# way
of building discriminated unions. Unfortunately, Elixir does not support such
strong typing, and this library will not change that. However, it lets you
easily catch common mistakes at compile time instead of run time (they can
sometimes be hard to detect).
To use it, you need to add: `use DiscUnion` to your module.
## How it works
Underneath, it's just a module containing a struct that wraps atoms and
tuples, plus some dynamically built macros. This property can be used for
matching in function definitions (see the sketch below), although it will not
look as clear as a `case` macro built for a discriminated union.
The `Shape` union creates a `%Shape{}` struct with the currently active case
held in the `case` field; all possible cases can be retrieved with the
`Shape.__union_cases__/0` function:
``` elixir
%Shape{case: Point} = Shape.c Point
%Shape{case: {Circle, :foo}} = Shape.c Circle, :foo
```
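Because the active case is plain data in the `case` field, it can also be
matched directly in function heads. A sketch (`Circle`'s argument is assumed
to be a radius here):
``` elixir
def area(%Shape{case: {Circle, r}}), do: :math.pi() * r * r
def area(%Shape{case: Point}), do: 0.0
```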
Cases that have arguments are just tuples; an *n*-argument union case is an
*(n+1)*-tuple with the case tag as its first element. This should work
seamlessly with existing conventions:
``` elixir
defmodule Result do
use DiscUnion
defunion :ok in any | :error in atom
end
defmodule Test do
use Result
def run(file) do
res = Result.from! File.open(file)
Result.case res do
r={:ok, io_dev} -> {:yey, r, io_dev}
:error in reason when reason==:eacces -> :too_much_protections
:error in :enoent -> :why_no_file
:error in _reason -> :ney
end
end
end
```
Since cases are just tuples, they can also be used as clauses for the regular
`case` macro. Matching and guards also work!
"""
defmacro __using__(opts) do
opts = opts ++ [named_constructors: false]
if true == Keyword.get opts, :named_constructors do
Module.put_attribute(__CALLER__.module, :named_constructors, true)
end
quote do
require DiscUnion
require DiscUnion.Utils.Case
require DiscUnion.Utils.Constructors
import DiscUnion, only: [defunion: 1]
end
end
@doc """
Defines a discriminated union.
To define a discriminated union, `defunion` macro is used. Use `|` to separate
union cases from each other. Union cases can have arguments and an asterisk
`*` can be used to combine several arguments. Underneath, it's just a struct
with union cases represented as atoms and tuples. Type specs in definitions
are passed to the `@spec` declaration, so dialyzer can be used. However,
DiscUnion does not type-check anything by itself.
### Usage
``` elixir
defmodule Shape do
use DiscUnion
defunion Point
| Circle in float()
| Rectangle in any * any
end
```
Type specs in the `Circle` or `Rectangle` definitions are only descriptive and
have no influence on the code, nor are they used for any type checking - there
is no type checking other than checking whether the correct cases were used!
When constructing a case (a union tag), you have a couple of options:
* `c` macro, where arity depends on number of arguments you set for
cases (compile-time checking),
* `c!` function, where arity depends on number of arguments you set for
cases (run-time checking),
* `from/1` macro, accepts a tuple (compile-time checking),
* `from!/1` or `from!/2` functions, accepting a tuple (only run-time checking),
* a dynamically built macro (aka "named constructors") named after the union
tag (in underscored form, i.e. `Shape`'s `Rectangle` case would be available
as the `Shape.rectangle/2` macro, also with compile-time checking).
Run-time constructors `from!` can be overridden. Any changes in functionality
introduced to them will also impact `c!` constructors which are based on
`from!`. This, for example, allows for defining variant cases with some
run-time validations.
The preferred way to construct a variant case is via the `c` macros or `c!`
functions. The `from/1` and `from!/1` constructors are mainly to be used when
interacting with return values, like in the file-opening example above. If
you'd like to enable named constructors, do:
`use DiscUnion, named_constructors: true`.
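For example, with named constructors enabled, each case gets its own
constructor macro (a sketch, assuming the `Shape` union from above):
``` elixir
Shape.point() # same as Shape.c Point
Shape.circle(1.0) # same as Shape.c Circle, 1.0
```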
If `Score.from {Pointz, 1, 2}` or `Score.c Pointz, 1, 2`, from the tennis
kata example, were placed somewhere in the `run_test_match/0` function, the
compiler would throw this error:
``` elixir
== Compilation error on file example/tennis_kata.exs ==
** (UndefinedUnionCaseError) undefined union case: Pointz in _, _
(disc_union) expanding macro: Score.from/1
(disc_union) example/tennis_kata.exs:38: Tennis.run_test_match/0
```
If you were to use `from!/1` or `c!`, this error would be thrown at run-time,
or, in the case of `from!/2`, not at all! The `from!/2` function returns its
second argument when an unknown case is passed to it.
For each discriminated union, a special `case` macro is created. This macro
checks that all cases are covered in its clauses (at compile-time) and expects
its predicate to evaluate to this discriminated union's struct (checked
at run-time).
If the `Game in _` clause in the `Tennis.score_point/2` functions were
commented out, the compiler would throw this error:
``` elixir
== Compilation error on file example/tennis_kata.exs ==
** (MissingUnionCaseError) not all defined union cases are used, should be all of: Points in "PlayerPoints" * "PlayerPoints", Advantage in "Player", Deuce, Game in "Player"
(disc_union) expanding macro: Score.case/2
(disc_union) example/tennis_kata.exs:64: Tennis.score_point/2
```
You can also use a catch-all statement (_), like in a regular `case` macro
(`Kernel.SpecialForms.case/2`), but here, it needs to be explicitly enabled by
passing `allow_underscore: true` option to the macro:
``` elixir
Score.case score, allow_underscore: true do
Points in PlayerPoints.forty, PlayerPoints.forty -> Score.deuce
_ -> score
end
```
Otherwise you would see an error similar to the one above.
"""
defmacro defunion(expr) do
cases = DiscUnion.Utils.extract_union_case_definitions(expr)
Module.register_attribute __CALLER__.module, :cases_canonical, persist: true
Module.put_attribute(__CALLER__.module,
:cases_canonical,
cases |> Enum.map(&DiscUnion.Utils.canonical_form_of_union_case/1))
case DiscUnion.Utils.is_cases_valid? cases do
{:error, :not_atoms} -> raise ArgumentError, "union case tag must be an atom"
{:error, :not_unique} -> raise ArgumentError, "union case tag must be unique"
:ok -> build_union cases
end
end
defp build_union(cases) do
union_typespec = DiscUnion.Utils.build_union_cases_specs(cases)
main_body = quote location: :keep, unquote: true do
all_cases = unquote(cases)
@enforce_keys [:case]
defstruct case: []
defimpl Inspect do
import Inspect.Algebra
def inspect(union, opts) do
mod = @for |> Module.split() |> Enum.join(".")
concat ["##{mod}<", Inspect.inspect(union.case, opts), ">"]
end
end
@doc "Returns a list with all acceptable union cases."
def __union_cases__ do
unquote(cases)
end
def __using__(_) do
quote do
require unquote(__MODULE__)
end
end
@doc """
Matches the given expression against the given clauses. The expression
needs to evaluate to `%#{DiscUnion.Utils.module_name __MODULE__}{}`.
"""
defmacro case(expr, do: block) do
do_case expr, [], do: block
end
defmacro case(expr, [allow_underscore: true], do: block) do
do_case expr, [allow_underscore: true], do: block
end
defmacro case(expr, _opts, do: block) do
do_case expr, [], do: block
end
@spec do_case(Macro.t, Keyword.t, [do: Macro.t]) :: Macro.t
defp do_case(expr, opts, do: block) do
opts = opts ++ [allow_underscore: false]
mod = __MODULE__
allow_underscore = Keyword.get opts, :allow_underscore
block = DiscUnion.Utils.Case.transform_case_clauses(block,
@cases_canonical, allow_underscore)
quote location: :keep do
precond = unquote expr
mod = unquote mod
if not match?(%{__struct__: ^mod}, precond) do
raise BadStructError, struct: mod, term: precond
end
case precond.case do
unquote(block)
end
end
end
DiscUnion.Utils.Constructors.build_constructor_functions __MODULE__, all_cases
end
[union_typespec, main_body]
end
end
|
lib/disc_union.ex
| 0.892469
| 0.838084
|
disc_union.ex
|
starcoder
|
defmodule Stripe.Customers do
@moduledoc """
Customer objects allow you to perform recurring charges and track multiple
charges that are associated with the same customer. The API allows you to
create, delete, and update your customers. You can retrieve individual
customers as well as a list of all your customers.
"""
@endpoint "customers"
@doc """
Creates a new customer object.
## Arguments
- `account_balance` - `optional` - An integer amount in cents that is the
starting account balance for your customer. A negative amount represents a
credit that will be used before attempting any charges to the customer’s
card; a positive amount will be added to the next invoice.
- `card` - `optional` - The card can either be a token, like the ones returned
by our Stripe.js, or a dictionary containing a user’s credit card details
(with the options shown below). Passing card will create a new card, make
it the new customer default card, and delete the old customer default if
one exists. If you want to add additional cards instead of replacing the
existing default, use the card creation API. Whenever you attach a card to
a customer, Stripe will automatically validate the card.
- `number` - required - The card number, as a string without any
separators.
- `exp_month` - required - Two digit number representing the card's
expiration month.
- `exp_year` - required - Two or four digit number representing the
card's expiration year.
- `cvc` - optional, highly recommended - Card security code.
- `name` - optional - Cardholder's full name.
- `address_line1` - optional
- `address_line2` - optional
- `address_city` - optional
- `address_zip` - optional
- `address_state` - optional
- `address_country` - optional
- `coupon` - `optional` - If you provide a coupon code, the customer will have
a discount applied on all recurring charges. Charges you create through
the API will not have the discount.
- `description` - `optional` - An arbitrary string that you can attach to a
customer object. It is displayed alongside the customer in the dashboard.
This will be unset if you POST an empty value.
- `email` - `optional` - Customer’s email address. It’s displayed alongside
the customer in your dashboard and can be useful for searching and
tracking. This will be unset if you POST an empty value.
- `metadata` - `optional` - A set of key/value pairs that you can attach to a
customer object. It can be useful for storing additional information about
the customer in a structured format. This will be unset if you POST an
empty value.
- `plan` - `optional` - The identifier of the plan to subscribe the customer
to. If provided, the returned customer object will have a list of
subscriptions that the customer is currently subscribed to. If you
subscribe a customer to a plan without a free trial, the customer must
have a valid card as well.
- `quantity` - `optional` - The quantity you’d like to apply to the
subscription you’re creating (if you pass in a plan). For example, if your
plan is 10 cents/user/month, and your customer has 5 users, you could pass
5 as the quantity to have the customer charged 50 cents (5 x 10 cents)
monthly. Defaults to 1 if not set. Only applies when the plan parameter is
also provided.
- `trial_end` - `optional` - Unix timestamp representing the end of the trial
period the customer will get before being charged for the first time. If
set, trial_end will override the default trial period of the plan the
customer is being subscribed to. The special value now can be provided to
end the customer’s trial immediately. Only applies when the plan parameter
is also provided.
## Returns
Returns a customer object if the call succeeded. The returned object will have
information about subscriptions, discount, and cards, if that information has
been provided. If a non-free plan is specified and a card is not provided
(unless the plan has a trial period), the call will return an error. If a
non-existent plan or a non-existent or expired coupon is provided, the call
will return an error.
If a card has been attached to the customer, the returned customer object will
have a default_card attribute, which is an ID that can be expanded into the
full card details when retrieving the customer.
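## Example
A minimal sketch (the parameter values are illustrative):
    Stripe.Customers.create(%{email: "user@example.com", description: "An example customer"})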
"""
def create(params) do
Stripe.make_request(:post, @endpoint, params)
|> Stripe.Util.handle_stripe_response
end
def get(id) do
Stripe.make_request(:get, "#{@endpoint}/#{id}")
|> Stripe.Util.handle_stripe_response
end
def change_subscription(id, sub_id, opts) do
Stripe.make_request(:post, "#{@endpoint}/#{id}/subscriptions/#{sub_id}", opts)
|> Stripe.Util.handle_stripe_response
end
def create_subscription(opts) do
Stripe.make_request(:post, "#{@endpoint}", opts)
|> Stripe.Util.handle_stripe_response
end
def create_subscription(id, opts) do
Stripe.make_request(:post, "#{@endpoint}/#{id}/subscriptions", opts)
|> Stripe.Util.handle_stripe_response
end
def get_subcription(id, sub_id) do
Stripe.make_request(:get, "#{@endpoint}/#{id}/subscriptions/#{sub_id}")
|> Stripe.Util.handle_stripe_response
end
def cancel_subscription(id, sub_id) do
Stripe.make_request(:delete, "#{@endpoint}/#{id}/subscriptions/#{sub_id}")
|> Stripe.Util.handle_stripe_response
end
def get_subscriptions(id) do
Stripe.make_request(:get, "#{@endpoint}/#{id}/subscriptions")
|> Stripe.Util.handle_stripe_response
end
def list(limit \\ 10) do
Stripe.make_request(:get, "#{@endpoint}?limit=#{limit}")
|> Stripe.Util.handle_stripe_response
end
def delete(id) do
Stripe.make_request(:delete, "#{@endpoint}/#{id}")
|> Stripe.Util.handle_stripe_response
end
end
|
lib/stripe/customers.ex
| 0.86053
| 0.712282
|
customers.ex
|
starcoder
|
defmodule BitwiseIp do
@moduledoc """
A struct representing an IP address encoded as an integer.
The [Internet Protocol](https://en.wikipedia.org/wiki/Internet_Protocol)
defines computer network addresses using fixed-width integers, which are
efficient for both transmission and the implementation of logic using bitwise
operations. [IPv4](https://en.wikipedia.org/wiki/IPv4) uses 32-bit integers,
providing a space of 4,294,967,296 unique addresses. Due to the growing size
of the internet, [IPv6](https://en.wikipedia.org/wiki/IPv6) uses 128-bit
integers to provide an absurdly large address space.
These integers, however, are hard for humans to read. Therefore, we've
adopted customary notations that are a little easier to digest. IPv4 uses a
dotted octet notation, where each of the four bytes are written in decimal
notation and separated by `.`, as in `127.0.0.1`. IPv6 is similar, but uses
hexadecimal notation on each of eight hextets separated by `:`, as in
`fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b`.
As such, representations for IP addresses in modern software have drifted
away from fixed-width integers. `:inet` represents IP addresses as tuples
like `{127, 0, 0, 1}` for IPv4 and `{0xA, 1, 0xB, 2, 0xC, 3, 0xD, 4}` for
IPv6. These are less efficient in both the space to store the addresses and
the time it takes to perform various operations. For example, whereas
comparing two 32-bit IPv4 addresses is typically one machine instruction,
comparing two tuples involves memory indirection for the tuple layout and 4
separate integer comparisons. This could be even worse if you represent IPs
as strings in their human-readable format.
The difference is probably negligible for your application. In fact, Elixir &
Erlang don't have great support for fixed-width integer representations (see
`t:t/0` for details). But in the interest of getting back to basics,
`BitwiseIp` provides the missing interface for manipulating IP addresses as
the integers they were designed to be. This makes certain logic much easier
to express and improves micro-benchmarks compared to tuple-based libraries
(for whatever that's worth). The most useful functionality is in
`BitwiseIp.Block`, which represents a
[CIDR](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) block.
However, `BitwiseIp` is the fundamental structure that `BitwiseIp.Block` is
built on.
"""
defstruct [:proto, :addr]
@typedoc """
An integer-encoded IP address.
This type takes on two different shapes depending on the IP protocol. The
supported protocols are IPv4 and IPv6.
Normally, the distinction would be down to the number of bits in a
fixed-width integer representation. However, the Erlang VM doesn't support
fixed-width integers, so there's no way to tell IPv4 addresses apart from
IPv6 addresses using just a number. Therefore, this type is a struct with two
fields:
* `:proto` - the protocol, either `:v4` or `:v6`
* `:addr` - the integer encoding of the address
But again, the VM does not support fixed-width integers for the `:addr`. In
the Erlang runtime system, the smallest unit of memory is a *word*: 4 bytes
on a 32-bit architecture, 8 bytes on a 64-bit architecture. Data is stored
using *tagged pointers*, where one word has 4 bits reserved as a *tag*
enumerating type information. One pattern of 4 bits says "I'm a float",
another pattern says "I'm an integer", and so on. When the data is small
enough to fit in the remaining bits of the word (28 bits or 60 bits,
depending on the architecture), it is stored as an *immediate* value.
Otherwise, it is *boxed* and the word instead contains a pointer to a section
of memory on the heap, which can basically be arbitrarily large. Read more in
[*A staged tag scheme for
Erlang*](http://www.it.uu.se/research/publications/reports/2000-029/) by
<NAME>.
What this means for us is that `:addr` may or may not spill onto the heap. On
a 32-bit machine, only IP addresses in the range of 0 to 2^28 fit as
immediate values. This covers most of the IPv4 range, but only a small
portion of the IPv6 range. 64-bit machines have 60 bits to play with, which
would comfortably fit any IPv4 address, but still requires boxing of IPv6
addresses. According to the [Erlang efficiency
guide](http://erlang.org/doc/efficiency_guide/advanced.html), large integers
are stored across at least 3 words. What's more, because we have to
distinguish between integers using the struct with the `:proto` field, each
IP address requires an additional map allocation, which carries some
overhead.
So this isn't going to be a maximally compact representation of an IP
address. Such a thing isn't really possible on the Erlang VM. However,
storing the bulk of it as a single integer still lets us perform efficient
bitwise operations with less overhead than, say, `:inet`-style tuples of
multiple integers.
"""
@type t() :: v4() | v6()
@typedoc """
An IPv4 address.
The `:addr` is an unsigned integer between 0 and 2^32 - 1. See `t:t/0` for
discussion about the in-memory representation.
"""
@type v4() :: %BitwiseIp{proto: :v4, addr: integer()}
@typedoc """
An IPv6 address.
The `:addr` is an unsigned integer between 0 and 2^128 - 1. See `t:t/0` for
discussion about the in-memory representation.
"""
@type v6() :: %BitwiseIp{proto: :v6, addr: integer()}
@doc """
An error-raising variant of `parse/1`.
This function parses IPv4 and IPv6 strings in their respective notations and
produces an encoded `BitwiseIp` struct. If the string is invalid, it raises
an `ArgumentError`.
`BitwiseIp` implements the `String.Chars` protocol, so parsing can be undone
using `to_string/1`.
## Examples
```
iex> BitwiseIp.parse!("127.0.0.1")
%BitwiseIp{proto: :v4, addr: 2130706433}
iex> BitwiseIp.parse!("::1")
%BitwiseIp{proto: :v6, addr: 1}
iex> BitwiseIp.parse!("not an ip")
** (ArgumentError) Invalid IP address "not an ip"
iex> BitwiseIp.parse!("192.168.0.1") |> to_string()
"192.168.0.1"
iex> BitwiseIp.parse!("fc00::") |> to_string()
"fc00::"
```
"""
@spec parse!(String.t()) :: t()
def parse!(address) do
case parse(address) do
{:ok, ip} -> ip
{:error, message} -> raise ArgumentError, message
end
end
@doc """
Parses a string into a bitwise IP.
This function parses IPv4 and IPv6 strings in their respective notations and
produces an encoded `BitwiseIp` struct. This is done in an error-safe way by
returning a tagged tuple. To raise an error, use `parse!/1` instead.
`BitwiseIp` implements the `String.Chars` protocol, so parsing can be undone
using `to_string/1`.
## Examples
```
iex> BitwiseIp.parse("127.0.0.1")
{:ok, %BitwiseIp{proto: :v4, addr: 2130706433}}
iex> BitwiseIp.parse("::1")
{:ok, %BitwiseIp{proto: :v6, addr: 1}}
iex> BitwiseIp.parse("not an ip")
{:error, "Invalid IP address \\"not an ip\\""}
iex> BitwiseIp.parse("192.168.0.1") |> elem(1) |> to_string()
"192.168.0.1"
iex> BitwiseIp.parse("fc00::") |> elem(1) |> to_string()
"fc00::"
```
"""
@spec parse(String.t()) :: {:ok, t()} | {:error, String.t()}
def parse(address) do
case :inet.parse_strict_address(address |> to_charlist()) do
{:ok, ip} -> {:ok, encode(ip)}
{:error, _} -> {:error, "Invalid IP address #{inspect(address)}"}
end
end
@doc """
Encodes an `:inet`-style tuple as a bitwise IP.
The Erlang standard library represents IP addresses as tuples of integers: 4
octet values for IPv4, 8 hextet values for IPv6. This function encodes the
separate values as a single number, which gets wrapped into a `BitwiseIp`
struct. This can be undone with `decode/1`.
Beware of redundant usage in performance-critical paths. Because of the
overhead in encoding the integer, excessive translation back & forth between
the formats may outweigh any benefits gained from other operations on the
single-integer representation.
## Examples
```
iex> BitwiseIp.encode({127, 0, 0, 1})
%BitwiseIp{proto: :v4, addr: 2130706433}
iex> BitwiseIp.encode({0, 0, 0, 0, 0, 0, 0, 1})
%BitwiseIp{proto: :v6, addr: 1}
```
"""
def encode(inet)
@spec encode(:inet.ip4_address()) :: v4()
def encode({a, b, c, d}) do
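# Pack the four octets into one 32-bit bitstring, then rebind it as a single integer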
<<ip::32>> = <<a::8, b::8, c::8, d::8>>
%BitwiseIp{proto: :v4, addr: ip}
end
@spec encode(:inet.ip6_address()) :: v6()
def encode({a, b, c, d, e, f, g, h}) do
<<ip::128>> = <<a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>>
%BitwiseIp{proto: :v6, addr: ip}
end
@doc """
Decodes a bitwise IP into an `:inet`-style tuple.
The Erlang standard library represents IP addresses as tuples of integers: 4
octet values for IPv4, 8 hextet values for IPv6. This function decodes the
single number from a `BitwiseIp` struct into its constituent parts. This can
be undone with `encode/1`.
Beware of redundant usage in performance-critical paths. Because of the
overhead in decoding the integer, excessive translation back & forth between
the formats may outweigh any benefits gained from other operations on the
single-integer representation.
## Examples
```
iex> BitwiseIp.decode(%BitwiseIp{proto: :v4, addr: 2130706433})
{127, 0, 0, 1}
iex> BitwiseIp.decode(%BitwiseIp{proto: :v6, addr: 1})
{0, 0, 0, 0, 0, 0, 0, 1}
```
"""
def decode(bitwise_ip)
@spec decode(v4()) :: :inet.ip4_address()
def decode(%BitwiseIp{proto: :v4, addr: ip}) do
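# Unpack the 32-bit integer back into its four octets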
<<a::8, b::8, c::8, d::8>> = <<ip::32>>
{a, b, c, d}
end
@spec decode(v6()) :: :inet.ip6_address()
def decode(%BitwiseIp{proto: :v6, addr: ip}) do
<<a::16, b::16, c::16, d::16, e::16, f::16, g::16, h::16>> = <<ip::128>>
{a, b, c, d, e, f, g, h}
end
defimpl String.Chars do
def to_string(ip) do
BitwiseIp.decode(ip) |> :inet.ntoa() |> Kernel.to_string()
end
end
end
|
lib/bitwise_ip.ex
| 0.957814
| 0.95018
|
bitwise_ip.ex
|
starcoder
|
defmodule Deparam.Params do
@moduledoc """
A generic parameter parser and coercer.
"""
alias Deparam.DeepMapGet
alias Deparam.InvalidParamError
alias Deparam.Type
@typedoc """
A type describing a parameter collection.
"""
@type params :: %{optional(String.t()) => any}
@typedoc """
A type describing a param key.
"""
@type key :: atom | String.t()
@typedoc """
A type describing a keypath.
"""
@type path :: nonempty_list(key)
@typedoc """
A type describing a param key path.
"""
@type key_or_path :: key | path
@doc """
Cast a keyword list or map to a params map.
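String and atom keys are normalized to strings, recursing into nested plain
maps (structs are left untouched).
## Examples
iex> Deparam.Params.normalize(%{foo: 1, bar: %{baz: 2}})
%{"bar" => %{"baz" => 2}, "foo" => 1}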
"""
@spec normalize(Keyword.t() | %{optional(atom | String.t()) => any}) ::
params
def normalize(params) when is_map(params) when is_list(params) do
Map.new(params, fn
{key, value} when is_atom(key) when is_binary(key) ->
{to_string(key), do_normalize(value)}
_ ->
raise ArgumentError,
"value must be a map with string or atom keys or a keyword list"
end)
end
defp do_normalize(%_{} = struct), do: struct
defp do_normalize(map) when is_map(map) do
normalize(map)
end
defp do_normalize(term), do: term
@doc """
Gets a param identified by a key or path from the given params map. Returns
`nil` or the value defined by the `:default` option rather than an error when
coercion fails or the param is missing.
## Options
* `:default` - Indicates that the requested param is optional and additionally
provides a default value when the value is `nil`.
"""
@spec get(params, key_or_path, Type.type(), Keyword.t()) :: any
def get(params, key_or_path, type \\ :any, opts \\ []) do
case fetch(params, key_or_path, type, opts) do
{:ok, value} -> value
_ -> opts[:default]
end
end
@doc """
Fetches a param identified by a key or path from the given params map.
## Options
* `:default` - Indicates that the requested param is optional and additionally
provides a default value when the value is `nil`.
## Examples
iex> Deparam.Params.fetch(%{"foo" => "bar"}, :foo)
{:ok, "bar"}
iex> Deparam.Params.fetch(%{"foo" => "bar"}, "foo", :string)
{:ok, "bar"}
iex> Deparam.Params.fetch(%{"foo" => %{"bar" => "123"}}, [:foo, "bar"], :integer)
{:ok, 123}
iex> Deparam.Params.fetch(%{"foo" => "bar"}, :baz, {:non_nil, :any})
{:error, %InvalidParamError{path: ["baz"], value: nil, type: {:non_nil, :any}}}
"""
@spec fetch(params, key_or_path, Type.type(), Keyword.t()) ::
{:ok, any} | {:error, InvalidParamError.t()}
def fetch(params, key_or_path, type \\ :any, opts \\ []) do
path = resolve_path(key_or_path)
value = DeepMapGet.deep_map_get(params, path)
case Type.coerce(value, type) do
{:ok, nil} ->
{:ok, opts[:default]}
{:ok, value} ->
{:ok, value}
:error ->
{:error, %InvalidParamError{path: path, value: value, type: type}}
end
end
defp resolve_path(key_or_path) do
key_or_path |> List.wrap() |> Enum.map(&to_string/1)
end
end
|
lib/deparam/params.ex
| 0.913131
| 0.493287
|
params.ex
|
starcoder
|
defmodule Commanded.Registration.HordeRegistry do
import Commanded.Registration.HordeRegistry.Util
alias Commanded.Registration.HordeRegistry.NodeListener
require Logger
@moduledoc """
Process registration and distribution via [Horde](https://github.com/derekkraan/horde)
In order to use this, you will need to update the following config values:
```
config :commanded,
registry: Commanded.Registration.HordeRegistry
```
"""
@behaviour Commanded.Registration.Adapter
@impl Commanded.Registration.Adapter
def child_spec(application, _config) do
name = Module.concat([application, HordeRegistry])
node_listener_name = Module.concat([application, HordeRegistryNodeListener])
members = get_cluster_members(name)
{:ok,
[
Horde.Registry.child_spec(name: name, keys: :unique, members: members),
{NodeListener, [name: node_listener_name, hordes: [name]]}
], %{registry_name: name}}
end
@impl Commanded.Registration.Adapter
def supervisor_child_spec(_adapter_meta, module, _args) do
defaults = [
strategy: :one_for_one,
distribution_strategy: Horde.UniformDistribution,
name: module,
members: get_cluster_members(module)
]
overrides = Application.get_env(:commanded_horde_registry, :supervisor_opts, [])
opts = Keyword.merge(defaults, overrides)
Horde.DynamicSupervisor.child_spec(opts)
end
@impl Commanded.Registration.Adapter
def start_child(adapter_meta, name, supervisor, {module, args}) do
via_name = via_tuple(adapter_meta, name)
updated_args = Keyword.put(args, :name, via_name)
fun = fn ->
# spec = Supervisor.child_spec({module, updated_args}, id: {module, name})
DynamicSupervisor.start_child(supervisor, {module, updated_args})
end
start(adapter_meta, name, fun)
end
@impl Commanded.Registration.Adapter
def start_link(adapter_meta, name, supervisor, args) do
via_name = via_tuple(adapter_meta, name)
fun = fn -> GenServer.start_link(supervisor, args, name: via_name) end
start(adapter_meta, name, fun)
end
@impl Commanded.Registration.Adapter
def whereis_name(adapter_meta, name) do
registry_name = registry_name(adapter_meta)
case Horde.Registry.whereis_name({registry_name, name}) do
pid when is_pid(pid) ->
pid
:undefined ->
:undefined
other ->
Logger.warn("unexpected response from Horde.Registry.whereis_name/1: #{inspect(other)}")
:undefined
end
end
@impl Commanded.Registration.Adapter
def via_tuple(adapter_meta, name) do
registry_name = registry_name(adapter_meta)
{:via, Horde.Registry, {registry_name, name}}
end
defp start(adapter_meta, name, func) do
case func.() do
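# `{:already_started, nil}` means the name is registered but no live pid was
# returned; resolve the name again to decide between the two outcomes below.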
{:error, {:already_started, nil}} ->
case whereis_name(adapter_meta, name) do
pid when is_pid(pid) -> {:ok, pid}
_other -> {:error, :registered_but_dead}
end
{:error, {:already_started, pid}} when is_pid(pid) ->
{:ok, pid}
reply ->
reply
end
end
defp registry_name(adapter_meta), do: Map.get(adapter_meta, :registry_name)
end
|
lib/commanded/registration/horde_registry.ex
| 0.738669
| 0.648139
|
horde_registry.ex
|
starcoder
|
defmodule Hunter.Api do
@moduledoc """
Hunter API contract
"""
## Account
@doc """
Retrieve account of authenticated user
## Parameters
* `conn` - connection credentials
"""
@callback verify_credentials(conn :: Hunter.Client.t()) :: Hunter.Account.t()
@doc """
Make changes to the authenticated user
## Parameters
* `conn` - connection credentials
* `data` - data payload
## Possible keys for payload
* `display_name` - name to display in the user's profile
* `note` - new biography for the user
* `avatar` - base64 encoded image to display as the user's avatar (e.g. `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAUoAAADrCAYAAAA...`)
* `header` - base64 encoded image to display as the user's header image (e.g. `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAUoAAADrCAYAAAA...`)
"""
@callback update_credentials(Hunter.Client.t(), map) :: Hunter.Account.t()
@doc """
Retrieve account
## Parameters
* `conn` - connection credentials
* `id` - account identifier
"""
@callback account(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Account.t()
@doc """
Get a list of followers
## Parameters
* `conn` - connection credentials
* `id` - account identifier
* `options` - options list
## Options
* `max_id` - get a list of followers with id less than or equal to this value
* `since_id` - get a list of followers with id greater than this value
* `limit` - maximum number of followers to get, default: 40, maximum: 80
"""
@callback followers(conn :: Hunter.Client.t(), id :: non_neg_integer, options :: Keyword.t()) ::
Hunter.Account.t()
@doc """
Get a list of followed accounts
## Parameters
* `conn` - connection credentials
* `id` - account identifier
* `options` - options list
## Options
* `max_id` - get a list of followings with id less than or equal to this value
* `since_id` - get a list of followings with id greater than this value
* `limit` - maximum number of followings to get, default: 40, maximum: 80
"""
@callback following(conn :: Hunter.Client.t(), id :: non_neg_integer, options :: Keyword.t()) ::
Hunter.Account.t()
@doc """
Follow a remote user
## Parameters
* `conn` - connection credentials
* `uri` - URI of the remote user, in the format of `username@domain`
"""
@callback follow_by_uri(conn :: Hunter.Client.t(), uri :: String.t()) :: Hunter.Account.t()
@doc """
Search for accounts
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `q`: what to search for
* `limit`: maximum number of matching accounts to return, default: 40
"""
@callback search_account(conn :: Hunter.Client.t(), options :: map) :: [Hunter.Account.t()]
@doc """
Retrieve user's blocks
## Parameters
* `conn` - connection credentials
## Options
* `max_id` - get a list of blocks with id less than or equal to this value
* `since_id` - get a list of blocks with id greater than this value
* `limit` - maximum number of blocks to get, default: 40, max: 80
"""
@callback blocks(conn :: Hunter.Client.t(), options :: Keyword.t()) :: [Hunter.Account.t()]
@doc """
Retrieve a list of follow requests
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of follow requests with id less than or equal to this value
* `since_id` - get a list of follow requests with id greater than this value
* `limit` - maximum number of requests to get, default: 40, max: 80
"""
@callback follow_requests(conn :: Hunter.Client.t(), options :: Keyword.t()) :: [
Hunter.Account.t()
]
@doc """
Retrieve user's mutes
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of mutes with id less than or equal to this value
* `since_id` - get a list of mutes with id greater than this value
* `limit` - maximum number of mutes to get, default: 40, max: 80
"""
@callback mutes(conn :: Hunter.Client.t(), options :: Keyword.t()) :: [Hunter.Account.t()]
@doc """
Accepts or Rejects a follow request
## Parameters
* `conn` - connection credentials
* `id` - follow request id
* `action` - action to take
## Actions
* `:authorize` - authorize a follow request
* `:reject` - reject a follow request
"""
@callback follow_request_action(
conn :: Hunter.Client.t(),
id :: non_neg_integer,
action :: atom
) :: boolean
## Application
@doc """
Register a new OAuth client app on the target instance
## Parameters
* `name` - name of your application
* `redirect_uri` - where the user should be redirected after authorization,
for no redirect, use `urn:ietf:wg:oauth:2.0:oob`
* `scopes` - scope list, see the scope section for more details
* `website` - URL to the homepage of your app
* `base_url` - base url
## Scopes
* `read` - read data
* `write` - post statuses and upload media for statuses
* `follow` - follow, unfollow, block, unblock
Multiple scopes can be requested during the authorization phase with the `scope` query param
"""
@callback create_app(
name :: String.t(),
redirect_uri :: String.t(),
scopes :: [String.t()],
website :: nil | String.t(),
base_url :: String.t()
) :: Hunter.Application.t()
@doc """
Upload a media file
## Parameters
* `conn` - connection credentials
* `file` - media to be uploaded
* `options` - option list
## Options
* `description` - plain-text description of the media for accessibility (max 420 chars)
* `focus` - two floating points, comma-delimited.
"""
@callback upload_media(conn :: Hunter.Client.t(), file :: Path.t(), options :: Keyword.t()) ::
Hunter.Attachment.t()
## Relationship
@doc """
Get the relationships of authenticated user towards given other users
## Parameters
* `conn` - connection credentials
* `id` - list of relationship IDs
"""
@callback relationships(conn :: Hunter.Client.t(), ids :: [non_neg_integer]) :: [
Hunter.Relationship.t()
]
@doc """
Follow a user
## Parameters
* `conn` - connection credentials
* `id` - user id
"""
@callback follow(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
@doc """
Unfollow a user
## Parameters
* `conn` - connection credentials
* `id` - user identifier
"""
@callback unfollow(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
@doc """
Block a user
## Parameters
* `conn` - connection credentials
* `id` - user identifier
"""
@callback block(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
@doc """
Unblock a user
* `conn` - connection credentials
* `id` - user identifier
"""
@callback unblock(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
@doc """
Mute a user
## Parameters
* `conn` - connection credentials
* `id` - user identifier
"""
@callback mute(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
@doc """
Unmute a user
## Parameters
* `conn` - connection credentials
* `id` - user identifier
"""
@callback unmute(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Relationship.t()
## Result
@doc """
Search for content
## Parameters
* `conn` - connection credentials
* `q` - the search query
* `options` - option list
## Options
* `resolve` - whether to resolve non-local accounts
"""
@callback search(conn :: Hunter.Client.t(), query :: String.t(), options :: Keyword.t()) ::
Hunter.Result.t()
## Status
@doc """
Create new status
## Parameters
* `conn` - connection credentials
* `status` - text of the status
* `options` - option list
## Options
* `in_reply_to_id` - local ID of the status you want to reply to
* `media_ids` - list of media IDs to attach to the status (maximum: 4)
* `sensitive` - whether the media of the status is NSFW
* `spoiler_text` - text to be shown as a warning before the actual content
* `visibility` - either `direct`, `private`, `unlisted` or `public`
"""
@callback create_status(conn :: Hunter.Client.t(), status :: String.t(), options :: Keyword.t()) ::
Hunter.Status.t() | no_return
@doc """
Retrieve status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback status(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Status.t()
@doc """
Destroy status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback destroy_status(conn :: Hunter.Client.t(), id :: non_neg_integer) :: boolean
@doc """
Reblog a status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback reblog(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Status.t()
@doc """
Undo a reblog of a status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback unreblog(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Status.t()
@doc """
Fetch the list of users who reblogged the status.
## Parameters
* `conn` - connection credentials
* `id` - status identifier
* `options` - option list
## Options
* `max_id` - get a list of *reblogged by* ids less than or equal to this value
* `since_id` - get a list of *reblogged by* ids greater than this value
* `limit` - maximum number of *reblogged by* to get, default: 40, max: 80
"""
@callback reblogged_by(conn :: Hunter.Client.t(), id :: non_neg_integer, options :: Keyword.t()) ::
[Hunter.Account.t()]
@doc """
Favorite a status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback favourite(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Status.t()
@doc """
Undo a favorite of a status
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback unfavourite(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Status.t()
@doc """
Fetch a user's favourites
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of favourites with id less than or equal to this value
* `since_id` - get a list of favourites with id greater than this value
* `limit` - maximum of favourites to get, default: 20, max: 40
"""
@callback favourites(conn :: Hunter.Client.t(), options :: Keyword.t()) :: [Hunter.Status.t()]
@doc """
Fetch the list of users who favourited the status.
## Parameters
* `conn` - connection credentials
* `id` - status identifier
* `options` - option list
## Options
* `max_id` - get a list of *favourited by* ids less than or equal to this value
* `since_id` - get a list of *favourited by* ids greater than this value
* `limit` - maximum number of *favourited by* to get, default: 40, max: 80
"""
@callback favourited_by(
conn :: Hunter.Client.t(),
id :: non_neg_integer,
options :: Keyword.t()
) :: [Hunter.Account.t()]
@doc """
Get a list of statuses by a user
## Parameters
* `conn` - connection credentials
* `account_id` - account identifier
* `options` - option list
## Options
* `only_media` - only return `Hunter.Status.t` that have media attachments
* `exclude_replies` - skip statuses that reply to other statuses
* `max_id` - get a list of statuses with id less than or equal to this value
* `since_id` - get a list of statuses with id greater than this value
* `limit` - maximum number of statuses to get, default: 20, max: 40
"""
@callback statuses(conn :: Hunter.Client.t(), account_id :: non_neg_integer, options :: map) ::
[Hunter.Status.t()]
@doc """
Retrieve statuses from the home timeline
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of timelines with id less than or equal to this value
* `since_id` - get a list of timelines with id greater than this value
* `limit` - maximum number of statuses on the requested timeline to get, default: 20, max: 40
"""
@callback home_timeline(conn :: Hunter.Client.t(), options :: map) :: [Hunter.Status.t()]
@doc """
Retrieve statuses from the public timeline
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `local` - only return statuses originating from this instance
* `max_id` - get a list of timelines with id less than or equal to this value
* `since_id` - get a list of timelines with id greater than this value
* `limit` - maximum number of statuses on the requested timeline to get, default: 20, max: 40
"""
@callback public_timeline(conn :: Hunter.Client.t(), options :: map) :: [Hunter.Status.t()]
@doc """
Retrieve statuses from a hashtag
## Parameters
* `conn` - connection credentials
* `hashtag` - list of strings
* `options` - option list
## Options
* `local` - only return statuses originating from this instance
* `max_id` - get a list of timelines with id less than or equal to this value
* `since_id` - get a list of timelines with id greater than this value
* `limit` - maximum number of statuses on the requested timeline to get, default: 20, max: 40
"""
@callback hashtag_timeline(conn :: Hunter.Client.t(), hashtag :: [String.t()], options :: map) ::
[Hunter.Status.t()]
@doc """
Retrieve instance information
## Parameters
* `conn` - connection credentials
"""
@callback instance_info(conn :: Hunter.Client.t()) :: Hunter.Instance.t()
@doc """
Retrieve user's notifications
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of notifications with id less than or equal to this value
* `since_id` - get a list of notifications with id greater than this value
* `limit` - maximum number of notifications to get, default: 15, max: 30
"""
@callback notifications(conn :: Hunter.Client.t(), options :: Keyword.t()) :: [
Hunter.Notification.t()
]
@doc """
Retrieve single notification
## Parameters
* `conn` - connection credentials
* `id` - notification identifier
"""
@callback notification(conn :: Hunter.Client.t(), non_neg_integer) :: Hunter.Notification.t()
@doc """
Deletes all notifications from the Mastodon server for the authenticated user
## Parameters
* `conn` - connection credentials
"""
@callback clear_notifications(conn :: Hunter.Client.t()) :: boolean
@doc """
Dismiss a single notification
## Parameters
* `conn` - connection credentials
* `id` - notification id
"""
@callback clear_notification(conn :: Hunter.Client.t(), id :: non_neg_integer) :: boolean
@doc """
Retrieve a user's reports
## Parameters
* `conn` - connection credentials
"""
@callback reports(conn :: Hunter.Client.t()) :: [Hunter.Report.t()]
@doc """
Report a user
## Parameters
* `conn` - connection credentials
* `account_id` - the ID of the account to report
* `status_ids` - the IDs of statuses to report
* `comment` - a comment to associate with the report
"""
@callback report(
conn :: Hunter.Client.t(),
account_id :: non_neg_integer,
status_ids :: [non_neg_integer],
comment :: String.t()
) :: Hunter.Report.t()
@doc """
Retrieve status context
## Parameters
* `conn` - connection credentials
* `id` - status identifier
"""
@callback status_context(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Context.t()
@doc """
Retrieve a card associated with a status
## Parameters
* `conn` - connection credentials
* `id` - status id
"""
@callback card_by_status(conn :: Hunter.Client.t(), id :: non_neg_integer) :: Hunter.Card.t()
@doc """
Retrieve access token
## Parameters
* `app` - application details, see: `Hunter.Application.create_app/5` for more details.
* `username` - your account's email
* `password` - your account's password
* `base_url` - API base url, default: `https://mastodon.social`
"""
@callback log_in(
app :: Hunter.Application.t(),
username :: String.t(),
password :: String.t(),
base_url :: String.t()
) :: Hunter.Client.t()
@doc """
Retrieve access token using OAuth access code
## Parameters
* `app` - application details, see: `Hunter.Application.create_app/5` for more details.
* `oauth_code` - oauth authentication code
* `base_url` - API base url, default: `https://mastodon.social`
"""
@callback log_in_oauth(
app :: Hunter.Application.t(),
oauth_code :: String.t(),
base_url :: String.t()
) :: Hunter.Client.t()
@doc """
Fetch user's blocked domains
## Parameters
* `conn` - connection credentials
* `options` - option list
## Options
* `max_id` - get a list of blocks with id less than or equal to this value
* `since_id` - get a list of blocks with id greater than this value
* `limit` - maximum number of blocks to get, default: 40, max: 80
"""
@callback blocked_domains(conn :: Hunter.Client.t(), options :: Keyword.t()) :: list
@doc """
Block a domain
## Parameters
* `conn` - connection credentials
* `domain` - domain to block
"""
@callback block_domain(conn :: Hunter.Client.t(), domain :: String.t()) :: boolean
@doc """
Unblock a domain
## Parameters
* `conn` - connection credentials
* `domain` - domain to unblock
"""
@callback unblock_domain(conn :: Hunter.Client.t(), domain :: String.t()) :: boolean
end
|
lib/hunter/api.ex
| 0.890035
| 0.430267
|
api.ex
|
starcoder
|
defmodule ExOauth2Provider.Plug do
@moduledoc """
ExOauth2Provider.Plug contains functions that assist with interacting with
ExOauth2Provider via Plugs.
ExOauth2Provider.Plug is not itself a plug.
Use the helpers to look up current_access_token and current_resource_owner.
## Example
ExOauth2Provider.Plug.current_access_token(conn)
ExOauth2Provider.Plug.current_resource_owner(conn)
"""
import ExOauth2Provider.Keys
@doc """
Check if a request is authenticated
"""
@spec authenticated?(Plug.Conn.t) :: boolean
def authenticated?(conn), do: authenticated?(conn, :default)
@doc """
Check if a request is authenticated
"""
@spec authenticated?(Plug.Conn.t, atom) :: boolean
def authenticated?(conn, type) do
case get_current_access_token(conn, type) do
{:error, _} -> false
{:ok, _} -> true
end
end
@doc """
Fetch the currently authenticated resource if loaded,
optionally located at a key
"""
@spec current_resource_owner(Plug.Conn.t, atom) :: any | nil
def current_resource_owner(conn, the_key \\ :default) do
case current_access_token(conn, the_key) do
nil -> nil
access_token -> access_token.resource_owner
end
end
@doc """
Fetch the currently verified token from the request.
Optionally located at a key
"""
@spec current_access_token(Plug.Conn.t, atom) :: String.t | nil
def current_access_token(conn, the_key \\ :default) do
case get_current_access_token(conn, the_key) do
{:ok, access_token} -> access_token
{:error, _} -> nil
end
end
@doc false
def get_current_access_token(conn, the_key \\ :default) do
case conn.private[access_token_key(the_key)] do
{:ok, _} = token -> token
{:error, _} = token -> token
_ -> {:error, :no_session}
end
end
@doc false
def set_current_access_token(conn, access_token, the_key \\ :default) do
Plug.Conn.put_private(conn, access_token_key(the_key), access_token)
end
end
|
lib/ex_oauth2_provider/plug.ex
| 0.856647
| 0.416055
|
plug.ex
|
starcoder
|
defmodule State.Pagination do
@moduledoc """
Utility module to paginate result-set items.
"""
alias State.Pagination.Offsets
@typep page_count :: pos_integer
@type page_size :: non_neg_integer
@type offset :: non_neg_integer
@type pagination_option ::
{:limit, page_size}
| {:offset, offset}
@doc """
Paginates a result-set according to a list of options.
* `results` - the list of results
* `opts` - the keyword list or map of options:
* `:limit` - the number of results to be returned
* `:offset` - the offset of results to begin selection from
When `:limit` is provided, the function gives a tuple of the paginated list
and a struct of pagination offset values for the next, previous, first and
last pages.
## Examples
iex(1)> items = [%{id: 1}, %{id: 2}, %{id: 3}, %{id: 4}, %{id: 5}]
iex(2)> State.Pagination.paginate(items, limit: 2, offset: 2)
{[%{id: 3}, %{id: 4}], %State.Pagination.Offsets{
prev: 0,
next: 4,
first: 0,
last: 4
}}
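Without a `:limit`, the results pass through unchanged:
iex> State.Pagination.paginate([%{id: 1}, %{id: 2}])
[%{id: 1}, %{id: 2}]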
"""
@spec paginate([map]) :: [map] | {[map], Offsets.t()}
@spec paginate([map], [pagination_option] | map) :: [map] | {[map], Offsets.t()}
def paginate(results, opts \\ %{}) when is_list(results) do
opts = Map.new(opts)
case opts do
%{limit: limit} when is_integer(limit) and limit > 0 ->
offset = Map.get(opts, :offset, 0)
page_count = page_count(results, limit)
item_count = Enum.count(results)
paged_results = Enum.slice(results, offset, limit)
page_meta = %Offsets{
prev: previous_page_offset(page_count, limit, offset),
next: next_page_offset(page_count, limit, offset, item_count),
first: 0,
last: last_page_offset(page_count, limit)
}
{paged_results, page_meta}
_ ->
results
end
end
@spec previous_page_offset(page_count, page_size, offset) :: offset | nil
defp previous_page_offset(_pages, page_size, offset) do
# Account for when offset isn't perfectly divisible by the page size
page_offset_delta = Integer.mod(offset, page_size)
current_page = Integer.floor_div(offset, page_size)
cond do
current_page > 0 and page_offset_delta == 0 ->
(current_page - 1) * page_size
page_offset_delta != 0 ->
new_offset = (current_page - 1) * page_size + page_offset_delta
safe_previous_offset(new_offset)
true ->
nil
end
end
# Make sure previous offset is 0 at the lowest
defp safe_previous_offset(offset) when offset < 0, do: 0
defp safe_previous_offset(offset), do: offset
@spec next_page_offset(page_count, page_size, offset, integer) :: offset | nil
defp next_page_offset(pages, page_size, offset, item_count) do
# Account for when offset isn't perfectly divisible by the page size
page_offset_delta = Integer.mod(offset, page_size)
current_page = Integer.floor_div(offset, page_size)
cond do
current_page < pages - 1 and page_offset_delta == 0 ->
(current_page + 1) * page_size
page_offset_delta != 0 ->
new_offset = (current_page + 1) * page_size + page_offset_delta
safe_next_offset(new_offset, item_count)
true ->
nil
end
end
# Make sure offset doesn't go past the list size
defp safe_next_offset(offset, item_count) when offset >= item_count, do: nil
defp safe_next_offset(offset, _item_count), do: offset
@spec last_page_offset(page_count, page_size) :: offset
defp last_page_offset(pages, page_size) do
(pages - 1) * page_size
end
@spec page_count([any], page_size) :: page_count
defp page_count([], _), do: 1
defp page_count(list, page_size) do
list
|> Stream.chunk_every(page_size)
|> Enum.count()
end
end
apps/state/lib/state/pagination.ex
defmodule HTTPStream.Request do
@moduledoc """
Struct that represents a request.
Fields:
* `scheme`: `atom()` - e.g. `:http`
* `host`: `binary()` - e.g. `"localhost"`
* `port`: `integer()` - e.g. `80`
* `path`: `binary()` - e.g `"/users/1/avatar.png"`
* `method`: `String.t()` - e.g. `"GET"`
* `headers`: `keyword()` - e.g. `[authorization: "Bearer 123"]`
* `body`: `binary()` - e.g. `{ "id": "1" }`
"""
@supported_methods ~w(GET OPTIONS HEAD TRACE POST PUT PATCH DELETE)
defstruct scheme: nil,
host: nil,
port: 80,
path: "/",
method: "GET",
headers: [],
body: ""
@type t :: %__MODULE__{
scheme: atom() | nil,
host: binary() | nil,
port: integer(),
path: binary(),
method: binary(),
headers: keyword(),
body: binary()
}
@doc """
Parses a given URL and uses a given method to generate a valid
`HTTPStream.Request` struct.
Supported options:
* `headers` - HTTP headers to be sent.
* `body` - Body of the HTTP request. For "GET", "TRACE", "HEAD", "OPTIONS"
and "DELETE" requests this is sent as URL query parameters instead.
This function raises an `ArgumentError` if the HTTP method is unsupported or
the `url` argument isn't a string.
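## Example
A minimal sketch (host and query values are arbitrary):
```elixir
request = HTTPStream.Request.new("GET", "http://localhost/users", body: [page: 1])
request.path
#=> "/users?page=1"
```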
"""
@spec new(String.t(), String.t(), keyword()) :: t() | no_return()
def new(method, url, opts \\ [])
def new(method, url, opts)
when is_binary(url) and method in @supported_methods do
uri = URI.parse(url)
scheme = String.to_atom(uri.scheme)
headers = Keyword.get(opts, :headers, [])
{body, query} = body_and_query_from_method(method, opts)
path = encode_query_params(uri.path || "/", query)
%__MODULE__{
scheme: scheme,
host: uri.host,
port: uri.port,
path: path,
method: method,
headers: headers,
body: body
}
end
def new(method, _, _) when method not in @supported_methods do
supported_methods = Enum.join(@supported_methods, ", ")
msg = "#{method} is not supported. Supported methods: #{supported_methods}"
raise ArgumentError, msg
end
def new(_, _, _) do
raise ArgumentError, "URL must be a string"
end
def url_for(%__MODULE__{scheme: scheme, host: host, port: port, path: path}) do
[
scheme,
"://",
host,
":",
port,
path
]
|> Enum.join("")
end
defp encode_query_params(path, []), do: path
defp encode_query_params(path, query) do
path <> "?" <> URI.encode_query(query)
end
defp body_and_query_from_method(method, opts)
when method in ~w(GET OPTIONS HEAD TRACE DELETE) do
query = Keyword.get(opts, :body, [])
{"", query}
end
defp body_and_query_from_method(_, opts) do
body = Keyword.get(opts, :body, "")
{body, []}
end
end
lib/http_stream/request.ex
defmodule AbsintheCache.DocumentProvider do
@moduledoc ~s"""
Custom Absinthe DocumentProvider for more effective caching.
Absinthe phases have one main difference in comparison to plugs - all phases
must run and cannot be halted. But phases can be jumped over by returning
`{:jump, result, destination_phase}`
This module makes use of 2 new phases - a `CacheDocument` phase and `Idempotent`
phase.
If the value is present in the cache, it is put in the blueprint and the execution
jumps to the `Idempotent` phase, effectively skipping Absinthe's `Resolution`
and `Result` phases. `Result` is the last phase in the pipeline, so the `Idempotent`
phase is inserted after it.
If the value is not present in the cache, Absinthe's default `Resolution` and
`Result` phases are executed and the new `CacheDocument` and `Idempotent`
phases are no-ops.
Finally, there's a `before_send` hook that adds the result to the cache.
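## Example
A minimal sketch of adopting this document provider (the module name and TTL
values are arbitrary; the options shown are the ones `__using__/1` reads):
```elixir
defmodule MyAppWeb.CachedDocumentProvider do
use AbsintheCache.DocumentProvider, ttl: 300, max_ttl_offset: 30
end
```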
"""
defmacro __using__(opts) do
quote location: :keep, bind_quoted: [opts: opts] do
@behaviour Absinthe.Plug.DocumentProvider
@doc false
@impl true
def pipeline(%Absinthe.Plug.Request.Query{pipeline: pipeline}) do
pipeline
|> Absinthe.Pipeline.insert_before(
Absinthe.Phase.Document.Execution.Resolution,
CacheDocument
)
|> Absinthe.Pipeline.insert_after(
Absinthe.Phase.Document.Result,
Idempotent
)
end
@doc false
@impl true
def process(%Absinthe.Plug.Request.Query{document: nil} = query, _), do: {:cont, query}
def process(%Absinthe.Plug.Request.Query{document: _} = query, _), do: {:halt, query}
defmodule Idempotent do
@moduledoc ~s"""
A no-op phase inserted after the Absinthe's `Result` phase.
If the needed value is found in the cache, `CacheDocument` phase jumps to
`Idempotent` one so the Absinthe's `Resolution` and `Result` phases are skipped.
"""
use Absinthe.Phase
@spec run(Absinthe.Blueprint.t(), Keyword.t()) :: Absinthe.Phase.result_t()
def run(bp_root, _), do: {:ok, bp_root}
end
defmodule CacheDocument do
@moduledoc ~s"""
Custom phase for obtaining the result from cache.
In case the value is not present in the cache, the default `Resolution` and
`Result` phases are run. Otherwise the custom `Resolution` phase is run and
`Result` is jumped over.
When calculating the cache key only some of the fields of the whole blueprint
are used. They are defined in the module attribute @cache_fields. The only
values that are converted to something else in the process of construction
of the cache key are:
- DateTime - It is rounded by TTL so all datetimes in a range yield the same
cache key
- Struct - All structs are converted to plain maps
"""
use Absinthe.Phase
@compile :inline_list_funcs
@compile inline: [add_cache_key_to_context: 2, cache_key_from_params: 2]
# Access opts from the surrounding `AbsintheCache.DocumentProvider` module
@ttl Keyword.get(opts, :ttl, 120)
@max_ttl_offset Keyword.get(opts, :max_ttl_offset, 60)
@cache_key_fun Keyword.get(opts, :additional_cache_key_args_fun, fn _ -> :ok end)
@spec run(Absinthe.Blueprint.t(), Keyword.t()) :: Absinthe.Phase.result_t()
def run(bp_root, _) do
additional_args = @cache_key_fun.(bp_root)
cache_key =
AbsintheCache.cache_key(
{"bp_root", additional_args} |> :erlang.phash2(),
sanitize_blueprint(bp_root),
ttl: @ttl,
max_ttl_offset: @max_ttl_offset
)
bp_root = add_cache_key_to_context(bp_root, cache_key)
case AbsintheCache.get(cache_key) do
nil ->
{:ok, bp_root}
result ->
# Storing it again `touch`es it and the TTL timer is restarted.
# This can lead to the same value being re-stored indefinitely.
Process.put(:do_not_cache_query, true)
{:jump, %{bp_root | result: result},
AbsintheCache.Phase.Document.Execution.Idempotent}
end
end
# TODO: Make this function configurable
defp add_cache_key_to_context(
%{execution: %{context: context} = execution} = blueprint,
cache_key
) do
%{
blueprint
| execution: %{execution | context: Map.put(context, :query_cache_key, cache_key)}
}
end
defp add_cache_key_to_context(bp, _), do: bp
# Leave only the fields that are needed to generate the cache key.
# This allows us to cache with values that are interpolated into the query
# string itself. The datetimes are rounded so all datetimes in a bucket
# generate the same cache key.
defp sanitize_blueprint(%DateTime{} = dt), do: dt
defp sanitize_blueprint({:argument_data, _} = tuple), do: tuple
defp sanitize_blueprint({a, b}), do: {a, sanitize_blueprint(b)}
@cache_fields [
:name,
:argument_data,
:selection_set,
:selections,
:fragments,
:operations,
:alias
]
defp sanitize_blueprint(map) when is_map(map) do
Map.take(map, @cache_fields)
|> Enum.map(&sanitize_blueprint/1)
|> Map.new()
end
defp sanitize_blueprint(list) when is_list(list) do
Enum.map(list, &sanitize_blueprint/1)
end
defp sanitize_blueprint(data), do: data
# Extract the query and variables from the params map and generate
# a cache key using them.
# The query is fetched as is.
# The variables that are valid datetime types (have the `from` or `to` name
# and valid value) are converted to Elixir DateTime type prior to being used.
# This is done because the datetimes are rounded so all datetimes in an
# N-minute bucket have the same cache key.
# The other param types are not cast as they would be used the same way in both
# places where the cache key is calculated.
defp cache_key_from_params(params, permissions) do
query = Map.get(params, "query", "")
variables =
case Map.get(params, "variables") do
map when is_map(map) -> map
vars when is_binary(vars) and vars != "" -> vars |> Jason.decode!()
_ -> %{}
end
|> Enum.map(fn
{key, value} when is_binary(value) ->
case DateTime.from_iso8601(value) do
{:ok, datetime, _} -> {key, datetime}
_ -> {key, value}
end
pair ->
pair
end)
|> Map.new()
AbsintheCache.cache_key({query, permissions}, variables,
ttl: @ttl,
max_ttl_offset: @max_ttl_offset
)
end
end
end
end
end
lib/document_provider.ex
defmodule Grizzly.CommandClass.Configuration.Set do
@moduledoc """
Command module for working with the Configuration command class SET command
Command Options:
* `:config_param` - The parameter for the configuration item outlined in the device's Z-Wave spec
* `:size` - The size of the parameter value in bytes: 1, 2, or 4
* `:arg` - The argument for the parameter as outlined in the device's Z-Wave spec
* `:seq_number` - The sequence number used for the Z/IP packet
* `:retries` - The number of attempts to send the command (default 2)
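## Example
A minimal sketch (parameter number, size, value, and sequence number are
arbitrary):
```elixir
{:ok, command} =
Grizzly.CommandClass.Configuration.Set.init(
config_param: 0x20,
size: 1,
arg: 0x01,
seq_number: 0x08
)
{:ok, _binary} = Grizzly.CommandClass.Configuration.Set.encode(command)
```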
"""
@behaviour Grizzly.Command
alias Grizzly.Packet
alias Grizzly.Command.{EncodeError, Encoding}
alias Grizzly.CommandClass.Configuration
@type t :: %__MODULE__{
config_param: byte,
size: non_neg_integer,
arg: Configuration.param_arg(),
seq_number: Grizzly.seq_number(),
retries: non_neg_integer()
}
@type opt ::
{:config_param, byte}
| {:size, non_neg_integer}
| {:arg, Configuration.param_arg()}
| {:seq_number, Grizzly.seq_number()}
| {:retries, non_neg_integer()}
defstruct config_param: nil, size: nil, arg: nil, seq_number: nil, retries: 2
@spec init([opt]) :: {:ok, t}
def init(opts) do
{:ok, struct(__MODULE__, opts)}
end
@spec encode(t) :: {:ok, binary} | {:error, EncodeError.t()}
def encode(
%__MODULE__{size: size, config_param: config_param, arg: arg, seq_number: seq_number} =
command
)
when is_list(arg) do
with {:ok, _encoded} <-
Encoding.encode_and_validate_args(command, %{
size: :byte,
config_param: :byte,
arg: [:byte]
}) do
binary =
Packet.header(seq_number) <>
<<0x70, 0x04, config_param, size>> <> :erlang.list_to_binary(arg)
{:ok, binary}
end
end
def encode(
%__MODULE__{size: size, config_param: config_param, arg: arg, seq_number: seq_number} =
command
) do
with {:ok, _encoded} <-
Encoding.encode_and_validate_args(command, %{
size: :byte,
config_param: :byte,
arg: :byte
}) do
arg_list = <<arg::signed-integer-size(size)-unit(8)>> |> :binary.bin_to_list()
binary =
Packet.header(seq_number) <>
<<0x70, 0x04, config_param, size>> <> :erlang.list_to_binary(arg_list)
{:ok, binary}
end
end
@spec handle_response(t, Packet.t()) ::
{:continue, t()}
| {:done, {:error, :nack_response}}
| {:done, :ok}
| {:retry, t()}
| {:queued, t()}
def handle_response(%__MODULE__{seq_number: seq_number}, %Packet{
seq_number: seq_number,
types: [:ack_response]
}) do
{:done, :ok}
end
def handle_response(
%__MODULE__{seq_number: seq_number} = command,
%Packet{
seq_number: seq_number,
types: [:nack_response, :nack_waiting]
} = packet
) do
if Packet.sleeping_delay?(packet) do
{:queued, command}
else
{:continue, command}
end
end
def handle_response(%__MODULE__{seq_number: seq_number, retries: 0}, %Packet{
seq_number: seq_number,
types: [:nack_response]
}) do
{:done, {:error, :nack_response}}
end
def handle_response(%__MODULE__{seq_number: seq_number, retries: n} = command, %Packet{
seq_number: seq_number,
types: [:nack_response]
}) do
{:retry, %{command | retries: n - 1}}
end
def handle_response(command, _), do: {:continue, command}
end
lib/grizzly/command_class/configuration/set.ex
defmodule Manic.Fees do
@moduledoc """
Query dynamic fee rates from Bitcoin miners, and calculate accurate
transaction fees.
Miners are moving to a model where they will fix their fees in Fiat terms. In
addition, miners will compete with each other and some may specialise in
different types of transactions. All of this will lead to a fluid fee market
where the rates offered by miners will differ and shift over time.
This module allows developers to query miners directly for up to date fee rates,
plus calculate accurate fees for any given transaction.
"""
alias Manic.{JSONEnvelope, Miner, Multi}
@typedoc """
The type of transaction data any given fee applies to.
Currently fees are broken down by `standard` and `data` types. `data` fees are
applied to any data carrier output (`OP_RETURN`) whereas all other transaction
data is priced at the standard rate. In future other fee types may be introduced.
"""
@type fee_type :: :standard | :data | atom
@typedoc """
Fee rates broken down by [`fee types`](`t:fee_type/0`).
"""
@type fee_rates :: %{
optional(fee_type) => float
}
@typedoc """
A simplified miner fee quote.
The quote contains an expiry date, letting developers know how long the quoted
fees remain valid. [`Fee rates`](`t:fee_rates/0`) are further broken
down by:
* `:mine` - Minimum threshold where a miner would be willing to mine the transaction
* `:relay` - Minimum threshold where a miner would be willing to relay and hold a transaction in their mempool
"""
@type fee_quote :: %{
expires: DateTime.t,
mine: fee_rates,
relay: fee_rates
}
@doc """
Get a [`fee quote`](`t:fee_quote/0`) from the given [`miner`](`t:Manic.miner/0`).
Returns the result in an `:ok` / `:error` tuple pair.
## Options
The `:as` option can be used to specify how to receive the fees. The accepted
values are:
* `:fees` - The structured [`fee quote`](`t:fee_quote/0`) data **(Default)**
* `:payload` - The decoded JSON [`payload`](`t:Manic.JSONEnvelope.payload/0`)
* `:envelope` - The raw [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`)
## Examples
To get a fee quote from the given miner.
iex> Manic.Fees.get(miner)
{:ok, %{
mine: %{data: 0.5, standard: 0.5},
relay: %{data: 0.25, standard: 0.25},
verified: true
}}
Using the `:as` option to return the [`JSON envelope`](`t:Manic.JSONEnvelope.t/0`).
iex> Manic.Fees.get(miner, as: :envelope)
{:ok, %Manic.JSONEnvelope{
encoding: "UTF-8",
mimetype: "application/json",
payload: "{\\"apiVersion\\":\\"0.1.0\\",\\"timestamp\\":\\"2020-04-20T14:10:15.079Z\\",\\"expiryTime\\":\\"2020-04-20T14:20:15.079Z\\",\\"minerId\\":\\"03e92d3e5c3f7bd945dfbf48e7a99393b1bfb3f11f380ae30d286e7ff2aec5a270\\",\\"currentHighestBlockHash\\":\\"00000000000000000020900d959b83325068f28ff635cb541888ef16ec8ebaf7\\",\\"currentHighestBlockHeight\\":631451,\\"minerReputation\\":null,\\"fees\\":[{\\"feeType\\":\\"standard\\",\\"miningFee\\":{\\"satoshis\\":5,\\"bytes\\":10},\\"relayFee\\":{\\"satoshis\\":25,\\"bytes\\":100}},{\\"feeType\\":\\"data\\",\\"miningFee\\":{\\"satoshis\\":5,\\"bytes\\":10},\\"relayFee\\":{\\"satoshis\\":25,\\"bytes\\":100}}]}",
public_key: "<KEY>",
signature: "304402206fc2744bc3626e5becbc3a708760917c6f78f83a61fd557b238c613862929412022047d22f89bd6fe98ca50e819452db81318641f74544252b1f04536cc689cf5f55",
verified: true
}}
"""
@spec get(Manic.miner | Manic.multi_miner, keyword) ::
{:ok, fee_quote | JSONEnvelope.payload | JSONEnvelope.t} |
{:error, Exception.t} |
Multi.result
def get(miner, options \\ [])
def get(%Miner{} = miner, options) do
format = Keyword.get(options, :as, :fees)
with {:ok, %{body: body, status: status}} when status in 200..202 <- Tesla.get(miner.client, "/mapi/feeQuote"),
{:ok, body} <- JSONEnvelope.verify(body),
{:ok, payload} <- JSONEnvelope.parse_payload(body),
{:ok, fees} <- build_fee_quote(payload)
do
res = case format do
:envelope -> body
:payload -> payload
_ -> fees
end
{:ok, res}
else
{:ok, res} ->
{:error, "HTTP Error: #{res.status}"}
{:error, err} ->
{:error, err}
end
end
def get(%Multi{} = multi, options) do
multi
|> Multi.async(__MODULE__, :get, [options])
|> Multi.yield
end
@doc """
As `get/2` but returns the result or raises an exception if it fails.
"""
@spec get!(Manic.miner | Manic.multi_miner, keyword) ::
fee_quote | JSONEnvelope.payload | JSONEnvelope.t
def get!(miner, options \\ []) do
case get(miner, options) do
{:ok, fees} -> fees
{:error, error} -> raise error
end
end
# Builds the simplified `t:fee_quote/0` map from the given payload.
defp build_fee_quote(%{"expiry_time" => expires, "fees" => fees, "verified" => verified})
when is_list(fees)
do
{:ok, expires, _} = DateTime.from_iso8601(expires)
fees = Enum.reduce(fees, %{expires: expires, mine: %{}, relay: %{}, verified: verified}, fn f, fees ->
type = String.to_atom(f["fee_type"])
%{"mining_fee" => m, "relay_fee" => r} = f
fees
|> Map.update!(:mine, & Map.put(&1, type, m["satoshis"] / m["bytes"]))
|> Map.update!(:relay, & Map.put(&1, type, r["satoshis"] / r["bytes"]))
end)
{:ok, fees}
end
@doc """
Calculates the fee of the given [`transaction`](`t:BSV.Tx.t/0`) using
the specified [`rates`](`t:fee_rates/0`).
Returns the fee in satoshis as an `t:integer/0`.
If a [`miner`](`t:Manic.miner/0`) is passed as the first argument, the
function firstly gets the [`rates`](`t:fee_rates/0`) for that miner, before
calculating the fee for the given transaction. The transaction can be passed
as either a `t:BSV.Tx.t/0` or as a hex encoded binary.
## Example
iex> Manic.Fees.calculate(%{data: 0.5, standard: 0.5}, tx)
346
"""
@spec calculate(Manic.miner, BSV.Tx.t | String.t) ::
{:ok, integer} |
{:error, Exception.t}
def calculate(%Miner{} = miner, tx) do
case get(miner) do
{:ok, fee_quote} ->
calculate(miner, tx, fee_quote)
{:error, error} ->
{:error, error}
end
end
@spec calculate(Manic.miner, BSV.Tx.t | String.t, fee_quote) ::
{:ok, integer} |
{:error, Exception.t}
def calculate(miner, tx, fee_quote) when is_binary(tx) do
case validate_tx(tx) do
{:ok, tx} ->
calculate(miner, tx, fee_quote)
{:error, error} ->
{:error, error}
end
end
def calculate(_miner, %BSV.Tx{} = tx, fee_quote) do
# Convert tx into txbuilder so can use the fee calc method
builder = %BSV.TxBuilder{
inputs: Enum.map(tx.inputs, fn %{outpoint: outpoint, script: script} ->
utxo = %BSV.UTXO{outpoint: outpoint}
BSV.Contract.Raw.unlock(utxo, %{script: script})
end),
outputs: Enum.map(tx.outputs, fn %{satoshis: satoshis, script: script} ->
BSV.Contract.Raw.lock(satoshis, %{script: script})
end)
}
try do
{:ok, BSV.TxBuilder.calc_required_fee(builder, fee_quote)}
rescue error ->
{:error, error}
end
end
@doc """
As `calculate/2` but returns the result or raises an exception if it fails.
"""
@spec calculate!(Manic.miner, BSV.Tx.t | String.t) :: integer
def calculate!(miner, tx) do
case calculate(miner, tx) do
{:ok, fee} -> fee
{:error, error} -> raise error
end
end
@spec calculate!(Manic.miner, BSV.Tx.t | String.t, fee_quote) :: integer
def calculate!(miner, tx, fee_quote) do
case calculate(miner, tx, fee_quote) do
{:ok, fee} -> fee
{:error, error} -> raise error
end
end
# Validates the given transaction binary by attempting to parse it.
defp validate_tx(tx) when is_binary(tx) do
try do
{:ok, BSV.Tx.from_binary!(tx, encoding: :hex)}
rescue
_err -> {:error, "Not valid transaction"}
end
end
end
lib/manic/fees.ex
defmodule WhiteBread.Outputers.JSON do
use GenServer
@moduledoc """
This generic server accumulates information about White Bread
scenarios then formats them as JSON and outputs them to a file in
one go.
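The output file path is read from the `:outputers` key of the `:white_bread`
application environment (see `document_path/0` below). A minimal sketch (the
path is arbitrary):
```elixir
config :white_bread, outputers: [
{WhiteBread.Outputers.JSON, path: "reports/white_bread.json"}
]
```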
"""
defstruct path: nil, data: []
## Client Interface
@doc false
def start do
{:ok, outputer} = GenServer.start __MODULE__, []
outputer
end
@doc false
def stop(outputer) do
GenServer.cast(outputer, :stop)
end
## Interface to Generic Server Machinery
def init(_) do
Process.flag(:trap_exit, true)
{:ok, %__MODULE__{path: document_path()}}
end
def handle_cast({:final_results, results}, state) do
all_features = results[:successes] ++ results[:failures]
{:noreply, Map.put(state, :data, Enum.map(all_features, &map_feature/1))}
end
def handle_cast(:stop, state) do
{:stop, :normal, state}
end
def handle_cast(_x, state) do
{:noreply, state}
end
def terminate(_, state = %__MODULE__{path: path}) do
report(state, path)
end
## Internal
defp result_for_step(_step, {:ok, _name}) do
%{
status: "passed",
duration: 1,
}
end
defp result_for_step(step, {:failed, {error, failed_step, error2}}) do
cond do
step.line < failed_step.line -> %{status: "passed", duration: 1}
step.line > failed_step.line -> %{status: "skipped", duration: 1}
step.line == failed_step.line ->
%{
status: "failed",
duration: 1,
error_message: format_error_message(error, failed_step, error2)
}
end
end
defp format_error_message(error, _failed_step, {error_object, stacktrace}) when is_atom(error) do
Exception.format(:error, error_object, stacktrace)
end
defp format_error_message(_error, _failed_step, assertion_error) do
assertion_error.message
end
defp find_scenario_result(scenario, feature_result) do
all_results = feature_result[:successes] ++ feature_result[:failures]
Enum.find(all_results, fn({inner_scenario, _details}) ->
inner_scenario.line == scenario.line && inner_scenario.name == scenario.name
end)
end
defp step_keyword(%Gherkin.Elements.Steps.Given{}), do: "Given "
defp step_keyword(%Gherkin.Elements.Steps.When{}), do: "When "
defp step_keyword(%Gherkin.Elements.Steps.Then{}), do: "Then "
defp step_keyword(%Gherkin.Elements.Steps.And{}), do: "And "
defp step_keyword(%Gherkin.Elements.Steps.But{}), do: "But "
defp normalize_name(name) do
name
|> String.downcase()
|> String.replace(~r/\s/, "-")
end
defp document_path do
case Keyword.fetch!(outputers(), __MODULE__) do
[path: "/"] ->
raise WhiteBread.Outputers.JSON.PathError
[path: x] when is_binary(x) ->
Path.expand x
end
end
defp write(content, path) do
File.mkdir_p!(parent(path)) && File.write!(path, content)
end
defp parent(path) do
Path.join(drop(Path.split(path)))
end
defp drop(x) when is_list(x), do: x -- [List.last(x)]
defmodule PathError do
defexception message: "Given root directory."
end
defp report(state, path) do
state.data |> Poison.encode!(pretty: true, iodata: true) |> write(path)
end
defp outputers do
Application.fetch_env!(:white_bread, :outputers)
end
defp map_feature({feature, result}) do
%{
id: normalize_name(feature.name),
name: feature.name,
uri: feature.file,
keyword: "Feature",
type: "scenario",
line: feature.line,
description: feature.description,
elements: Enum.map(feature.scenarios, &(map_scenario(&1, feature, result))),
tags: feature.tags |> Enum.map(fn(tag) -> %{name: tag, line: feature.line - 1} end),
}
end
defp map_scenario(scenario, feature, feature_result) do
scenario_result = find_scenario_result(scenario, feature_result)
%{
keyword: "Scenario",
id: [feature.name, scenario.name] |> Enum.map(&normalize_name/1) |> Enum.join(";"),
name: scenario.name,
tags: scenario.tags |> Enum.map(fn(tag) -> %{name: tag, line: scenario.line - 1} end),
steps: Enum.map(scenario.steps, &(map_step(&1, scenario_result))),
}
end
defp map_step(step, scenario_result) do
{_scenario, scenario_result_details} = scenario_result
%{
keyword: step_keyword(step),
name: step.text,
line: step.line,
doc_string: %{
content_type: "",
value: step.doc_string,
line: step.line + 1
},
match: %{},
result: result_for_step(step, scenario_result_details),
}
end
end
lib/white_bread/outputers/json.ex
defmodule Boss.WebController do
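# Illustrative sketch of the routing DSL this module provides; the controller
# name, route tokens, and handler body are hypothetical:
#
#     defmodule MyController do
#       use Boss.WebController
#
#       get :index, ["users", id] do
#         {:ok, [id: id]}
#       end
#     end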
defmacro __using__(_) do
quote do
Module.register_attribute __MODULE__, :__routes__, persist: false, accumulate: true
@before_compile unquote(__MODULE__)
import unquote(__MODULE__)
end
end
defmacro get(action, tokens, block) do
handle(:GET, action, tokens, block)
end
defmacro get(action, tokens, info, block) do
handle(:GET, action, tokens, block, info)
end
defmacro post(action, tokens, block) do
handle(:POST, action, tokens, block)
end
defmacro post(action, tokens, info, block) do
handle(:POST, action, tokens, block, info)
end
defmacro put(action, tokens, block) do
handle(:PUT, action, tokens, block)
end
defmacro put(action, tokens, info, block) do
handle(:PUT, action, tokens, block, info)
end
defmacro delete(action, tokens, block) do
handle(:DELETE, action, tokens, block)
end
defmacro delete(action, tokens, info, block) do
handle(:DELETE, action, tokens, block, info)
end
defmacro before_(action, block) do
quote do
def before_(var!(req), var!(session_id), unquote(action)), unquote(block)
end
end
defmacro before_(action, method, tokens, block) do
quote do
def before_(var!(req), var!(session_id), unquote(action), unquote(method), unquote(tokens)), unquote(block)
end
end
defmacro cache_(action, tokens, block) do
quote do
def cache_(var!(req), var!(session_id), unquote(action), unquote(tokens)), unquote(block)
end
end
defmacro cache_(action, tokens, info, block) do
quote do
def cache_(var!(req), var!(session_id), unquote(action), unquote(tokens), unquote(info)), unquote(block)
end
end
defmacro lang_(action, block) do
quote do
def lang_(var!(req), var!(session_id), unquote(action)), unquote(block)
end
end
defmacro lang_(action, info, block) do
quote do
def lang_(var!(req), var!(session_id), unquote(action), unquote(info)), unquote(block)
end
end
defmacro after_(action, result, block) do
quote do
def after_(var!(req), var!(session_id), unquote(action), unquote(result)), unquote(block)
end
end
defmacro after_(action, result, info, block) do
quote do
def after_(var!(req), var!(session_id), unquote(action), unquote(result), unquote(info)), unquote(block)
end
end
defp handle(method, action, tokens, block) do
action = to_action(action)
route = {action, to_route_tokens(tokens)}
quote do
@__routes__ unquote(route)
def unquote(action)(var!(req), var!(session_id), unquote(method), unquote(tokens)), unquote(block)
end
end
defp handle(method, action, tokens, block, info) do
action = to_action(action)
route = {action, to_route_tokens(tokens)}
quote do
@__routes__ unquote(route)
def unquote(action)(var!(req), var!(session_id), unquote(method), unquote(tokens), unquote(info)), unquote(block)
end
end
defp to_action({atom, _, _}), do: atom
defp to_action(action), do: action
defp to_route_tokens({_, _, nil}), do: []
defp to_route_tokens(tokens), do: for(token <- tokens, do: to_route_token(token))
defp to_route_token(token) do
case token do
{:|, _, [token, _]} -> to_route_token(token)
{name, _, _} -> name
value -> value
end
end
defmacro __before_compile__(_) do
quote do
def _routes(_), do: @__routes__
# Simulate `new` and `instance` constructor functions
def new(req), do: {__MODULE__, req}
def instance(req), do: {__MODULE__, req}
end
end
end
priv/web_controller.ex
if Code.ensure_loaded?(Jason) do
defmodule AbsintheRelayKeysetConnection.CursorTranslator.Base64Hashed do
@moduledoc """
A tamper-resistant (not tamper-proof) cursor translator implementation that
uses base64 and a hashed padding.
These values are serialized using `Jason.encode/1`, which means you'll need
an implementation of the `Jason.Encoder` protocol for the type of each column you
sort by.
The library covers most common data types, but you may need to implement your
own for less common ones.
For example, if you're using `Postgrex.INET` for a PostgreSQL `inet` column,
you might need:
```elixir
defmodule MyApp.CustomEncoders do
defimpl Jason.Encoder, for: [Postgrex.INET] do
def encode(struct, opts) do
Jason.Encode.string(EctoNetwork.INET.decode(struct), opts)
end
end
end
"""
@behaviour AbsintheRelayKeysetConnection.CursorTranslator
@prefix "🔑"
@pad_length 2
@pad_bits @pad_length * 8
@doc """
Creates the cursor string from a key.
This encoding is not meant to be tamper-proof, just to hide the cursor data
as an implementation detail.
## Examples
iex> from_key(%{id: 25}, [:id])
"<KEY>=="
iex> from_key(%{name: "Mo", id: 26}, [:name, :id])
"<KEY>"
"""
@impl AbsintheRelayKeysetConnection.CursorTranslator
def from_key(key_map, cursor_columns) do
key =
Enum.map(cursor_columns, fn column ->
Map.fetch!(key_map, column)
end)
{:ok, json} = Jason.encode(key)
# Makes it easy to visually distinguish between cursors
padding = padding_from(json)
Base.encode64(padding <> @prefix <> json)
end
@doc """
Rederives the key from the cursor string.
The cursor string is supplied by users and may have been tampered with.
However, we ensure that only the expected column values may appear in the
cursor, so at worst, they could paginate from a different spot, which is
fine.
## Examples
iex> to_key("<KEY> [:id])
{:ok, %{id: 25}}
"""
@impl AbsintheRelayKeysetConnection.CursorTranslator
def to_key(encoded_cursor, expected_columns) do
with {:ok, <<digest::size(@pad_bits)>> <> @prefix <> json_cursor} <-
Base.decode64(encoded_cursor),
true <- valid_digest?(digest, json_cursor),
{:ok, decoded_list} <- Jason.decode(json_cursor),
true <- Enum.count(expected_columns) == Enum.count(decoded_list) do
key =
expected_columns
|> Enum.zip(decoded_list)
|> Map.new()
{:ok, key}
else
_ -> {:error, :invalid_cursor}
end
rescue
ArgumentError ->
{:error, :invalid_cursor}
end
# Since we built the padding from a hash of the original contents of the
# cursor, we can check whether the cursor we got back would have produced the
# same padding. This is not a very strong check because someone could find
# a tampered value that would produce the same hash, especially since we
# don't use the entire hash in the padding. But casual tampering would be
# rejected.
defp valid_digest?(digest, json_cursor) do
<<check_digest::size(@pad_bits)>> = padding_from(json_cursor)
check_digest == digest
end
# Builds a varied but deterministic padding string from the input.
defp padding_from(string) do
:crypto.hash(:sha, string)
|> Kernel.binary_part(0, @pad_length)
end
end
end
lib/absinthe_relay_keyset_connection/cursor_translator/base64_hashed.ex
defmodule RDF.XSD.Facet do
@moduledoc """
A behaviour for XSD restriction facets.
Here's a list of all the `RDF.XSD.Facet`s RDF.ex implements out-of-the-box:
| XSD facet | `RDF.XSD.Facet` |
| :-------------- | :------------- |
| length | `RDF.XSD.Facets.Length` |
| minLength | `RDF.XSD.Facets.MinLength` |
| maxLength | `RDF.XSD.Facets.MaxLength` |
| maxInclusive | `RDF.XSD.Facets.MaxInclusive` |
| maxExclusive | `RDF.XSD.Facets.MaxExclusive` |
| minInclusive | `RDF.XSD.Facets.MinInclusive` |
| minExclusive | `RDF.XSD.Facets.MinExclusive` |
| totalDigits | `RDF.XSD.Facets.TotalDigits` |
| fractionDigits | `RDF.XSD.Facets.FractionDigits` |
| explicitTimezone | `RDF.XSD.Facets.ExplicitTimezone` |
| pattern | `RDF.XSD.Facets.Pattern` |
| whiteSpace | ❌ |
| enumeration | ❌ |
| assertions | ❌ |
Every `RDF.XSD.Datatype.Primitive` defines a set of applicable constraining facets which can
be used on derivations of this primitive or any of its existing derivations:
| Primitive datatype | Applicable facets |
| :----------------- | :---------------- |
|string | `RDF.XSD.Facets.Length`, `RDF.XSD.Facets.MaxLength`, `RDF.XSD.Facets.MinLength`, `RDF.XSD.Facets.Pattern` |
|boolean | `RDF.XSD.Facets.Pattern` |
|float | `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|double | `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|decimal | `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern`, `RDF.XSD.Facets.TotalDigits`, `RDF.XSD.Facets.FractionDigits` |
|integer | `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern`, `RDF.XSD.Facets.TotalDigits` |
|duration | `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|dateTime | `RDF.XSD.Facets.ExplicitTimezone`, `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|time | `RDF.XSD.Facets.ExplicitTimezone`, `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|date | `RDF.XSD.Facets.ExplicitTimezone`, `RDF.XSD.Facets.MaxExclusive`, `RDF.XSD.Facets.MaxInclusive`, `RDF.XSD.Facets.MinExclusive`, `RDF.XSD.Facets.MinInclusive`, `RDF.XSD.Facets.Pattern` |
|anyURI | `RDF.XSD.Facets.Length`, `RDF.XSD.Facets.MaxLength`, `RDF.XSD.Facets.MinLength`, `RDF.XSD.Facets.Pattern` |
<https://www.w3.org/TR/xmlschema11-2/datatypes.html#rf-facets>
"""
@type t :: module
@doc """
The name of a `RDF.XSD.Facet`.
"""
@callback name :: String.t()
defmacro __using__(opts) do
name = Keyword.fetch!(opts, :name)
type_ast = Keyword.fetch!(opts, :type)
quote bind_quoted: [], unquote: true do
@behaviour RDF.XSD.Facet
@doc """
Returns the value of this `RDF.XSD.Facet` on specific `RDF.XSD.Datatype`.
"""
@callback unquote(name)() :: unquote(type_ast) | nil
@doc """
Validates if a `value` and `lexical` conforms with a concrete `facet_constaint_value` for this `RDF.XSD.Facet`.
This function must be implemented on a `RDF.XSD.Datatype` using this `RDF.XSD.Facet`.
"""
@callback unquote(conform_fun_name(name))(
facet_constaint_value :: any,
value :: any,
RDF.XSD.Datatype.uncanonical_lexical()
) :: boolean
@name unquote(Atom.to_string(name))
@impl RDF.XSD.Facet
def name, do: @name
@doc """
Checks if a `value` and `lexical` conforms with the `c:#{unquote(conform_fun_name(name))}/3` implementation on the `datatype` `RDF.XSD.Datatype`.
"""
@spec conform?(RDF.XSD.Datatype.t(), any, RDF.XSD.Datatype.uncanonical_lexical()) :: boolean
def conform?(datatype, value, lexical) do
constrain_value = apply(datatype, unquote(name), [])
is_nil(constrain_value) or
apply(datatype, unquote(conform_fun_name(name)), [constrain_value, value, lexical])
end
defmacro __using__(_opts) do
import unquote(__MODULE__)
default_facet_impl(__MODULE__, unquote(name))
end
end
end
defp conform_fun_name(facet_name), do: :"#{facet_name}_conform?"
@doc """
Macro for the definition of concrete constraining `value` for a `RDF.XSD.Facet` on a `RDF.XSD.Datatype`.
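A minimal sketch, assuming a datatype derived via `RDF.XSD.Datatype.Restriction`
(the module name, id, and length limit are arbitrary):
```elixir
defmodule MyString do
use RDF.XSD.Datatype.Restriction,
name: "my_string",
id: "http://example.com/my_string",
base: RDF.XSD.String
def_facet_constraint RDF.XSD.Facets.MaxLength, 10
end
```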
"""
defmacro def_facet_constraint(facet, value) do
facet_mod = Macro.expand_once(facet, __CALLER__)
facet_name = String.to_atom(facet_mod.name)
quote do
unless unquote(facet) in @base.applicable_facets,
do: raise("#{unquote(facet_name)} is not an applicable facet of #{@base}")
@facets unquote(facet_name)
@impl unquote(facet)
def unquote(facet_name)(), do: unquote(value)
end
end
@doc false
def default_facet_impl(facet_mod, facet_name) do
quote do
@behaviour unquote(facet_mod)
Module.put_attribute(__MODULE__, unquote(facet_mod), nil)
@impl unquote(facet_mod)
def unquote(facet_name)(), do: nil
defoverridable [{unquote(facet_name), 0}]
end
end
@doc false
def restriction_impl(facets, applicable_facets) do
Enum.map(applicable_facets, fn applicable_facet ->
applicable_facet_name = String.to_atom(applicable_facet.name)
quote do
@behaviour unquote(applicable_facet)
unless unquote(applicable_facet_name in facets) do
@impl unquote(applicable_facet)
def unquote(applicable_facet_name)(),
do: apply(@base, unquote(applicable_facet_name), [])
end
@impl unquote(applicable_facet)
def unquote(conform_fun_name(applicable_facet_name))(constrain_value, value, lexical) do
apply(@base, unquote(conform_fun_name(applicable_facet_name)), [
constrain_value,
value,
lexical
])
end
end
end)
end
end
lib/rdf/xsd/facet.ex
defmodule Fix do
@type fix() :: (Macro.t() -> Macro.t()) | built_in()
@typep built_in() :: tuple()
@doc """
Transforms code in `string` according to `fixes`.
A "fix" is a 1-arity function that transforms the AST. The function is
given to `Macro.prewalk/2`. There's also a list of pre-prepared fixes
that can be accessed as tuples:
* `{:rename_function_def, :foo, :bar}`
* `{:rename_function_call, {Foo, :foo}, {Foo, :bar}}`
`opts` are passed down to `Code.format_string!/2`.
**Note**: the AST that the fix accepts and returns is not the "regular"
Elixir AST, but the annotated Elixir formatter AST. Given we rely on Elixir
internals, this function may not work on future Elixir versions. It has been
tested only on Elixir v1.10.3.
Here's an example fix that transforms `def foo` into `def bar`:
foo2bar = fn
{:def, meta1, [{:foo, meta2, args}, expr]} ->
{:def, meta1, [{:bar, meta2, args}, expr]}
other ->
other
end
This fix happens to be a built-in one: `{:rename_function_def, :foo, :bar}`.
When defining your fixes, remember to add a "catch-all" clause at the end!
## Examples
iex> Fix.fix("A.a(1, 2)", [{:rename_function_call, {A, :a}, {B, :b}}])
"B.b(1, 2)"
"""
@spec fix(String.t(), [fix()], keyword(), keyword()) :: String.t()
def fix(string, fixes, opts \\ [], fix_opts \\ []) do
if Keyword.get(fix_opts, :compile, true) do
{:ok, _} = Fix.Server.start_link()
Code.compiler_options(parser_options: [columns: true], tracers: [Fix.Tracer])
Code.compile_string(string)
end
Enum.reduce(fixes, string, fn fix, acc ->
acc
|> format_string!([transform: fix(fix)] ++ opts)
|> IO.iodata_to_binary()
end) <> "\n"
end
defmodule Tracer do
def trace({:imported_function, _meta, _module, _name, _arity} = event, _env) do
Fix.Server.record(event)
end
def trace(_other, _env) do
:ok
end
end
defmodule Server do
use Agent
@name __MODULE__
def start_link() do
Agent.start_link(fn -> [] end, name: @name)
end
def record(event) do
Agent.update(@name, &[event | &1])
end
def events() do
Agent.get(@name, & &1)
end
end
defp fix(fun) when is_function(fun, 1) do
fun
end
defp fix({:rename_function_def, from, to}) do
fn
{:def, meta1, [{^from, meta2, args}, expr]} ->
{:def, meta1, [{to, meta2, args}, expr]}
other ->
other
end
end
defp fix({:rename_function_call, {from_mod, from_fun}, {to_mod, to_fun}}) do
from_alias = from_mod |> Module.split() |> Enum.map(&String.to_atom/1)
to_alias = to_mod |> Module.split() |> Enum.map(&String.to_atom/1)
fn
{{:., meta1, [{:__aliases__, meta2, ^from_alias}, ^from_fun]}, meta3, args} ->
{{:., meta1, [{:__aliases__, meta2, to_alias}, to_fun]}, meta3, args}
other ->
other
end
end
defp fix({:replace_imported_calls, module}) do
events = Fix.Server.events()
calls =
for {:imported_function, meta, ^module, function, arity} <- events do
{function, arity, meta[:line], meta[:column]}
end
alias = {:__aliases__, [], module |> Module.split() |> Enum.map(&String.to_atom/1)}
fn
{name, meta, args} = ast ->
if {name, length(args), meta[:line], meta[:column]} in calls do
{{:., [], [alias, name]}, [], args}
else
ast
end
other ->
other
end
end
defp fix({:add_dep, {:hex, name, requirement}}) do
fn
{:defp, meta, [{:deps, _, _} = fun, body]} ->
[{{_, _, [:do]} = do_ast, block_ast}] = body
{:__block__, meta1, [deps]} = block_ast
deps =
deps ++
[
{:__block__, [],
[{{:__block__, [], [name]}, {:__block__, [delimiter: "\""], [requirement]}}]}
]
{:defp, meta, [fun, [{do_ast, {:__block__, meta1, [deps]}}]]}
other ->
other
end
end
# Copied from https://github.com/elixir-lang/elixir/blob/v1.10.3/lib/elixir/lib/code.ex#L652
defp format_string!(string, opts) when is_binary(string) and is_list(opts) do
line_length = Keyword.get(opts, :line_length, 98)
algebra = Fix.Formatter.to_algebra!(string, opts)
Inspect.Algebra.format(algebra, line_length)
end
end
lib/fix.ex
defmodule Odo.Bucket do
@moduledoc """
`Odo.Bucket` is a simple `GenServer` based token bucket.
To start a new token bucket call `Odo.Bucket.new_bucket/2` with the name for the bucket and values for the
number of tokens the bucket should hold.
Once the bucket is running call `Odo.Bucket.get_token/1` with the name of the bucket.
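## Example
A minimal sketch (bucket name and sizing are arbitrary):
```elixir
Odo.Bucket.new_bucket("api", tokens: 100, tick_duration: 10_000)
case Odo.Bucket.get_token("api") do
{:go, _tokens_remaining, _until} -> :proceed
{:stop, until} -> Process.sleep(until)
end
```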
"""
use GenServer
# The state for the rate limiter which takes the form of the current number of tokens in the bucket,
# the time the bucket count began at and the time skew (or buffer) estimated for the remote service.
@typedoc false
@type rate_state :: %{
tokens: pos_integer,
tick_started_at: pos_integer | :init,
bucket_size: pos_integer,
tick_duration: pos_integer,
tick_refill_amount: pos_integer,
buffer: non_neg_integer
}
@doc """
new_bucket starts a new bucket process. It takes the bucket name, the number of tokens the bucket holds, the
refill duration and (optionally) the skew, or buffer, to add to the bucket refill time.
You may need to add a buffer to the token refill time if you're operating near the rate limit and need to account
for latency effects - i.e. you dispatch 10 tokens in a given time but they do not arrive at their destination within
that same timeframe.
"""
@spec new_bucket(name :: String.t, [tokens: pos_integer, tick_refill_amount: pos_integer, tick_duration: pos_integer, buffer: non_neg_integer]) :: Supervisor.on_start_child
def new_bucket(name, opts \\ []) do
tokens = Keyword.get(opts, :tokens, 100)
bucket_refill_amount = Keyword.get(opts, :tick_refill_amount, tokens)
refill_duration = Keyword.get(opts, :tick_duration, 10_000)
buffer = Keyword.get(opts, :buffer, 0)
Odo.BucketSupervisor.start_child(name, tokens, bucket_refill_amount, refill_duration, buffer)
end
@doc false
def start_link(name, bucket_size, bucket_refill_amount, bucket_refill_duration, buffer) do
GenServer.start_link(__MODULE__, {name, bucket_size, bucket_refill_amount, bucket_refill_duration, buffer}, name: via(name))
end
defp via(name) do
{:via, Registry, {:odo_bucket_registry, name}}
end
def init({_name, bucket_size, tick_refill_amount, tick_duration, buffer}) do
{:ok,
%{
tokens: 0,
tick_started_at: :init,
bucket_size: bucket_size,
tick_refill_amount: tick_refill_amount,
tick_duration: tick_duration + buffer,
buffer: buffer
}
}
end
@doc """
`Odo.Bucket.get_token/1` attempts to claim a token and responds with `{:go, tokens_remaining, until}` if the call
is safe to proceed or with `{:stop, until}` that tells the caller to wait until the next likely availability of
tokens in terms of `until` number of milliseconds.
"""
@spec get_token(String.t) :: {:go, tokens_remaining :: pos_integer, until :: pos_integer} | {:stop, until :: pos_integer}
def get_token(name) do
GenServer.call(via(name), :get_token)
end
@doc """
`Odo.Bucket.reset/2` restarts the bucket timer with the given buffer from the next call to `Odo.Bucket.get_token/1`
"""
@spec reset(name :: String.t, buffer :: non_neg_integer) :: {:ok, new_buffer :: non_neg_integer}
def reset(name, buffer) do
GenServer.call(via(name), {:reset, buffer})
end
@doc """
`Odo.Bucket.reset/1` restarts the bucket timer from the next call to `Odo.Bucket.get_token/1`
"""
@spec reset(name :: String.t) :: :ok
def reset(name) do
GenServer.call(via(name), :reset)
end
@doc """
`Odo.Bucket.set_tick_start/2` updates the start of the current bucket refill tick to the figure provided. If the
remote server provides information about the current status of the rate limit you can use that to update the bucket
manually and better match the remote server.
"""
@spec set_tick_start(name :: String.t, start_time :: pos_integer) :: {:ok, new_tick_start :: pos_integer}
def set_tick_start(name, start_time) do
GenServer.call(via(name), {:set_tick_start, start_time})
end
@doc """
`Odo.Bucket.set_tick_end/2` updates the start of the current bucket refill tick based on the time it was due to end.
If the remote server lets you know when the next refill will happen you can provde this time and the current tick
start time will be recalculated.
"""
@spec set_tick_end(name :: String.t, tick_end :: pos_integer) :: {:ok, new_tick_start :: pos_integer}
def set_tick_end(name, tick_end) do
GenServer.call(via(name), {:set_tick_end, tick_end})
end
@doc """
`Odo.Bucket.stop_bucket/1` stops the named bucket process.
"""
@spec stop_bucket(name :: String.t) :: :ok | {:error, String.t}
def stop_bucket(name) do
case Registry.lookup(:odo_bucket_registry, name) do
[{pid, _}] -> Supervisor.terminate_child(Odo.BucketSupervisor, pid)
[] -> {:error, "No such bucket #{name}"}
end
end
def handle_call({:set_tick_end, tick_end}, _, %{tick_duration: tick_duration} = state) do
new_tick_start = tick_end - tick_duration
{:reply, {:ok, new_tick_start}, %{state | tick_started_at: new_tick_start}}
end
def handle_call({:set_tick_start, start_time}, _, state) do
{:reply, {:ok, start_time}, %{state | tick_started_at: start_time}}
end
def handle_call(:reset, _, state) do
{:reply, :ok, %{state | tick_started_at: :init, tokens: 0}}
end
def handle_call({:reset, buffer}, _, %{buffer: cur_buffer, tick_duration: cur_tick_duration} = state) do
{:reply, :ok, %{state | tick_started_at: :init, tokens: 0, buffer: buffer, tick_duration: cur_tick_duration - cur_buffer + buffer}}
end
def handle_call(:get_token, _, %{tick_started_at: :init} = state) do
now = :erlang.system_time(:millisecond)
update_token_bucket(now, %{state | tick_started_at: now})
end
def handle_call(:get_token, _, state) do
now = :erlang.system_time(:millisecond)
update_token_bucket(now, state)
end
@doc false
@spec update_token_bucket(update_time :: non_neg_integer, state :: rate_state) :: {:reply, {:go, non_neg_integer, pos_integer} | {:stop, pos_integer}, state :: rate_state}
def update_token_bucket(update_time,
%{
tokens: tokens,
bucket_size: bucket_size,
tick_refill_amount: tick_refill_amount,
tick_started_at: tick_started_at,
tick_duration: tick_duration
} = state) do
diff = update_time - tick_started_at
ticks = div(diff, tick_duration)
until = tick_duration - rem(diff, tick_duration)
current_tick_started_at = tick_started_at + ticks * tick_duration
cond do
ticks > 0 ->
new_tokens = calc_tokens(tokens, ticks, tick_refill_amount)
{:reply, {:go, bucket_size - new_tokens, until}, %{state | tokens: new_tokens, tick_started_at: current_tick_started_at}}
tokens + 1 > bucket_size -> {:reply, {:stop, until}, state}
true -> {:reply, {:go, bucket_size - (tokens + 1), until}, %{state | tokens: tokens + 1}}
end
end
@spec calc_tokens(current_tokens :: non_neg_integer, ticks :: pos_integer, refill_amount :: pos_integer) :: pos_integer
defp calc_tokens(current_tokens, ticks, refill_amount) do
tokens = current_tokens - (ticks * refill_amount) + 1
if tokens <= 0, do: 1, else: tokens
end
end
lib/odo.ex
defmodule Gyx.Agents.SARSA.Agent do
@moduledoc """
This agent implements SARSA, it takes into account the current
state, action, reward (s<sub>t</sub>, a<sub>t</sub>, r<sub>t</sub>)
and on policy estimates for the best next action a<sub>t+1</sub> and state s<sub>t+1</sub>.
<br/>The Q update is given by:
<br/>Q(s<sub>t</sub>, a<sub>t</sub>) ← Q(s<sub>t</sub>, a<sub>t</sub>) + α * (r<sub>t</sub> + γ * Q(s<sub>t+1</sub>, a<sub>t+1</sub>) - Q(s<sub>t</sub>, a<sub>t</sub>))
<br/>
The Q table process must be referenced on struct `Q` key, which must follow the `Gyx.Qstorage` behaviour
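## Example
A minimal sketch (`env_state` and the SARSA tuple elements are placeholders
for values produced by an environment):
```elixir
{:ok, agent} = Gyx.Agents.SARSA.Agent.start_link([])
action = Gyx.Agents.SARSA.Agent.act_epsilon_greedy(agent, env_state)
Gyx.Agents.SARSA.Agent.td_learn(agent, {s, a, reward, next_s, next_a})
```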
"""
defstruct Q: nil, learning_rate: nil, gamma: nil, epsilon: nil, epsilon_min: nil
@type t :: %__MODULE__{
Q: any(),
learning_rate: float(),
gamma: float(),
epsilon: float(),
epsilon_min: float()
}
alias Gyx.Qstorage.QGenServer
alias Gyx.Core.Spaces
def init(process_q) do
{:ok, qgenserver} =
case is_pid(process_q) do
true -> {:ok, process_q}
false -> QGenServer.start_link([], [])
end
IO.puts(inspect(qgenserver))
{:ok,
%__MODULE__{
Q: qgenserver,
learning_rate: 0.8,
gamma: 0.9,
epsilon: 0.8,
epsilon_min: 0.1
}}
end
def start_link(opts) do
GenServer.start_link(__MODULE__, [], opts)
end
def start_link(process_q, opts) when is_pid(process_q) do
GenServer.start_link(__MODULE__, process_q, opts)
end
def act_greedy(agent, environment_state) do
GenServer.call(agent, {:act_greedy, environment_state})
end
def act_epsilon_greedy(agent, environment_state) do
GenServer.call(agent, {:act_epsilon_greedy, environment_state})
end
def td_learn(agent, sarsa) do
GenServer.call(agent, {:td_learn, sarsa})
end
def handle_call(
{:td_learn, {s, a, r, ss, aa}},
_from,
state = %{Q: qtable, learning_rate: learning_rate, gamma: gamma}
) do
predict = QGenServer.q_get(qtable, s, a)
target = r + gamma * QGenServer.q_get(qtable, ss, aa)
expected_return = predict + learning_rate * (target - predict)
QGenServer.q_set(qtable, s, a, expected_return)
{:reply, expected_return, state}
end
def handle_call(
{:act_epsilon_greedy, environment_state},
_from,
state = %{Q: qtable, epsilon: epsilon}
) do
{:ok, random_action} = Spaces.sample(environment_state.action_space)
max_action =
case QGenServer.get_max_action(qtable, environment_state.current_state) do
{:ok, action} -> action
{:error, _} -> random_action
end
final_action =
case :rand.uniform() < 1 - epsilon do
true -> max_action
false -> random_action
end
{:reply, final_action, state}
end
def handle_call({:act_greedy, environment_state}, _from, state = %{Q: qtable}) do
{:reply, qtable.get_max_action(environment_state.current_state), state}
end
end
lib/agents/sarsa/sarsa_agent.ex
defmodule DecemberThree do
@moduledoc """
Third Advent of Code task.
"""
@doc """
The solution for part one.
"""
def part_one(file) do
{_, contents} = File.read(file)
[first, second] =
contents
|> String.trim()
|> String.split("\n")
|> Enum.map(&draw_wire/1)
first
|> Map.delete("0,0")
|> Enum.reduce(%{}, fn x, acc ->
{k, v} = x
case Map.has_key?(second, k) do
true ->
%{k => v}
|> Map.merge(acc)
false ->
acc
end
end)
|> Enum.map(fn {k, _v} -> manhattan_distance(k) end)
|> Enum.min()
end
@doc """
The solution for part two.
"""
def part_two(file) do
{_, contents} = File.read(file)
[first, second] =
contents
|> String.trim()
|> String.split("\n")
|> Enum.map(&draw_wire/1)
first
|> Map.delete("0,0")
|> Enum.reduce(%{}, fn x, acc ->
{k, v} = x
case Map.has_key?(second, k) do
true ->
%{k => v}
|> Map.merge(acc)
false ->
acc
end
end)
|> Enum.map(fn {k, _v} ->
Map.get(first, k) + Map.get(second, k)
end)
|> Enum.min()
end
@doc """
Calculate manhattan distance, always from 0,0 because that's where I start.
abs(x0-x1) + abs(y0-y1)
"""
def manhattan_distance(xy) do
[x, y] =
xy
|> String.split(",")
|> Enum.map(fn x -> x |> String.to_integer() end)
Kernel.abs(0 - x) + Kernel.abs(0 - y)
end
@doc """
Convert each step to a map of direction and length, then pass it to
to_coordinates.
"""
def draw_wire(steps) do
steps
|> String.split(",")
|> Enum.map(fn a ->
convert_steps(a)
end)
|> to_coordinates
end
@doc """
Convert each step instruct to a map of direction and length.
"""
def convert_steps(step) do
direction =
step
|> String.at(0)
length =
step
|> String.slice(1, String.length(step))
|> String.to_integer()
%{"direction" => direction, "length" => length}
end
@doc """
Fold each instruction and merge all coordinates and the distance to each
coordinate in a map.
## Examples
iex> [
...> %{"direction" => "U", "length" => 3},
...> %{"direction" => "R","length" => 5}
...> ] |> DecemberThree.to_coordinates
%{
"0,0" => 0,
"0,1" => 1,
"0,2" => 2,
"0,3" => 3,
"1,3" => 4,
"2,3" => 5,
"3,3" => 6,
"4,3" => 7,
"5,3" => 8
}
"""
def to_coordinates(instructions) do
instructions
|> Enum.reduce(%{}, fn instruction, acc ->
# IO.puts "Iteration #{Map.get(acc, "steps")}: x,y: #{Map.get(acc, "x", 0)},#{Map.get(acc, "y", 0)}"
update(
%{},
instruction["direction"],
Map.get(acc, "x", 0),
Map.get(acc, "y", 0),
Map.get(acc, "steps", 0),
Map.get(acc, "steps", 0) + Map.get(instruction, "length")
)
|> Map.merge(acc, fn _k, v1, _v2 ->
# Always keep v1 values
v1
end)
end)
|> Map.delete("x")
|> Map.delete("y")
|> Map.delete("steps")
end
@doc """
Calculate the distance to each coordinate. Start by sending the current x and
y coordinates combined with the current number of steps. The function will run
until the number of steps reaches stop.
## Examples
iex> DecemberThree.update(%{}, "D", 5, 8, 3, 6)
%{
"5,5" => 6,
"5,6" => 5,
"5,7" => 4,
"5,8" => 3,
"steps" => 6,
"x" => 5,
"y" => 5
}
"""
def update(co, direction, x, y, current, stop) do
case current do
x when x > stop ->
co
_ ->
# Store the current position and step.
co =
co
|> Map.put("x", x)
|> Map.put("y", y)
|> Map.put("steps", current)
case direction do
"U" ->
co
|> Map.put("#{x},#{y}", current)
|> update(direction, x, y + 1, current + 1, stop)
"D" ->
co
|> Map.put("#{x},#{y}", current)
|> update(direction, x, y - 1, current + 1, stop)
"L" ->
co
|> Map.put("#{x},#{y}", current)
|> update(direction, x - 1, y, current + 1, stop)
"R" ->
co
|> Map.put("#{x},#{y}", current)
|> update(direction, x + 1, y, current + 1, stop)
end
end
end
end
03/elixir/lib/december_three.ex
defmodule Day14 do
def from_file(path) do
File.stream!(path)
|> Enum.to_list
|> Enum.map(&parse_row/1)
end
def parse_material([amount, type]), do: {String.to_integer(amount), String.to_atom(type)}
def parse_row(row) do
[from, to] = Regex.run(~r{(.*) => (.*)}, row, capture: :all_but_first)
{
from |> String.split(", ") |> Enum.map(&String.split/1) |> Enum.map(&parse_material/1),
to |> String.split |> parse_material
}
end
def reaction(reactions, to_material) do
Enum.find(reactions, fn {_, {_, material}} -> material == to_material end)
end
def simplify(reactions, {amount, material}) do
if amount <= 0 do
[]
else
{from, {to_amount, _}} = reaction(reactions, material)
multiplier =
if amount <= to_amount do
1
else
m = div(amount, to_amount)
if m * to_amount >= amount, do: m, else: m + 1
end
produced = to_amount * multiplier
rest = produced - amount
if rest > 0 do
Enum.map(from, fn {a, m} -> {a * multiplier, m} end) ++ [{-rest, material}]
else
Enum.map(from, fn {a, m} -> {a * multiplier, m} end)
end
end
end
def simplify(reactions, simplified) when is_list(simplified) do
added = add(simplified)
case added do
[] -> 0
[{a, :ORE} | rest] -> a + simplify(reactions, rest)
[m | rest] -> simplify(reactions, simplify(reactions, m) ++ rest |> List.flatten)
end
end
def add(list) do
list
|> Enum.reduce(%{}, fn {a, m}, acc -> acc |> Map.put(m, a + Map.get(acc, m, 0)) end)
|> Map.to_list
|> Enum.map(fn {m, a} -> {a, m} end)
|> Enum.sort_by(fn {a, _} -> a end, &>=/2)
end
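# Binary-search for the largest amount of fuel producible from `ore`: start
# from the lower bound div(ore, ore_per_fuel), use twice that as an upper
# bound, bisect while comparing the ore each candidate consumes, then scan the
# final narrow range for the last amount whose cost stays under `ore`.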
def max_fuel(reactions, ore) do
ore_per_fuel = simplify(reactions, [{1, :FUEL}])
min = div(ore, ore_per_fuel)
max = min * 2
{min_s, max_s} =
Stream.iterate(
{min, max},
fn {min, max} ->
produced = min + div(max - min, 2)
used = simplify(reactions, [{produced, :FUEL}])
if used > ore do
{min, produced}
else
{produced, max}
end
end
)
|> Enum.take_while(fn {min, max} -> max > (min + 1) end)
|> List.last
Enum.map(min_s..max_s, fn produced -> {produced, simplify(reactions, [{produced, :FUEL}])} end)
|> Enum.filter(fn {_, used} -> used < ore end)
|> List.last
|> elem(0)
end
def solution do
IO.puts("#{from_file("day14_input.txt") |> simplify([{1, :FUEL}])}")
IO.puts("#{from_file("day14_input.txt") |> max_fuel(1000000000000)}")
end
end
lib/day14.ex
defmodule LastfmArchive.Transform do
@moduledoc """
This module provides functions for reading and transforming downloaded Lastfm data.
"""
alias LastfmArchive.Utils
@default_delimiter "\t"
@tsv_headers "id\tname\tscrobble_date\tscrobble_date_iso\tmbid\turl\tartist\tartist_mbid\tartist_url\talbum\talbum_mbid"
@doc """
Transform a downloaded raw JSON page into a list of tab-delimited track data.
### Example
```
# transform a page of scrobbles from the file archive
LastfmArchive.Transform.transform("a_lastfm_user", "2007/200_1.gz")
```
A row of tab-delimited track currently contains (if any):
- `id` auto-generated by the system to uniquely identify a scrobble
- `name` the track name
- `scrobble_date` Unix timestamp of the scrobble date
- `scrobble_date_iso` scrobble date in ISO 8601 datetime format
- `mbid` MusicBrainz identifier for the track
- `url` web address of the track on Last.fm
- `artist`
- `artist_mbid` MusicBrainz identifier for the artist
- `artist_url` web address of the artist on Last.fm
- `album`
- `album_mbid` MusicBrainz identifier for the album
"""
@spec transform(binary, binary, atom) :: list(binary) | {:error, :file.posix()}
def transform(user, filename, mode \\ :tsv)
def transform(user, filename, :tsv) do
case Utils.read(user, filename) do
{:ok, resp} ->
tracks = resp |> Jason.decode!()
index = initial_index(tracks["recenttracks"]["@attr"])
[track | rest] = tracks["recenttracks"]["track"]
if track["@attr"]["nowplaying"],
do: _transform(user, rest, index, [@tsv_headers]),
else: _transform(user, tracks, index, [@tsv_headers])
error ->
error
end
end
def tsv_headers(), do: @tsv_headers
defp _transform(_user, [], _index, acc), do: acc
defp _transform(user, [track | tracks], index, acc) do
next_index = index + 1
_transform(user, tracks, next_index, acc ++ [_transform(user, track, index)])
end
# id,name,scrobble_date,date_iso,mbid,url,artist,artist_mbid,artist_url,album,album_mbid
defp _transform(user, track, index) do
id = "#{user}_#{track["date"]["uts"]}_#{index |> to_string}"
uts = if is_binary(track["date"]["uts"]), do: String.to_integer(track["date"]["uts"]), else: track["date"]["uts"]
date_s = uts |> DateTime.from_unix!() |> DateTime.to_iso8601()
track_info = [id, track["name"] |> String.trim(), track["date"]["uts"], date_s, track["mbid"], track["url"]]
artist_info = [track["artist"]["name"], track["artist"]["mbid"], track["artist"]["url"]]
album_info = [track["album"]["#text"], track["album"]["mbid"]]
Enum.join(track_info ++ artist_info ++ album_info, @default_delimiter)
end
defp initial_index(%{"page" => page, "perPage" => per_page}) when is_binary(page) and is_binary(per_page) do
(String.to_integer(page) - 1) * String.to_integer(per_page) + 1
end
defp initial_index(info), do: (info["page"] - 1) * info["perPage"] + 1
end
|
lib/transform.ex
| 0.784484
| 0.76145
|
transform.ex
|
starcoder
|
defmodule PhoenixDatatables.Query do
@moduledoc """
Functions for updating an `Ecto.Query` based on Datatables request parameters.
"""
import Ecto.Query
use PhoenixDatatables.Query.Macros
alias Ecto.Query.JoinExpr
alias PhoenixDatatables.Request.Params
alias PhoenixDatatables.Request.Column
alias PhoenixDatatables.Request.Search
alias PhoenixDatatables.Query.Attribute
alias PhoenixDatatables.QueryException
@doc """
Add order_by clauses to the provided queryable based on the "order" params provided
in the Datatables request.
For some queries, `:columns` needs to be passed - see documentation for `PhoenixDatatables.execute`
for details.
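A minimal sketch (assuming an `Item` schema, a `Repo`, and a parsed `%Params{}` request):
    Item
    |> PhoenixDatatables.Query.sort(params)
    |> Repo.all()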
"""
def sort(queryable, params, sortable \\ nil)
def sort(queryable, %Params{order: orders} = params, sortable) when is_list(sortable) do
sorts =
for order <- orders do
with dir when is_atom(dir) <- cast_dir(order.dir),
%Column{} = column <- params.columns[order.column],
true <- column.orderable,
{column, join_index} when is_number(join_index)
<- cast_column(column.data, sortable) do
{dir, column, join_index}
end
end
do_sorts(queryable, sorts)
end
def sort(queryable, %Params{order: orders} = params, _sortable) do
schema = schema(queryable)
sorts =
for order <- orders do
with dir when is_atom(dir) <- cast_dir(order.dir),
%Column{} = column <- params.columns[order.column],
true <- column.orderable,
%Attribute{} = attribute <- Attribute.extract(column.data, schema),
join_index when is_number(join_index)
<- join_order(queryable, attribute.parent) do
{dir, attribute.name, join_index}
end
end
do_sorts(queryable, sorts)
end
defp do_sorts(queryable, sorts) do
Enum.reduce(sorts, queryable, fn {dir, column, join_index}, queryable ->
order_relation(queryable, join_index, dir, column)
end)
end
@doc false
def join_order(_, nil), do: 0
def join_order(%Ecto.Query{} = queryable, parent) do
case Enum.find_index(queryable.joins, &(join_relation(&1) == parent)) do
nil -> nil
number when is_number(number) -> number + 1
end
end
def join_order(queryable, parent) do
QueryException.raise(:join_order, """
An attempt was made to interrogate the join structure of #{inspect queryable}
This is not an %Ecto.Query{}. The most likely cause for this error is using
dot-notation (e.g. 'category.name') in the column name defined in the datatables
client config but a simple Schema (no join) is used as the underlying queryable.
Please check the client config for the fields belonging to #{inspect parent}. If
the required field does belong to a different parent schema, that schema needs to
be joined in the Ecto query.
""")
end
defp join_relation(%JoinExpr{assoc: {_, relation}}), do: relation
defp join_relation(_) do
QueryException.raise(:join_relation, """
PhoenixDatatables queryables with non-assoc joins must be accompanied by :columns
options to define sortable column names and join orders.
See docs for PhoenixDatatables.execute for more information.
""")
end
defp schema(%Ecto.Query{} = query), do: query.from |> check_from() |> elem(1)
defp schema(schema) when is_atom(schema), do: schema
defp check_from(%Ecto.SubQuery{}) do
QueryException.raise(:schema, """
PhoenixDatatables queryables containing subqueries must be accompanied by :columns
options to define sortable column names and join orders.
See docs for PhoenixDatatables.execute for more information.
""")
end
defp check_from(from), do: from
defp cast_column(column_name, sortable)
when is_list(sortable)
and is_tuple(hd(sortable))
and is_atom(elem(hd(sortable), 0)) do #Keyword
[parent | child] = String.split(column_name, ".")
if parent in Enum.map(Keyword.keys(sortable), &Atom.to_string/1) do
member = Keyword.fetch!(sortable, String.to_atom(parent))
case member do
children when is_list(children) ->
with [child] <- child,
[child] <- Enum.filter(Keyword.keys(children),
&(Atom.to_string(&1) == child)),
{:ok, order} when is_number(order)
<- Keyword.fetch(children, child) do
{child, order}
else
_ -> {:error, "#{column_name} is not a sortable column."}
end
order when is_number(order) -> {String.to_atom(parent), order}
end
else
{:error, "#{column_name} is not a sortable column."}
end
end
defp cast_column(column_name, sortable) do
if column_name in Enum.map(sortable, &Atom.to_string/1) do
{String.to_atom(column_name), 0}
end
end
defp cast_dir("asc"), do: :asc
defp cast_dir("desc"), do: :desc
defp cast_dir(wrong), do: {:error, "#{wrong} is not a valid sort order."}
@doc """
Add offset and limit clauses to the provided queryable based on the "length" and
"start" parameters passed in the Datatables request.
"""
def paginate(queryable, params) do
length = convert_to_number_if_string(params.length)
start = convert_to_number_if_string(params.start)
queryable
|> limit(^length)
|> offset(^start)
end
defp convert_to_number_if_string(num) do
case is_binary(num) do
true ->
{num, _} = Integer.parse(num)
num
false -> num
end
end
@doc """
Add AND where clause to the provided queryable based on the "search" parameter passed
in the Datatables request.
For some queries, `:columns` needs to be passed - see documentation for `PhoenixDatatables.execute`
for details.
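A minimal sketch (assuming an `Item` schema joined to `Category`; the `:columns`
value shown here is hypothetical):
    Item
    |> PhoenixDatatables.Query.search(params, columns: [name: 0, category: [name: 1]])
    |> Repo.all()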
"""
def search(queryable, params, options \\ []) do
columns = options[:columns]
do_search(queryable, params, columns)
end
defp do_search(queryable, %Params{search: %Search{value: ""}}, _), do: queryable
defp do_search(queryable, %Params{} = params, searchable) when is_list(searchable) do
search_term = "%#{params.search.value}%"
dynamic = dynamic([], false)
dynamic = Enum.reduce params.columns, dynamic, fn({_, v}, acc_dynamic) ->
with {column, join_index} when is_number(join_index)
<- v.data |> cast_column(searchable),
true <- v.searchable do
acc_dynamic
|> search_relation(join_index,
column,
search_term)
else
_ -> acc_dynamic
end
end
where(queryable, [], ^dynamic)
end
defp do_search(queryable, %Params{search: search, columns: columns}, _searchable) do
search_term = "%#{search.value}%"
schema = schema(queryable)
dynamic = dynamic([], false)
dynamic =
Enum.reduce columns, dynamic, fn({_, v}, acc_dynamic) ->
with %Attribute{} = attribute <- v.data |> Attribute.extract(schema),
true <- v.searchable do
acc_dynamic
|> search_relation(join_order(queryable, attribute.parent),
attribute.name,
search_term)
else
_ -> acc_dynamic
end
end
where(queryable, [], ^dynamic)
end
# credo:disable-for-lines:2
# credit to scrivener library:
# https://github.com/drewolson/scrivener_ecto/blob/master/lib/scrivener/paginater/ecto/query.ex
# Copyright (c) 2016 <NAME>
@doc """
Calculate the number of records that will be retrieved with the provided queryable.
"""
def total_entries(queryable, repo) do
total_entries =
queryable
|> exclude(:preload)
|> exclude(:select)
|> exclude(:order_by)
|> exclude(:limit)
|> exclude(:offset)
|> subquery
|> select(count("*"))
|> repo.one
total_entries || 0
end
end
defmodule PhoenixDatatables.QueryException do
defexception [:message, :operation]
@dialyzer {:no_return, raise: 1} #yes we know it raises
def raise(operation, message \\ "") do
Kernel.raise __MODULE__, [operation: operation, message: message]
end
end
|
lib/phoenix_datatables/query.ex
| 0.757436
| 0.473353
|
query.ex
|
starcoder
|
defmodule EVM.Logger do
require Logger
alias EVM.{MachineState, Operation}
@doc """
Helper function to log the stack given the machine state
"""
@spec log_stack(MachineState.t()) :: MachineState.t()
def log_stack(machine_state) do
stack =
machine_state.stack
|> Enum.map(&stack_value_string/1)
Logger.debug(fn -> "Stack: #{inspect(stack)}" end)
machine_state
end
@doc """
This function logs state in the same format as Parity's `evm-debug` function. This makes comparing implementations and debugging easier.
`cargo test --features "json-tests evm/evm-debug-tests" --release -- BlockchainTests_GeneralStateTest_stSystemOperationsTest --nocapture`
"""
@spec log_state(MachineState.t(), EVM.Operation.Metadata.t()) :: MachineState.t()
def log_state(machine_state, operation) do
log_opcode_and_gas_left(operation, machine_state)
log_inputs(operation, machine_state)
machine_state
end
defp log_opcode_and_gas_left(operation, machine_state) do
Logger.debug(fn ->
"[#{current_step(machine_state)}] pc(#{machine_state.program_counter}) [#{
operation_string(operation)
}(0x#{opcode_string(operation)}) Gas Left: #{machine_state.gas})"
end)
end
defp log_inputs(operation, machine_state) do
inputs = Operation.inputs(operation, machine_state)
if !Enum.empty?(inputs) do
inputs
|> Enum.reverse()
|> Stream.with_index()
|> Enum.each(fn {value, i} ->
value_string = stack_value_string(value)
Logger.debug(fn -> " | #{i}: #{value_string}" end)
end)
end
end
defp current_step(machine_state) do
machine_state.step + 1
end
defp stack_value_string(value) do
string_value =
if value == 0 do
"0"
else
value
|> :binary.encode_unsigned()
|> Base.encode16(case: :lower)
|> String.trim_leading("0")
end
"0x" <> string_value
end
defp operation_string(operation) do
operation.sym
|> Atom.to_string()
|> String.upcase()
|> String.pad_leading(8)
end
defp opcode_string(operation) do
operation.id
|> :binary.encode_unsigned()
|> Base.encode16(case: :lower)
|> String.trim_leading("0")
|> String.pad_trailing(2)
end
end
|
apps/evm/lib/evm/logger.ex
| 0.716219
| 0.513607
|
logger.ex
|
starcoder
|
defmodule Day23.Router do
@moduledoc """
A packet router for a network of Intcode computers.
A router keeps track of a collection of `Day23.PacketQueue`s which are each
assigned a unique address within the router. When the computers in the network
produce output, their packet queue sends that output as a packet to the router,
which then ensures it gets added to the packet queue it is addressed to.
Packet queues also report their idle status to the router. When all queues are
idle, the router sends a message to its NAT to indicate this. The NAT may
respond by sending a message that will kickstart activity again.
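A typical wiring, sketched under the assumption that the NAT and packet queue
processes are started elsewhere:
    router = Day23.Router.async()
    Day23.Router.set_nat(router.pid, nat)
    Day23.Router.add_queue(router.pid, queue)
    Day23.Router.route(router.pid, {0, 10, 20})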
"""
@typedoc """
A router PID.
"""
@type t :: pid
@typedoc """
An address to a particular computer in the network.
"""
@type addr :: number
@typedoc """
An addressed packet containing an `{x, y}` point value.
"""
@type packet :: {addr, number, number}
defstruct queues: %{}, next_addr: 0, idle: MapSet.new(), nat: nil
@doc """
Starts a new router as an async `Task`.
"""
@spec async :: Task.t()
def async do
Task.async(__MODULE__, :run, [])
end
@doc """
Runs the router's message processing loop forever.
"""
@spec run :: none
def run do
loop(%Day23.Router{})
end
@doc """
Set the `Day23.NAT` for a particular router.
"""
@spec set_nat(t, Day23.NAT.t()) :: any
def set_nat(pid, nat) do
send(pid, {:set_nat, nat})
end
@doc """
Add a new packet queue to the router.
The router will choose a unique address for the queue and use
`Day23.PacketQueue.assign_addr/3` to tell the queue its address.
"""
@spec add_queue(t, Day23.PacketQueue.t()) :: any
def add_queue(pid, queue) do
send(pid, {:add_queue, queue})
end
@doc """
Asks the router to route a new packet.
The router will find the queue that matches the address in the packet
and add the packet to that queue.
"""
@spec route(t, packet) :: any
def route(pid, packet) do
send(pid, {:route_packet, packet})
end
@doc """
Report that the queue at an address is idle.
"""
@spec report_idle(t, addr) :: any
def report_idle(pid, addr) do
send(pid, {:report_idle, addr})
end
@doc """
Report that the queue at an address is active (not idle).
"""
@spec report_active(t, addr) :: any
def report_active(pid, addr) do
send(pid, {:report_active, addr})
end
defp loop(router) do
%Day23.Router{queues: queues, next_addr: next_addr} = router
receive do
{:set_nat, nat} ->
loop(%{router | nat: nat})
{:add_queue, queue} ->
new_queues = Map.put_new(queues, next_addr, queue)
Day23.PacketQueue.assign_addr(queue, next_addr, self())
loop(%{router | next_addr: next_addr + 1, queues: new_queues})
{:route_packet, {addr, x, y}} ->
case addr do
255 -> send(router.nat, {:packet, {x, y}})
_ -> Day23.PacketQueue.enqueue(queues[addr], {x, y})
end
loop(router)
{:report_idle, addr} ->
idle = MapSet.put(router.idle, addr)
if MapSet.size(idle) == map_size(queues) do
send(router.nat, :all_idle)
end
loop(%{router | idle: idle})
{:report_active, addr} ->
loop(%{router | idle: MapSet.delete(router.idle, addr)})
end
end
end
|
aoc2019_elixir/apps/day23/lib/router.ex
| 0.836488
| 0.65896
|
router.ex
|
starcoder
|
defmodule Entrance.User do
@moduledoc """
This module provides helper functions for managing your app's users.
"""
alias Entrance.Auth.Secret
import Entrance.Config, only: [config: 1]
@doc """
Executes this behind the scenes:
```
alias Entrance.Auth.Secret
# ...
%YourUser{}
|> YourUser.create_changeset(user_params)
|> Secret.put_session_secret()
|> YourRepo.insert()
```
Returns `{:ok, user}` or `{:error, changeset}`
Requires `user_module` and `repo` to be configured via
`Mix.Config`.
### Examples
```
{:ok, user} = Entrance.User.create(%{"email => "<EMAIL>", "password" => "<PASSWORD>"})
```
If you want to use `create/2` with other user schema, you can set the module directly.
```
{:ok, customer} = Entrance.User.create(Customer, %{"email" => "<EMAIL>", "password" => "<PASSWORD>"})
```
"""
def create(user_module \\ nil, user_params) do
user_module = user_module || config(:user_module)
struct(user_module)
|> user_module.create_changeset(user_params)
|> Secret.put_session_secret()
|> config(:repo).insert()
end
@doc """
Similar to `Entrance.User.create/2`, but returns the user struct and raises an error if `user_params` is invalid.
Executes this behind the scenes:
```
alias Entrance.Auth.Secret
# ...
%YourUser{}
|> YourUser.create_changeset(user_params)
|> Secret.put_session_secret()
|> YourRepo.insert!()
```
Requires `user_module` and `repo` to be configured via
`Mix.Config`.
### Examples
```
user = Entrance.User.create!(%{"email" => "<EMAIL>", "password" => "<PASSWORD>"})
```
If you want to use `create!/2` with other user schema, you can set the module directly.
```
customer = Entrance.User.create!(Customer, %{"email" => "<EMAIL>", "password" => "<PASSWORD>"})
```
"""
def create!(user_module \\ nil, user_params) do
user_module = user_module || config(:user_module)
struct(user_module)
|> user_module.create_changeset(user_params)
|> Secret.put_session_secret()
|> config(:repo).insert!()
end
@doc """
Executes this behind the scenes:
```
YourUser.create_changeset(%YourUser{}, %{})
```
Returns an `Ecto.Changeset` struct
Requires `user_module` to be configured via `Mix.Config`.
### Example
```
# YourAppWeb.UserController ...
def new(conn, _params) do
conn |> render("new.html", changeset: Entrance.User.create_changeset)
end
```
"""
def create_changeset do
user_module = config(:user_module)
user_module.create_changeset(struct(user_module), %{})
end
@doc """
Similar to `Entrance.User.create_changeset/0`, but does not need the `user_module` to be configured via `Mix.Config`
### Example
```
# YourAppWeb.UserController ...
def new(conn, _params) do
conn |> render("new.html", changeset: Entrance.User.create_changeset(Customer))
end
```
"""
def create_changeset(user_module) do
user_module.create_changeset(struct(user_module), %{})
end
end
|
lib/user.ex
| 0.873626
| 0.609553
|
user.ex
|
starcoder
|
defmodule Opencensus.Honeycomb.Sender do
@event_start [:opencensus, :honeycomb, :start]
@event_stop_failure [:opencensus, :honeycomb, :stop, :failure]
@event_stop_success [:opencensus, :honeycomb, :stop, :success]
@moduledoc """
Sends events to Honeycomb.
## Telemetry
`send_batch/1` calls `:telemetry.execute/3` with an `event_name` of:
* `#{inspect(@event_start)}` before sending
* `#{inspect(@event_stop_success)}` after sending successfully
* `#{inspect(@event_stop_failure)}` after sending unsuccessfully
The measurements map contains `count` and, in the trailing events, `ms`.
The metadata map contains:
* `events` on all three events
* `exception` on `#{inspect(@event_stop_failure)}`
* `payload` on `#{inspect(@event_stop_success)}`
To watch from your `console` or `remote_console` while troubleshooting:
```elixir
alias Opencensus.Honeycomb.Sender
handle_event = fn n, measure, meta, _ -> IO.inspect({n, measure, meta}) end
:telemetry.attach_many("test", Sender.telemetry_events(), handle_event, nil)
```
"""
require Logger
alias Opencensus.Honeycomb.Config
alias Opencensus.Honeycomb.Event
@doc false
def telemetry_events, do: [@event_stop_failure, @event_start, @event_stop_success]
@doc """
Send a batch of Honeycomb events to the Honeycomb batch API.
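A minimal sketch, assuming `events` is a list of `Opencensus.Honeycomb.Event`
structs built elsewhere:
```elixir
{:ok, count} = Opencensus.Honeycomb.Sender.send_batch(events)
```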
"""
@spec send_batch(list(Event.t())) :: {:ok, integer()} | {:error, Exception.t()}
def send_batch(events) when is_list(events) do
count = length(events)
begin = System.monotonic_time(:microsecond)
:telemetry.execute(@event_start, %{count: count}, %{events: events})
try do
config = Config.effective()
payload = Jason.encode!(events)
url = "#{config.api_endpoint}/1/batch/#{config.dataset}"
headers = [
{"X-Honeycomb-Team", config.write_key},
{"Content-Type", "application/json"},
{"User-Agent", "opencensus_honeycomb/0.0.0"}
]
if has_set_write_key?(config), do: send_it(url, headers, payload)
:telemetry.execute(
@event_stop_success,
%{count: count, ms: ms_since(begin)},
%{events: events, payload: payload}
)
{:ok, count}
rescue
e ->
:telemetry.execute(
@event_stop_failure,
%{count: count, ms: ms_since(begin)},
%{events: events, exception: e}
)
{:error, e}
end
end
defp send_it(url, headers, payload) do
with {:ok, status, _headers, client_ref} <-
:hackney.request(:post, url, headers, payload, []),
{:ok} <- check_status(status, client_ref),
{:ok, body} <- :hackney.body(client_ref),
{:ok, replies} <- Jason.decode(body),
{:ok} <- check_replies(replies) do
nil
end
end
defp ms_since(begin), do: (System.monotonic_time(:microsecond) - begin) / 1000.0
defp has_set_write_key?(config) do
case config.write_key do
nil -> false
"" -> false
_ -> true
end
end
defp check_status(status, client_ref) when is_integer(status) do
if status == 200 do
{:ok}
else
:hackney.close(client_ref)
{:error, :bad_status, status}
end
end
defp check_replies(replies) when is_list(replies) do
case replies |> Enum.filter(&lacks_status_202?/1) |> length() do
0 -> {:ok}
_ -> {:error, :unexpected_replies}
end
end
defp lacks_status_202?(reply) when is_map(reply), do: reply["status"] != 202
defp lacks_status_202?(_reply), do: false
end
|
lib/opencensus/honeycomb/sender.ex
| 0.692538
| 0.780328
|
sender.ex
|
starcoder
|
defmodule Cloudinary.Transformation.Width do
@moduledoc false
defguardp is_width(width) when is_number(width) and width >= 0
defguardp is_rounding_step(rounding_step) when is_number(rounding_step) and rounding_step > 0
defguardp is_bytes_step(bytes_step)
when is_integer(bytes_step) and bytes_step > 0 and rem(bytes_step, 1000) == 0
defguardp is_max_images(max_images) when max_images in 3..200
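# Illustrative expected outputs (a sketch, not verified doctests):
#   to_url_string(:auto)                          #=> "auto"
#   to_url_string({:auto, %{rounding_step: 50}})  #=> "auto:50"
#   to_url_string({:auto, %{breakpoints: true}})  #=> "auto:breakpoints"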
@spec to_url_string(Cloudinary.Transformation.width()) :: String.t()
def to_url_string(width) when is_width(width) or width == :auto, do: "#{width}"
def to_url_string({:auto, %{rounding_step: _, breakpoints: _}}) do
raise ArgumentError, ":rounding_step and :breakpoints options cannot be put at same time"
end
def to_url_string({:auto, %{width: _, breakpoints: _}}) do
raise ArgumentError, ":width and :breakpoints options cannot be put at same time"
end
def to_url_string({:auto, %{rounding_step: rounding_step, width: width}})
when is_rounding_step(rounding_step) and is_width(width) do
"auto:#{rounding_step}:#{width}"
end
def to_url_string({:auto, %{rounding_step: rounding_step}})
when is_rounding_step(rounding_step) do
"auto:#{rounding_step}"
end
def to_url_string({:auto, %{width: width}}) when is_width(width), do: "auto:100:#{width}"
def to_url_string({:auto, %{breakpoints: true}}), do: "auto:breakpoints"
def to_url_string({:auto, %{breakpoints: breakpoints}}) when is_map(breakpoints) do
"auto:#{breakpoints_to_url_string(breakpoints)}"
end
defp breakpoints_to_url_string(%{min_width: mn, max_width: mx, bytes_step: stp, max_images: i})
when is_width(mx) and mn <= mx and is_bytes_step(stp) and is_max_images(i) do
"breakpoints_#{mn}_#{mx}_#{div(stp, 1000)}_#{i}"
end
defp breakpoints_to_url_string(breakpoints) do
breakpoints
|> Map.put_new(:max_images, 20)
|> Map.put_new(:bytes_step, 20000)
|> Map.put_new(:max_width, 1000)
|> Map.put_new(:min_width, 50)
|> breakpoints_to_url_string()
end
end
|
lib/cloudinary/transformation/width.ex
| 0.731155
| 0.424889
|
width.ex
|
starcoder
|
defmodule EspEx.Projection do
@moduledoc """
Project events upon an entity to convert it to an up-to-date value.
## use EspEx.Projection
Will provide a default `apply` implementation that will catch any event and
just return the entity as is. It will also log a warning, reporting that the
event is unhandled.
The developer is expected to `use` this module and provide their own
implementations of `apply` (using guard clauses).
### Examples
```
defmodule UserProjection do
use EspEx.Projection
def apply(%User{} = user, %EmailChanged{email: email}) do
Map.put(user, :email, email)
end
end
```
In this case, when called with `apply(%User{}, %NotHandledEvent{})` the
developer will simply get back `%User{}`, however if called with:
```
apply(%User{email: "<EMAIL>"}, %EmailChanged{email: "<EMAIL>"}))
```
The returned value is `%User{email: "<EMAIL>"}`
"""
alias EspEx.Entity
alias EspEx.Logger
@callback apply(entity :: Entity.t(), event :: struct) :: Entity.t()
@callback apply_all(
entity :: Entity.t(),
events :: list(struct)
) :: Entity.t()
defmacro __using__(_) do
quote location: :keep do
@behaviour unquote(__MODULE__)
@before_compile EspEx.Projection.Unhandled
@impl unquote(__MODULE__)
def apply_all(entity, events) when is_list(events) do
unquote(__MODULE__).apply_all(entity, __MODULE__, events)
end
end
end
@doc """
Takes an entity, a module that can project events onto that entity, and a
list of events; applies all of them and returns the newly updated entity.
It also logs (debug) info whenever an event is applied
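### Example
A sketch reusing the example structs from the module documentation above:
```
EspEx.Projection.apply_all(%User{}, UserProjection, [
  %EmailChanged{email: "<EMAIL>"}
])
```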
"""
@spec apply_all(
current_entity :: Entity.t(),
projection :: module,
events :: Enumerable.t()
) :: Entity.t()
def apply_all(current_entity, projection, events \\ []) do
Enum.reduce(events, current_entity, fn event, entity ->
Logger.debug(fn ->
"Applying #{event.__struct__} to #{entity.__struct__}"
end)
projection.apply(entity, event)
end)
end
end
|
lib/esp_ex/projection.ex
| 0.820326
| 0.768473
|
projection.ex
|
starcoder
|
defmodule Geo.Turf.Helpers do
@moduledoc """
A collection of helper utilities.
Usually users will not have to refer to this directly but it is here
if the need arises.
"""
@min_bounds {+1.0e+10, +1.0e+10, -1.0e+10, -1.0e+10}
@doc """
Create a bounding box for a given `t:Geo.geometry/0`.
## Examples
iex> Geo.Turf.Helpers.bbox(%Geo.Polygon{coordinates: [{1,1}, {1,3}, {3,3}, {3,1}]})
{1,1,3,3}
iex> Geo.Turf.Helpers.bbox([{1,1},{2,2},{3,3}])
{1,1,3,3}
"""
@spec bbox([{number, number}] | Geo.geometry()) :: {number, number, number, number}
def bbox(geometries) when is_map(geometries) do
flatten_coords(geometries)
|> List.foldl(@min_bounds, &bbox_folder/2)
end
def bbox(geometries) when is_list(geometries) do
List.foldl(geometries, @min_bounds, &bbox_folder/2)
end
defp bbox_folder({x,y}, {min_x, min_y, max_x, max_y}) do
{
(if (x < min_x), do: x, else: min_x),
(if (y < min_y), do: y, else: min_y),
(if (x > max_x), do: x, else: max_x),
(if (y > max_y), do: y, else: max_y)
}
end
@doc """
Flatten a `t:Geo.geometry()` to a simple list of coordinates
## Examples
iex> Geo.Turf.Helpers.flatten_coords(%Geo.GeometryCollection{geometries: [
...> %Geo.Point{coordinates: {1,1}},
...> %Geo.Point{coordinates: {2,2}}
...> ]})
[{1,1}, {2,2}]
"""
@spec flatten_coords(Geo.geometry()) :: [{number, number}]
def flatten_coords(geometry), do: flatten_coords(geometry, [])
defp flatten_coords(%Geo.Point{coordinates: coords}, acc), do: acc ++ [coords]
defp flatten_coords(%Geo.MultiPoint{coordinates: coords}, acc), do: acc ++ List.flatten(coords)
defp flatten_coords(%Geo.Polygon{coordinates: coords}, acc), do: acc ++ List.flatten(coords)
defp flatten_coords(%Geo.LineString{coordinates: coords}, acc), do: acc ++ List.flatten(coords)
defp flatten_coords(%Geo.MultiLineString{coordinates: coords}, acc),
do: acc ++ List.flatten(coords)
defp flatten_coords(%Geo.MultiPolygon{coordinates: coords}, acc),
do: acc ++ List.flatten(coords)
defp flatten_coords(%Geo.GeometryCollection{geometries: geom}, acc),
do: List.flatten(acc ++ Enum.map(geom, &flatten_coords/1))
end
|
lib/geo/turf/helpers.ex
| 0.869922
| 0.586138
|
helpers.ex
|
starcoder
|
defmodule Remedy.ColourHelpers do
@moduledoc """
This module provides a number of helper functions for working with colours.
"""
@type rgb_tuple :: {0..0xFF, 0..0xFF, 0..0xFF}
@type hex_binary :: String.t()
@type hex_integer :: 0..0xFFFFFF
@doc section: :guards
defguard is_component(r) when r in 0..0xFF
@doc """
Converts a colour to a tuple of {red, green, blue}
"""
@doc since: "0.6.8"
@spec to_rgb(rgb_tuple | hex_binary | hex_integer) :: rgb_tuple
def to_rgb({r, g, b}) when r in 0..0xFF and g in 0..0xFF and b in 0..0xFF do
{r, g, b}
end
def to_rgb(integer) when is_integer(integer) and integer in 0..0xFFFFFF do
<<r, g, b>> = Integer.to_string(integer, 16) |> Base.decode16!()
{r, g, b}
end
def to_rgb(hex) when is_binary(hex) do
case valid_hex?(hex) do
true ->
<<r, g, b>> = parse_hex(hex) |> Base.decode16!()
{r, g, b}
false ->
:error
end
end
@doc """
Convert a value to its HEX representation.
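## Examples (illustrative)

    to_hex({255, 0, 0}) #=> "FF0000"
    to_hex(0xFF0000)    #=> "FF0000"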
"""
def to_hex({r, g, b} = rgb) when r in 0..0xFF and g in 0..0xFF and b in 0..0xFF do
rgb
|> Tuple.to_list()
# pad each component to two digits so that e.g. {1, 2, 3} becomes "010203"
|> Enum.map_join(&(&1 |> Integer.to_string(16) |> String.pad_leading(2, "0")))
end
def to_hex(integer) when is_integer(integer) and integer in 0..0xFFFFFF do
# pad to six digits so that e.g. 0xFF becomes "0000FF" (this also covers 0)
integer
|> Integer.to_string(16)
|> String.pad_leading(6, "0")
end
def to_hex(hex) when is_binary(hex) do
case valid_hex?(hex) do
true ->
hex
false ->
:error
end
end
@doc """
Convert a value to its integer representation.
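## Examples (illustrative)

    to_integer({0xFF, 0x00, 0x00}) #=> 16711680
    to_integer("#FF0000")          #=> 16711680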
"""
def to_integer(integer) when is_integer(integer) and integer in 0..0xFFFFFF do
integer
end
def to_integer({r, g, b}) when r in 0..0xFF and g in 0..0xFF and b in 0..0xFF do
r * 0x10000 + g * 0x100 + b
end
def to_integer(hex) when is_binary(hex) do
case valid_hex?(hex) do
true ->
{integer, ""} = parse_hex(hex) |> Integer.parse(16)
integer
false ->
:error
end
end
def to_integer(nil) do
nil
end
#### Private
defp valid_hex?(hex) when is_binary(hex) and byte_size(hex) in [3, 4, 6, 7] do
hex
|> String.trim_leading("#")
|> String.upcase()
|> String.match?(~r/^([0-9A-F]{3}){1,2}$/)
end
defp parse_hex(hex) when is_binary(hex) and byte_size(hex) in [3, 4, 6, 7] do
hex
|> String.trim_leading("#")
|> String.upcase()
|> expand_shorthand()
end
# expand 3-digit shorthand (e.g. "F0A") to the 6-digit form Base.decode16!/1 expects
defp expand_shorthand(<<r, g, b>>), do: <<r, r, g, g, b, b>>
defp expand_shorthand(hex), do: hex
end
|
lib/remedy/helpers/colour_helpers.ex
| 0.783947
| 0.513425
|
colour_helpers.ex
|
starcoder
|
defmodule Scenic do
@moduledoc """
The Scenic module itself is a supervisor that manages all the machinery that
makes the [Scenes](overview_scene.html), [ViewPorts](overview_viewport.html),
and [Drivers](overview_driver.html) run.
In order to run any Scenic application, you will need to start the Scenic
supervisor in your supervision tree.
Load a configuration for one or more ViewPorts, then add Scenic to your root
supervisor.
defmodule MyApp do
def start(_type, _args) do
import Supervisor.Spec, warn: false
# load the viewport configuration from config
main_viewport_config = Application.get_env(:my_app, :viewport)
# start the application with the viewport
children = [
supervisor(Scenic, [viewports: [main_viewport_config]]),
]
Supervisor.start_link(children, strategy: :one_for_one)
end
end
Note that you can start the Scenic supervisor without any ViewPort
Configurations. In that case, you are responsible for supervising
the ViewPorts yourself. This is not recommended for devices
as Scenic should know how to restart the main ViewPort in the event
of an error.
"""
use Supervisor
@viewports :scenic_dyn_viewports
@version Mix.Project.config()[:version]
@mix_env Mix.env()
@doc """
Return the current version of scenic
"""
def version(), do: @version
@doc """
Return the current Mix env
"""
def mix_env(), do: @mix_env
# --------------------------------------------------------
@doc false
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :supervisor,
restart: :permanent,
shutdown: 500
}
end
# --------------------------------------------------------
@doc false
def start_link(opts \\ [])
def start_link({a, b}), do: start_link([{a, b}])
def start_link(opts) when is_list(opts) do
Supervisor.start_link(__MODULE__, opts, name: :scenic)
end
# --------------------------------------------------------
@doc false
def init(opts) do
opts
|> Keyword.get(:viewports, [])
|> do_init
end
# --------------------------------------------------------
# init with no default viewports
defp do_init([]) do
[
{Scenic.ViewPort.Tables, nil},
{Scenic.Cache.Support.Supervisor, [nil]},
{DynamicSupervisor, name: @viewports, strategy: :one_for_one}
]
|> Supervisor.init(strategy: :one_for_one)
end
# --------------------------------------------------------
# init with default viewports
defp do_init(viewports) do
[
{Scenic.ViewPort.Tables, nil},
{Scenic.Cache.Support.Supervisor, [nil]},
{Scenic.ViewPort.SupervisorTop, [viewports]},
{DynamicSupervisor, name: @viewports, strategy: :one_for_one}
]
|> Supervisor.init(strategy: :one_for_one)
end
end
|
lib/scenic.ex
| 0.649912
| 0.447219
|
scenic.ex
|
starcoder
|
defmodule Extatus.Process do
@moduledoc """
This module defines a behaviour to instrument a `GenServer`s.
For an uninstrumented `GenServer` process, i.e:
```elixir
defmodule Uninstrumented do
use GenServer
def start_link, do: GenServer.start_link(__MODULE__, nil)
def stop(pid), do: GenServer.stop(pid)
def value(pid), do: GenServer.call(pid, :value)
def inc(pid), do: GenServer.call(pid, :inc)
def dec(pid), do: GenServer.call(pid, :dec)
def init(_), do: {:ok, 0}
def handle_call(:value, _from, n), do: {:reply, {:ok, n}, n}
def handle_call(:inc, _from, n), do: {:reply, :ok, n + 1}
def handle_call(:dec, _from, n), do: {:reply, :ok, n - 1}
def handle_call(_, n), do: {:noreply, n}
end
```
It is necessary to provide the metric definition and the function definitions
for `get_name/1` and `report/1`. `get_name/1` is used to generate a name for
the process used as a label in Prometheus and the `report/1` function is to
report the custom metrics to Prometheus. Both functions receive the current
`GenServer` state. So, instrumenting the previous `GenServer` looks like this:
```elixir
defmodule Instrumented do
use GenServer
use Extatus.Process # Extatus.Process behaviour
def start_link, do: GenServer.start_link(__MODULE__, nil)
def stop(pid), do: GenServer.stop(pid)
def value(pid), do: GenServer.call(pid, :value)
def inc(pid), do: GenServer.call(pid, :inc)
def dec(pid), do: GenServer.call(pid, :dec)
# Metric
defmetrics do
gauge :instrument_gauge do
label :label
registry :default
help "Instrument gauge"
end
end
# Name of the process. This must be unique.
def get_name(_n), do: {:ok, "instrumented_process"}
# Report
def report(n) do
Gauge.set(:instrument_gauge, [label: "Label"], n)
end
def init(_) do
{:ok, _} = Extatus.set(__MODULE__, self()) # Add extatus handler.
{:ok, 0}
end
def handle_call(:value, _from, n), do: {:reply, {:ok, n}, n}
def handle_call(:inc, _from, n), do: {:reply, :ok, n + 1}
def handle_call(:dec, _from, n), do: {:reply, :ok, n - 1}
def handle_call(_, n), do: {:noreply, n}
end
```
This `GenServer` will report the current value stored in the server as the
metric `:instrument_gauge` to Prometheus.
Additionally, `Yggdrasil` subscriptions to the channel:
```elixir
%Yggdrasil.Channel{name: :extatus}
```
can be used to get the updates on the current state of the process i.e:
```elixir
iex> chan = %Yggdrasil.Channel{name: :extatus}
iex> Yggdrasil.subscribe(chan)
iex> flush()
{:Y_CONNECTED, (...)}
iex> {:ok, _} = Instrumented.start_link()
{:ok, #PID<0.603.0>}
iex> flush()
{:Y_EVENT, _, %Extatus.Message{name: "instrumented_process", state: :up}}
```
"""
@doc """
Gets the name of the proccess from its `state`.
"""
@callback get_name(state :: term) :: {:ok, term} | {:error, term}
@doc """
Gets the metrics from the process `state`.
"""
@callback report(state :: term) :: term
defmacro __using__(_) do
quote do
@behaviour Extatus.Process
use Extatus.Metric
@doc false
def get_name(state) do
{:ok, inspect(:erlang.phash2(state))}
end
@doc false
def report(_state) do
:ok
end
@doc false
def add_extatus_watchdog do
with {:ok, _} <- Extatus.set(__MODULE__, self()) do
:ok
end
end
defoverridable [get_name: 1, report: 1]
end
end
end
|
lib/extatus/process.ex
| 0.878432
| 0.896024
|
process.ex
|
starcoder
|
defmodule Binance do
alias Binance.Rest.HTTPClient
# Server
@doc """
Pings binance API. Returns `{:ok, %{}}` if successful, `{:error, reason}` otherwise
"""
def ping() do
HTTPClient.get_binance("/api/v3/ping")
end
@doc """
Get binance server time in unix epoch.
Returns `{:ok, time}` if successful, `{:error, reason}` otherwise
## Example
```
{:ok, 1515390701097}
```
"""
def get_server_time() do
case HTTPClient.get_binance("/api/v3/time") do
{:ok, %{"serverTime" => time}} -> {:ok, time}
err -> err
end
end
def get_exchange_info() do
case HTTPClient.get_binance("/api/v1/exchangeInfo") do
{:ok, data} -> {:ok, Binance.ExchangeInfo.new(data)}
err -> err
end
end
# Ticker
@doc """
Get all symbols and current prices listed in binance
Returns `{:ok, [%Binance.SymbolPrice{}]}` or `{:error, reason}`.
## Example
```
{:ok,
[%Binance.SymbolPrice{price: "0.07579300", symbol: "ETHBTC"},
%Binance.SymbolPrice{price: "0.01670200", symbol: "LTCBTC"},
%Binance.SymbolPrice{price: "0.00114550", symbol: "BNBBTC"},
%Binance.SymbolPrice{price: "0.00640000", symbol: "NEOBTC"},
%Binance.SymbolPrice{price: "0.00030000", symbol: "123456"},
%Binance.SymbolPrice{price: "0.04895000", symbol: "QTUMETH"},
...]}
```
"""
def get_all_prices() do
case HTTPClient.get_binance("/api/v3/ticker/price") do
{:ok, data} ->
{:ok, Enum.map(data, &Binance.SymbolPrice.new(&1))}
err ->
err
end
end
@doc """
Retrieves the current ticker information for the given trade pair.
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %Binance.Ticker{}}` or `{:error, reason}`
## Example
```
{:ok,
%Binance.Ticker{ask_price: "0.07548800", bid_price: "0.07542100",
close_time: 1515391124878, count: 661676, first_id: 16797673,
high_price: "0.07948000", last_id: 17459348, last_price: "0.07542000",
low_price: "0.06330000", open_price: "0.06593800", open_time: 1515304724878,
prev_close_price: "0.06593800", price_change: "0.00948200",
price_change_percent: "14.380", volume: "507770.18500000",
weighted_avg_price: "0.06946930"}}
```
"""
def get_ticker(%Binance.TradePair{} = symbol) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> get_ticker(binance_symbol)
e -> e
end
end
def get_ticker(symbol) when is_binary(symbol) do
case HTTPClient.get_binance("/api/v3/ticker/24hr?symbol=#{symbol}") do
{:ok, data} -> {:ok, Binance.Ticker.new(data)}
err -> err
end
end
@doc """
Retrieves the bids & asks of the order book up to the depth for the given symbol
Returns `{:ok, %{bids: [...], asks: [...], lastUpdateId: 12345}}` or `{:error, reason}`
## Example
```
{:ok,
%Binance.OrderBook{
asks: [
["8400.00000000", "2.04078100", []],
["8405.35000000", "0.50354700", []],
["8406.00000000", "0.32769800", []],
["8406.33000000", "0.00239000", []],
["8406.51000000", "0.03241000", []]
],
bids: [
["8393.00000000", "0.20453200", []],
["8392.57000000", "0.02639000", []],
["8392.00000000", "1.40893300", []],
["8390.09000000", "0.07047100", []],
["8388.72000000", "0.04577400", []]
],
last_update_id: 113634395
}
}
```
"""
def get_depth(symbol, limit) do
case HTTPClient.get_binance("/api/v3/depth?symbol=#{symbol}&limit=#{limit}") do
{:ok, data} -> {:ok, Binance.OrderBook.new(data)}
err -> err
end
end
# Account
@doc """
Fetches user account from binance
Returns `{:ok, %Binance.Account{}}` or `{:error, reason}`.
In the case of an error on binance, for example with invalid parameters, `{:error, {:binance_error, %{code: code, msg: msg}}}` will be returned.
Please read https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data to understand API
"""
def get_account() do
api_key = Application.get_env(:binance, :api_key)
secret_key = Application.get_env(:binance, :secret_key)
case HTTPClient.get_binance("/api/v3/account", %{}, secret_key, api_key) do
{:ok, data} -> {:ok, Binance.Account.new(data)}
error -> error
end
end
# User data streams
@doc """
Creates a socket listen key that later can be used as parameter to listen for
user related events.
Returns `{:ok, %Binance.DataStream{}}` or `{:error, reason}`.
## Example
```
{:ok,
%Binance.DataStream{
listen_key: "<KEY>"
}
}
```
For more context please read https://github.com/binance/binance-spot-api-docs/blob/master/user-data-stream.md#create-a-listenkey
"""
def create_listen_key() do
case HTTPClient.unsigned_request_binance("/api/v3/userDataStream", "", :post) do
{:ok, data} -> {:ok, Binance.DataStream.new(data)}
error -> error
end
end
@doc """
A socket listen key expires after 30 minutes without a pong response; this
allows keeping it alive.
Returns `{:ok, %{}}` or `{:error, reason}`.
For more context please read https://github.com/binance/binance-spot-api-docs/blob/master/user-data-stream.md#pingkeep-alive-a-listenkey
"""
def keep_alive_listen_key(key) do
case HTTPClient.unsigned_request_binance(
"/api/v3/userDataStream",
"listenKey=#{key}",
:put
) do
{:ok, data} -> {:ok, data}
error -> error
end
end
@doc """
Closes/disables the listen key. To be used when you stop listening to the
stream.
Returns `{:ok, %{}}` or `{:error, reason}`.
For more context please read https://github.com/binance/binance-spot-api-docs/blob/master/user-data-stream.md#close-a-listenkey
"""
def close_listen_key(key) do
case HTTPClient.unsigned_request_binance(
"/api/v3/userDataStream?listenKey=#{key}",
nil,
:delete
) do
{:ok, data} -> {:ok, data}
error -> error
end
end
# Order
@doc """
Creates a new order on binance
Returns `{:ok, %{}}` or `{:error, reason}`.
In the case of an error on binance, for example with invalid parameters, `{:error, {:binance_error, %{code: code, msg: msg}}}` will be returned.
Please read https://www.binance.com/restapipub.html#user-content-account-endpoints to understand all the parameters
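## Example (illustrative values)
```
create_order("ETHBTC", "BUY", "LIMIT", 1.0, 0.05, "GTC")
```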
"""
def create_order(
symbol,
side,
type,
quantity,
price \\ nil,
time_in_force \\ nil,
new_client_order_id \\ nil,
stop_price \\ nil,
iceberg_quantity \\ nil,
receiving_window \\ 1000,
timestamp \\ nil
) do
timestamp =
case timestamp do
# timestamp needs to be in milliseconds
nil ->
:os.system_time(:millisecond)
t ->
t
end
arguments =
%{
symbol: symbol,
side: side,
type: type,
quantity: quantity,
timestamp: timestamp,
recvWindow: receiving_window
}
|> Map.merge(
unless(
is_nil(new_client_order_id),
do: %{newClientOrderId: new_client_order_id},
else: %{}
)
)
|> Map.merge(
unless(is_nil(stop_price), do: %{stopPrice: format_price(stop_price)}, else: %{})
)
|> Map.merge(
unless(is_nil(iceberg_quantity), do: %{icebergQty: iceberg_quantity}, else: %{})
)
|> Map.merge(unless(is_nil(time_in_force), do: %{timeInForce: time_in_force}, else: %{}))
|> Map.merge(unless(is_nil(price), do: %{price: format_price(price)}, else: %{}))
case HTTPClient.signed_request_binance("/api/v3/order", arguments, :post) do
{:ok, %{"code" => code, "msg" => msg}} ->
{:error, {:binance_error, %{code: code, msg: msg}}}
data ->
data
end
end
@doc """
Creates a new **limit** **buy** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_limit_buy(symbol, quantity, price, time_in_force \\ "GTC")
def order_limit_buy(
%Binance.TradePair{from: from, to: to} = symbol,
quantity,
price,
time_in_force
)
when is_number(quantity)
when is_number(price)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_limit_buy(binance_symbol, quantity, price, time_in_force)
e -> e
end
end
def order_limit_buy(symbol, quantity, price, time_in_force)
when is_binary(symbol)
when is_number(quantity)
when is_number(price) do
create_order(symbol, "BUY", "LIMIT", quantity, price, time_in_force)
|> parse_order_response
end
@doc """
Creates a new **limit** **sell** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_limit_sell(symbol, quantity, price, time_in_force \\ "GTC")
def order_limit_sell(
%Binance.TradePair{from: from, to: to} = symbol,
quantity,
price,
time_in_force
)
when is_number(quantity)
when is_number(price)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_limit_sell(binance_symbol, quantity, price, time_in_force)
e -> e
end
end
def order_limit_sell(symbol, quantity, price, time_in_force)
when is_binary(symbol)
when is_number(quantity)
when is_number(price) do
create_order(symbol, "SELL", "LIMIT", quantity, price, time_in_force)
|> parse_order_response
end
@doc """
Creates a new **market** **buy** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_market_buy(%Binance.TradePair{from: from, to: to} = symbol, quantity)
when is_number(quantity)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_market_buy(binance_symbol, quantity)
e -> e
end
end
def order_market_buy(symbol, quantity)
when is_binary(symbol)
when is_number(quantity) do
create_order(symbol, "BUY", "MARKET", quantity)
end
@doc """
Creates a new **market** **sell** order
Symbol can be a binance symbol in the form of `"ETHBTC"` or `%Binance.TradePair{}`.
Returns `{:ok, %{}}` or `{:error, reason}`
"""
def order_market_sell(%Binance.TradePair{from: from, to: to} = symbol, quantity)
when is_number(quantity)
when is_binary(from)
when is_binary(to) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> order_market_sell(binance_symbol, quantity)
e -> e
end
end
def order_market_sell(symbol, quantity)
when is_binary(symbol)
when is_number(quantity) do
create_order(symbol, "SELL", "MARKET", quantity)
end
defp parse_order_response({:ok, response}) do
{:ok, Binance.OrderResponse.new(response)}
end
defp parse_order_response({
:error,
{
:binance_error,
%{code: -2010, msg: "Account has insufficient balance for requested action."} = reason
}
}) do
{:error, %Binance.InsufficientBalanceError{reason: reason}}
end
# Misc
defp format_price(num) when is_float(num), do: :erlang.float_to_binary(num, [{:decimals, 8}])
defp format_price(num) when is_integer(num), do: inspect(num)
defp format_price(num) when is_binary(num), do: num
@doc """
Searches and normalizes the symbol as it is listed on binance.
To retrieve this information, a request to the binance API is done. The result is then **cached** to ensure the request is done only once.
Order of which symbol comes first, and case sensitivity does not matter.
Returns `{:ok, "SYMBOL"}` if successfully, or `{:error, reason}` otherwise.
## Examples
These 3 calls will result in the same result string:
```
find_symbol(%Binance.TradePair{from: "ETH", to: "REQ"})
```
```
find_symbol(%Binance.TradePair{from: "REQ", to: "ETH"})
```
```
find_symbol(%Binance.TradePair{from: "rEq", to: "eTH"})
```
Result: `{:ok, "REQETH"}`
"""
def find_symbol(%Binance.TradePair{from: from, to: to} = tp)
when is_binary(from)
when is_binary(to) do
case Binance.SymbolCache.get() do
# cache hit
{:ok, data} ->
from = String.upcase(from)
to = String.upcase(to)
found = Enum.filter(data, &Enum.member?([from <> to, to <> from], &1))
case Enum.count(found) do
1 -> {:ok, found |> List.first()}
0 -> {:error, :symbol_not_found}
end
# cache miss
{:error, :not_initialized} ->
case get_all_prices() do
{:ok, price_data} ->
price_data
|> Enum.map(fn x -> x.symbol end)
|> Binance.SymbolCache.store()
find_symbol(tp)
err ->
err
end
err ->
err
end
end
# Open orders
@doc """
Get all open orders, alternatively open orders by symbol
Returns `{:ok, [%Binance.Order{}]}` or `{:error, reason}`.
Weight: 1 for a single symbol; 40 when the symbol parameter is omitted
## Example
```
{:ok,
[%Binance.Order{price: "0.1", origQty: "1.0", executedQty: "0.0", ...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
%Binance.Order{...},
...]}
```
"""
def get_open_orders() do
api_key = Application.get_env(:binance, :api_key)
secret_key = Application.get_env(:binance, :secret_key)
case HTTPClient.get_binance("/api/v3/openOrders", %{}, secret_key, api_key) do
{:ok, data} -> {:ok, Enum.map(data, &Binance.Order.new(&1))}
err -> err
end
end
def get_open_orders(%Binance.TradePair{} = symbol) do
case find_symbol(symbol) do
{:ok, binance_symbol} -> get_open_orders(binance_symbol)
e -> e
end
end
def get_open_orders(symbol) when is_binary(symbol) do
api_key = Application.get_env(:binance, :api_key)
secret_key = Application.get_env(:binance, :secret_key)
case HTTPClient.get_binance("/api/v3/openOrders", %{:symbol => symbol}, secret_key, api_key) do
{:ok, data} -> {:ok, Enum.map(data, &Binance.Order.new(&1))}
err -> err
end
end
# Order
@doc """
Get an order by symbol; the timestamp and either orderId or origClientOrderId are mandatory
Returns `{:ok, [%Binance.Order{}]}` or `{:error, reason}`.
Weight: 1
## Example
```
{:ok, %Binance.Order{price: "0.1", origQty: "1.0", executedQty: "0.0", ...}}
```
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data
"""
def get_order(
symbol,
timestamp,
order_id \\ nil,
orig_client_order_id \\ nil,
recv_window \\ nil
) do
case is_binary(symbol) do
true ->
fetch_order(symbol, timestamp, order_id, orig_client_order_id, recv_window)
false ->
case find_symbol(symbol) do
{:ok, binance_symbol} ->
fetch_order(binance_symbol, timestamp, order_id, orig_client_order_id, recv_window)
e ->
e
end
end
end
def fetch_order(symbol, timestamp, order_id, orig_client_order_id, recv_window)
when is_binary(symbol)
when is_integer(timestamp)
when is_integer(order_id) or is_binary(orig_client_order_id) do
api_key = Application.get_env(:binance, :api_key)
secret_key = Application.get_env(:binance, :secret_key)
arguments =
%{
symbol: symbol,
timestamp: timestamp
}
|> Map.merge(unless(is_nil(order_id), do: %{orderId: order_id}, else: %{}))
|> Map.merge(
unless(
is_nil(orig_client_order_id),
do: %{origClientOrderId: orig_client_order_id},
else: %{}
)
)
|> Map.merge(unless(is_nil(recv_window), do: %{recvWindow: recv_window}, else: %{}))
case HTTPClient.get_binance("/api/v3/order", arguments, secret_key, api_key) do
{:ok, data} -> {:ok, Binance.Order.new(data)}
err -> err
end
end
@doc """
Cancel an active order.
Symbol and either orderId or origClientOrderId must be sent.
Returns `{:ok, %Binance.Order{}}` or `{:error, reason}`.
Weight: 1
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade
"""
def cancel_order(
symbol,
timestamp,
order_id \\ nil,
orig_client_order_id \\ nil,
new_client_order_id \\ nil,
recv_window \\ nil
) do
case is_binary(symbol) do
true ->
cancel_order_(
symbol,
timestamp,
order_id,
orig_client_order_id,
new_client_order_id,
recv_window
)
false ->
case find_symbol(symbol) do
{:ok, binance_symbol} ->
cancel_order_(
binance_symbol,
timestamp,
order_id,
orig_client_order_id,
new_client_order_id,
recv_window
)
e ->
e
end
end
end
defp cancel_order_(
symbol,
timestamp,
order_id,
orig_client_order_id,
new_client_order_id,
recv_window
)
when is_binary(symbol)
when is_integer(timestamp)
when is_integer(order_id) or is_binary(orig_client_order_id) do
api_key = Application.get_env(:binance, :api_key)
secret_key = Application.get_env(:binance, :secret_key)
arguments =
%{
symbol: symbol,
timestamp: timestamp
}
|> Map.merge(unless(is_nil(order_id), do: %{orderId: order_id}, else: %{}))
|> Map.merge(
unless(
is_nil(orig_client_order_id),
do: %{origClientOrderId: orig_client_order_id},
else: %{}
)
)
|> Map.merge(
unless(is_nil(new_client_order_id),
do: %{newClientOrderId: new_client_order_id},
else: %{}
)
)
|> Map.merge(unless(is_nil(recv_window), do: %{recvWindow: recv_window}, else: %{}))
case HTTPClient.delete_binance("/api/v3/order", arguments, secret_key, api_key) do
{:ok, data} -> {:ok, Binance.Order.new(data)}
err -> err
end
end
end
|
lib/binance.ex
| 0.902539
| 0.740667
|
binance.ex
|
starcoder
|
defmodule Credo.Code.Block do
@moduledoc """
This module provides helper functions to analyse blocks, e.g. the block taken
by the `if` macro.
"""
@doc """
Returns the do:, else:, rescue: and after: blocks of a given AST node.
"""
def all_blocks_for!(ast) do
[
do_block_for!(ast),
else_block_for!(ast),
rescue_block_for!(ast),
after_block_for!(ast)
]
end
@doc """
Returns true if the given `ast` has a do block.
"""
def do_block?(ast) do
case do_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
@doc """
Returns the do: block of a given AST node.
"""
def do_block_for!(ast) do
case do_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
@doc """
Returns a tuple {:ok, do_block} or nil for a given AST node.
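A sketch of the expected shape:

    ast = quote do: if(x, do: :a, else: :b)
    Credo.Code.Block.do_block_for(ast)
    #=> {:ok, :a}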
"""
def do_block_for({_atom, _meta, arguments}) when is_list(arguments) do
do_block_for(arguments)
end
def do_block_for(do: block) do
{:ok, block}
end
def do_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :do))
end
def do_block_for(_) do
nil
end
@doc """
Returns true if the given `ast` has an else block.
"""
def else_block?(ast) do
case else_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
@doc """
Returns the `else` block of a given AST node.
"""
def else_block_for!(ast) do
case else_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
@doc """
Returns a tuple {:ok, else_block} or nil for a given AST node.
"""
def else_block_for({_atom, _meta, arguments}) when is_list(arguments) do
else_block_for(arguments)
end
def else_block_for(do: _do_block, else: else_block) do
{:ok, else_block}
end
def else_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :else))
end
def else_block_for(_) do
nil
end
@doc """
Returns true if the given `ast` has a rescue block.
"""
def rescue_block?(ast) do
case rescue_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
@doc """
Returns the rescue: block of a given AST node.
"""
def rescue_block_for!(ast) do
case rescue_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
@doc """
Returns a tuple {:ok, rescue_block} or nil for a given AST node.
"""
def rescue_block_for({_atom, _meta, arguments}) when is_list(arguments) do
rescue_block_for(arguments)
end
def rescue_block_for(do: _do_block, rescue: rescue_block) do
{:ok, rescue_block}
end
def rescue_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :rescue))
end
def rescue_block_for(_) do
nil
end
@doc """
Returns true if the given `ast` has a catch block.
"""
def catch_block?(ast) do
case catch_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
@doc """
Returns the catch: block of a given AST node.
"""
def catch_block_for!(ast) do
case catch_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
@doc """
Returns a tuple {:ok, catch_block} or nil for a given AST node.
"""
def catch_block_for({_atom, _meta, arguments}) when is_list(arguments) do
catch_block_for(arguments)
end
def catch_block_for(do: _do_block, catch: catch_block) do
{:ok, catch_block}
end
def catch_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :catch))
end
def catch_block_for(_) do
nil
end
@doc """
Returns true if the given `ast` has an after block.
"""
def after_block?(ast) do
case after_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
@doc """
Returns the after: block of a given AST node.
"""
def after_block_for!(ast) do
case after_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
@doc """
Returns a tuple {:ok, after_block} or nil for a given AST node.
"""
def after_block_for({_atom, _meta, arguments}) when is_list(arguments) do
after_block_for(arguments)
end
def after_block_for(do: _do_block, after: after_block) do
{:ok, after_block}
end
def after_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :after))
end
def after_block_for(_) do
nil
end
defp find_keyword(list, keyword) when is_list(list) do
if Keyword.has_key?(list, keyword) do
{:ok, list[keyword]}
else
nil
end
end
defp find_keyword(_, _), do: nil
@doc """
Returns the children of the `do` block of the given AST node.
"""
def calls_in_do_block({_op, _meta, arguments}) do
arguments
|> do_block_for!
|> instructions_for
end
def calls_in_do_block(arg) do
arg
|> do_block_for!
|> instructions_for
end
@doc """
Returns the children of the `rescue` block of the given AST node.
"""
def calls_in_rescue_block({_op, _meta, arguments}) do
arguments
|> rescue_block_for!
|> instructions_for
end
def calls_in_rescue_block(arg) do
arg
|> rescue_block_for!
|> instructions_for
end
@doc """
Returns the children of the `catch` block of the given AST node.
"""
def calls_in_catch_block({_op, _meta, arguments}) do
arguments
|> catch_block_for!
|> instructions_for
end
def calls_in_catch_block(arg) do
arg
|> catch_block_for!
|> instructions_for
end
defp instructions_for({:__block__, _meta, calls}), do: calls
defp instructions_for(v)
when is_atom(v) or is_tuple(v) or is_binary(v) or is_float(v) or is_integer(v),
do: List.wrap(v)
defp instructions_for(v) when is_list(v), do: [v]
end
|
lib/credo/code/block.ex
| 0.872998
| 0.670581
|
block.ex
|
starcoder
|
defmodule ExDiceRoller do
@moduledoc """
Converts strings into dice rolls and returns expected results. Ignores any
spaces, including tabs and newlines, in the provided string. A roll can be
invoked via `ExDiceRoller.roll/2`.
iex> ExDiceRoller.roll("2d6+3")
8
iex> ExDiceRoller.roll("(1d4)d(6*y)-(2/3+1dx)", [x: 2, y: 3])
11
iex> import ExDiceRoller.Sigil
iex> ExDiceRoller.roll(~a/1d2+z/, [z: ~a/1d2/, opts: [:explode]])
8
Rolls and invoked compiled functions can be supplied a number of options:
* `:cache`: Performs a cache lookup, with a miss generating a compiled
roll that is both cached and returned.
See `ExDiceRoller.Cache.obtain/2` for more information.
* `:explode`: Causes dice to _explode_. This means that if a die roll results
in the highest possible value for a die (such as rolling a 20 on a d20), the
die will be rerolled until the result is no longer the max possible. It then
sums the total of all rolls and returns that value.
* `:keep`: Retains each dice roll.
For more information, see `ExDiceRoller.Compilers.Roll`.
* `:highest`: compares and selects the highest value(s) from a set of
expressions separated by the `,` operator
* `:lowest`: compares and selects the lowest value(s) from a set of
expressions separated by the `,` operator
## Order of Precedence
The following table shows order of precedence, from highest to lowest,
of the operators available to ExDiceRoller.
Operator | Associativity | Compiler
--------------------- | ------------- | ----------------------------
`d` | left-to-right | `ExDiceRoller.Compilers.Roll`
`+`, `-` | unary | NA (handled by the parser in `dice_parser.yrl`)
`*`, `/`, `%`, `^` | left-to-right | `ExDiceRoller.Compilers.Math`
`+`, `-` | left-to-right | `ExDiceRoller.Compilers.Math`
`,` | left-to-right | `ExDiceRoller.Compilers.Separator`
### Effects of Parentheses
As in math, parentheses can be used to create sub-expressions.
iex> ExDiceRoller.tokenize("1+3d4*1-2/-3") |> elem(1) |> ExDiceRoller.parse()
{:ok,
{{:operator, '-'},
{{:operator, '+'}, 1,
{{:operator, '*'}, {:roll, 3, 4}, 1}},
{{:operator, '/'}, 2, -3}}}
iex> ExDiceRoller.tokenize("(1+3)d4*1-2/-3") |> elem(1) |> ExDiceRoller.parse()
{:ok,
{{:operator, '-'},
{{:operator, '*'},
{:roll, {{:operator, '+'}, 1, 3}, 4},
1}, {{:operator, '/'}, 2, -3}}}
iex> ExDiceRoller.tokenize("1+3d(4*1)-2/-3") |> elem(1) |> ExDiceRoller.parse()
{:ok,
{{:operator, '-'},
{{:operator, '+'}, 1,
{:roll, 3, {{:operator, '*'}, 4, 1}}},
{{:operator, '/'}, 2, -3}}}
iex> ExDiceRoller.tokenize("1+3d4*(1-2)/-3") |> elem(1) |> ExDiceRoller.parse()
{:ok,
{{:operator, '+'}, 1,
{{:operator, '/'},
{{:operator, '*'}, {:roll, 3, 4},
{{:operator, '-'}, 1, 2}}, -3}}}
## Compiled Rolls
Some systems utilize complex dice rolling equations. Repeatedly tokenizing,
parsing, and interpreting complicated dice rolls strings can lead to a
performance hit on an application. To ease the burden, developers can
_compile_ a dice roll string into an anonymous function. This anonymous
function can be passed around as any other function and reused repeatedly
without having to re-tokenize the string, nor re-interpret a parsed
expression.
iex> {:ok, roll_fun} = ExDiceRoller.compile("2d6+3")
iex> ExDiceRoller.execute(roll_fun)
8
iex> ExDiceRoller.execute(roll_fun)
13
iex> ExDiceRoller.execute(roll_fun)
10
More information can be found in `ExDiceRoller.Compiler`.
## Variables
Single-letter variables can be used when compiling dice rolls. However, values
for those variables must be supplied upon invocation. Values can be any of the
following:
* numbers
* expressions, such as "1d6+2"
* compiled dice rolls
* results of `~a` sigil, as described in `ExDiceRoller.Sigil`
* lists of any of the above
```elixir
iex> {:ok, fun} = ExDiceRoller.compile("2d4+x")
iex> ExDiceRoller.execute(fun, x: 2)
7
iex> ExDiceRoller.execute(fun, x: "5d100")
245
iex> {:ok, fun_2} = ExDiceRoller.compile("3d8-2")
iex> ExDiceRoller.execute(fun, x: fun_2)
23
iex> import ExDiceRoller.Sigil
iex> ExDiceRoller.execute(fun, x: ~a/3d5+2d4/)
22
```
More information can be found in `ExDiceRoller.Compilers.Variable`.
## Caching
ExDiceRoller can cache and reuse dice rolls.
iex> ExDiceRoller.start_cache()
iex> ExDiceRoller.roll("8d6-(4d5)", opts: [cache: true])
20
iex> ExDiceRoller.roll("8d6-(4d5)", opts: [cache: true])
13
iex> ExDiceRoller.roll("1d3+x", [x: 4, cache: true])
6
iex> ExDiceRoller.roll("1d3+x", [x: 1, opts: [:cache, :explode]])
6
More details can be found in the documentation for `ExDiceRoller.Cache`.
## Sigil Support
ExDiceRoller comes with its own sigil, `~a`, that can be used to create
compiled dice roll functions or roll them on the spot.
iex> import ExDiceRoller.Sigil
iex> fun = ~a/2d6+2/
iex> ExDiceRoller.roll(fun)
7
iex> ExDiceRoller.roll(~a|1d4+x/5|, [x: 43])
11
iex> ExDiceRoller.roll(~a/xdy/, [x: fun, y: ~a/12d4-15/])
111
More information can be found in `ExDiceRoller.Sigil`.
## ExDiceRoller Examples
The following examples show a variety of types of rolls, and includes examples
of basic and complex rolls, caching, sigil support, variables, and
combinations thereof.
iex> ExDiceRoller.roll("1d8")
1
iex> ExDiceRoller.roll("2d20 + 5")
34
iex> import ExDiceRoller.Sigil
iex> ExDiceRoller.roll(~a/2d8-2/)
3
iex> ExDiceRoller.roll("(1d4)d(6*5) - (2/3+1)")
18
iex> ExDiceRoller.roll("1+\t2*3d 4")
15
iex> ExDiceRoller.roll("1dx+6-y", [x: 10, y: 5])
10
iex> import ExDiceRoller.Sigil
iex> ExDiceRoller.roll(~a/2+5dx/, x: ~a|3d(7/2)|)
19
iex> ExDiceRoller.roll("1d2", opts: [:explode])
1
iex> ExDiceRoller.roll("1d2", opts: [:explode])
7
iex> ExDiceRoller.start_cache()
iex> ExDiceRoller.roll("1d2+x", [x: 3, cache: true])
4
iex> ExDiceRoller.roll("1d2+x", [x: 3, cache: true, opts: :explode])
10
iex> import ExDiceRoller.Sigil
iex> ~a/1d2+3/r
4
iex> ~a/1d2+2/re
9
"""
alias ExDiceRoller.{Args, Cache, Compiler, Parser, Tokenizer}
@cache_table Application.fetch_env!(:ex_dice_roller, :cache_table)
@doc """
Processes a given string as a dice roll and returns the final result. The
final result is a rounded integer.
iex> ExDiceRoller.roll("1d6+15")
18
Note that using variables with this call will result in errors. If you need
variables, use `roll/2` instead.
"""
@spec roll(String.t()) :: integer | list(integer)
def roll(roll_string), do: roll(roll_string, opts: [])
@doc """
Processes a given string as a dice roll and returns the calculated result. The
result is a rounded integer.
Any variable values should be specified as keys in `args`. Options can be
passed under the `:opts` key of `args`.
Possible values for `opts` include:
* `:cache`: Performs a cache lookup, with a miss generating a compiled
roll that is both cached and returned.
See `ExDiceRoller.Cache.obtain/2` for more information.
* `:explode`: Causes dice to _explode_. This means that if a die roll results
in the highest possible value for a die (such as rolling a 20 on a d20), the
die will be rerolled until the result is no longer the max possible. It then
sums the total of all rolls and returns that value.
* `:keep`: Retains each dice roll.
For more information, see `ExDiceRoller.Compilers.Roll`.
* `:highest`: Selects the highest of all calculated values when using the `,`
operator.
* `:lowest`: Selects the lowest of all calculated values when using the `,`
operator.
### Examples
iex> ExDiceRoller.roll("1+x", [x: 1])
2
iex> ExDiceRoller.roll("1d6+15", [])
18
iex> ExDiceRoller.roll("1d8+x", [x: 5])
6
iex> ExDiceRoller.roll("1d3", opts: :explode)
5
iex> ExDiceRoller.start_cache(ExDiceRoller.Cache)
iex> ExDiceRoller.roll("(1d6)d4-3+y", [y: 3, cache: true])
10
iex> ExDiceRoller.roll("1d2+y", y: 1, cache: true, opts: [:explode])
2
iex> ExDiceRoller.roll("1d2+y", y: 2, cache: true, opts: [:explode])
11
iex> ExDiceRoller.roll("1,2", opts: [:highest])
2
iex> ExDiceRoller.roll("10,12,45,3,100", opts: [:lowest])
3
"""
@spec roll(String.t() | Compiler.compiled_fun(), Keyword.t()) :: integer
def roll(roll_string, args) when is_bitstring(roll_string) do
case Args.use_cache?(args) do
false ->
with {:ok, tokens} <- Tokenizer.tokenize(roll_string),
{:ok, parsed_tokens} <- Parser.parse(tokens) do
calculate(parsed_tokens, args)
else
{:error, _} = err -> err
end
true ->
@cache_table
|> Cache.obtain(roll_string)
|> execute(args)
end
end
def roll(compiled, args) when is_function(compiled) do
execute(compiled, args)
end
@doc "Helper function that calls `ExDiceRoller.Tokenizer.tokenize/1`."
@spec tokenize(String.t()) :: {:ok, Tokenizer.tokens()}
def tokenize(roll_string), do: Tokenizer.tokenize(roll_string)
@doc "Helper function that calls `ExDiceRoller.Tokenizer.tokenize/1`."
@spec parse(Tokenizer.tokens()) :: {:ok, Parser.expression()}
def parse(tokens), do: Parser.parse(tokens)
@doc """
Takes an `t:ExDiceRoller.Parser.expression/0` from parse and calculates the result.
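A minimal sketch (deterministic, since no dice are involved):
iex> {:ok, tokens} = ExDiceRoller.tokenize("1+2")
iex> {:ok, expression} = ExDiceRoller.parse(tokens)
iex> ExDiceRoller.calculate(expression, [])
3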
"""
@spec calculate(Parser.expression(), Keyword.t()) :: number
def calculate(expression, args) do
expression
|> compile()
|> elem(1)
|> execute(args)
end
@doc """
Compiles a string or `t:expression/0` into an anonymous function.
iex> {:ok, roll_fun} = ExDiceRoller.compile("1d8+2d(5d3+4)/3")
iex> ExDiceRoller.execute(roll_fun)
5
If `roll` is not a string or expression compile/1 will return
`{:error, {:cannot_compile_roll, other}}`.
"""
@spec compile(String.t() | Parser.expression()) ::
{:ok, Compiler.compiled_function()} | {:error, any}
def compile(roll)
def compile(roll) when is_bitstring(roll) do
with {:ok, tokens} <- Tokenizer.tokenize(roll),
{:ok, parsed_tokens} <- Parser.parse(tokens) do
compile(parsed_tokens)
else
{:error, _} = err -> err
end
end
def compile(roll) when is_tuple(roll) or is_number(roll) do
{:ok, Compiler.compile(roll)}
end
def compile(other), do: {:error, {:cannot_compile_roll, other}}
@doc "Executes a function built by `compile/1`."
@spec execute(function, Keyword.t()) :: integer | list(integer)
def execute(compiled, args \\ []) when is_function(compiled) do
compiled.(args)
end
@doc """
Starts the underlying roll function cache. See `ExDiceRoller.Cache` for more
details.
"""
@spec start_cache(atom | none) :: {:ok, any}
def start_cache(cache \\ @cache_table) do
{:ok, _} = Cache.start_link(cache)
end
end
|
lib/ex_dice_roller.ex
|
defmodule Bootleg.DSL do
@moduledoc """
Configuration DSL for Bootleg.
"""
alias Bootleg.{Config, Role, SSH, UI}
defmacro __using__(_) do
quote do
import Bootleg.DSL,
only: [
role: 2,
role: 3,
config: 2,
config: 1,
config: 0,
before_task: 2,
after_task: 2,
invoke: 1,
task: 2,
task: 3,
remote: 1,
remote: 2,
remote: 3,
load: 1,
upload: 3,
download: 3
]
end
end
@doc """
Defines a role.
Roles are a collection of hosts and their options that are responsible for the same function,
for example building a release, archiving a release, or executing commands against a running
application.
`name` is the name of the role, and is globally unique. Calling `role/3` multiple times with
the same name will result in the host lists being merged. If the same host shows up multiple
times, it will have its `options` merged. The name `:all` is reserved and cannot be used here.
`hosts` can be a single hostname, or a `List` of hostnames.
`options` is an optional `Keyword` used to provide configuration details about a specific host
(or collection of hosts). Certain options are passed to SSH directly (see
`Bootleg.SSH.ssh_options/0`), others are used internally (`user` for example, is used
by both SSH and Git), and unknown options are simply stored. In the future `remote/1,2` will
allow for host filtering based on role options. Some Bootleg extensions may also add support
for additional options.
```
use Bootleg.DSL
role :build, ["build1.example.com", "build2.example.com"], user: "foo", identity: "~/.ssh/id_rsa"
```
"""
defmacro role(name, hosts, options \\ [])
defmacro role(:all, _, _) do
raise ArgumentError, ":all is reserved by bootleg and refers to all defined roles."
end
defmacro role(name, hosts, options) do
quote bind_quoted: binding() do
Role.define(name, hosts, options)
end
end
@doc """
Fetches all key/value pairs currently defined in the Bootleg configuration.
"""
defmacro config do
quote do
Config.get_all()
end
end
@doc """
Fetches the value for the supplied key from the Bootleg configuration. If the provided
key is a `Tuple`, the first element is considered the key, the second value is considered
the default value (and returned without altering the config) in case the key has not
been set. This uses the same semantics as `Keyword.get/3`.
```
use Bootleg.DSL
config :foo, :bar
# local_foo will be :bar
local_foo = config :foo
# local_foo will be :bar still, as :foo already has a value
local_foo = config {:foo, :car}
# local_hello will be :world, as :hello has not been defined yet
local_hello = config {:hello, :world}
config :hello, nil
# local_hello will be nil, as :hello has a value of nil now
local_hello = config {:hello, :world}
```
"""
defmacro config({key, default}) do
quote bind_quoted: binding() do
Config.get_key(key, default)
end
end
defmacro config(key) do
quote bind_quoted: binding() do
Config.get_key(key)
end
end
@doc """
Sets `key` in the Bootleg configuration to `value`.
One of the cornerstones of the Bootleg DSL, `config/2` is used to pass configuration options
to Bootleg. See the documentation for the specific task you are trying to configure for what
keys it supports.
```
use Bootleg.DSL
config :app, :my_cool_app
config :version, "1.0.0"
```
"""
defmacro config(key, value) do
quote bind_quoted: binding() do
Config.set_key(key, value)
end
end
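# Builds a uniquely numbered module (e.g. `Bootleg.DynamicCallbacks.BeforeBuild42`)
# whose `execute/0` wraps the hook block, then appends that MFA to the task's
# before/after hook list in the config agent.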
defp add_callback(task, position, caller, do: block) do
file = caller.file()
line = caller.line()
quote do
hook_number = Bootleg.Config.Agent.increment(:next_hook_number)
module_name =
String.to_atom(
"Elixir.Bootleg.DynamicCallbacks." <>
String.capitalize("#{unquote(position)}") <>
String.capitalize("#{unquote(task)}") <> "#{hook_number}"
)
defmodule module_name do
@file unquote(file)
def execute, do: unquote(block)
def location, do: {unquote(file), unquote(line)}
hook_list_name = :"#{unquote(position)}_hooks"
hooks = Keyword.get(Bootleg.Config.Agent.get(hook_list_name), unquote(task), [])
Bootleg.Config.Agent.merge(
hook_list_name,
unquote(task),
hooks ++ [[module_name, :execute]]
)
end
end
end
@doc """
Defines a before hook for a task.
A hook is a piece of code that is executed before/after a task has been run. The hook can
either be a standalone code block, or the name of another task. Hooks are executed in an
unconditional fashion. Only an uncaught exception will prevent further execution. If a task
name is provided, it will be invoked via `invoke/1`.
Just like with `invoke/1`, a task does not need to be defined to have a hook registered for
it, nor does the task need to be defined in order to be triggered via a hook. Tasks may also
be defined at a later point, provided execution has not begun.
If multiple hooks are defined for the same task, they are executed in the order they were
originally defined.
```
use Bootleg.DSL
before_task :build, :checksum_code
before_task :deploy do
Notify.team "Here we go!"
end
```
Relying on the ordering of hook execution is heavily discouraged. It's better to explicitly
define the order using extra tasks and hooks. For example
```
use Bootleg.DSL
before_task :build, :do_first
before_task :build, :do_second
```
would be much better written as
```
use Bootleg.DSL
before_task :build, :do_first
before_task :do_first, :do_second
```
"""
defmacro before_task(task, do: block) when is_atom(task) do
add_callback(task, :before, __CALLER__, do: block)
end
defmacro before_task(task, other_task) when is_atom(task) and is_atom(other_task) do
quote do: before_task(unquote(task), do: invoke(unquote(other_task)))
end
@doc """
Defines an after hook for a task.
Behaves exactly like a before hook, but executes after the task has run. See `before_task/2`
for more details.
```
use Bootleg.DSL
after_task :build, :store_artifact
after_task :deploy do
Notify.team "Deployed!"
end
```
"""
defmacro after_task(task, do: block) when is_atom(task) do
add_callback(task, :after, __CALLER__, do: block)
end
defmacro after_task(task, other_task) when is_atom(task) and is_atom(other_task) do
quote do: after_task(unquote(task), do: invoke(unquote(other_task)))
end
@doc """
Defines a task identified by `task`.
This is one of the cornerstones of the Bootleg DSL. It takes a task name (`task`) and a block of code,
and registers the code to be executed when `task` is invoked. Inside the block, the full Bootleg
DSL is available.
A warning will be emitted if a task is redefined, unless the `override` option is specified with a value of `true`.
```
use Bootleg.DSL
task :hello do
IO.puts "Hello World!"
end
```
Tasks can override existing tasks:
```
use Bootleg.DSL
task :update, override: true do
alias Bootleg.UI
UI.info("No longer using stock update task")
end
"""
defmacro task(task, options \\ [], do: block) when is_atom(task) and is_list(options) do
file = __CALLER__.file()
line = __CALLER__.line()
module_name = module_for_task(task)
quote do
module_name = unquote(module_name)
# credo:disable-for-lines:275 Credo.Check.Design.AliasUsage
module_name
|> Code.ensure_compiled()
|> Bootleg.DSL.warn_task_redefined(
unquote(task),
unquote(module_name),
unquote(options[:override])
)
original_opts = Code.compiler_options()
Code.compiler_options(Map.put(original_opts, :ignore_module_conflict, true))
try do
defmodule module_name do
@file unquote(file)
def execute, do: unquote(block)
def location, do: {unquote(file), unquote(line)}
end
after
Code.compiler_options(original_opts)
end
:ok
end
end
@doc false
def warn_task_redefined({:module, _}, task, macro, override) do
{orig_file, orig_line} = macro.location
unless override do
UI.warn(
"Warning: task '#{task}' is being redefined. " <>
"The most recent definition will be used. " <>
"To prevent this warning, set `override: true` in the task options. " <>
"The previous definition was at: #{orig_file}:#{orig_line}"
)
end
end
@doc false
def warn_task_redefined({:error, _}, task, _, true) do
UI.warn("Warning: task '#{task}' is not already defined and has a needless override.")
end
@doc false
def warn_task_redefined(_, _, _, _), do: nil
@spec invoke_task_callbacks(atom, atom) :: :ok
defp invoke_task_callbacks(task, agent_key) do
agent_key
|> Bootleg.Config.Agent.get()
|> Keyword.get(task, [])
|> Enum.each(fn [module, fnref] -> apply(module, fnref, []) end)
end
@spec module_for_task(atom) :: atom
defp module_for_task(task) do
:"Elixir.Bootleg.DynamicTasks.#{Macro.camelize("#{task}")}"
end
@doc """
Invokes the task identified by `task`.
This is one of the cornerstones of the Bootleg DSL. Executing a task first calls any registered
`before_task/2` hooks, then executes the task itself (which was defined via `task/2`), then any
registered `after_task/2` hooks.
The execution of the hooks and the task are unconditional. Return values are ignored, though an
uncaught exception will stop further execution. The `task` does not need to exist. Any
hooks for a task with the name of `task` will still be executed, and no error or warning will be
emitted. This can be used to create events which a developer wants to be able to install hooks
around without needing to define no-op tasks.
`invoke/1` executes immediately, so it should always be called from inside a task. If it's placed
directly inside `config/deploy.exs`, the task will be invoked when the configuration is first
read. This is probably not what is desired.
```
use Bootleg.DSL
task :hello do
IO.puts "Hello?"
invoke :world
end
task :world do
IO.puts "World!"
end
```
"""
@spec invoke(atom) :: :ok
def invoke(task) when is_atom(task) do
invoke_task_callbacks(task, :before_hooks)
module_name = module_for_task(task)
with {:module, _} <- Code.ensure_compiled(module_name) do
apply(module_name, :execute, [])
end
invoke_task_callbacks(task, :after_hooks)
end
@doc """
Executes commands on all remote hosts.
This is equivalent to calling `remote/2` with a role of `:all`.
"""
defmacro remote(do: block) do
quote do: remote(:all, do: unquote(block))
end
defmacro remote(lines) do
quote do: remote(:all, unquote(lines))
end
defmacro remote(role, do: {:__block__, _, lines}) do
quote do: remote(unquote(role), [], unquote(lines))
end
defmacro remote(role, do: lines) do
quote do: remote(unquote(role), [], unquote(lines))
end
@doc """
Executes commands on all remote hosts within a role.
This is equivalent to calling `remote/3` with an `options` of `[]`.
"""
defmacro remote(role, lines) do
quote do: remote(unquote(role), [], unquote(lines))
end
defmacro remote(role, options, do: {:__block__, _, lines}) do
quote do: remote(unquote(role), unquote(options), unquote(lines))
end
defmacro remote(role, options, do: lines) do
quote do: remote(unquote(role), unquote(options), unquote(lines))
end
@doc """
Executes commands on a remote host.
This is the workhorse of the DSL. It executes shell commands on all hosts associated with
the `role`. If any of the shell commands exits with a non-zero status, execution will be stopped
and an `SSHError` will be raised.
`lines` can be a `List` of commands to execute, or a code block where each line's return value is
used as a command. Each command will be simultaneously executed on all hosts in the role. Once
all hosts have finished executing the command, the next command in the list will be sent.
`options` is an optional `Keyword` list of options to customize the remote invocation. Currently two
keys are supported:
* `filter` takes a `Keyword` list of host options to filter with. Any host whose options match
the filter will be included in the remote execution. A host matches if it has all of the filtering
options defined and the values match (via `==/2`) the filter.
* `cd` changes the working directory of the remote shell prior to executing the remote
commands. The options takes either an absolute or relative path, with relative paths being
defined relative to the workspace configured for the role, or the default working directory
of the shell if no workspace is defined.
`role` can be a single role, a list of roles, or the special role `:all` (all roles). If the same host
exists in multiple roles, the commands will be run once for each role where the host shows up. In the
case of multiple roles, each role is processed sequentially.
Returns the results to the caller, per command and per host. See `Bootleg.SSH.run!` for more details.
```
use Bootleg.DSL
remote :build, ["uname -a", "date"]
remote :build do
"ls -la"
"echo " <> Time.to_string(Time.utc_now) <> " > local_now"
end
# will raise an error since `false` exits with a non-zero status
remote :build, ["false", "touch never_gonna_happen"]
# runs for hosts found in all roles
remote do: "hostname"
remote :all, do: "hostname"
# runs for hosts found in :build first, then for hosts in :app
remote [:build, :app], do: "hostname"
role :build, "host2.example.com"
role :build, "host1.example.com", primary: true, another_attr: :cat
# only runs on `host1.example.com`
remote :build, filter: [primary: true] do
"hostname"
end
# runs on `host1.example.com` inside the `tmp` directory found in the workspace
remote :build, filter: [primary: true], cd: "tmp/" do
"hostname"
end
```
"""
defmacro remote(role, options, lines) do
roles = Role.unpack_role(role)
quote bind_quoted: binding() do
Enum.reduce(roles, [], fn role, outputs ->
role
|> SSH.init([cd: options[:cd]], Keyword.get(options, :filter, []))
|> SSH.run!(lines)
|> SSH.merge_run_results(outputs)
end)
end
end
@doc """
Uploads a local file to remote hosts.
Uploading works much like `remote/3`, but instead of transferring shell commands over SSH,
it transfers files via SCP. The remote host does need to support SCP, which should be provided
by most SSH implementations automatically.
`role` can either be a single role name, a list of roles, or a list of roles and filter
attributes. The special `:all` role is also supported. See `remote/3` for details.
`local_path` can either be a file or directory found on the local machine. If it's a directory,
the entire directory will be recursively copied to the remote hosts. Relative paths are resolved
relative to the root of the local project.
`remote_path` is the file or directory where the transferred files should be placed. The semantics
of how `remote_path` is treated vary depending on what `local_path` refers to. If `local_path` points
to a file, `remote_path` is treated as a file unless it's `.` or ends in `/`, in which case it's
treated as a directory and the filename of the local file will be used. If `local_path` is a directory,
`remote_path` is treated as a directory as well. Relative paths are resolved relative to the projects
remote `workspace`. Missing directories are not implicitly created.
The files on the remote server are created using the authenticating user's `uid`/`gid` and `umask`.
```
use Bootleg.DSL
# copies ./my_file to ./new_name on the remote host
upload :app, "my_file", "new_name"
# copies ./my_file to ./a_dir/my_file on the remote host. ./a_dir must already exist
upload :app, "my_file", "a_dir/"
# recursively copies ./some_dir to ./new_dir on the remote host. ./new_dir will be created if missing
upload :app, "some_dir", "new_dir"
# copies ./my_file to /tmp/foo on the remote host
upload :app, "my_file", "/tmp/foo"
"""
defmacro upload(role, local_path, remote_path) do
{roles, filters} = Role.split_roles_and_filters(role)
roles = Role.unpack_role(roles)
quote bind_quoted: binding() do
Enum.each(roles, fn role ->
role
|> SSH.init([], filters)
|> SSH.upload(local_path, remote_path)
end)
end
end
@doc """
Downloads files from remote hosts to the local machine.
Downloading works much like `remote/3`, but instead of transferring shell commands over SSH,
it transfers files via SCP. The remote host does need to support SCP, which should be provided
by most SSH implementations automatically.
`role` can either be a single role name, a list of roles, or a list of roles and filter
attributes. The special `:all` role is also supported. See `remote/3` for details. Note that
if multiple hosts match, files will be downloaded from all matching hosts, and any duplicate
file names will result in collisions. The exact semantics of how that works are handled by
`SSHKit.SCP`, but in general the file transferred last wins.
`local_path` is a path to local directory or file where the downloaded files(s) should be placed.
Absolute paths will be respected, relative paths will be resolved relative to the current working
directory of the invoking shell. If the `local_path` does not exist in the local file system, an
attempt will be made to create the missing directory. This does not handle nested directories,
and a `File.Error` will be raised.
`remote_path` is the file or directory to be copied from the remote hosts. If a directory is
specified, its contents will be recursively copied. Relative paths will be resolved relative to
the remote workspace, absolute paths will be respected.
The files on the local host are created using the current user's `uid`/`gid` and `umask`.
```
use Bootleg.DSL
# copies ./my_file from the remote host to ./new_name locally
download :app, "my_file", "new_name"
# copies ./my_file from the remote host to the file ./a_dir/my_file locally
download :app, "my_file", "a_dir"
# recursively copies ./some_dir on the remote host to ./new_dir locally, ./new_dir
# will be created if missing
download :app, "some_dir", "new_dir"
# copies /foo/my_file on the remote host to /tmp/foo locally
download :app, "/foo/my_file", "/tmp/foo"
"""
defmacro download(role, remote_path, local_path) do
{roles, filters} = Role.split_roles_and_filters(role)
roles = Role.unpack_role(roles)
quote bind_quoted: binding() do
Enum.each(roles, fn role ->
role
|> SSH.init([], filters)
|> SSH.download(remote_path, local_path)
end)
end
end
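@doc """
Loads another Bootleg configuration file, evaluating its DSL statements as
part of the current configuration.
"""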
defmacro load(file) do
quote bind_quoted: binding() do
Config.load(file)
end
end
end
|
lib/bootleg/dsl.ex
|
defmodule Mop8.Bot.WordMap do
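@moduledoc """
Word statistics for Markov-style sentence generation. For each word, the map
tracks how often it starts an input (`heads`), how often it ends one
(`tails`), its total occurrence `count`, and the words observed immediately
after it (`nexts`). `build_sentence/2` then walks these stats with a selector
function to produce a new word sequence.
"""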
require Logger
alias Mop8.Bot.Ngram
alias Mop8.Bot.Selector
@opaque t() :: %{
String.t() => %{
heads: non_neg_integer(),
tails: non_neg_integer(),
count: pos_integer(),
nexts: [String.t()]
}
}
@spec new() :: t()
def new() do
%{}
end
@spec put(t(), Ngram.words()) :: t()
def put(word_map, words) when is_map(word_map) and is_list(words) do
put(word_map, words, true)
end
defp put(word_map, words, is_head) do
case words do
[] ->
word_map
[word] ->
word_map
|> update_by(word, fn
nil when is_head ->
%{heads: 1, tails: 1, count: 1, nexts: []}
nil ->
%{heads: 0, tails: 1, count: 1, nexts: []}
%{heads: heads, tails: tails, count: count} = stat when is_head ->
%{stat | heads: heads + 1, tails: tails + 1, count: count + 1}
%{tails: tails, count: count} = stat ->
%{stat | tails: tails + 1, count: count + 1}
end)
[word | [next_word | _] = rest] ->
word_map
|> update_by(word, fn
nil when is_head ->
%{heads: 1, tails: 0, count: 1, nexts: [next_word]}
nil ->
%{heads: 0, tails: 0, count: 1, nexts: [next_word]}
%{heads: heads, count: count, nexts: nexts} = stat when is_head ->
%{stat | heads: heads + 1, count: count + 1, nexts: Enum.uniq([next_word | nexts])}
%{count: count, nexts: nexts} = stat ->
%{stat | count: count + 1, nexts: Enum.uniq([next_word | nexts])}
end)
|> put(rest, false)
end
end
defp update_by(map, key, f) do
Map.put(map, key, f.(map[key]))
end
@spec build_sentence(t(), Selector.t()) :: {:ok, Ngram.words()} | {:error, :nothing_to_say}
def build_sentence(word_map, selector \\ &Selector.roulette/1) when is_map(word_map) do
with {:ok, word} <- select_first_word(word_map, selector) do
{:ok, build_sentence(word_map, selector, [word])}
else
{:error, :no_element} ->
{:error, :nothing_to_say}
end
end
def select_first_word(map, selector) do
map
|> Enum.filter(fn {_, %{heads: count}} -> count != 0 end)
|> Enum.map(fn {word, %{heads: count}} -> {word, count} end)
|> selector.()
end
defp build_sentence(word_map, selector, [head | _] = acc) do
if terminate?(word_map[head]) do
Enum.reverse(acc)
else
{:ok, word} =
Map.take(word_map, word_map[head][:nexts])
|> Enum.map(fn {word, %{count: count}} -> {word, count} end)
|> selector.()
build_sentence(word_map, selector, [word | acc])
end
end
defp terminate?(%{tails: tails, count: count}) do
# 0 <= uniform_real < 1
# always false if tails == 0
# always true if tails == count
:rand.uniform_real() < tails / count
end
end
|
lib/mop8/bot/word_map.ex
|
defmodule LineWalker do
# Return the overall distance walk to a point.
# nil when point NOT on the line.
def findPointDistanceOnLine([x, y| nextList], targetPoint, currentPoint \\ [0,0], currentDistance \\ 0) do
[tx, ty] = targetPoint
[cx, cy] = currentPoint
cond do
(ty == cy) && (MathUtil.inBetween(tx, cx, cx + x)) ->
# IO.inspect("case1: Find intercept on x movement")
currentDistance + abs(tx - cx)
(tx == cx + x) && (MathUtil.inBetween(ty, cy, cy + y)) ->
# IO.inspect("case2: Find intercept on y movement")
currentDistance + abs(x) + abs(ty - cy)
nextList == [] ->
IO.inspect("case5: Cannot find point on line!")
nil
length(nextList) == 1 ->
# IO.inspect("case3: end in x direction, will add a '0' movement for y.")
findPointDistanceOnLine(nextList ++ [0], targetPoint, [cx + x, cy + y], currentDistance + abs(x) + abs(y))
true ->
# IO.inspect("case4")
findPointDistanceOnLine(nextList, targetPoint, [cx + x, cy + y], currentDistance + abs(x) + abs(y))
end
end
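# Walks this line segment by segment and collects every point where it
# crosses the other line, whose horizontal and vertical segments are
# pre-indexed in line1MapX/line1MapY by their fixed coordinate.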
def findPointOnMaps([x, y | nextLines], line1MapX, line1MapY, currentPoint) do
[cx, cy] = currentPoint
keysX = Map.keys(line1MapX)
keysY = Map.keys(line1MapY)
interceptX = keysX
|> Enum.filter(fn key -> MathUtil.inBetween(key, cx, cx + x) end)
|> Enum.map(fn key ->
Map.get(line1MapX, key)
|> Enum.map(fn value ->
[startY, endY] = value
cond do
MathUtil.inBetween(cy, startY, endY) ->
[key, cy]
true -> nil
end
end)
end)
|> Enum.filter(fn list -> StructureUtil.notEmptyList(list) end)
interceptY = keysY
|> Enum.filter(fn key -> MathUtil.inBetween(key, cy, cy + y) end)
|> Enum.map(fn key ->
Map.get(line1MapY, key)
|> Enum.map(fn value ->
[startX, endX] = value
cond do
MathUtil.inBetween(cx + x, startX, endX) ->
[cx + x, key]
true -> nil
end
end)
end)
|> Enum.filter(fn list -> StructureUtil.notEmptyList(list) end)
returnValue = interceptX ++ interceptY
cond do
nextLines == [] ->
returnValue
length(nextLines) == 1 ->
returnValue ++ findPointOnMaps(nextLines ++ [0], line1MapX, line1MapY, [cx + x, cy + y])
true ->
returnValue ++ findPointOnMaps(nextLines, line1MapX, line1MapY, [cx + x, cy + y])
end
end
end
defmodule CrossedWires2 do
def createLineList(inputList) do
line1 = hd(inputList)
line2 = hd(tl(inputList))
line1 = cond do
String.first(hd(line1)) == "U" || String.first(hd(line1)) == "D" ->
["R0" | line1]
true -> line1
end
line2 = cond do
String.first(hd(line2)) == "U" || String.first(hd(line2)) == "D" ->
["R0" | line2]
true -> line2
end
line1List = Enum.map(line1, &stringLineToInt/1)
line2List = Enum.map(line2, &stringLineToInt/1)
{line1MapX, line1MapY} = CrossedWires.linesToMapRecursive(line1, 0, %{}, %{}, {0, 0})
# {line2MapX, line2MapY} = CrossedWires.linesToMapRecursive(line2, 0, %{}, %{}, {0, 0})
pointList = LineWalker.findPointOnMaps(line2List, line1MapX, line1MapY, [0, 0])
# pointList2 = LineWalker.findPointOnMaps(line1List, line2MapX, line2MapY, [0, 0])
pointList
|> Enum.map(fn points ->
cond do
is_list(points) ->
[[x, y]] = points
line1ToPoint = LineWalker.findPointDistanceOnLine(line1List, [x, y])
line2ToPoint = LineWalker.findPointDistanceOnLine(line2List, [x, y])
line1ToPoint + line2ToPoint
true ->
IO.inspect("Failed to walk to point")
nil
end
end)
# |> IO.inspect
end
def stringLineToInt(line) do
{direction, distance} = String.split_at(line, 1)
distance = String.to_integer(distance)
cond do
direction == "R" || direction == "U" ->
distance
direction == "L" || direction == "D" ->
-1 * distance
end
end
end
|
lib/day3-2.ex
|
defmodule Mazes.Maze do
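@moduledoc """
Behaviour and helper functions for mazes stored as an adjacency matrix,
where a `true` entry between two vertices means the wall between them has
been removed.
"""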
@type vertex :: {integer, integer}
@type maze :: %{
adjacency_matrix: %{vertex => %{vertex => boolean}},
from: vertex,
to: vertex,
module: atom
}
@callback new(keyword) :: maze
@callback center(maze) :: vertex
@spec vertices(maze) :: [vertex]
def vertices(maze) do
Map.keys(maze.adjacency_matrix)
|> Enum.sort(&sorter/2)
end
defp sorter({x1, y1}, {x2, y2}) do
if y1 == y2 do
x1 < x2
else
y1 < y2
end
end
@doc "Returns all neighboring vertices that do not have a wall between themselves and the given one"
@spec adjacent_vertices(maze, vertex) :: [vertex]
def adjacent_vertices(maze, from) do
maze.adjacency_matrix[from]
|> Enum.filter(fn {_, adjacency} -> adjacency end)
|> Enum.map(fn {cell, _} -> cell end)
|> Enum.sort(&sorter/2)
end
@doc "Returns all neighboring vertices regardless of whether there is a wall or not"
@spec neighboring_vertices(maze, vertex) :: [vertex]
def neighboring_vertices(maze, from) do
maze.adjacency_matrix[from]
|> Enum.map(fn {cell, _} -> cell end)
|> Enum.sort(&sorter/2)
end
@doc "Groups vertices by the number of adjacent vertices they have"
@spec group_vertices_by_adjacent_count(maze) :: %{integer => [vertex]}
def group_vertices_by_adjacent_count(maze) do
vertices(maze)
|> Enum.group_by(fn vertex ->
length(adjacent_vertices(maze, vertex))
end)
end
@spec wall?(maze, vertex, vertex) :: boolean
def wall?(maze, from, to), do: !maze.adjacency_matrix[from][to]
@spec put_wall(maze, vertex, vertex) :: maze
def put_wall(maze, from, to), do: set_adjacency(maze, from, to, false)
@spec remove_wall(maze, vertex, vertex) :: maze
def remove_wall(maze, from, to), do: set_adjacency(maze, from, to, true)
defp set_adjacency(maze, from, to, value) do
adjacency_matrix =
maze.adjacency_matrix
|> put_in([from, to], value)
|> put_in([to, from], value)
%{maze | adjacency_matrix: adjacency_matrix}
end
end
|
lib/mazes/maze.ex
|
defmodule Horde.Registry do
@moduledoc """
A distributed process registry.
Horde.Registry implements a distributed Registry backed by an add-wins last-write-wins δ-CRDT (provided by `DeltaCrdt.AWLWWMap`). This CRDT is used for both tracking membership of the cluster and implementing the registry functionality itself. Local changes to the registry will automatically be synced to other nodes in the cluster.
Because of the semantics of an AWLWWMap, the guarantees provided by Horde.Registry are more relaxed than those provided by the standard library Registry. Conflicts will be automatically silently resolved by the underlying AWLWWMap.
Cluster membership is managed with `Horde.Cluster`. Joining a cluster can be done with `Horde.Cluster.set_members/2`. To take a node out of the cluster, call `Horde.Cluster.set_members/2` without that node in the list.
Horde.Registry supports the common "via tuple", described in the [documentation](https://hexdocs.pm/elixir/GenServer.html#module-name-registration) for `GenServer`.
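For example (a minimal sketch; `MyApp.GlobalRegistry` and `MyServer` are illustrative names):
```elixir
name = {:via, Horde.Registry, {MyApp.GlobalRegistry, "user_1"}}
GenServer.start_link(MyServer, [], name: name)
```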
## Module-based Registry
Horde supports module-based registries to enable dynamic runtime configuration.
```elixir
defmodule MyRegistry do
use Horde.Registry
def init(options) do
{:ok, Keyword.put(options, :members, get_members())}
end
defp get_members() do
# ...
end
end
```
Then you can use `MyRegistry.child_spec/1` and `MyRegistry.start_link/1` in the same way as you'd use `Horde.Registry.child_spec/1` and `Horde.Registry.start_link/1`.
"""
@callback init(options :: Keyword.t()) :: {:ok, options :: Keyword.t()}
defmacro __using__(_opts) do
quote do
@behaviour Horde.Registry
def child_spec(options) do
options = Keyword.put_new(options, :id, __MODULE__)
%{
id: Keyword.get(options, :id, __MODULE__),
start: {__MODULE__, :start_link, [options]},
type: :supervisor
}
end
def start_link(options) do
Horde.Registry.start_link(Keyword.put(options, :init_module, __MODULE__))
end
end
end
@doc """
Child spec to enable easy inclusion into a supervisor.
Example:
```elixir
supervise([
Horde.Registry
])
```
Example:
```elixir
supervise([
{Horde.Registry, [name: MyApp.GlobalRegistry]}
])
```
"""
@spec child_spec(options :: list()) :: Supervisor.child_spec()
def child_spec(options \\ []) do
options = Keyword.put_new(options, :id, __MODULE__)
%{
id: Keyword.get(options, :id, __MODULE__),
start: {__MODULE__, :start_link, [options]},
type: :supervisor
}
end
@doc "Starts the registry as a supervised process"
def start_link(options) do
root_name = Keyword.get(options, :name)
case Keyword.get(options, :keys) do
:unique -> nil
_other -> raise ArgumentError, "Only `keys: :unique` is supported."
end
if is_nil(root_name) do
raise "must specify :name in options, got: #{inspect(options)}"
end
options = Keyword.put(options, :root_name, root_name)
options = Keyword.put_new(options, :members, [root_name])
Supervisor.start_link(Horde.RegistrySupervisor, options, name: :"#{root_name}.Supervisor")
end
@spec stop(Supervisor.supervisor(), reason :: term(), timeout()) :: :ok
def stop(supervisor, reason \\ :normal, timeout \\ 5000) do
Supervisor.stop(supervisor, reason, timeout)
end
### Public API
@doc """
Register a process under the given name
When 2 clustered registries register the same name at exactly the
same time, it will seem like name registration succeeds for both
registries. The function returns `{:ok, pid}` for both of these
calls.
However, due to the eventual consistent nature of the CRDT, a
conflict resolution will take place, and the CRDT will pick one of
the two processes as the "winner" of the name. The losing process
will be sent an exit signal (using `Process.exit/2`) with the
following reason:
`{:name_conflict, {name, value}, registry_name, winning_pid}`
When two registries are joined using `Horde.Cluster.set_members/2`,
this name conflict message can also occur.
"""
@spec register(
registry :: GenServer.server(),
name :: Registry.key(),
value :: Registry.value()
) :: {:ok, pid()} | {:error, :already_registered, pid()}
def register(registry, name, value) when is_atom(registry) do
case :ets.lookup(keys_ets_table(registry), name) do
[] ->
GenServer.call(registry, {:register, name, value, self()})
[{^name, _member, {pid, _value}}] ->
{:error, {:already_registered, pid}}
end
end
@doc "unregister the process under the given name"
@spec unregister(registry :: GenServer.server(), name :: GenServer.name()) :: :ok
def unregister(registry, name) when is_atom(registry) do
GenServer.call(registry, {:unregister, name, self()})
end
@doc false
def whereis(search), do: lookup(search)
@doc false
def lookup({:via, _, {registry, name}}), do: lookup(registry, name)
@doc "Finds the `{pid, value}` for the given `key` in `registry`"
def lookup(registry, key) when is_atom(registry) do
with [{^key, member, {pid, value}}] <- :ets.lookup(keys_ets_table(registry), key),
true <- member_in_cluster?(registry, member),
true <- process_alive?(pid) do
[{pid, value}]
else
_ -> :undefined
end
end
@spec meta(registry :: Registry.registry(), key :: Registry.meta_key()) ::
{:ok, Registry.meta_value()} | :error
@doc "Reads registry metadata given on `start_link/3`"
def meta(registry, key) when is_atom(registry) do
case :ets.lookup(registry_ets_table(registry), key) do
[{^key, value}] -> {:ok, value}
_ -> :error
end
end
@spec put_meta(
registry :: Registry.registry(),
key :: Registry.meta_key(),
value :: Registry.meta_value()
) :: :ok
def put_meta(registry, key, value) when is_atom(registry) do
GenServer.call(registry, {:put_meta, key, value})
end
@spec count(registry :: Registry.registry()) :: non_neg_integer()
@doc "Returns the number of keys in a registry. It runs in constant time."
def count(registry) when is_atom(registry) do
:ets.info(keys_ets_table(registry), :size)
end
@doc "See `Registry.match/4` for details."
def match(registry, key, pattern, guards \\ [])
when is_atom(registry) and is_list(guards) do
underscore_guard = {:"=:=", {:element, 1, :"$_"}, {:const, key}}
spec = [{{:_, :_, {:_, pattern}}, [underscore_guard | guards], [{:element, 3, :"$_"}]}]
:ets.select(keys_ets_table(registry), spec)
end
def count_match(registry, key, pattern, guards \\ [])
when is_atom(registry) and is_list(guards) do
underscore_guard = {:"=:=", {:element, 1, :"$_"}, {:const, key}}
spec = [{{:_, :_, {:_, pattern}}, [underscore_guard | guards], [true]}]
:ets.select_count(keys_ets_table(registry), spec)
end
def unregister_match(registry, key, pattern, guards \\ [])
when is_atom(registry) and is_list(guards) do
pid = self()
underscore_guard = {:"=:=", {:element, 1, :"$_"}, {:const, key}}
spec = [{{:_, :_, {pid, pattern}}, [underscore_guard | guards], [:"$_"]}]
:ets.select(keys_ets_table(registry), spec)
|> Enum.each(fn {key, _member, {pid, _val}} ->
GenServer.call(registry, {:unregister, key, pid})
end)
:ok
end
@spec keys(registry :: Registry.registry(), pid()) :: [Registry.key()]
@doc "Returns registered keys for `pid`"
def keys(registry, pid) when is_atom(registry) do
case :ets.lookup(pids_ets_table(registry), pid) do
[] -> []
[{_pid, matches}] -> matches
end
end
def dispatch(registry, key, mfa_or_fun, _opts \\ []) when is_atom(registry) do
case :ets.lookup(keys_ets_table(registry), key) do
[] ->
:ok
[{_key, _member, pid_value}] ->
do_dispatch(mfa_or_fun, [pid_value])
:ok
end
end
defp do_dispatch({m, f, a}, entries), do: apply(m, f, [entries | a])
defp do_dispatch(fun, entries), do: fun.(entries)
def update_value(registry, key, callback) when is_atom(registry) do
case :ets.lookup(keys_ets_table(registry), key) do
[{key, _member, {pid, value}}] when pid == self() ->
new_value = callback.(value)
:ok = GenServer.call(registry, {:update_value, key, pid, new_value})
{new_value, value}
_ ->
:error
end
end
@doc """
Get the process registry of the horde
"""
@deprecated "Use `select/2` instead."
def processes(registry) when is_atom(registry) do
:ets.match(keys_ets_table(registry), :"$1") |> Map.new(fn [{k, _m, v}] -> {k, v} end)
end
@doc """
Select key, pid, and values from the process registry of the horde
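A minimal sketch using the same match-spec shape as `Registry.select/2`,
returning every registered key:

    Horde.Registry.select(registry, [{{:"$1", :_, :_}, [], [:"$1"]}])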
"""
def select(registry, spec) when is_atom(registry) and is_list(spec) do
spec =
for part <- spec do
case part do
{{key, pid, value}, guards, select} ->
{{key, :_, {pid, value}}, guards, select}
_ ->
raise ArgumentError,
"invalid match specification in Registry.select/2: #{inspect(spec)}"
end
end
:ets.select(keys_ets_table(registry), spec)
end
### Via callbacks
@doc false
# @spec register_name({pid, term}, pid) :: :yes | :no
def register_name({registry, key}, pid), do: register_name({registry, key, nil}, pid)
def register_name({registry, key, value}, pid) when is_atom(registry) do
case GenServer.call(registry, {:register, key, value, pid}) do
{:ok, _pid} -> :yes
{:error, _} -> :no
end
end
@doc false
# @spec whereis_name({pid, term}) :: pid | :undefined
def whereis_name({registry, name}) when is_atom(registry) do
case lookup(registry, name) do
:undefined -> :undefined
[{pid, _val}] -> pid
end
end
@doc false
def unregister_name({registry, name}), do: unregister(registry, name)
@doc false
def send({registry, name}, msg) when is_atom(registry) do
case lookup(registry, name) do
:undefined -> :erlang.error(:badarg, [{registry, name}, msg])
[{pid, _value}] -> Kernel.send(pid, msg)
end
end
defp process_alive?(pid) when node(pid) == node(), do: Process.alive?(pid)
defp process_alive?(pid) do
n = node(pid)
Node.list() |> Enum.member?(n) && :rpc.call(n, Process, :alive?, [pid])
end
defp member_in_cluster?(registry, member) do
case :ets.lookup(members_ets_table(registry), member) do
[] -> false
_ -> true
end
end
defp registry_ets_table(registry), do: registry
defp pids_ets_table(registry), do: :"pids_#{registry}"
defp keys_ets_table(registry), do: :"keys_#{registry}"
defp members_ets_table(registry), do: :"members_#{registry}"
end
|
lib/horde/registry.ex
|
defmodule Util.LogRotator do
@vsn "0.1.0"
use GenServer
require Logger
@msg :log_rotate
@shift 10
@min_diff 1000
@moduledoc """
Handles daily log rotation for `:logger_file_backend`.
`app.ex`:
```elixir
defmodule App do
use Application
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [
Util.LogRotator.child_spec(:error_log)
]
Supervisor.start_link(children, strategy: :one_for_one, name: :supervisor)
end
end
```
`config.exs`:
```elixir
use Mix.Config
config :logger,
backends: [{LoggerFileBackend, :error_log}, :console]
# configuration for the {LoggerFileBackend, :error_log} backend
config :logger, :error_log,
path: "log/inst.log",
format: "$date $time $metadata[$level] $levelpad$message\n",
metadata: [:line],
level: :info
# level: :error
config :logger, :console,
format: "$date $time $metadata[$level] $levelpad$message\n",
metadata: [:line]
```
@vsn "#{@vsn}"
"""
defstruct logname: nil, date: nil, name: nil
@typedoc """
- `logname`: the name of the log backend to rotate.
- `date`: the date of the current log file.
- `name`: the current log file path, or `nil` before the first rotation.
"""
@type t :: %__MODULE__{
logname: atom,
date: Date.t(),
name: String.t() | nil
}
@spec start_link(atom) :: GenServer.on_start()
def start_link(logname) do
GenServer.start_link(__MODULE__, [logname])
end
@spec speci(atom) :: Supervisor.child_spec()
def speci(logname) do
%{
id: logname,
start: {__MODULE__, :start_link, [logname]}
}
end
@impl true
@spec init([atom]) :: {:ok, t}
def init([logname]) do
date = Timex.local() |> Timex.shift(days: -3) |> Timex.to_date()
s = %__MODULE__{logname: logname, date: date}
s = rotate(s)
{:ok, s}
end
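# Repoints the LoggerFileBackend at a date-stamped file when the day changes,
# then schedules the next rotation for shortly after midnight (`@shift` ms
# past the day boundary, but at least `@min_diff` ms from now).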
@spec rotate(t) :: t
def rotate(s) do
logname = s.logname
datetime = Timex.local()
date = datetime |> Timex.to_date()
s =
if date != s.date do
oldname = s.name
kl = Application.get_env(:logger, logname)
path = Keyword.get(kl, :path, "file#{logname}")
oldname = if oldname == nil, do: path, else: oldname
path = path |> String.replace(~r/-\d\d\d\d-\d\d-\d\d-/, "") |> String.replace(~r/\.[^.]*$/, "")
{:ok, frm} = Timex.format(date, "%Y-%m-%d", :strftime)
path = "#{path}-#{frm}-.log"
kl = Keyword.put(kl, :path, path)
Logger.configure_backend({LoggerFileBackend, logname}, kl)
Logger.warn("| LOG_ROTATOR | rotate | #{date} | new | #{path} | old | #{oldname} |")
%{s | date: date, name: path}
else
Logger.warn("| LOG_ROTATOR | still | #{date} |")
s
end
# Reschedule
d2 = datetime |> Timex.shift(days: 1) |> Timex.beginning_of_day() |> Timex.shift(milliseconds: @shift)
diff = Timex.diff(d2, datetime, :milliseconds)
diff = if diff < @min_diff, do: @min_diff, else: diff
Logger.warn("| LOG_ROTATOR | next_rotate | #{d2} |")
Process.send_after(self(), @msg, diff)
s
end
@impl true
def handle_info(msg, s) do
s =
case msg do
@msg -> rotate(s)
_ -> s
end
{:noreply, s}
end
end
|
lib/util/log_rotator.ex
|
defmodule Kaffe.Subscriber do
@moduledoc """
Consume messages from a single partition of a single Kafka topic.
Assignments are received from a group consumer member, `Kaffe.GroupMember`.
Messages are delegated to `Kaffe.Worker`. The worker is expected to cast back
a response, at which time the stored offset will be acked back to Kafka.
The options (`ops`) to `subscribe/7` may include the beginning offset
using `:begin_offset`.
The subscriber reads the following options out of the configuration:
- `max_bytes` - The maximum number of message bytes to receive in a batch
- `min_bytes` - The minimum number of message bytes to receive in a batch
- `max_wait_time` - Maximum number of milliseconds broker will wait for `:min_bytes` of messages
to be collected
- `offset_reset_policy` - The native `auto.offset.reset` option,
either `:reset_to_earliest` or `:reset_to_latest`.
See: https://github.com/klarna/brucke/blob/master/src/brucke_member.erl
Also: https://github.com/klarna/brod/blob/master/src/brod_consumer.erl
"""
use GenServer
require Logger
require Record
import Record, only: [defrecord: 2, extract: 2]
defrecord :kafka_message_set, extract(:kafka_message_set, from_lib: "brod/include/brod.hrl")
defrecord :kafka_message, extract(:kafka_message, from_lib: "brod/include/brod.hrl")
defmodule State do
defstruct subscriber_pid: nil,
group_coordinator_pid: nil,
gen_id: nil,
worker_pid: nil,
subscriber_name: nil,
topic: nil,
partition: nil,
subscribe_ops: nil,
retries_remaining: nil
end
## ==========================================================================
## Public API
## ==========================================================================
def subscribe(subscriber_name, group_coordinator_pid, worker_pid, gen_id, topic, partition, ops) do
GenServer.start_link(
__MODULE__,
[
subscriber_name,
group_coordinator_pid,
worker_pid,
gen_id,
topic,
partition,
ops
],
name: name(subscriber_name, topic, partition)
)
end
def stop(subscriber_pid) do
Logger.info("event#stopping=#{inspect(self())}")
GenServer.stop(subscriber_pid)
end
def commit_offsets(subscriber_pid, topic, partition, generation_id, offset) do
GenServer.cast(subscriber_pid, {:commit_offsets, topic, partition, generation_id, offset})
end
def request_more_messages(subscriber_pid, offset) do
GenServer.cast(subscriber_pid, {:request_more_messages, offset})
end
## ==========================================================================
## GenServer callbacks
## ==========================================================================
def init([subscriber_name, group_coordinator_pid, worker_pid, gen_id, topic, partition, ops]) do
send(self(), {:subscribe_to_topic_partition})
{:ok,
%State{
group_coordinator_pid: group_coordinator_pid,
worker_pid: worker_pid,
gen_id: gen_id,
subscriber_name: subscriber_name,
topic: topic,
partition: partition,
subscribe_ops: ops ++ subscriber_ops(),
retries_remaining: max_retries()
}}
end
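# A batch of messages arrived from brod for our topic/partition: convert the
# brod records into plain maps and hand them to the worker, which casts back
# so the offset can be committed via `commit_offsets/5`.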
def handle_info(
{_pid, {:kafka_message_set, topic, partition, _high_wm_offset, _messages} = message_set},
state
) do
^topic = state.topic
^partition = state.partition
messages =
message_set
|> kafka_message_set
|> Enum.into(%{})
|> Map.get(:messages)
|> Enum.map(fn message ->
compile_message(message, state.topic, state.partition)
end)
Logger.debug("Sending #{Enum.count(messages)} messages to worker: #{inspect(state.worker_pid)}")
worker().process_messages(state.worker_pid, self(), topic, partition, state.gen_id, messages)
{:noreply, state}
end
def handle_info(
{:subscribe_to_topic_partition},
%{
subscriber_name: subscriber_name,
topic: topic,
partition: partition,
subscribe_ops: ops
} = state
) do
kafka().subscribe(subscriber_name, self(), topic, partition, ops)
|> handle_subscribe(state)
end
def handle_info({_pid, {:kafka_fetch_error, topic, partition, code, reason} = error}, state) do
Logger.info(
"event#kafka_fetch_error=#{inspect(self())} topic=#{topic} partition=#{partition} code=#{inspect(code)} reason=#{
inspect(reason)
}"
)
{:stop, {:shutdown, error}, state}
end
def handle_info({:DOWN, _ref, _process, pid, reason}, %{subscriber_pid: subscriber_pid} = state)
when pid == subscriber_pid do
Logger.warn("event#consumer_down=#{inspect(self())} reason=#{inspect(reason)}")
{:stop, {:shutdown, {:consumer_down, reason}}, state}
end
def handle_info(unknown, state) do
Logger.warn("event#unknown_message=#{inspect(self())} reason=#{inspect(unknown)}")
{:noreply, state}
end
def handle_cast({:commit_offsets, topic, partition, generation_id, offset}, state) do
Logger.debug(
"event#commit_offsets topic=#{state.topic} partition=#{state.partition} offset=#{offset} generation=#{
generation_id
}"
)
# Is this the ack we're looking for?
^topic = state.topic
^partition = state.partition
^generation_id = state.gen_id
# Update the offsets in the group
:ok =
group_coordinator().ack(
state.group_coordinator_pid,
state.gen_id,
state.topic,
state.partition,
offset
)
{:noreply, state}
end
def handle_cast({:request_more_messages, offset}, state) do
Logger.debug("event#request_more_messages topic=#{state.topic} partition=#{state.partition} offset=#{offset}")
:ok = kafka().consume_ack(state.subscriber_pid, offset)
{:noreply, state}
end
defp handle_subscribe({:ok, subscriber_pid}, state) do
Logger.debug("Subscribe success: #{inspect(subscriber_pid)}")
Process.monitor(subscriber_pid)
{:noreply, %{state | subscriber_pid: subscriber_pid}}
end
defp handle_subscribe({:error, reason}, %{retries_remaining: retries_remaining} = state)
when retries_remaining > 0 do
Logger.debug("Failed to subscribe with reason: #{inspect(reason)}, #{retries_remaining} retries remaining")
Process.send_after(self(), {:subscribe_to_topic_partition}, retry_delay())
{:noreply, %{state | retries_remaining: retries_remaining - 1}}
end
defp handle_subscribe({:error, reason}, state) do
Logger.warn("event#subscribe_failed=#{inspect(self())} reason=#{inspect(reason)}")
{:stop, {:subscribe_failed, :retries_exceeded, reason}, state}
end
## ==========================================================================
## Private helpers
## ==========================================================================
defp compile_message(msg, topic, partition) do
Map.merge(%{topic: topic, partition: partition}, kafka_message_to_map(msg))
end
defp kafka_message_to_map(msg) do
Enum.into(kafka_message(msg), %{})
end
defp kafka do
Application.get_env(:kaffe, :kafka_mod, :brod)
end
defp group_coordinator do
Application.get_env(:kaffe, :group_coordinator_mod, :brod_group_coordinator)
end
defp worker do
Application.get_env(:kaffe, :worker_mod, Kaffe.Worker)
end
defp subscriber_ops do
[
max_bytes: Kaffe.Config.Consumer.configuration().max_bytes,
min_bytes: Kaffe.Config.Consumer.configuration().min_bytes,
max_wait_time: Kaffe.Config.Consumer.configuration().max_wait_time,
offset_reset_policy: Kaffe.Config.Consumer.configuration().offset_reset_policy
]
end
defp max_retries do
Kaffe.Config.Consumer.configuration().subscriber_retries
end
defp retry_delay do
Kaffe.Config.Consumer.configuration().subscriber_retry_delay_ms
end
defp name(subscriber_name, topic, partition) do
:"#{__MODULE__}.#{subscriber_name}.#{topic}.#{partition}"
end
end
|
lib/kaffe/consumer_group/subscriber/subscriber.ex
|
defmodule AWS.Route53RecoveryCluster do
@moduledoc """
Welcome to the Amazon Route 53 Application Recovery Controller API Reference
Guide for Recovery Control Data Plane.
Recovery control in Route 53 Application Recovery Controller includes extremely
reliable routing controls that enable you to recover applications by rerouting
traffic, for example, across Availability Zones or AWS Regions. Routing controls
are simple on/off switches hosted on a cluster. A cluster is a set of five
redundant regional endpoints against which you can execute API calls to update
or get the state of routing controls. You use routing controls to failover
traffic to recover your application across Availability Zones or Regions.
This API guide includes information about how to get and update routing control
states in Route 53 Application Recovery Controller.
For more information about Route 53 Application Recovery Controller, see the
following:
* You can create clusters, routing controls, and control panels by
using the control plane API for Recovery Control. For more information, see
[Amazon Route 53 Application Recovery Controller Recovery Control API Reference](https://docs.aws.amazon.com/recovery-cluster/latest/api/).
* Route 53 Application Recovery Controller also provides continuous
readiness checks to ensure that your applications are scaled to handle failover
traffic. For more information about the related API actions, see [Amazon Route 53 Application Recovery Controller Recovery Readiness API
Reference](https://docs.aws.amazon.com/recovery-readiness/latest/api/).
* For more information about creating resilient applications and
preparing for recovery readiness with Route 53 Application Recovery Controller,
see the [Amazon Route 53 Application Recovery Controller Developer Guide](r53recovery/latest/dg/).
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2019-12-02",
content_type: "application/x-amz-json-1.0",
credential_scope: nil,
endpoint_prefix: "route53-recovery-cluster",
global?: false,
protocol: "json",
service_id: "Route53 Recovery Cluster",
signature_version: "v4",
signing_name: "route53-recovery-cluster",
target_prefix: "ToggleCustomerAPI"
}
end
@doc """
Get the state for a routing control.
A routing control is a simple on/off switch that you can use to route traffic to
cells. When the state is On, traffic flows to a cell. When it's off, traffic
does not flow.
Before you can create a routing control, you first must create a cluster to host
the control. For more information, see
[CreateCluster](https://docs.aws.amazon.com/recovery-cluster/latest/api/cluster.html). Access one of the endpoints for the cluster to get or update the routing control
state to redirect traffic.
For more information about working with routing controls, see [Routing
control](https://docs.aws.amazon.com/r53recovery/latest/dg/routing-control.html)
in the Route 53 Application Recovery Controller Developer Guide.
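
A minimal sketch (the credentials, region, and ARN below are illustrative
placeholders, not real values):

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-west-2")
    {:ok, result, _response} =
      AWS.Route53RecoveryCluster.get_routing_control_state(client, %{
        "RoutingControlArn" => "arn:aws:route53-recovery-control::111122223333:controlpanel/example/routingcontrol/example"
      })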
"""
def get_routing_control_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetRoutingControlState", input, options)
end
@doc """
Set the state of the routing control to reroute traffic.
You can set the value to be On or Off. When the state is On, traffic flows to a
cell. When it's off, traffic does not flow.
For more information about working with routing controls, see [Routing control](https://docs.aws.amazon.com/r53recovery/latest/dg/routing-control.html)
in the Route 53 Application Recovery Controller Developer Guide.
"""
def update_routing_control_state(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRoutingControlState", input, options)
end
@doc """
Set multiple routing control states.
You can set the value for each state to be On or Off. When the state is On,
traffic flows to a cell. When it's off, traffic does not flow.
For more information about working with routing controls, see [Routing control](https://docs.aws.amazon.com/r53recovery/latest/dg/routing-control.html)
in the Route 53 Application Recovery Controller Developer Guide.
"""
def update_routing_control_states(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateRoutingControlStates", input, options)
end
end
|
lib/aws/generated/route53_recovery_cluster.ex
|