defmodule NervesKey.Data do
@moduledoc """
This module handles Data Zone data stored in the Nerves Key.
"""
@doc """
Create a public/private key pair
The public key is returned on success. This can only be called on devices that
have their configuration locked, but not their data.
"""
@spec genkey(ATECC508A.Transport.t()) :: {:ok, X509.PublicKey.t()} | {:error, atom()}
def genkey(transport) do
with {:ok, raw_key} <- ATECC508A.Request.genkey(transport, 0, true) do
{:ok, ATECC508A.Certificate.raw_to_public_key(raw_key)}
end
end
@doc """
Determine what's in all of the data slots
"""
@spec slot_data(ATECC508A.serial_number(), X509.Certificate.t(), X509.Certificate.t()) :: [
{ATECC508A.Request.slot(), binary()}
]
def slot_data(device_sn, device_cert, signer_cert) do
signer_template =
signer_cert
|> X509.Certificate.public_key()
|> ATECC508A.Certificate.Template.signer()
signer_compressed = ATECC508A.Certificate.compress(signer_cert, signer_template)
device_template =
ATECC508A.Certificate.Template.device(device_sn, signer_compressed.public_key)
device_compressed = ATECC508A.Certificate.compress(device_cert, device_template)
# See README.md for slot contents. Unused slots still need to be programmed
# in order to lock the device, so specify empty data for them and they'll be
# padded with zeros to the appropriate slot size.
[
{1, <<>>},
{2, <<>>},
{3, <<>>},
{4, <<>>},
{5, <<>>},
{6, <<>>},
{7, <<>>},
{8, <<>>},
{9, <<>>},
{10, device_compressed.data},
{11, signer_compressed.public_key},
{12, signer_compressed.data},
{13, <<>>},
{14, <<>>},
{15, <<>>}
]
|> Enum.map(fn {slot, data} -> {slot, ATECC508A.DataZone.pad_to_slot_size(slot, data)} end)
end
@doc """
Write all of the slots
"""
@spec write_slots(ATECC508A.Transport.t(), [{ATECC508A.Request.slot(), binary()}]) ::
:ok | {:error, atom()}
def write_slots(transport, slot_data) do
Enum.each(slot_data, fn {slot, data} ->
:ok = ATECC508A.DataZone.write_padded(transport, slot, data)
end)
end
@doc """
Lock the OTP and data zones.
There's no going back!
"""
@spec lock(ATECC508A.Transport.t(), binary(), [{ATECC508A.Request.slot(), binary()}]) ::
:ok | {:error, atom()}
def lock(transport, otp_data, slot_data) do
sorted_slot_data =
Enum.sort(slot_data, fn {slot1, _data1}, {slot2, _data2} -> slot1 < slot2 end)
all_data =
[Enum.map(sorted_slot_data, fn {_slot, data} -> data end), otp_data]
|> IO.iodata_to_binary()
ATECC508A.DataZone.lock(transport, all_data)
end
end
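# A minimal provisioning sketch, assuming a connected ATECC508A transport and
# hypothetical `transport`, `device_sn`, `device_cert`, `signer_cert` and
# `otp_data` values:
#
#     {:ok, _public_key} = NervesKey.Data.genkey(transport)
#     slots = NervesKey.Data.slot_data(device_sn, device_cert, signer_cert)
#     :ok = NervesKey.Data.write_slots(transport, slots)
#     :ok = NervesKey.Data.lock(transport, otp_data, slots)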
# lib/nerves_key/data.ex
defmodule Adventofcode.Day10SyntaxScoring do
use Adventofcode
alias __MODULE__.{Autocompleter, Parser, Part1, Part2, SyntaxChecker}
def part_1(input) do
input
|> Parser.parse()
|> Part1.solve()
end
def part_2(input) do
input
|> Parser.parse()
|> Part2.solve()
end
defmodule SyntaxChecker do
def analyze(line) do
line
|> Enum.reduce_while({[], 0}, fn
?(, {level, score} -> {:cont, {[?( | level], score}}
?[, {level, score} -> {:cont, {[?[ | level], score}}
?{, {level, score} -> {:cont, {[?{ | level], score}}
?<, {level, score} -> {:cont, {[?< | level], score}}
?), {[?( | level], score} -> {:cont, {level, score}}
?], {[?[ | level], score} -> {:cont, {level, score}}
?}, {[?{ | level], score} -> {:cont, {level, score}}
?>, {[?< | level], score} -> {:cont, {level, score}}
?), {level, score} -> {:halt, {level, score + 3}}
?], {level, score} -> {:halt, {level, score + 57}}
?}, {level, score} -> {:halt, {level, score + 1197}}
?>, {level, score} -> {:halt, {level, score + 25137}}
end)
|> (fn {level, score} -> {line, level, score} end).()
end
end
defmodule Part1 do
def solve(lines) do
lines
|> Enum.map(&SyntaxChecker.analyze/1)
|> Enum.map(fn {_, _, score} -> score end)
|> Enum.sum()
end
end
defmodule Autocompleter do
def complete({line, level, _}) do
completion = Enum.map(level, &do_complete/1)
{line ++ completion, completion_score(completion)}
end
def do_complete(?(), do: ?)
def do_complete(?[), do: ?]
def do_complete(?{), do: ?}
def do_complete(?<), do: ?>
defp completion_score(completion) do
completion
|> Enum.reduce(0, &(&2 * 5 + score(&1)))
end
defp score(?)), do: 1
defp score(?]), do: 2
defp score(?}), do: 3
defp score(?>), do: 4
end
defmodule Part2 do
def solve(state) do
state
|> Enum.map(&SyntaxChecker.analyze/1)
|> Enum.reject(fn {_, _, score} -> score > 0 end)
|> Enum.map(&Autocompleter.complete/1)
|> Enum.map(&elem(&1, 1))
|> Enum.sort()
|> (&Enum.at(&1, div(length(&1), 2))).()
end
end
defmodule Parser do
def parse(input) do
input
|> String.trim()
|> String.split("\n")
|> Enum.map(&to_charlist/1)
end
end
end
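# A quick sketch of how the pieces above compose (charlists, as produced by
# Parser.parse/1):
#
#     SyntaxChecker.analyze(~c"(]")
#     #=> {~c"(]", ~c"(", 57}    # corrupted line: the illegal `]` scores 57
#
#     ~c"[({" |> SyntaxChecker.analyze() |> Autocompleter.complete()
#     #=> {~c"[({})]", 82}       # incomplete line, completed with "})]"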
# lib/day_10_syntax_scoring.ex
defmodule Churn.Processor.CyclomaticComplexity do
@moduledoc """
Cyclomatic Complexity calculator, source code based (stolen?) from:
https://github.com/rrrene/credo/blob/master/lib/credo/check/refactor/cyclomatic_complexity.ex
"""
alias Churn.File, as: ChurnFile
@spec calculate(ChurnFile.t()) :: pos_integer()
def calculate(%ChurnFile{path: file_path}) do
file_path
|> File.read!()
|> Code.string_to_quoted!()
|> complexity_for()
end
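# For example, a hypothetical file containing:
#
#     defmodule Foo do
#       def bar(x), do: if(x, do: 1, else: 2)
#     end
#
# scores a complexity of 2: +1 for the `def` and +1 for the `if`.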
@def_ops [:def, :defp, :defmacro]
# these have two outcomes: it succeeds or does not
@double_condition_ops [:if, :unless, :for, :try, :and, :or, :&&, :||]
# these can have multiple outcomes as they are defined in their do blocks
@multiple_condition_ops [:case, :cond]
@op_complexity_map [
def: 1,
defp: 1,
defmacro: 1,
if: 1,
unless: 1,
for: 1,
try: 1,
and: 1,
or: 1,
&&: 1,
||: 1,
case: 1,
cond: 1
]
defp complexity_for({_def_op, _meta, _arguments} = ast) do
prewalk(ast, &traverse_complexity/2, 0)
end
for op <- @def_ops do
defp traverse_complexity(
{unquote(op) = op, _meta, arguments} = ast,
complexity
)
when is_list(arguments) do
{ast, complexity + @op_complexity_map[op]}
end
end
for op <- @double_condition_ops do
defp traverse_complexity(
{unquote(op) = op, _meta, arguments} = ast,
complexity
)
when is_list(arguments) do
{ast, complexity + @op_complexity_map[op]}
end
end
for op <- @multiple_condition_ops do
defp traverse_complexity({unquote(op), _meta, nil} = ast, complexity) do
{ast, complexity}
end
defp traverse_complexity(
{unquote(op) = op, _meta, arguments} = ast,
complexity
)
when is_list(arguments) do
block_cc =
arguments
|> do_block_for!()
|> do_block_complexity(op)
{ast, complexity + block_cc}
end
end
defp traverse_complexity(ast, complexity) do
{ast, complexity}
end
defp do_block_complexity(nil, _), do: 0
defp do_block_complexity(block, op) do
count =
block
|> List.wrap()
|> Enum.count()
count * @op_complexity_map[op]
end
defp prewalk(source_ast, fun, accumulator) do
{_, accumulated} = Macro.prewalk(source_ast, accumulator, fun)
accumulated
end
def do_block?(ast) do
case do_block_for(ast) do
{:ok, _block} ->
true
nil ->
false
end
end
def do_block_for!(ast) do
case do_block_for(ast) do
{:ok, block} ->
block
nil ->
nil
end
end
defp do_block_for({_atom, _meta, arguments}) when is_list(arguments) do
do_block_for(arguments)
end
defp do_block_for(do: block) do
{:ok, block}
end
defp do_block_for(arguments) when is_list(arguments) do
Enum.find_value(arguments, &find_keyword(&1, :do))
end
defp do_block_for(_) do
nil
end
defp find_keyword(list, keyword) when is_list(list) do
if Keyword.has_key?(list, keyword) do
{:ok, list[keyword]}
else
nil
end
end
defp find_keyword(_, _), do: nil
end
# lib/churn/processor/cyclomatic_complexity.ex
defmodule Utils.Graph do
def big_o_notation do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "x", type: :quantitative)
|> VegaLite.encode_field(:y, "y", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Big O Notation", type: :nominal)
|> Kino.VegaLite.new()
max_x = 5
initial_x = 2
linear = Enum.map(initial_x..max_x, &%{x: &1, y: &1, type: "O(n)"})
constant = Enum.map(initial_x..max_x, &%{x: &1, y: 1, type: "O(1)"})
polynomial = Enum.map(initial_x..max_x, &%{x: &1, y: &1 ** 2, type: "O(n^2)"})
logarithmic = Enum.map(initial_x..max_x, &%{x: &1, y: :math.log2(&1), type: "O(log n)"})
exponential = Enum.map(initial_x..max_x, &%{x: &1, y: 2 ** &1, type: "O(2^n)"})
factorial = Enum.map(initial_x..max_x, &%{x: &1, y: Math.factorial(&1), type: "O(n!)"})
Kino.VegaLite.push_many(widget, factorial)
Kino.VegaLite.push_many(widget, exponential)
Kino.VegaLite.push_many(widget, polynomial)
Kino.VegaLite.push_many(widget, linear)
Kino.VegaLite.push_many(widget, logarithmic)
Kino.VegaLite.push_many(widget, constant)
widget
end
def binary_search do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "number of elements", type: :quantitative)
|> VegaLite.encode_field(:y, "time", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Big O Notation", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 500
logn = for n <- init..max, do: %{"number of elements": n, time: :math.log2(n), type: "log(n)"}
Kino.VegaLite.push_many(widget, logn)
widget
end
def comprehension_big_o do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "number of elements", type: :quantitative)
|> VegaLite.encode_field(:y, "time", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Number Of Generators", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 5
n1 = for n <- init..max, do: %{"number of elements": n, time: n, type: "1"}
n2 = for n <- init..max, do: %{"number of elements": n, time: n ** 2, type: "2"}
n3 = for n <- init..max, do: %{"number of elements": n, time: n ** 3, type: "3"}
n4 = for n <- init..max, do: %{"number of elements": n, time: n ** 4, type: "4"}
Kino.VegaLite.push_many(widget, n1)
Kino.VegaLite.push_many(widget, n2)
Kino.VegaLite.push_many(widget, n3)
Kino.VegaLite.push_many(widget, n4)
widget
end
def exponential_complexity do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "x", type: :quantitative)
|> VegaLite.encode_field(:y, "y", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Exponential Growth", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 10
exponential = for n <- init..max, do: %{x: n, y: 2 ** n, type: "2^n"}
Kino.VegaLite.push_many(widget, exponential)
widget
end
def factorial_complexity do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "x", type: :quantitative)
|> VegaLite.encode_field(:y, "y", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Factorial Growth", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 5
factorial = for n <- init..max, do: %{x: n, y: Math.factorial(n), type: "n!"}
Kino.VegaLite.push_many(widget, factorial)
widget
end
def linear_complexity do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "number of elements", type: :quantitative)
|> VegaLite.encode_field(:y, "time", type: :quantitative)
|> VegaLite.encode_field(:color, "type", title: "Linear Growth", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 100
linear = for n <- init..max, do: %{"number of elements": n, time: n, type: "O(n)"}
Kino.VegaLite.push_many(widget, linear)
widget
end
def pigeon_beats_internet do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "data", type: :quantitative)
|> VegaLite.encode_field(:y, "time", type: :quantitative)
|> VegaLite.encode_field(:color, "type", title: "Linear Growth", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 200
internet = for n <- init..max, do: %{data: n, time: n, type: "Internet"}
pigeon = for n <- init..max, do: %{data: n, time: 100, type: "Pigeon"}
Kino.VegaLite.push_many(widget, internet)
Kino.VegaLite.push_many(widget, pigeon)
widget
end
def polynomial_complexity do
size = 600
widget =
VegaLite.new(width: size, height: size)
|> VegaLite.mark(:line)
|> VegaLite.encode_field(:x, "number of elements", type: :quantitative)
|> VegaLite.encode_field(:y, "time", type: :quantitative)
|> VegaLite.transform(groupby: ["color"], extent: [2500, 6500])
|> VegaLite.encode_field(:color, "type", title: "Polynomial Growth", type: :nominal)
|> Kino.VegaLite.new()
init = 1
max = 5
n2 = for n <- init..max, do: %{"number of elements": n, time: n ** 2, type: "n^2"}
n3 = for n <- init..max, do: %{"number of elements": n, time: n ** 3, type: "n^3"}
n4 = for n <- init..max, do: %{"number of elements": n, time: n ** 4, type: "n^4"}
Kino.VegaLite.push_many(widget, n2)
Kino.VegaLite.push_many(widget, n3)
Kino.VegaLite.push_many(widget, n4)
widget
end
end
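# A minimal usage sketch (assumes a Livebook session with :kino and :vega_lite
# installed, plus a `Math` module providing `factorial/1`): evaluating
#
#     Utils.Graph.big_o_notation()
#
# in a cell renders a Kino.VegaLite widget comparing the growth rates.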
# utils/lib/graph.ex
defmodule Phoenix.PubSub.EventStore do
@moduledoc """
Phoenix PubSub adapter backed by EventStore.
An example usage (add this to your supervision tree):
```elixir
{Phoenix.PubSub,
[name: MyApp.PubSub,
adapter: Phoenix.PubSub.EventStore,
eventstore: MyApp.EventStore]
}
```
where `MyApp.EventStore` is configured separately based on the `EventStore`
documentation.
Optional parameters:
* `serializer`: used for converting messages into structs that `EventStore`
can handle. The default serializer is
`Phoenix.PubSub.EventStore.Serializer.Base64`. Any module that implements
`serialize/1` and `deserialize/1` may be used as long as it produces data
that `EventStore` can work with (see the sketch below).
* `unique_id_fn`: function that generates a unique ID to identify each
instance of the pubsub. It receives one parameter, that is the pubsub's name.
If not specified, then a UUID is generated with `UUID.uuid4()`.
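A minimal custom serializer sketch (a hypothetical `MyApp.TermSerializer`,
not part of this library):
```elixir
defmodule MyApp.TermSerializer do
  # Encode any Elixir term into a Base64 string that EventStore can store.
  def serialize(message),
    do: message |> :erlang.term_to_binary() |> Base.encode64()

  # Decode the Base64 string back into the original term.
  def deserialize(data),
    do: data |> Base.decode64!() |> :erlang.binary_to_term()
end
```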
"""
@behaviour Phoenix.PubSub.Adapter
use GenServer
@metadata_fields [:source, :dispatcher, :destination_node, :source_node]
@doc """
Start the server
This function is called by `Phoenix.PubSub`
"""
def start_link(opts) do
GenServer.start_link(__MODULE__, opts, name: opts[:adapter_name])
end
@doc false
def init(opts) do
send(self(), :subscribe)
id = generate_unique_id(opts)
{:ok,
%{
id: id,
pubsub_name: opts[:name],
eventstore: opts[:eventstore],
serializer: opts[:serializer] || Phoenix.PubSub.EventStore.Serializer.Base64
}}
end
defp generate_unique_id(opts) do
unique_id_fn = opts[:unique_id_fn] || fn _name -> UUID.uuid4() end
unique_id_fn.(opts[:name])
end
@doc false
def node_name(nil), do: node()
def node_name(configured_name), do: configured_name
@doc false
def direct_broadcast(server, node_name, topic, message, dispatcher) do
metadata = %{
destination_node: to_string(node_name),
source_node: to_string(node())
}
broadcast(server, topic, message, dispatcher, metadata)
end
@doc false
def broadcast(server, topic, message, dispatcher, metadata \\ %{}) do
metadata = Map.put(metadata, :dispatcher, dispatcher)
GenServer.call(server, {:broadcast, topic, message, metadata})
end
@doc false
def handle_call(
{:broadcast, topic, message, metadata},
_from_pid,
%{id: id, eventstore: eventstore, serializer: serializer} = state
) do
event = %EventStore.EventData{
event_type: to_string(serializer),
data: serializer.serialize(message),
metadata: Map.put(metadata, :source, id)
}
res = eventstore.append_to_stream(topic, :any_version, [event])
{:reply, res, state}
end
@doc false
def handle_info(:subscribe, %{eventstore: eventstore} = state) do
eventstore.subscribe("$all")
{:noreply, state}
end
def handle_info({:subscribed, _subscription}, state) do
{:noreply, state}
end
def handle_info({:events, events}, state) do
Enum.each(events, &local_broadcast_event(&1, state))
{:noreply, state}
end
defp local_broadcast_event(
%EventStore.RecordedEvent{
data: data,
metadata: metadata,
stream_uuid: topic
},
%{id: id, serializer: serializer, pubsub_name: pubsub_name} = _state
) do
current_node = to_string(node())
case convert_metadata_keys_to_atoms(metadata) do
%{destination_node: destination_node}
when not is_nil(destination_node) and destination_node != current_node ->
# Direct broadcast and this is not the destination node.
:ok
%{source: ^id} ->
# This node is the source, nothing to do, because local dispatch already
# happened.
:ok
%{dispatcher: dispatcher} ->
# Otherwise broadcast locally
Phoenix.PubSub.local_broadcast(
pubsub_name,
topic,
serializer.deserialize(data),
maybe_convert_to_existing_atom(dispatcher)
)
end
end
defp convert_metadata_keys_to_atoms(metadata) do
@metadata_fields
|> Map.new(&{&1, Map.get(metadata, &1, Map.get(metadata, to_string(&1)))})
end
defp maybe_convert_to_existing_atom(string) when is_binary(string),
do: String.to_existing_atom(string)
defp maybe_convert_to_existing_atom(atom) when is_atom(atom), do: atom
end
# lib/phoenix/pub_sub/event_store.ex
defmodule ExampleFiles do
@moduledoc """
A `GenServer` that provides access to the `ExampleFiles.File` processes for a
project.
"""
defmodule State do
@moduledoc """
A struct encapsulating the state of an `ExampleFiles` process.
"""
@enforce_keys [:options]
defstruct [:all, :options]
end
use GenServer
# TODO: Use `alias ExampleFiles.{English,Options,UI}` when targeting Elixir >= v1.2
alias ExampleFiles.English
alias ExampleFiles.Options
alias ExampleFiles.UI
alias IO.ANSI
# Client API
# TODO: Update this spec using the new `keyword` type under Elixir v1.3
# @spec start_link([%State{}] | [options: GenServer.server], keyword) :: GenServer.on_start
@spec start_link([%State{}] | [options: GenServer.server], [{atom, any}]) :: GenServer.on_start
@doc """
Starts an `ExampleFiles` process, linked to the current process, with the
specified `arguments` and `options`.
The process exits if `arguments` does not contain an `ExampleFiles.State`.
"""
def start_link([%State{}]=arguments, options) do
__MODULE__ |> GenServer.start_link(arguments, options)
end
@spec start_link([options: GenServer.server]) :: GenServer.on_start
@doc """
Starts an `ExampleFiles` process, linked to the current process, with the
specified `example_files_options` (`ExampleFiles.Options`).
"""
def start_link([options: example_files_options]) do
[%State{options: example_files_options}] |> start_link([])
end
@spec all(GenServer.server, timeout) :: [pid]
@doc """
Returns an `ExampleFiles.File` PID for each example file in the current
working directory (`System.cwd!`), spawning such processes if they are not
already running.
## Examples
iex> {:ok, options} = ExampleFiles.Options.start_link(~w(--ignore EXAMPLE-file1))
...> {:ok, example_files} = ExampleFiles.start_link(options: options)
...> File.cd!("spec/fixtures/collisions", fn -> example_files |> ExampleFiles.all end) |> Enum.map(&ExampleFiles.File.path/1) |> Enum.sort
["file1.example", "file2.example"] |> Enum.sort
iex> {:ok, options} = ExampleFiles.Options.start_link([])
...> {:ok, example_files} = ExampleFiles.start_link(options: options)
...> File.cd! "spec/fixtures/empty", fn -> example_files |> ExampleFiles.all end
[]
"""
def all(example_files, timeout \\ 5000) do
example_files |> GenServer.call({:all}, timeout)
end
@spec collisions(GenServer.server, timeout) :: [pid]
@doc """
Finds subsets of `all` that share a value for
`ExampleFiles.File.path_when_pulled`.
## Examples
iex> {:ok, options} = ExampleFiles.Options.start_link([])
...> {:ok, example_files} = ExampleFiles.start_link(options: options)
...> File.cd!("spec/fixtures/collisions", fn -> example_files |> ExampleFiles.collisions end) |> Enum.map(fn(collision_group) -> collision_group |> Enum.map(&ExampleFiles.File.path/1) |> Enum.sort end) |> Enum.sort
[~w(EXAMPLE-file1 file1.example)] |> Enum.map(&Enum.sort/1) |> Enum.sort
iex> {:ok, options} = ExampleFiles.Options.start_link([])
...> {:ok, example_files} = ExampleFiles.start_link(options: options)
...> File.cd!("spec/fixtures/no_collisions", fn -> example_files |> ExampleFiles.collisions end) |> Enum.map(fn(collision_group) -> collision_group |> Enum.map(&ExampleFiles.File.path/1) |> Enum.sort end) |> Enum.sort
[]
"""
def collisions(example_files, timeout \\ 5000) do
example_files |> GenServer.call({:collisions}, timeout)
end
@spec noncollisions(GenServer.server, timeout) :: [pid]
@doc """
Filters `all` for items having a unique value for
`ExampleFiles.File.path_when_pulled`.
## Examples
iex> {:ok, options} = ExampleFiles.Options.start_link([])
...> {:ok, example_files} = ExampleFiles.start_link(options: options)
...> File.cd!("spec/fixtures/collisions", fn -> example_files |> ExampleFiles.noncollisions end) |> Enum.map(&ExampleFiles.File.path/1) |> Enum.sort
["file2.example"] |> Enum.sort
"""
def noncollisions(example_files, timeout \\ 5000) do
example_files |> GenServer.call({:noncollisions}, timeout)
end
defp all_impl(%{all: nil, options: options}=state) do
UI |> UI.autoflush(false)
fileglobs = options |> Options.fileglobs
is_verbose = options |> Options.verbose?
all = fileglobs |> Enum.flat_map(fn(fileglob) ->
unfiltered = for filename <- fileglob |> Path.wildcard(match_dot: true) do
if is_verbose do
UI |> UI.info(["Fileglob ",
UI.underline(fileglob),
" found file ",
UI.underline(filename)])
end
if filename |> interesting?(options) do
filename |> start_link_file_for
else
nil
end
end
unfiltered |> Enum.reject(&is_nil/1)
end)
if is_verbose do
if 0 < length(all), do: UI |> UI.info_insert_at(0)
end
UI |> UI.autoflush(true)
%{state | all: all}
end
defp all_impl(state), do: state
# Server callbacks
def init([%State{}=state]), do: {:ok, state}
def init(_), do: {:stop, "Invalid state"}
def handle_call({:all}, _from, state) do
%{all: all}=new_state = state |> all_impl
{:reply, all, new_state}
end
def handle_call({:collisions}, _from, state) do
%{all: all}=new_state = all_impl(state)
collisions = all |> Enum.group_by(&ExampleFiles.File.path_when_pulled/1)
|> Map.values
|> Enum.filter(&(1 < length(&1)))
{:reply, collisions, new_state}
end
def handle_call({:noncollisions}, _from, state) do
%{all: all}=new_state = all_impl(state)
noncollisions = all |> Enum.group_by(&ExampleFiles.File.path_when_pulled/1)
|> Map.values
|> Enum.filter_map(&(length(&1) == 1),
&(&1 |> List.first))
{:reply, noncollisions, new_state}
end
@spec interesting?(binary, GenServer.server) :: boolean
defp interesting?(path, options) do
ignore = options |> Options.ignore
is_verbose = options |> Options.verbose?
if String.starts_with?(path, ignore) do
if is_verbose do
list = ignore |> Enum.map(&(&1 |> UI.underline
|> ANSI.format_fragment
|> IO.chardata_to_string))
|> English.list("or")
UI |> UI.info(["Ignoring ",
UI.underline(path),
" because it matches ignored path ",
list])
end
false
else
if File.dir?(path) do
if is_verbose do
UI |> UI.info(["Ignoring ",
UI.underline(path),
" because it is a directory"])
end
false
else
if ExampleFiles.File.example_file?(path) do
true
else
if is_verbose do
UI |> UI.info(["Ignoring ",
UI.underline(path),
" because it does not have the name of an example file"])
end
false
end
end
end
end
@spec start_link_file_for(binary) :: pid
defp start_link_file_for(path) do
{:ok, file} = [path] |> ExampleFiles.File.start_link
file
end
end
# lib/example_files.ex
defmodule Velocity do
@moduledoc """
A simple `Elixir.Agent` for registering occurrences of different events and reporting event counts for the given time period.
Configuration can be passed to `start_link/1` as a keyword list. Supported parameters are:
- `:ttl` - the duration, in seconds, that events should be stored for. Default is `5 * 60` (5 minutes);
- `:default_period` - period to consider by default when `Velocity.of/1` is called. Can be either an integer or a `@time_ranges` key. Default is :minute.
Anything can be used for event keys. Minimum time granularity is 1 second.
## Example
Velocity.start_link(ttl: 3 * 60, default_period: :minute)
Velocity.register(:foo)
Velocity.register(:bar)
Velocity.register(:foo)
Velocity.of(:foo)
#=> {:ok, 2}
Velocity.of(:foo, :minute)
#=> {:ok, 2}
Velocity.of(:bar, :minute)
#=> {:ok, 1}
Velocity.of(:baz, :minute)
#=> {:ok, 0}
#...after 2 minutes...
Velocity.of(:foo, :minute)
#=> {:ok, 0}
Velocity.of(:foo, 5 * 60)
#=> {:ok, 2}
#...after 3 minutes...
Velocity.of(:foo, 5 * 60)
#=> {:ok, 0}
"""
use Agent
@time_periods %{
hour: 60 * 60,
half_hour: 30 * 60,
minute: 60,
second: 1
}
@time_period_keys Map.keys(@time_periods)
def start_link(), do: start_link([])
def start_link(opts) do
{ttl, _} = Keyword.pop(opts, :ttl, 5 * 60)
{default_period, _} = Keyword.pop(opts, :default_period, :minute)
Agent.start_link(
fn -> %{config: %{ttl: ttl, default_period: default_period}, events: %{}} end,
name: __MODULE__
)
end
@doc """
Registers occurrence of an event. Anything can be used for the event key.
"""
@spec register(any()) :: :ok
def register(event) do
Agent.update(__MODULE__, fn %{config: config, events: events} ->
%{
config: config,
events:
Map.put(
events,
event,
case events do
%{^event => points} ->
put_and_expire(points, now(), config[:ttl])
_ ->
[now()]
end
)
}
end)
end
@doc """
Reports the number of given events registered within the last `period` seconds. Pre-defined constants such as `:minute` or `:hour` may be used.
"""
@spec of(any(), integer() | atom()) :: {:ok, integer()} | {:error, atom()}
def of(event, period) when period in @time_period_keys,
do: of(event, @time_periods[period])
def of(_, period) when not is_integer(period) do
{:error, :time_period_must_be_an_integer}
end
def of(_, period) when period <= 0,
do: {:error, :time_period_must_be_positive}
def of(event, period) do
Agent.get(__MODULE__, fn %{config: %{ttl: ttl}, events: events} ->
now = now()
count =
case events do
%{^event => points} ->
points
|> Enum.take_while(&(&1 > now - min(period, ttl)))
|> Enum.count()
_ ->
0
end
{:ok, count}
end)
end
@spec of(any()) :: {:ok, integer()} | {:error, atom()}
@doc """
Reports the number of given events registered within the default time period (see configuration details above).
"""
def of(event) do
default_period =
Agent.get(__MODULE__, fn %{config: %{default_period: default_period}} ->
default_period
end)
of(event, default_period)
end
defp now, do: DateTime.utc_now() |> DateTime.to_unix(:second)
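# Event timestamps are stored newest-first: prepend the new moment and drop
# any points that have fallen outside the TTL window.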
defp put_and_expire(points, moment, ttl),
do: [moment | Enum.take_while(points, &(&1 > moment - ttl))]
end
# lib/velocity.ex
defmodule Nebulex.Adapters.Local do
@moduledoc ~S"""
Adapter module for Local Generational Cache; inspired by
[epocxy](https://github.com/duomark/epocxy).
Generational caching using an ets table (or multiple ones when used with
`:shards`) for each generation of cached data. Accesses hit the newer
generation first, and migrate from the older generation to the newer
generation when retrieved from the stale table. When a new generation
is started, the oldest one is deleted. This is a form of mass garbage
collection which avoids using timers and expiration of individual
cached elements.
This implementation of generation cache uses only two generations
(which is more than enough), also referred to as the `newer` and
the `older`.
## Overall features
* Configurable backend (`ets` or `:shards`).
* Expiration – Based on the TTL (Time To Live) option. To maintain
cache performance, expired entries may not be immediately removed or
evicted; they are expired or evicted on-demand, when the key is read.
* Eviction – [Generational Garbage Collection][gc].
* Sharding – For intensive workloads, the Cache may also be partitioned
(by using `:shards` backend and specifying the `:partitions` option).
* Support for transactions via Erlang global name registration facility.
* Support for stats.
[gc]: http://hexdocs.pm/nebulex/Nebulex.Adapters.Local.Generation.html
## Options
This adapter supports the following options and all of them can be given via
the cache configuration:
* `:backend` - Defines the backend or storage to be used for the adapter.
Supported backends are: `:ets` and `:shards`. Defaults to `:ets`.
* `:read_concurrency` - (boolean) Since this adapter uses ETS tables
internally, this option is used when a new table is created; see
`:ets.new/2`. Defaults to `true`.
* `:write_concurrency` - (boolean) Since this adapter uses ETS tables
internally, this option is used when a new table is created; see
`:ets.new/2`. Defaults to `true`.
* `:compressed` - (boolean) This option is used when a new ETS table is
created and it defines whether or not it includes `:compressed` as an
option; see `:ets.new/2`. Defaults to `false`.
* `:backend_type` - This option defines the type of ETS to be used
(Defaults to `:set`). However, it is highly recommended to keep the
default value, since there are commands not supported (an unexpected
exception may be raised) for types like `:bag` or `:duplicate_bag`.
Please see the [ETS](https://erlang.org/doc/man/ets.html) docs
for more information.
* `:partitions` - If it is set, an integer > 0 is expected, otherwise,
it defaults to `System.schedulers_online()`. This option is only
available for `:shards` backend.
* `:gc_interval` - If it is set, an integer > 0 is expected defining the
interval time in milliseconds for garbage collection to run, deleting the
oldest generation and creating a new one. If this option is not set,
garbage collection is never executed, so new generations must be
created explicitly, e.g.: `MyCache.new_generation(opts)`.
* `:max_size` - If it is set, an integer > 0 is expected defining the
max number of cached entries (cache limit). If it is not set (`nil`),
the check to release memory is not performed (the default).
* `:allocated_memory` - If it is set, an integer > 0 is expected defining
the max size in bytes allocated for a cache generation. When this option
is set and the configured value is reached, a new cache generation is
created and the oldest one is deleted, forcing memory to be released.
If it is not set (`nil`), the cleanup check to release memory is
not performed (the default).
* `:gc_cleanup_min_timeout` - An integer > 0 defining the min timeout in
milliseconds for triggering the next cleanup and memory check. This will
be the timeout to use when either the max size or max allocated memory
is reached. Defaults to `10_000` (10 seconds).
* `:gc_cleanup_max_timeout` - An integer > 0 defining the max timeout in
milliseconds for triggering the next cleanup and memory check. This is
the timeout used when the cache starts and there are few entries or the
consumed memory is near to `0`. Defaults to `600_000` (10 minutes).
## Usage
`Nebulex.Cache` is the wrapper around the cache. We can define a
local cache as follows:
defmodule MyApp.LocalCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
Where the configuration for the cache must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, MyApp.LocalCache,
gc_interval: :timer.hours(12),
max_size: 1_000_000,
allocated_memory: 2_000_000_000,
gc_cleanup_min_timeout: :timer.seconds(10),
gc_cleanup_max_timeout: :timer.minutes(10)
For intensive workloads, the Cache may also be partitioned using `:shards`
as cache backend (`backend: :shards`) and configuring the desired number of
partitions via the `:partitions` option. Defaults to
`System.schedulers_online()`.
config :my_app, MyApp.LocalCache,
gc_interval: :timer.hours(12),
max_size: 1_000_000,
allocated_memory: 2_000_000_000,
gc_cleanup_min_timeout: :timer.seconds(10),
gc_cleanup_max_timeout: :timer.minutes(10),
backend: :shards,
partitions: System.schedulers_online() * 2
If your application was generated with a supervisor (by passing `--sup`
to `mix new`) you will have a `lib/my_app/application.ex` file containing
the application start callback that defines and starts your supervisor.
You just need to edit the `start/2` function to start the cache as a
supervisor on your application's supervisor:
def start(_type, _args) do
  children = [
    {MyApp.LocalCache, []},
    ...
  ]

  opts = [strategy: :one_for_one, name: MyApp.Supervisor]
  Supervisor.start_link(children, opts)
end
See `Nebulex.Cache` for more information.
## Eviction configuration
This section is to understand a bit better how the different configuration
options work and have an idea what values to set; especially if it is the
first time using Nebulex.
### `:ttl` option
The `:ttl` option is used to set the expiration time for a key; it doesn't
work as an eviction mechanism, since the local adapter implements a
generational cache. The options that control the eviction process are:
`:gc_interval`, `:gc_cleanup_min_timeout`, `:gc_cleanup_max_timeout`,
`:max_size` and `:allocated_memory`. The `:ttl` is evaluated on-demand when
a key is retrieved; at that moment, if it is expired, the key is removed
from the cache. Hence, it cannot be used as an eviction method; it is more
about keeping integrity and consistency in the cache. For this reason, it
is highly recommended to always configure the eviction options mentioned
before.
### Caveats when using `:ttl` option:
* When using the `:ttl` option, ensure it is less than `:gc_interval`,
otherwise, there may be a situation where the key is evicted and the
`:ttl` hasn't happened yet (maybe because the garbage collector ran
before the key had been fetched).
* Assuming you have `:gc_interval` set to 2 hrs, you put a new key with
`:ttl` set to 1 hr, and 1 minute later the GC runs: that key will be
moved to the older generation, but it can still be retrieved. On the
other hand, if the key is never fetched before the next GC cycle (which
would have moved it back to the newer generation), the key is still in
the oldest generation when that generation is deleted, so it is evicted
from the cache and won't be retrievable anymore.
### Garbage collection or eviction options
This adapter implements a generational cache, which means its main eviction
mechanism is pushing a new cache generation and removing the oldest one. In
this way, we ensure only the most frequently used keys are always available
in the newer generation and the least frequently used are evicted when
the garbage collector runs; the garbage collector is triggered upon
these conditions:
* When the time interval defined by `:gc_interval` is completed.
This makes the garbage-collector process run, creating a new
generation and forcing the oldest one to be deleted.
* When the "cleanup" timeout expires, and then the limits `:max_size`
and `:allocated_memory` are checked, if one of those is reached,
then the garbage collector runs (a new generation is created and
the oldest one is deleted). The cleanup timeout is controlled by
`:gc_cleanup_min_timeout` and `:gc_cleanup_max_timeout`; it works
with an inverse linear backoff, which means the timeout is inversely
proportional to the memory growth; the bigger the cache size is,
the shorter the cleanup timeout will be.
### First-time configuration
For configuring the cache with accurate and/or good values it is important
to know several things in advance, like for example the size of an entry
in average so we can calculate a good value for max size and/or allocated
memory, how intensive will be the load in terms of reads and writes, etc.
The problem is most of these aspects are unknown when it is a new app or
we are using the cache for the first time. Therefore, the following
recommendations will help you to configure the cache for the first time:
* When configuring the `:gc_interval`, think about how often the
least frequently used entries should be evicted, or what is the desired
retention period for the cached entries. For example, if `:gc_interval`
is set to 1 hr, it means you will keep in cache only those entries that
are retrieved periodically within a 2 hr period; `gc_interval * 2`,
2 being the number of generations. Anything kept longer than that, the
GC ensures, is evicted (the oldest generation is always deleted).
If it is the first time using Nebulex, perhaps you can start with
`gc_interval: :timer.hours(12)` (12 hrs), so the max retention
period for the keys will be 1 day; but ensure you also set either the
`:max_size` or `:allocated_memory`.
* It is highly recommended to set either `:max_size` or `:allocated_memory`
to ensure the oldest generation is deleted (least frequently used keys
are evicted) when one of these limits is reached and also to avoid
running out of memory. For example, for the `:allocated_memory` we can
set 25% of the total memory, and for the `:max_size` something between
`100_000` and `1_000_000`.
* For `:gc_cleanup_min_timeout` we can set `10_000`, which means when the
cache is reaching the size or memory limit, the polling period for the
cleanup process will be 10 seconds. And for `:gc_cleanup_max_timeout`
we can set `600_000`, which means when the cache is almost empty the
polling period will be close to 10 minutes.
## Stats
This adapter does support stats by using the default implementation
provided by `Nebulex.Adapter.Stats`.
## Queryable API
The adapter supports as query parameter the following values:
* `query` - `nil | :unexpired | :expired | :ets.match_spec()`
Internally, an entry is represented by the tuple `{key, val, vsn, exp}`,
which means the match pattern within the `:ets.match_spec()` must be
something like `{:"$1", :"$2", :"$3", :"$4"}`. In order to make query
building easier, you can use `Ex2ms` library.
## Examples
# built-in queries
MyCache.all()
MyCache.all(:unexpired)
MyCache.all(:expired)
# using a custom match spec (all values > 10)
spec = [{{:"$1", :"$2", :_, :_}, [{:>, :"$2", 10}], [{{:"$1", :"$2"}}]}]
MyCache.all(spec)
# using Ex2ms
import Ex2ms
spec =
fun do
{key, value, _version, _expire_at} when value > 10 -> {key, value}
end
MyCache.all(spec)
The `:return` option applies only to built-in queries, such as:
`nil | :unexpired | :expired`. If you are using a custom `:ets.match_spec()`,
the return value depends on it.
The same applies to the `stream` function.
## Extended API (convenience functions)
This adapter provides some additional convenience functions to the
`Nebulex.Cache` API.
Creating new generations:
MyCache.new_generation()
MyCache.new_generation(reset_timer: false)
Retrieving the current generations:
MyCache.generations()
Retrieving the newer generation:
MyCache.newer_generation()
"""
# Provide Cache Implementation
@behaviour Nebulex.Adapter
@behaviour Nebulex.Adapter.Entry
@behaviour Nebulex.Adapter.Queryable
# Inherit default transaction implementation
use Nebulex.Adapter.Transaction
# Inherit default persistence implementation
use Nebulex.Adapter.Persistence
# Inherit default stats implementation
use Nebulex.Adapter.Stats
import Nebulex.Adapter
import Record
alias Nebulex.Adapter.Stats
alias Nebulex.Adapters.Local.{Backend, Generation, Metadata}
alias Nebulex.{Entry, Time}
# Cache Entry
defrecord(:entry,
key: nil,
value: nil,
touched: nil,
ttl: nil
)
# Supported Backends
@backends ~w(ets shards)a
# Inline common instructions
@compile {:inline, list_gen: 1, newer_gen: 1}
## Nebulex.Adapter
@impl true
defmacro __before_compile__(_env) do
quote do
@doc """
A convenience function for creating new generations.
"""
def new_generation(opts \\ []) do
Generation.new(get_dynamic_cache(), opts)
end
@doc """
A convenience function for reset the GC timer.
"""
def reset_generation_timer do
Generation.reset_timer(get_dynamic_cache())
end
@doc """
A convenience function for retrieving the current generations.
"""
def generations do
Generation.list(get_dynamic_cache())
end
@doc """
A convenience function for retrieving the newer generation.
"""
def newer_generation do
Generation.newer(get_dynamic_cache())
end
end
end
@impl true
def init(opts) do
# Required options
cache = Keyword.fetch!(opts, :cache)
telemetry_prefix = Keyword.fetch!(opts, :telemetry_prefix)
# Init internal metadata table
meta_tab = opts[:meta_tab] || Metadata.init()
# Init stats_counter
stats_counter = Stats.init(opts)
# Resolve the backend to be used
backend =
opts
|> Keyword.get(:backend, :ets)
|> case do
val when val in @backends ->
val
val ->
raise "expected backend: option to be one of the supported backends " <>
"#{inspect(@backends)}, got: #{inspect(val)}"
end
adapter_meta = %{
cache: cache,
telemetry_prefix: telemetry_prefix,
meta_tab: meta_tab,
stats_counter: stats_counter,
backend: backend,
started_at: DateTime.utc_now()
}
child_spec = Backend.child_spec(backend, [adapter_meta: adapter_meta] ++ opts)
{:ok, child_spec, adapter_meta}
end
## Nebulex.Adapter.Entry
@impl true
def get(adapter_meta, key, _opts) do
with_span(adapter_meta, :get, fn ->
adapter_meta.meta_tab
|> list_gen()
|> do_get(key, adapter_meta.backend, adapter_meta.stats_counter)
|> return(:value)
|> update_stats(:get, adapter_meta.stats_counter)
end)
end
defp do_get([newer], key, backend, ref) do
gen_fetch(newer, key, backend, ref)
end
defp do_get([newer, older], key, backend, ref) do
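# Miss in the newer generation: pop the entry from the older one and, on a
# hit, migrate it forward into the newer generation.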
with nil <- gen_fetch(newer, key, backend, ref),
entry(key: ^key) = cached <- gen_fetch(older, key, backend, ref, &pop_entry/4) do
true = backend.insert(newer, cached)
cached
end
end
defp gen_fetch(gen, key, backend, ref, fun \\ &get_entry/4) do
gen
|> fun.(key, nil, backend)
|> validate_ttl(gen, backend, ref)
end
@impl true
def get_all(adapter_meta, keys, _opts) do
with_span(adapter_meta, :get_all, fn ->
do_get_all(adapter_meta, keys)
end)
end
defp do_get_all(adapter_meta, keys) do
Enum.reduce(keys, %{}, fn key, acc ->
if obj = get(adapter_meta, key, []),
do: Map.put(acc, key, obj),
else: acc
end)
end
@impl true
def put(adapter_meta, key, value, ttl, on_write, _opts) do
with_span(adapter_meta, on_write, fn ->
do_put(
on_write,
adapter_meta.meta_tab,
adapter_meta.backend,
adapter_meta.stats_counter,
entry(
key: key,
value: value,
touched: Time.now(),
ttl: ttl
)
)
end)
end
defp do_put(:put, meta_tab, backend, ref, entry) do
meta_tab
|> put_entries(backend, entry)
|> update_stats(:put, ref)
end
defp do_put(:put_new, meta_tab, backend, ref, entry) do
meta_tab
|> put_new_entries(backend, entry)
|> update_stats(:put, ref)
end
defp do_put(:replace, meta_tab, backend, ref, entry(key: key, value: value, ttl: ttl)) do
meta_tab
|> update_entry(backend, key, [{3, value}, {4, nil}, {5, ttl}])
|> update_stats(:update, ref)
end
@impl true
def put_all(adapter_meta, entries, ttl, on_write, _opts) do
action =
case on_write do
:put -> :put_all
:put_new -> :put_new_all
end
with_span(adapter_meta, action, fn ->
touched = Time.now()
entries =
for {key, value} <- entries, value != nil do
entry(key: key, value: value, touched: touched, ttl: ttl)
end
do_put_all(
on_write,
adapter_meta.meta_tab,
adapter_meta.backend,
adapter_meta.stats_counter,
entries
)
end)
end
defp do_put_all(:put, meta_tab, backend, ref, entries) do
meta_tab
|> put_entries(backend, entries)
|> update_stats(:put_all, {ref, entries})
end
defp do_put_all(:put_new, meta_tab, backend, ref, entries) do
meta_tab
|> put_new_entries(backend, entries)
|> update_stats(:put_all, {ref, entries})
end
@impl true
def delete(adapter_meta, key, _opts) do
with_span(adapter_meta, :delete, fn ->
adapter_meta.meta_tab
|> list_gen()
|> Enum.each(&adapter_meta.backend.delete(&1, key))
|> update_stats(:delete, adapter_meta.stats_counter)
end)
end
@impl true
def take(adapter_meta, key, opts) do
with_span(adapter_meta, :take, fn ->
do_take(adapter_meta, key, opts)
end)
end
defp do_take(%{backend: backend, stats_counter: ref} = adapter_meta, key, _opts) do
adapter_meta.meta_tab
|> list_gen()
|> Enum.reduce_while(nil, fn gen, acc ->
case pop_entry(gen, key, nil, backend) do
nil ->
{:cont, acc}
res ->
value =
res
|> validate_ttl(gen, backend, ref)
|> return(:value)
{:halt, value}
end
end)
|> update_stats(:take, ref)
end
@impl true
def update_counter(adapter_meta, key, amount, ttl, default, _opts) do
with_span(adapter_meta, :incr, fn ->
adapter_meta.meta_tab
|> newer_gen()
|> adapter_meta.backend.update_counter(
key,
{3, amount},
entry(key: key, value: default, touched: Time.now(), ttl: ttl)
)
|> update_stats({:update, amount, default}, adapter_meta.stats_counter)
end)
end
@impl true
def has_key?(adapter_meta, key) do
with_span(adapter_meta, :has_key?, fn ->
case get(adapter_meta, key, []) do
nil -> false
_ -> true
end
end)
end
@impl true
def ttl(%{stats_counter: stats_counter} = adapter_meta, key) do
with_span(adapter_meta, :ttl, fn ->
adapter_meta.meta_tab
|> list_gen()
|> do_get(key, adapter_meta.backend, stats_counter)
|> return()
|> entry_ttl()
|> update_stats(:get, stats_counter)
end)
end
defp entry_ttl(nil), do: nil
defp entry_ttl(entry(ttl: :infinity)), do: :infinity
defp entry_ttl(entry(ttl: ttl, touched: touched)) do
ttl - (Time.now() - touched)
end
defp entry_ttl(entries) when is_list(entries) do
for entry <- entries, do: entry_ttl(entry)
end
@impl true
def expire(adapter_meta, key, ttl) do
with_span(adapter_meta, :expire, fn ->
adapter_meta.meta_tab
|> update_entry(adapter_meta.backend, key, [{4, Time.now()}, {5, ttl}])
|> update_stats(:update, adapter_meta.stats_counter)
end)
end
@impl true
def touch(adapter_meta, key) do
with_span(adapter_meta, :touch, fn ->
adapter_meta.meta_tab
|> update_entry(adapter_meta.backend, key, [{4, Time.now()}])
|> update_stats(:update, adapter_meta.stats_counter)
end)
end
## Nebulex.Adapter.Queryable
@impl true
def execute(adapter_meta, operation, query, opts) do
with_span(adapter_meta, operation, fn ->
do_execute(adapter_meta, operation, query, opts)
end)
end
defp do_execute(%{meta_tab: meta_tab, backend: backend}, :count_all, nil, _opts) do
meta_tab
|> list_gen()
|> Enum.reduce(0, fn gen, acc ->
gen
|> backend.info(:size)
|> Kernel.+(acc)
end)
end
defp do_execute(%{meta_tab: meta_tab, stats_counter: ref}, :delete_all, nil, _opts) do
meta_tab
|> Generation.delete_all()
|> update_stats(:delete_all, ref)
end
defp do_execute(%{meta_tab: meta_tab, backend: backend}, operation, query, opts) do
query =
query
|> validate_match_spec(opts)
|> maybe_match_spec_return_true(operation)
{reducer, acc_in} =
case operation do
:all -> {&(backend.select(&1, query) ++ &2), []}
:count_all -> {&(backend.select_count(&1, query) + &2), 0}
:delete_all -> {&(backend.select_delete(&1, query) + &2), 0}
end
meta_tab
|> list_gen()
|> Enum.reduce(acc_in, reducer)
end
@impl true
def stream(adapter_meta, query, opts) do
with_span(adapter_meta, :stream, fn ->
query
|> validate_match_spec(opts)
|> do_stream(adapter_meta, Keyword.get(opts, :page_size, 20))
end)
end
defp do_stream(match_spec, %{meta_tab: meta_tab, backend: backend}, page_size) do
Stream.resource(
fn ->
[newer | _] = generations = list_gen(meta_tab)
result = backend.select(newer, match_spec, page_size)
{result, generations}
end,
fn
{:"$end_of_table", [_gen]} ->
{:halt, []}
{:"$end_of_table", [_gen | generations]} ->
result =
generations
|> hd()
|> backend.select(match_spec, page_size)
{[], {result, generations}}
{{elements, cont}, [_ | _] = generations} ->
{elements, {backend.select(cont), generations}}
end,
& &1
)
end
## Nebulex.Adapter.Persistence
@impl true
def dump(adapter_meta, path, opts) do
with_span(adapter_meta, :dump, fn ->
super(adapter_meta, path, opts)
end)
end
@impl true
def load(adapter_meta, path, opts) do
with_span(adapter_meta, :load, fn ->
super(adapter_meta, path, opts)
end)
end
## Nebulex.Adapter.Transaction
@impl true
def transaction(adapter_meta, opts, fun) do
with_span(adapter_meta, :transaction, fn ->
super(adapter_meta, opts, fun)
end)
end
@impl true
def in_transaction?(adapter_meta) do
with_span(adapter_meta, :in_transaction?, fn ->
super(adapter_meta)
end)
end
## Nebulex.Adapter.Stats
@impl true
def stats(%{started_at: started_at} = adapter_meta) do
with_span(adapter_meta, :stats, fn ->
if stats = super(adapter_meta) do
%{stats | metadata: Map.put(stats.metadata, :started_at, started_at)}
end
end)
end
## Helpers
defp list_gen(meta_tab) do
Metadata.fetch!(meta_tab, :generations)
end
defp newer_gen(meta_tab) do
meta_tab
|> Metadata.fetch!(:generations)
|> hd()
end
defp get_entry(tab, key, default, backend) do
case backend.lookup(tab, key) do
[] -> default
[entry] -> entry
entries -> entries
end
end
defp pop_entry(tab, key, default, backend) do
case backend.take(tab, key) do
[] -> default
[entry] -> entry
entries -> entries
end
end
defp put_entries(meta_tab, backend, entry_or_entries) do
meta_tab
|> newer_gen()
|> backend.insert(entry_or_entries)
end
defp put_new_entries(meta_tab, backend, entry_or_entries) do
meta_tab
|> newer_gen()
|> backend.insert_new(entry_or_entries)
end
defp update_entry(meta_tab, backend, key, updates) do
meta_tab
|> newer_gen()
|> backend.update_element(key, updates)
end
defp return(entry_or_entries, field \\ nil)
defp return(nil, _field), do: nil
defp return(entry(value: value), :value), do: value
defp return(entry(key: _) = entry, _field), do: entry
defp return(entries, field) when is_list(entries) do
for entry <- entries, do: return(entry, field)
end
defp validate_ttl(nil, _, _, _), do: nil
defp validate_ttl(entry(ttl: :infinity) = entry, _, _, _), do: entry
defp validate_ttl(entry(key: key, touched: touched, ttl: ttl) = entry, gen, backend, ref) do
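# Lazy expiration: if the entry has outlived its TTL, delete it from this
# generation and record an eviction/expiration; otherwise return it as-is.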
if Time.now() - touched >= ttl do
true = backend.delete(gen, key)
update_stats(nil, :expired, ref)
else
entry
end
end
defp validate_ttl(entries, gen, backend, ref) when is_list(entries) do
Enum.filter(entries, fn entry ->
not is_nil(validate_ttl(entry, gen, backend, ref))
end)
end
defp validate_match_spec(spec, opts) when spec in [nil, :unexpired, :expired] do
[
{
entry(key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"),
if(spec = comp_match_spec(spec), do: [spec], else: []),
ret_match_spec(opts)
}
]
end
defp validate_match_spec(spec, _opts) do
case :ets.test_ms({nil, nil, nil, :infinity}, spec) do
{:ok, _result} ->
spec
{:error, _result} ->
raise Nebulex.QueryError, message: "invalid match spec", query: spec
end
end
defp comp_match_spec(nil),
do: nil
defp comp_match_spec(:unexpired),
do: {:orelse, {:==, :"$4", :infinity}, {:<, {:-, Time.now(), :"$3"}, :"$4"}}
defp comp_match_spec(:expired),
do: {:not, comp_match_spec(:unexpired)}
defp ret_match_spec(opts) do
case Keyword.get(opts, :return, :key) do
:key -> [:"$1"]
:value -> [:"$2"]
{:key, :value} -> [{{:"$1", :"$2"}}]
:entry -> [%Entry{key: :"$1", value: :"$2", touched: :"$3", ttl: :"$4"}]
end
end
defp maybe_match_spec_return_true([{pattern, conds, _ret}], operation)
when operation in [:delete_all, :count_all] do
[{pattern, conds, [true]}]
end
defp maybe_match_spec_return_true(match_spec, _operation) do
match_spec
end
## Internal functions for updating stats
defp update_stats(value, _action, nil), do: value
defp update_stats(value, _action, {nil, _}), do: value
defp update_stats(nil, :get, counter_ref) do
:ok = Stats.incr(counter_ref, :misses)
nil
end
defp update_stats(value, :get, counter_ref) do
:ok = Stats.incr(counter_ref, :hits)
value
end
defp update_stats(value, :expired, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions)
:ok = Stats.incr(counter_ref, :expirations)
value
end
defp update_stats(true, :put, counter_ref) do
:ok = Stats.incr(counter_ref, :writes)
true
end
defp update_stats(true, :put_all, {counter_ref, entries}) do
:ok = Stats.incr(counter_ref, :writes, length(entries))
true
end
defp update_stats(value, :delete, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions)
value
end
defp update_stats(nil, :take, counter_ref) do
:ok = Stats.incr(counter_ref, :misses)
nil
end
defp update_stats(value, :take, counter_ref) do
:ok = Stats.incr(counter_ref, :hits)
:ok = Stats.incr(counter_ref, :evictions)
value
end
defp update_stats(value, :delete_all, counter_ref) do
:ok = Stats.incr(counter_ref, :evictions, value)
value
end
defp update_stats(true, :update, counter_ref) do
:ok = Stats.incr(counter_ref, :updates)
true
end
defp update_stats(value, {:update, amount, default}, counter_ref) do
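# Distinguish a first write (the update just created the counter from
# `default`) from an update to an existing counter.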
offset = if amount >= 0, do: -1, else: 1
if value + amount * offset === default do
:ok = Stats.incr(counter_ref, :writes)
else
:ok = Stats.incr(counter_ref, :updates)
end
value
end
defp update_stats(value, _action, _counter_ref) do
value
end
end
# lib/nebulex/adapters/local.ex
defmodule Flix do
@moduledoc ~S"""
Flix is an Elixir client for the [Flic](https://flic.io/) smart button.
Flic buttons don't connect directly to `Flix`, nor the other way around. Flic buttons connect
to `flicd` via Bluetooth. `Flix` applications also connect to `flicd`, but via TCP. See
the diagram below.
```
+------------+ command(s) +---------+ +---------------+
| +------------->| | | |
| Flix App | TCP | flicd |<--------------+ Flic Button |
| |<-------------+ | Bluetooth | |
+------------+ event(s) +---------+ +---------------+
```
You can find more information about Flic's `flicd` in its
[official page](https://github.com/50ButtonsEach/fliclib-linux-hci).
Writing a Flix application is as simple as defining a new Elixir module,
using Flix's `__using__` macro (ie. `use Flix`) and implementing Flix's
`handle_event/2` callback function.
```elixir
defmodule MyFlixApp do
use Flix
def handle_event(event, state) do
new_state = do_something(event, state)
{:ok, new_state}
end
end
```
Below is a full example of a Flix application where a counter is initialised to `0`
and increased or decreased by one when someone single- or double-clicks a Flic
button, respectively. The code makes the following assumptions:
- `flicd` is running and reachable on `raspberrypi.local:5551`.
- The Flic button (ie. `"80:E4:DA:78:45:1B"`) has already been paired with `flicd`.
```elixir
defmodule Flix.Examples.Counter do
use Flix
alias Flix.Protocol.Events.ButtonSingleOrDoubleClickOrHold
alias Flix.Protocol.Enums.ClickType
def start(host \\ 'raspberrypi.local', port \\ 5551) do
{:ok, client} = Flix.start(__MODULE__, 0, host, port)
:ok = set_up(client)
{:ok, client}
end
def start_link(host \\ 'raspberrypi.local', port \\ 5551) do
{:ok, client} = Flix.start_link(__MODULE__, 0, host, port)
:ok = set_up(client)
{:ok, client}
end
def set_up(client) do
:ok = Flix.create_connection_channel(client, "80:E4:DA:78:45:1B", 1)
end
def stop(client) do
:ok = Flix.stop(client)
end
def handle_event(
%ButtonSingleOrDoubleClickOrHold{click_type: ClickType.SingleClick},
count
) do
new_count = count + 1
IO.puts "Count = #{new_count}"
{:ok, new_count}
end
def handle_event(
%ButtonSingleOrDoubleClickOrHold{click_type: ClickType.DoubleClick},
count
) do
new_count = count - 1
IO.puts "Count = #{new_count}"
{:ok, new_count}
end
def handle_event(event, count) do
require Logger
Logger.debug("No handle_event/2 clause in #{__MODULE__} for #{inspect(event)}")
{:ok, count}
end
end
```
"""
## API
## =========================================================================
@doc """
Starts a new `Flix` client without links.
See `start_link/4` for more details.
"""
def start(module, state, host \\ 'localhost', port \\ 5551) do
GenServer.start(module, [host, port, module, state])
end
@doc """
Starts a new `Flix` client linked to the current process.
This is often used to start the `Flix` as part of a supervision tree.
A `Flix` client is nothing but a `GenServer` sprinkled with some custom logic.
See `GenServer.start_link/3` for more details.
"""
def start_link(module, state, host \\ 'localhost', port \\ 5551) do
GenServer.start_link(module, [host, port, module, state])
end
def stop(client) do
GenServer.cast(client, :stop)
end
defp send_command(client, command) do
encoded_command = Flix.Protocol.Commands.encode(command)
size = byte_size(encoded_command)
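# Flic protocol framing: each packet is a 16-bit little-endian length
# prefix followed by the encoded command.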
packet = <<size::16-little, encoded_command::binary>>
GenServer.call(client, {:send, packet})
end
@doc """
Retrieves the current state of the server (ie. `flicd` process the client is connected to).
After executing this function, `flicd` sends a `Flix.Protocol.Events.GetInfoResponse` event to the client.
"""
def get_info(client) do
command = %Flix.Protocol.Commands.GetInfo{}
send_command(client, command)
end
@doc """
Creates a scanner with the given `scan_id`.
After executing this function, `flicd` sends a `Flix.Protocol.Events.AdvertisementPacket` event for each
advertisement packet received from a Flic button by the server (ie. `flicd`).
`Flix.Protocol.Events.AdvertisementPacket` events are annotated with the provided `scan_id`.
To stop receiving advertisement events use `remove_scanner/2`.
If there already exists an active scanner with the provided `scan_id`, the execution of
this function has no effect.
"""
def create_scanner(client, scan_id) do
command = %Flix.Protocol.Commands.CreateScanner{scan_id: scan_id}
send_command(client, command)
end
@doc """
Removes the scanner with the given `scan_id`.
The client will stop receiving `Flix.Protocol.Events.AdvertisementPacket` events.
"""
def remove_scanner(client, scan_id) do
command = %Flix.Protocol.Commands.RemoveScanner{scan_id: scan_id}
send_command(client, command)
end
@doc """
Creates a connection channel for a Flic button with the given bluetooth address. You assign a unique
`conn_id` for this connection channel that will later be used in commands and events to refer to this
connection channel.
After executing this function, `flicd` sends a `Flix.Protocol.Events.CreateConnectionChannelResponse`
event to the client.
When a connection channel is created, the client starts listening for button events originating from the
provided Flic button. There are four types of button events (ie. `Flix.Protocol.Events.ButtonUpOrDown`,
`Flix.Protocol.Events.ButtonClickOrHold`, `Flix.Protocol.Events.ButtonSingleOrDoubleClick`,
`Flix.Protocol.Events.ButtonSingleOrDoubleClickOrHold`). These events are annotated with the provided
`conn_id`.
To stop receiving button events use `remove_connection_channel/2`.
If there already exists a connection channel with the provided `conn_id`, the execution of this function
has no effect.
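For example (the bluetooth address and `conn_id` are illustrative):

```elixir
Flix.create_connection_channel(client, "80:E4:DA:78:45:1B", 1)
```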
"""
def create_connection_channel(
client,
bt_addr,
conn_id,
latency_mode \\ Flix.Protocol.Enums.LatencyMode.default(),
auto_disconnect_time \\ 0
)
def create_connection_channel(client, bt_addr, conn_id, latency_mode, auto_disconnect_time) do
command = %Flix.Protocol.Commands.CreateConnectionChannel{
bt_addr: bt_addr,
conn_id: conn_id,
latency_mode: latency_mode,
auto_disconnect_time: auto_disconnect_time
}
send_command(client, command)
end
@doc """
Removes a previously created connection channel.
After executing this function, no further button events will be sent for this channel.
If there are no other active connection channels to this Flic button (from the provided client
or any other client), the physical bluetooth connection between `flicd` and the Flic button is
disconnected.
"""
def remove_connection_channel(client, conn_id) do
command = %Flix.Protocol.Commands.RemoveConnectionChannel{conn_id: conn_id}
send_command(client, command)
end
@doc """
Removes all connection channels among all clients for the specified Flic button.
"""
def force_disconnect(client, bt_addr) do
command = %Flix.Protocol.Commands.ForceDisconnect{bt_addr: bt_addr}
send_command(client, command)
end
@doc """
Changes the accepted latency for this connection channel and the auto disconnect time.
The latency mode is applied immediately but the auto disconnect time is applied the next time
the Flic button gets connected.
"""
def change_mode_parameters(
client,
conn_id,
latency_mode \\ Flix.Protocol.Enums.LatencyMode.default(),
auto_disconnect_time \\ 0
)
def change_mode_parameters(client, conn_id, latency_mode, auto_disconnect_time) do
command = %Flix.Protocol.Commands.ChangeModeParameters{
conn_id: conn_id,
latency_mode: latency_mode,
auto_disconnect_time: auto_disconnect_time
}
send_command(client, command)
end
@doc """
Pings the server (i.e. `flicd`).
After executing this function, `flicd` sends a `Flix.Protocol.Events.PingResponse` event to the client.
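For example (the `ping_id` is an arbitrary identifier you can use to correlate the response):

```elixir
Flix.ping(client, 7)
```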
"""
def ping(client, ping_id) do
command = %Flix.Protocol.Commands.Ping{ping_id: ping_id}
send_command(client, command)
end
@doc """
Gets information about a Flic button. The button must have been previously paired with the server (i.e. `flicd`).
After executing this function, `flicd` sends a `Flix.Protocol.Events.GetButtonInfoResponse` event to the client.
"""
def get_button_info(client, bt_addr) do
command = %Flix.Protocol.Commands.GetButtonInfo{bt_addr: bt_addr}
send_command(client, command)
end
@doc """
Starts a scan wizard with the provided `scan_wizard_id`.
If there already exists a scan wizard with the same `scan_wizard_id`, the execution of this function
has no effect.
"""
def create_scan_wizard(client, scan_wizard_id) do
command = %Flix.Protocol.Commands.CreateScanWizard{scan_wizard_id: scan_wizard_id}
send_command(client, command)
end
@doc """
Cancels a scan wizard that was previously started.
If there exists a scan wizard with the provided `scan_wizard_id`, it is cancelled and `flicd` sends a
`Flix.Protocol.Events.ScanWizardCompleted` event to the client with the reason set to
`Flix.Protocol.Enums.ScanWizardResult.CancelledByUser`.
"""
def cancel_scan_wizard(client, scan_wizard_id) do
command = %Flix.Protocol.Commands.CancelScanWizard{scan_wizard_id: scan_wizard_id}
send_command(client, command)
end
@doc """
Deletes a Flic button.
If the button exists in the list of verified buttons, all connection channels will be removed for
all clients for this button. Then, `flicd` sends the `Flix.Protocol.Events.ButtonDeleted` event to all clients.
If the Flic button does not exist in the list of verified buttons, the execution of this function has no effect,
but a `Flix.Protocol.Events.ButtonDeleted` event is still sent to the client.
"""
def delete_button(client, bt_addr) do
command = %Flix.Protocol.Commands.DeleteButton{bt_addr: bt_addr}
send_command(client, command)
end
@doc """
Creates a battery status listener for a Flic button.
If the provided `listener_id` already exists for this client, the execution of this function has no effect.
After the execution of this function, `flicd` sends a `Flix.Protocol.Events.BatteryStatus` event to the client
for the specified button. Further `Flix.Protocol.Events.BatteryStatus` events are sent to the client every time
the battery status of the button changes. This does not usually happen more often than every three hours.
"""
def create_battery_status_listener(client, listener_id, bt_addr) do
command = %Flix.Protocol.Commands.CreateBatteryStatusListener{
listener_id: listener_id,
bt_addr: bt_addr
}
send_command(client, command)
end
@doc """
Removes the battery status listener identified by the provided `listener_id`.
"""
def remove_battery_status_listener(client, listener_id) do
command = %Flix.Protocol.Commands.RemoveBatteryStatusListener{listener_id: listener_id}
send_command(client, command)
end
## Behaviour
## =========================================================================
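@doc """
Invoked once for every event decoded from the `flicd` stream.
Must return `{:ok, new_state}`; the new state is threaded into the next invocation.
"""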
@callback handle_event(event :: term, state :: term) :: {:ok, term}
defmacro __using__(_opts) do
quote location: :keep do
@behaviour Flix
use GenServer
def init([host, port, module, acc]) do
socket_opts = [:binary, active: :once, packet: :raw]
{:ok, socket} = :gen_tcp.connect(host, port, socket_opts)
{:ok, %{socket: socket, size: 0, event: <<>>, module: module, acc: acc}}
end
def handle_call({:send, command}, _from, %{socket: socket} = state) do
resp = :gen_tcp.send(socket, command)
{:reply, resp, state}
end
def handle_cast(:stop, %{socket: socket} = state) do
:ok = :gen_tcp.close(socket)
{:stop, :normal, state}
end
def handle_info(
{:tcp, _client, packet},
%{socket: socket, size: 0, event: <<>>, acc: acc} = state
) do
:inet.setopts(socket, active: :once)
{events, size, rest} = parse_packet(packet, [])
new_acc =
Enum.reduce(
events,
acc,
fn event, acc ->
{:ok, new_acc} = handle_event(event, acc)
new_acc
end
)
{:noreply, %{state | size: size, event: rest, acc: new_acc}}
end
def handle_info({:tcp, _client, rest}, %{socket: socket, size: size, event: event} = state) do
# TO-DO: Needs to be reworked as per the above function clause
:inet.setopts(socket, active: :once)
rest_size = byte_size(rest)
cond do
rest_size > size ->
IO.puts("Strange!")
IO.puts("#{inspect(rest)}")
{:noreply, %{state | size: 0, event: <<>>}}
rest_size == size ->
IO.puts("It's a match!")
event = <<event::binary, rest::binary>>
event = Flix.Protocol.Events.decode(event)
IO.puts("#{inspect(event)}")
{:noreply, %{state | size: 0, event: <<>>}}
rest_size < size ->
IO.puts("Gotta wait!")
{:noreply, %{state | size: size, event: <<event::binary, rest::binary>>}}
end
end
def handle_info(_msg, state), do: {:noreply, state}
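# Events arrive framed as <<size::16-little, payload::binary-size(size)>>.
# The clauses below handle, in order: a chunk that contains exactly one
# complete event, a partial event (report the expected size and buffer the
# bytes received so far), a chunk that contains more than one event, and
# the end of the input.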
def parse_packet(<<size::16-little, rest::binary>>, _acc) when byte_size(rest) == size do
event = parse_event(rest)
{[event], 0, <<>>}
end
def parse_packet(<<size::16-little, rest::binary>>, _acc) when byte_size(rest) < size do
{[], size, rest}
end
def parse_packet(<<size::16-little, rest::binary>> = packets, acc)
when byte_size(rest) > size do
packet = :binary.part(packets, 2, size)
event = parse_event(packet)
rest = :binary.part(packets, 2 + size, byte_size(packets) - (2 + size))
parse_packet(rest, [event | acc])
end
def parse_packet(<<>>, acc) do
{acc, 0, <<>>}
end
def parse_event(event) do
Flix.Protocol.Events.decode(event)
end
# Flic callbacks
@doc false
def handle_advertisement_packet(event, state) do
require Logger
Logger.debug(
"No handle_advertisement_packet/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_battery_status(event, state) do
require Logger
Logger.debug(
"No handle_battery_status/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_bluetooth_controller_state_change(event, state) do
require Logger
Logger.debug(
"No handle_bluetooth_controller_state_change/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_button_click_or_hold(event, state) do
require Logger
Logger.debug(
"No handle_button_click_or_hold/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_button_deleted(event, state) do
require Logger
Logger.debug(
"No handle_button_deleted/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_button_single_or_double_click(event, state) do
require Logger
Logger.debug(
"No handle_button_single_or_double_click/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_button_single_or_double_click_or_hold(event, state) do
require Logger
Logger.debug(
"No handle_single_or_double_click_or_hold/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_button_up_or_down(event, state) do
require Logger
Logger.debug(
"No handle_button_up_or_down/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_connection_channel_removed(event, state) do
require Logger
Logger.debug(
"No handle_connection_channel_removed/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_connection_status_changed(event, state) do
require Logger
Logger.debug(
"No handle_connection_status_changed/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_create_connection_channel_response(event, state) do
require Logger
Logger.debug(
"No handle_craete_connection_channel_response/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_get_button_info_response(event, state) do
require Logger
Logger.debug(
"No handle_get_button_info_response/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_get_info_response(event, state) do
require Logger
Logger.debug(
"No handle_get_info_response/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_got_space_for_new_connection(event, state) do
require Logger
Logger.debug(
"No handle_got_space_for_new_connection/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_new_verified_button(event, state) do
require Logger
Logger.debug(
"No handle_new_verified_button/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_no_space_for_new_connection(event, state) do
require Logger
Logger.debug(
"No handle_no_space_for_new_connection/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_ping_response(event, state) do
require Logger
Logger.debug(
"No handle_ping_response/2 clause in #{__MODULE__} provided for #{inspect(event)}"
)
{:ok, state}
end
@doc false
def handle_scan_wizard_button_connected(event, state) do
require Logger
Logger.debug(
"No handle_scan_wizard_button_connected/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_scan_wizard_completed(event, state) do
require Logger
Logger.debug(
"No handle_scan_wizard_completed/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_scan_wizard_found_private_button(event, state) do
require Logger
Logger.debug(
"No handle_scan_wizard_found_privcate_button/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
@doc false
def handle_scan_wizard_found_public_button(event, state) do
require Logger
Logger.debug(
"No handle_scan_wizard_found_public_button/2 clause in #{__MODULE__} provided for #{
inspect(event)
}"
)
{:ok, state}
end
defoverridable handle_advertisement_packet: 2,
handle_battery_status: 2,
handle_bluetooth_controller_state_change: 2,
handle_button_click_or_hold: 2,
handle_button_deleted: 2,
handle_button_single_or_double_click: 2,
handle_button_single_or_double_click_or_hold: 2,
handle_button_up_or_down: 2,
handle_connection_channel_removed: 2,
handle_connection_status_changed: 2,
handle_create_connection_channel_response: 2,
handle_get_button_info_response: 2,
handle_get_info_response: 2,
handle_got_space_for_new_connection: 2,
handle_new_verified_button: 2,
handle_no_space_for_new_connection: 2,
handle_ping_response: 2,
handle_scan_wizard_button_connected: 2,
handle_scan_wizard_completed: 2,
handle_scan_wizard_found_private_button: 2,
handle_scan_wizard_found_public_button: 2
end
end
end
|
lib/flix.ex
| 0.897986
| 0.775669
|
flix.ex
|
starcoder
|
defmodule Membrane.HTTPAdaptiveStream.Manifest.Track do
@moduledoc """
Struct representing a state of a single manifest track and functions to operate
on it.
"""
alias Membrane.HTTPAdaptiveStream.Manifest
require Manifest.SegmentAttribute
defmodule Config do
@moduledoc """
Track configuration.
"""
alias Membrane.HTTPAdaptiveStream.Manifest.Track
@enforce_keys [
:id,
:track_name,
:content_type,
:header_extension,
:segment_extension,
:target_segment_duration
]
defstruct @enforce_keys ++
[
target_window_duration: nil,
persist?: false
]
@typedoc """
Track configuration consists of the following fields:
- `id` - identifies the track, will be serialized and attached to names of manifests, headers and segments
- `content_type` - either audio or video
- `header_extension` - extension of the header file (for example .mp4 for CMAF)
- `segment_extension` - extension of the segment files (for example .m4s for CMAF)
- `target_segment_duration` - expected duration of each segment
- `target_window_duration` - track manifest duration is kept above that time, while the oldest segments
are removed whenever possible
- `persist?` - determines whether the entire track contents should be available after the streaming finishes
"""
@type t :: %__MODULE__{
id: Track.id_t(),
track_name: String.t(),
content_type: :audio | :video,
header_extension: String.t(),
segment_extension: String.t(),
target_segment_duration: Membrane.Time.t() | Ratio.t(),
target_window_duration: Membrane.Time.t() | Ratio.t(),
persist?: boolean
}
end
@config_keys Config.__struct__() |> Map.from_struct() |> Map.keys()
defstruct @config_keys ++
[
:header_name,
current_seq_num: 0,
current_discontinuity_seq_num: 0,
segments: Qex.new(),
stale_segments: Qex.new(),
stale_headers: Qex.new(),
finished?: false,
window_duration: 0,
discontinuities_counter: 0,
awaiting_discontinuity: nil,
next_segment_id: 0
]
@typedoc """
The struct representing a track.
Consists of all the fields from `Config.t` and also:
- `id_string` - serialized `id`
- `header_name` - name of the header file
- `current_seq_num` - the number to identify the next segment
- `current_discontinuity_seq_num` - number of current discontinuity sequence.
- `segments` - segments' names and durations
- `stale_segments` - stale segments' names and durations, kept empty unless `persist?` is set to true
- `stale_headers` - stale headers' names, kept empty unless `persist?` is set to true
- `finished?` - determines whether the track is finished
- `window_duration` - current window duration
"""
@type t :: %__MODULE__{
id: id_t,
content_type: :audio | :video | :muxed,
header_extension: String.t(),
segment_extension: String.t(),
target_segment_duration: segment_duration_t,
target_window_duration: Membrane.Time.t() | Ratio.t(),
persist?: boolean,
track_name: String.t(),
header_name: String.t(),
current_seq_num: non_neg_integer,
current_discontinuity_seq_num: non_neg_integer,
segments: segments_t,
stale_segments: segments_t,
finished?: boolean,
window_duration: non_neg_integer,
discontinuities_counter: non_neg_integer,
next_segment_id: non_neg_integer()
}
@type id_t :: any
@type segments_t ::
Qex.t(%{
name: String.t(),
duration: segment_duration_t(),
bytes_size: segment_bytes_size_t(),
attributes: list(Manifest.SegmentAttribute.t())
})
@type segment_duration_t :: Membrane.Time.t() | Ratio.t()
@type segment_bytes_size_t :: non_neg_integer()
@type to_remove_names_t :: [segment_names: [String.t()], header_names: [String.t()]]
@spec new(Config.t()) :: t
def new(%Config{} = config) do
type =
case config.content_type do
list when is_list(list) -> :muxed
type -> type
end
config =
config
|> Map.from_struct()
|> Map.put(:content_type, type)
%__MODULE__{
header_name: header_name(config, 0)
}
|> Map.merge(config)
end
@doc """
Add a segment of given duration to the track.
It is recommended not to pass the discontinuity attribute manually but to use the `discontinue/1` function instead.
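A sketch of typical use (the `segment_duration` and `bytes_size` values are illustrative):

```elixir
{{segment_name, to_remove}, track} = Track.add_segment(track, segment_duration, bytes_size)
```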
"""
@spec add_segment(
t,
segment_duration_t,
segment_bytes_size_t,
list(Manifest.SegmentAttribute.t())
) ::
{{to_add_name :: String.t(), to_remove_names :: to_remove_names_t()}, t}
def add_segment(track, duration, bytes_size, attributes \\ [])
def add_segment(%__MODULE__{finished?: false} = track, duration, bytes_size, attributes) do
use Ratio, comparison: true
name =
"#{track.content_type}_segment_#{track.next_segment_id}_" <>
"#{track.track_name}#{track.segment_extension}"
attributes =
if is_nil(track.awaiting_discontinuity),
do: attributes,
else: [track.awaiting_discontinuity | attributes]
{stale_segments, stale_headers, track} =
track
|> Map.update!(
:segments,
&Qex.push(&1, %{
name: name,
duration: duration,
bytes_size: bytes_size,
attributes: attributes
})
)
|> Map.update!(:next_segment_id, &(&1 + 1))
|> Map.update!(:window_duration, &(&1 + duration))
|> Map.update!(:target_segment_duration, &if(&1 > duration, do: &1, else: duration))
|> Map.put(:awaiting_discontinuity, nil)
|> pop_stale_segments_and_headers()
{to_remove_segment_names, to_remove_header_names, stale_segments, stale_headers} =
if track.persist? do
{
[],
[],
Qex.join(track.stale_segments, Qex.new(stale_segments)),
Qex.join(track.stale_headers, Qex.new(stale_headers))
}
else
{
Enum.map(stale_segments, & &1.name),
stale_headers,
track.stale_segments,
track.stale_headers
}
end
track = Map.update!(track, :current_seq_num, &(&1 + length(to_remove_segment_names)))
{{name, [segment_names: to_remove_segment_names, header_names: to_remove_header_names]},
%__MODULE__{track | stale_segments: stale_segments, stale_headers: stale_headers}}
end
def add_segment(%__MODULE__{finished?: true} = _track, _duration, _bytes_size, _attributes),
do: raise("Cannot add new segments to finished track")
@doc """
Discontinue the track, indicating that parameters of the stream have changed.
New header has to be stored under the returned filename.
For details on discontinuity, please refer to [RFC 8216](https://datatracker.ietf.org/doc/html/rfc8216).
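For example:

```elixir
{new_header_name, track} = Track.discontinue(track)
```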
"""
@spec discontinue(t()) :: {header_name :: String.t(), t()}
def discontinue(%__MODULE__{finished?: false, discontinuities_counter: counter} = track) do
header = header_name(track, counter + 1)
discontinuity = Manifest.SegmentAttribute.discontinuity(header, counter + 1)
track =
track
|> Map.update!(:discontinuities_counter, &(&1 + 1))
|> Map.put(:awaiting_discontinuity, discontinuity)
{header, track}
end
def discontinue(%__MODULE__{finished?: true}), do: raise("Cannot discontinue finished track")
defp header_name(%{} = config, counter) do
"#{config.content_type}_header_#{config.track_name}_part#{counter}_#{config.header_extension}"
end
@doc """
Marks the track as finished. After this action, it won't be possible to add any new segments to the track.
"""
@spec finish(t) :: t
def finish(track) do
%__MODULE__{track | finished?: true}
end
@doc """
Returns a new track with all stale segments restored, resulting in playback of historic data.
Only works with the `persist?` option enabled.
"""
@spec from_beginning(t()) :: t()
def from_beginning(%__MODULE__{persist?: true} = track) do
%__MODULE__{
track
| segments: Qex.join(track.stale_segments, track.segments),
current_seq_num: 0
}
end
def from_beginning(%__MODULE__{persist?: false} = _track),
do: raise("Cannot play the track from the beginning as it wasn't persisted")
@doc """
Returns all segments present in the track, including stale segments.
"""
@spec all_segments(t) :: [segment_name :: String.t()]
def all_segments(%__MODULE__{} = track) do
Qex.join(track.stale_segments, track.segments) |> Enum.map(& &1.name)
end
defp pop_stale_segments_and_headers(%__MODULE__{target_window_duration: :infinity} = track) do
{[], [], track}
end
defp pop_stale_segments_and_headers(track) do
%__MODULE__{
segments: segments,
window_duration: window_duration,
target_window_duration: target_window_duration,
header_name: header_name,
current_discontinuity_seq_num: discontinuity_seq_number
} = track
{segments_to_remove, headers_to_remove, segments, window_duration,
{new_header_name, discontinuity_seq_number}} =
do_pop_stale_segments(
segments,
window_duration,
target_window_duration,
[],
[],
{header_name, discontinuity_seq_number}
)
# filter out `new_header_name` as it could have been carried by some segment
# that is about to be deleted but the header has become the main track header
headers_to_remove =
headers_to_remove
|> Enum.filter(&(&1 != new_header_name))
track = %__MODULE__{
track
| segments: segments,
window_duration: window_duration,
header_name: new_header_name,
current_discontinuity_seq_num: discontinuity_seq_number
}
{segments_to_remove, headers_to_remove, track}
end
defp do_pop_stale_segments(
segments,
window_duration,
target_window_duration,
segments_acc,
headers_acc,
header
) do
use Ratio, comparison: true
{segment, new_segments} = Qex.pop!(segments)
new_window_duration = window_duration - segment.duration
new_header =
case segment.attributes |> Enum.find(&match?({:discontinuity, {_, _}}, &1)) do
{:discontinuity, {new_header, seq_number}} ->
{new_header, seq_number}
nil ->
header
end
headers_acc =
if new_header != header do
{header_name, _} = header
[header_name | headers_acc]
else
headers_acc
end
if new_window_duration >= target_window_duration and new_window_duration > 0 do
do_pop_stale_segments(
new_segments,
new_window_duration,
target_window_duration,
[segment | segments_acc],
headers_acc,
new_header
)
else
{Enum.reverse(segments_acc), Enum.reverse(headers_acc), segments, window_duration, header}
end
end
end
|
lib/membrane_http_adaptive_stream/manifest/track.ex
| 0.95803
| 0.49707
|
track.ex
|
starcoder
|
defmodule Spotify.Users do
@moduledoc """
For manipulating users.
[Spotify Docs](https://beta.developer.spotify.com/documentation/web-api/reference/users-profile/)
"""
alias Spotify.{ExternalUrls, Followers, Image, Timestamp, Context}
alias Spotify.Tracks.TrackSimple
@typedoc """
The user’s date-of-birth. This field is only available when
the current user has granted access to the `user-read-birthdate` scope.
"""
@type birthdate :: String.t
@typedoc """
The country of the user, as set in the user’s account profile.
An ISO 3166-1 alpha-2 country code. This field is only available
when the current user has granted access to the `user-read-private` scope.
"""
@type country :: String.t
@typedoc """
The name displayed on the user’s profile. null if not available.
"""
@type display_name :: String.t | nil
@typedoc """
The user’s email address, as entered by the user when creating their account.
Important! This email address is unverified; there is no proof that it
actually belongs to the user. This field is only available when the current
user has granted access to the `user-read-email` scope.
"""
@type email :: String.t
@typedoc """
Known public external URLs for this user.
"""
@type external_urls :: ExternalUrls.t
@typedoc """
Information about the followers of this user.
"""
@type followers :: Followers.t
@typedoc """
A link to the Web API endpoint for this user.
"""
@type href :: String.t
@typedoc """
The Spotify user ID for this user.
"""
@type id :: String.t
@typedoc """
The user’s profile image.
"""
@type images :: [Image.t]
@typedoc """
The user’s Spotify subscription level: `premium`, `free`, etc.
(The subscription level `open` can be considered the same as `free`.)
This field is only available when the current user has
granted access to the `user-read-private` scope.
"""
@type product :: String.t
@typedoc """
The object type: `user`
"""
@type type :: String.t
@typedoc """
The Spotify URI for this user.
"""
@type uri :: String.t
@typedoc """
The track the user listened to.
"""
@type track :: TrackSimple.t
@typedoc """
The date and time the track was played.
"""
@type played_at :: Timestamp.t
@typedoc """
The context the track was played from.
"""
@type context :: Context.t
end
|
lib/spotify/models/users/users.ex
| 0.756268
| 0.469459
|
users.ex
|
starcoder
|
defmodule Erlex do
@moduledoc """
Convert Erlang style structs and error messages to equivalent Elixir.
Lexes and parses the Erlang output, then runs through pretty
printer.
## Usage
Invoke `Erlex.pretty_print/1` with the input string.
```elixir
iex> str = ~S"('Elixir.Plug.Conn':t(),binary() | atom(),'Elixir.Keyword':t() | map()) -> 'Elixir.Plug.Conn':t()"
iex> Erlex.pretty_print(str)
(Plug.Conn.t(), binary() | atom(), Keyword.t() | map()) :: Plug.Conn.t()
```
While the lion's share of the work is done via invoking
`Erlex.pretty_print/1`, other higher order functions exist for further
formatting certain messages by running through the Elixir formatter.
Because we know the previous example is a type, we can invoke the
`Erlex.pretty_print_contract/1` function, which would format that
appropriately for very long lines.
```elixir
iex> str = ~S"('Elixir.Plug.Conn':t(),binary() | atom(),'Elixir.Keyword':t() | map(), map() | atom(), non_neg_integer(), binary(), binary(), binary(), binary(), binary()) -> 'Elixir.Plug.Conn':t()"
iex> Erlex.pretty_print_contract(str)
(
Plug.Conn.t(),
binary() | atom(),
Keyword.t() | map(),
map() | atom(),
non_neg_integer(),
binary(),
binary(),
binary(),
binary(),
binary()
) :: Plug.Conn.t()
```
"""
defp lex(str) do
try do
{:ok, tokens, _} = :lexer.string(str)
tokens
rescue
_ ->
throw({:error, :lexing, str})
end
end
defp parse(tokens) do
try do
{:ok, [first | _]} = :parser.parse(tokens)
first
rescue
_ ->
throw({:error, :parsing, tokens})
end
end
defp format(code) do
try do
Code.format_string!(code)
rescue
_ ->
throw({:error, :formatting, code})
end
end
@spec pretty_print_infix(infix :: String.t()) :: String.t()
def pretty_print_infix('=:='), do: "==="
def pretty_print_infix('=/='), do: "!=="
def pretty_print_infix('/='), do: "!="
def pretty_print_infix('=<'), do: "<="
def pretty_print_infix(infix), do: to_string(infix)
@spec pretty_print(str :: String.t()) :: String.t()
def pretty_print(str) do
parsed =
str
|> to_charlist()
|> lex()
|> parse()
try do
do_pretty_print(parsed)
rescue
_ ->
throw({:error, :pretty_printing, parsed})
end
end
@spec pretty_print_pattern(pattern :: String.t()) :: String.t()
def pretty_print_pattern('pattern ' ++ rest) do
pretty_print_type(rest)
end
def pretty_print_pattern(pattern) do
pretty_print_type(pattern)
end
@spec pretty_print_contract(
contract :: String.t(),
module :: String.t(),
function :: String.t()
) :: String.t()
def pretty_print_contract(contract, module, function) do
[head | tail] =
contract
|> to_string()
|> String.split(";")
head =
head
|> String.trim_leading(to_string(module))
|> String.trim_leading(":")
|> String.trim_leading(to_string(function))
[head | tail]
|> Enum.join(";")
|> pretty_print_contract()
end
@spec pretty_print_contract(contract :: String.t()) :: String.t()
def pretty_print_contract(contract) do
[head | tail] =
contract
|> to_string()
|> String.split(";")
if Enum.empty?(tail) do
do_pretty_print_contract(head)
else
joiner = "Contract head:\n"
joiner <> Enum.map_join([head | tail], "\n\n" <> joiner, &do_pretty_print_contract/1)
end
end
defp do_pretty_print_contract(contract) do
prefix = "@spec a"
suffix = "\ndef a() do\n :ok\nend"
pretty = pretty_print(contract)
"""
@spec a#{pretty}
def a() do
:ok
end
"""
|> format()
|> Enum.join("")
|> String.trim_leading(prefix)
|> String.trim_trailing(suffix)
|> String.replace("\n ", "\n")
end
@spec pretty_print_type(type :: String.t()) :: String.t()
def pretty_print_type(type) do
prefix = "@spec a("
suffix = ") :: :ok\ndef a() do\n :ok\nend"
indented_suffix = ") ::\n :ok\ndef a() do\n :ok\nend"
pretty = pretty_print(type)
"""
@spec a(#{pretty}) :: :ok
def a() do
:ok
end
"""
|> format()
|> Enum.join("")
|> String.trim_leading(prefix)
|> String.trim_trailing(suffix)
|> String.trim_trailing(indented_suffix)
|> String.replace("\n ", "\n")
end
@spec pretty_print_args(args :: String.t()) :: String.t()
def pretty_print_args(args) do
prefix = "@spec a"
suffix = " :: :ok\ndef a() do\n :ok\nend"
pretty = pretty_print(args)
"""
@spec a#{pretty} :: :ok
def a() do
:ok
end
"""
|> format()
|> Enum.join("")
|> String.trim_leading(prefix)
|> String.trim_trailing(suffix)
|> String.replace("\n ", "\n")
end
defp do_pretty_print({:any}) do
"_"
end
defp do_pretty_print({:inner_any_function}) do
"(...)"
end
defp do_pretty_print({:any_function}) do
"(... -> any)"
end
defp do_pretty_print({:assignment, {:atom, atom}, value}) do
"#{normalize_name(atom)} = #{do_pretty_print(value)}"
end
defp do_pretty_print({:atom, [:_]}) do
"_"
end
defp do_pretty_print({:atom, ['_']}) do
"_"
end
defp do_pretty_print({:atom, atom}) do
atomize(atom)
end
defp do_pretty_print({:binary_part, value, _, size}) do
"#{do_pretty_print(value)} :: #{do_pretty_print(size)}"
end
defp do_pretty_print({:binary_part, value, size}) do
"#{do_pretty_print(value)} :: #{do_pretty_print(size)}"
end
defp do_pretty_print({:binary, [{:binary_part, {:any}, {:any}, {:size, {:int, 8}}}]}) do
"binary()"
end
defp do_pretty_print({:binary, [{:binary_part, {:any}, {:any}, {:size, {:int, 1}}}]}) do
"bitstring()"
end
defp do_pretty_print({:binary, binary_parts}) do
binary_parts = Enum.map_join(binary_parts, ", ", &do_pretty_print/1)
"<<#{binary_parts}>>"
end
defp do_pretty_print({:binary, value, size}) do
"<<#{do_pretty_print(value)} :: #{do_pretty_print(size)}>>"
end
defp do_pretty_print({:byte_list, byte_list}) do
byte_list
|> Enum.into(<<>>, fn byte ->
<<byte::8>>
end)
|> inspect()
end
defp do_pretty_print({:contract, {:args, args}, {:return, return}, {:whens, whens}}) do
{printed_whens, when_names} = collect_and_print_whens(whens)
args = {:when_names, when_names, args}
return = {:when_names, when_names, return}
"(#{do_pretty_print(args)}) :: #{do_pretty_print(return)} when #{printed_whens}"
end
defp do_pretty_print({:contract, {:args, {:inner_any_function}}, {:return, return}}) do
"((...) -> #{do_pretty_print(return)})"
end
defp do_pretty_print({:contract, {:args, args}, {:return, return}}) do
"#{do_pretty_print(args)} :: #{do_pretty_print(return)}"
end
defp do_pretty_print({:function, {:contract, {:args, args}, {:return, return}}}) do
"(#{do_pretty_print(args)} -> #{do_pretty_print(return)})"
end
defp do_pretty_print({:int, int}) do
"#{to_string(int)}"
end
defp do_pretty_print({:list, :paren, items}) do
"(#{Enum.map_join(items, ", ", &do_pretty_print/1)})"
end
defp do_pretty_print(
{:list, :square,
[
tuple: [
{:type_list, ['a', 't', 'o', 'm'], {:list, :paren, []}},
{:atom, [:_]}
]
]}
) do
"Keyword.t()"
end
defp do_pretty_print(
{:list, :square,
[
tuple: [
{:type_list, ['a', 't', 'o', 'm'], {:list, :paren, []}},
t
]
]}
) do
"Keyword.t(#{do_pretty_print(t)})"
end
defp do_pretty_print({:list, :square, items}) do
"[#{Enum.map_join(items, ", ", &do_pretty_print/1)}]"
end
defp do_pretty_print({:map_entry, key, value}) do
"#{do_pretty_print(key)} => #{do_pretty_print(value)}"
end
defp do_pretty_print(
{:map,
[
{:map_entry, {:atom, '\'__struct__\''}, {:atom, [:_]}},
{:map_entry, {:atom, [:_]}, {:atom, [:_]}}
]}
) do
"struct()"
end
defp do_pretty_print(
{:map,
[
{:map_entry, {:atom, '\'__struct__\''},
{:type_list, ['a', 't', 'o', 'm'], {:list, :paren, []}}},
{:map_entry, {:type_list, ['a', 't', 'o', 'm'], {:list, :paren, []}}, {:atom, [:_]}}
]}
) do
"struct()"
end
defp do_pretty_print(
{:map,
[
{:map_entry, {:atom, '\'__struct__\''},
{:type_list, ['a', 't', 'o', 'm'], {:list, :paren, []}}},
{:map_entry, {:atom, [:_]}, {:atom, [:_]}}
]}
) do
"struct()"
end
defp do_pretty_print(
{:map,
[
{:map_entry, {:atom, '\'__exception__\''}, {:atom, '\'true\''}},
{:map_entry, {:atom, '\'__struct__\''}, {:atom, [:_]}},
{:map_entry, {:atom, [:_]}, {:atom, [:_]}}
]}
) do
"Exception.t()"
end
defp do_pretty_print({:map, map_keys}) do
case struct_parts(map_keys) do
%{name: name, entries: [{:map_entry, {:atom, [:_]}, {:atom, [:_]}}]} ->
"%#{name}{}"
%{name: name, entries: entries} ->
"%#{name}{#{Enum.map_join(entries, ", ", &do_pretty_print/1)}}"
end
end
defp do_pretty_print({:named_type_with_appended_colon, named_type, type})
when is_tuple(named_type) and is_tuple(type) do
case named_type do
{:atom, name} ->
"#{normalize_name(name)}: #{do_pretty_print(type)}"
other ->
"#{do_pretty_print(other)}: #{do_pretty_print(type)}"
end
end
defp do_pretty_print({:named_type, named_type, type})
when is_tuple(named_type) and is_tuple(type) do
case named_type do
{:atom, name} ->
"#{normalize_name(name)} :: #{do_pretty_print(type)}"
other ->
"#{do_pretty_print(other)} :: #{do_pretty_print(type)}"
end
end
defp do_pretty_print({:named_type, named_type, type}) when is_tuple(named_type) do
case named_type do
{:atom, name = '\'Elixir' ++ _} ->
"#{atomize(name)}.#{deatomize(type)}()"
{:atom, name} ->
"#{normalize_name(name)} :: #{deatomize(type)}()"
other ->
name = do_pretty_print(other)
"#{name} :: #{deatomize(type)}()"
end
end
defp do_pretty_print({nil}) do
"nil"
end
defp do_pretty_print({:pattern, pattern_items}) do
"#{Enum.map_join(pattern_items, ", ", &do_pretty_print/1)}"
end
defp do_pretty_print(
{:pipe_list, {:atom, ['f', 'a', 'l', 's', 'e']}, {:atom, ['t', 'r', 'u', 'e']}}
) do
"boolean()"
end
defp do_pretty_print(
{:pipe_list, {:atom, '\'infinity\''},
{:type_list, ['n', 'o', 'n', :_, 'n', 'e', 'g', :_, 'i', 'n', 't', 'e', 'g', 'e', 'r'],
{:list, :paren, []}}}
) do
"timeout()"
end
defp do_pretty_print({:pipe_list, head, tail}) do
"#{do_pretty_print(head)} | #{do_pretty_print(tail)}"
end
defp do_pretty_print({:range, from, to}) do
"#{do_pretty_print(from)}..#{do_pretty_print(to)}"
end
defp do_pretty_print({:rest}) do
"..."
end
defp do_pretty_print({:size, size}) do
"size(#{do_pretty_print(size)})"
end
defp do_pretty_print({:tuple, tuple_items}) do
"{#{Enum.map_join(tuple_items, ", ", &do_pretty_print/1)}}"
end
defp do_pretty_print({:type, type}) do
"#{deatomize(type)}()"
end
defp do_pretty_print({:type, module, type}) do
module = do_pretty_print(module)
type =
if is_tuple(type) do
do_pretty_print(type)
else
deatomize(type) <> "()"
end
"#{module}.#{type}"
end
defp do_pretty_print({:type, module, type, inner_type}) do
"#{atomize(module)}.#{deatomize(type)}(#{do_pretty_print(inner_type)})"
end
defp do_pretty_print({:type_list, type, types}) do
"#{deatomize(type)}#{do_pretty_print(types)}"
end
defp do_pretty_print({:when_names, when_names, {:list, :paren, items}}) do
Enum.map_join(items, ", ", &format_when_names(do_pretty_print(&1), when_names))
end
defp do_pretty_print({:when_names, when_names, item}) do
format_when_names(do_pretty_print(item), when_names)
end
defp format_when_names(item, when_names) do
trimmed = String.trim_leading(item, ":")
if trimmed in when_names do
downcase_first(trimmed)
else
item
end
end
defp collect_and_print_whens(whens) do
{pretty_names, when_names} =
Enum.reduce(whens, {[], []}, fn {_, when_name, type}, {prettys, whens} ->
pretty_name =
{:named_type_with_appended_colon, when_name, type}
|> do_pretty_print()
|> downcase_first()
{[pretty_name | prettys], [when_name | whens]}
end)
when_names =
Enum.map(when_names, fn {_, name} ->
name
|> atomize()
|> String.trim_leading(":")
end)
printed_whens =
pretty_names
|> Enum.reverse()
|> Enum.join(", ")
{printed_whens, when_names}
end
defp downcase_first(string) do
{first, rest} = String.split_at(string, 1)
String.downcase(first) <> rest
end
defp atomize("Elixir." <> module_name) do
String.trim(module_name, "'")
end
defp atomize([char]) do
to_string(char)
end
defp atomize(atom) when is_list(atom) do
atom_string =
atom
|> deatomize()
|> to_string()
stripped = strip_var_version(atom_string)
if stripped == atom_string do
atomize(stripped)
else
stripped
end
end
defp atomize(<<number>>) when is_number(number) do
to_string(number)
end
defp atomize(atom) do
atom = to_string(atom)
if String.starts_with?(atom, "_") do
atom
else
atom
|> String.trim("'")
|> String.to_atom()
|> inspect()
end
end
defp atom_part_to_string({:int, atom_part}), do: Integer.to_charlist(atom_part)
defp atom_part_to_string(atom_part), do: atom_part
defp strip_var_version(var_name) do
var_name
|> String.replace(~r/^V(.+)@\d+$/, "\\1")
|> String.replace(~r/^(.+)@\d+$/, "\\1")
end
defp struct_parts(map_keys) do
%{name: name, entries: entries} =
Enum.reduce(map_keys, %{name: "", entries: []}, &struct_part/2)
%{name: name, entries: Enum.reverse(entries)}
end
defp struct_part({:map_entry, {:atom, '\'__struct__\''}, {:atom, name}}, struct_parts) do
name =
name
|> atomize()
|> String.trim("\"")
Map.put(struct_parts, :name, name)
end
defp struct_part(entry, struct_parts = %{entries: entries}) do
Map.put(struct_parts, :entries, [entry | entries])
end
defp deatomize([:_, :_, '@', {:int, _}]) do
"_"
end
defp deatomize(chars) when is_list(chars) do
Enum.map(chars, fn char ->
char
|> deatomize_char()
|> atom_part_to_string()
end)
end
defp deatomize_char(char) when is_atom(char) do
Atom.to_string(char)
end
defp deatomize_char(char), do: char
defp normalize_name(name) do
name
|> deatomize()
|> to_string()
|> strip_var_version()
end
end
|
lib/erlex.ex
| 0.804021
| 0.752729
|
erlex.ex
|
starcoder
|
defmodule DistLimiter do
@scope __MODULE__
@doc """
Consume a count of the given resource if available.
On success, returns `{:ok, remaining_count}`.
On failure, returns `{:error, :overflow}`.
```
iex> DistLimiter.consume({:ip, "a.b.c.d", :password_challenge}, {60000, 1}, 1)
{:ok, 0}
iex> DistLimiter.consume({:ip, "a.b.c.d", :password_challenge}, {60000, 1}, 1)
{:error, :overflow}
```
"""
@spec consume(resource :: any(), {window :: integer(), limit :: integer()}, count :: integer()) ::
{:ok, integer()} | {:error, :overflow}
def consume(resource, {window, limit} = _rate, count) do
sum = get_sum_of_consumption(resource, window)
if sum + count <= limit do
DistLimiter.Counter.count_up(get_local_counter(resource, window, limit), resource, count)
{:ok, limit - (sum + count)}
else
{:error, :overflow}
end
end
@doc """
Get remaining count of the given resource.
```
iex> DistLimiter.get_remaining({:ip, "a.b.c.d", :sign_up}, {60000, 1})
1
iex> DistLimiter.consume({:ip, "a.b.c.d", :sign_up}, {60000, 1}, 1)
{:ok, 0}
iex> DistLimiter.get_remaining({:ip, "a.b.c.d", :sign_up}, {60000, 1})
0
```
"""
@spec get_remaining(resource :: any(), {window :: integer(), limit :: integer()}) :: integer()
def get_remaining(resource, {window, limit} = _rate) do
sum = get_sum_of_consumption(resource, window)
limit - sum
end
# Util
defp get_sum_of_consumption(resource, window) do
resource
|> get_counters()
|> Task.async_stream(fn counter ->
DistLimiter.Counter.get_count(counter, resource, window)
end)
|> Stream.map(fn {:ok, count} -> count end)
|> Enum.sum()
end
defp get_local_counter(resource, window, limit) do
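# Look up a counter process for this resource in the local process group;
# if none exists yet, start one under the DynamicSupervisor and join it to
# the group so that other nodes can discover it.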
case UniPg.get_local_members(@scope, resource) do
[counter] ->
counter
[] ->
{:ok, counter} =
DynamicSupervisor.start_child(
DistLimiter.DynamicSupervisor,
%{
id: resource,
start: {DistLimiter.Counter, :start_link, [{resource, {window, limit}}]},
restart: :transient
}
)
UniPg.join(@scope, resource, [counter])
counter
end
end
defp get_counters(resource) do
UniPg.get_members(@scope, resource)
# uniq() is required to work around duplicate membership bugs in :pg
|> Enum.uniq()
end
end
|
lib/dist_limiter.ex
| 0.832237
| 0.735214
|
dist_limiter.ex
|
starcoder
|
defmodule Telegex.Marked.BlockParser do
@moduledoc """
Parsing implementation of block nodes.
"""
use Telegex.Marked.Parser
alias Telegex.Marked.BlockCodeRule
alias Telegex.Marked.{Line, InlineParser}
@rule_modules [BlockCodeRule]
@doc """
Parse Markdown text, including inline elements.
Note: This function is generally not called directly, please use `Telegex.Marked.as_html/2` instead.
"""
@spec parse(String.t(), keyword()) :: Telegex.Marked.document()
def parse(markdown, _options \\ []) do
markdown |> lines_info() |> parse_all(0)
end
@spec parse_all({[Line.t()], integer()}, integer(), [[Node.t()]]) :: Telegex.Marked.document()
defp parse_all({lines, len} = lines_info, pos, nodes \\ [])
when is_list(lines) and is_integer(len) and is_integer(pos) and is_list(nodes) do
init_state = BlockState.new({lines, len}, pos)
case parse_node(init_state) do
{:match, state} ->
if state.pos <= state.len - 1 && !BlockState.ending?(state) do
parse_all(lines_info, state.ending + 1, nodes ++ [state.nodes ++ [newline_node()]])
else
nodes ++ [state.nodes]
end
{:nomatch, state} ->
if state.pos < state.len - 1 do
inline_nodes = nomatch(state, false)
parse_all(lines_info, state.pos + 1, nodes ++ [inline_nodes])
else
nodes ++ [nomatch(state, true)]
end
end
end
@spec nomatch(BlockState.t(), boolean()) :: [Node.t()]
defp nomatch(state, lastline?) do
state.lines
|> Enum.at(state.pos)
|> Map.fetch!(:src)
|> InlineParser.parse_line(lastline?, 0)
end
@spec parse_node(BlockState.t()) :: {Telegex.Marked.Rule.match_status(), BlockState.t()}
defp parse_node(%BlockState{} = state) do
@rule_modules
|> Enum.reduce_while({:nomatch, state}, fn rule_module, result ->
{status, state} = rule_module.match(state)
if status == :match, do: {:halt, {:match, state}}, else: {:cont, result}
end)
end
@spec lines_info(String.t()) :: {[Line.t()], integer()}
defp lines_info(text) do
lines =
text
|> String.split("\n")
|> Enum.map(fn line_src -> Line.new(line_src) end)
{lines, length(lines)}
end
end
|
lib/telegex/marked/parsers/block_parser.ex
| 0.645679
| 0.51562
|
block_parser.ex
|
starcoder
|
defmodule PortAudio.Device do
alias PortAudio.Device
defstruct [
:index,
:name,
:host_api_index,
:max_input_channels,
:max_output_channels,
:default_input_latency,
:default_output_latency,
:default_sample_rate
]
@type t :: %Device{}
@doc """
Fetch a device with the given device index.
Will return `{:ok, %Device{}}` if found or `{:error, msg}` otherwise.
"""
def new(index) do
with {:ok, device} <- PortAudio.Native.device_info(index) do
{:ok, from_native(device)}
end
end
defp from_native(device) do
# TODO: use struct() function
%Device{
index: device.index,
name: device.name,
host_api_index: device.host_api,
max_input_channels: device.max_input_channels,
max_output_channels: device.max_output_channels,
default_input_latency: [
low: device.default_low_input_latency,
high: device.default_high_input_latency
],
default_output_latency: [
low: device.default_low_output_latency,
high: device.default_high_output_latency
],
default_sample_rate: device.default_sample_rate
}
end
@doc """
Return the HostAPI of the given device.
"""
def host_api(%Device{host_api_index: host_idx}) do
{:ok, host_api} = PortAudio.HostAPI.new(host_idx)
host_api
end
@type stream_params :: %{
channel_count: non_neg_integer,
sample_format: PortAudio.Native.sample_format(),
suggested_latency: float | :high | :low
}
@spec stream(device :: t, params :: params) :: {:ok, PortAudio.Stream.t()} | {:error, atom}
when params: [
input: stream_params | nil,
output: stream_params | nil,
sample_rate: float | nil
]
@doc """
Opens and starts a stream for the given device.
## Options
* `sample_rate` - The sample rate to use while streaming. Defaults to the
sample rate of the device if not set.
* `input` - The input stream parameters or `nil` if using an output
only stream.
* `output` - The output stream parameters or `nil` if using an input
only stream.
If neither `input` nor `output` is set, an `ArgumentError` exception will
be raised.
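A hedged example of an output-only stream (the channel count, sample format and
sample rate are assumptions about your device):

```elixir
{:ok, stream} =
  PortAudio.Device.stream(device,
    sample_rate: 44_100.0,
    output: %{channel_count: 2, sample_format: :float32, suggested_latency: :low}
  )
```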
"""
def stream(%Device{} = device, params) do
sample_rate = Keyword.get(params, :sample_rate, device.default_sample_rate)
input_params =
add_defaults_to_params(
device,
Keyword.get(params, :input),
device.default_input_latency
)
output_params =
add_defaults_to_params(
device,
Keyword.get(params, :output),
device.default_output_latency
)
if !input_params and !output_params do
raise ArgumentError, "Either input or output parameters are expected"
end
with {:ok, s} <- PortAudio.Stream.new(input_params, output_params, sample_rate),
{:ok, s} <- PortAudio.Stream.start(s),
do: {:ok, s}
end
@doc """
Same as `PortAudio.Device.stream/2`, but will raise an exception on failure
instead of returning an error tuple.
"""
def stream!(%Device{} = device, params) do
case stream(device, params) do
{:ok, s} -> s
{:error, reason} -> raise "failed to open stream: #{inspect(reason)}"
end
end
defp add_defaults_to_params(_device, nil, _latencies) do
nil
end
defp add_defaults_to_params(device, params, default_latencies) do
# PortAudio defaults to high latency
params
|> Map.put(:device, device)
|> Map.update(:suggested_latency, default_latencies[:high], fn
:low -> default_latencies[:low]
:high -> default_latencies[:high]
val -> val
end)
end
end
|
lib/portaudio/device.ex
| 0.63443
| 0.434161
|
device.ex
|
starcoder
|
defmodule StarkInfra.Event.Attempt do
alias __MODULE__, as: Attempt
alias StarkInfra.Utils.Rest
alias StarkInfra.Utils.Check
alias StarkInfra.User.Project
alias StarkInfra.User.Organization
alias StarkInfra.Error
@moduledoc """
Groups Event.Attempt related functions
"""
@doc """
When an Event delivery fails, an event attempt will be registered.
It carries information meant to help you debug event reception issues.
## Attributes:
- `:id` [string]: unique id that identifies the delivery attempt. ex: "5656565656565656"
- `:code` [string]: delivery error code. ex: badHttpStatus, badConnection, timeout
- `:message` [string]: delivery error full description. ex: "HTTP POST request returned status 404"
- `:event_id` [string]: ID of the Event whose delivery failed. ex: "4848484848484848"
- `:webhook_id` [string]: ID of the Webhook that triggered this event. ex: "5656565656565656"
- `:created` [DateTime]: datetime representing the moment when the attempt was made. ex: ~U[2020-03-26 19:32:35.418698Z]
"""
@enforce_keys [:id, :code, :message, :webhook_id, :event_id, :created]
defstruct [:id, :code, :message, :webhook_id, :event_id, :created]
@type t() :: %__MODULE__{}
@doc """
Receive a single Event.Attempt struct previously created by the Stark Infra API by its id
## Parameters (required):
- `:id` [string]: struct unique id. ex: "5656565656565656"
## Options:
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- Event.Attempt struct with updated attributes
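## Example (using the sample id above):

```elixir
{:ok, attempt} = StarkInfra.Event.Attempt.get("5656565656565656")
```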
"""
@spec get(
binary,
user: Project.t() | Organization.t() | nil
) ::
{:ok, Attempt.t()} |
{:error, [%Error{}]}
def get(id, options \\ []) do
Rest.get_id(resource(), id, options)
end
@doc """
Same as get(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec get!(binary, user: Project.t() | Organization.t() | nil) :: Attempt.t()
def get!(id, options \\ []) do
Rest.get_id!(resource(), id, options)
end
@doc """
Receive a stream of Event.Attempt structs previously created in the Stark Infra API
## Options:
- `:limit` [integer, default nil]: maximum number of structs to be retrieved. Unlimited if nil. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:event_ids` [list of strings, default nil]: list of Event ids to filter attempts. ex: ["5656565656565656", "4545454545454545"]
- `:webhook_ids` [list of strings, default nil]: list of Webhook ids to filter attempts. ex: ["5656565656565656", "4545454545454545"]
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- stream of Event.Attempt structs with updated attributes
"""
@spec query(
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
event_ids: [binary],
webhook_ids: [binary],
user: Project.t() | Organization.t()
) ::
({:cont, {:ok, [Attempt.t()]}} |
{:error, [Error.t()]} |
{:halt, any} |
{:suspend, any},
any -> any)
def query(options \\ []) do
Rest.get_list(resource(), options)
end
@doc """
Same as query(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec query!(
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
event_ids: [binary],
webhook_ids: [binary],
user: Project.t() | Organization.t()
) ::
({:cont, [Attempt.t()]} |
{:halt, any} |
{:suspend, any},
any -> any)
def query!(options \\ []) do
Rest.get_list!(resource(), options)
end
@doc """
Receive a list of up to 100 Attempt structs previously created in the Stark Infra API and the cursor to the next page.
Use this function instead of query if you want to manually page your requests.
## Options:
- `:cursor` [string, default nil]: cursor returned on the previous page function call
- `:limit` [integer, default 100]: maximum number of structs to be retrieved. Max = 100. ex: 35
- `:after` [Date or string, default nil]: date filter for structs created only after specified date. ex: ~D[2020-03-25]
- `:before` [Date or string, default nil]: date filter for structs created only before specified date. ex: ~D[2020-03-25]
- `:event_ids` [list of strings, default nil]: list of Event ids to filter attempts. ex: ["5656565656565656", "4545454545454545"]
- `:webhook_ids` [list of strings, default nil]: list of Webhook ids to filter attempts. ex: ["5656565656565656", "4545454545454545"]
- `:user` [Organization/Project, default nil]: Organization or Project struct returned from StarkInfra.project(). Only necessary if default project or organization has not been set in configs.
## Return:
- list of Attempt structs with updated attributes and cursor to retrieve the next page of Attempt structs
"""
@spec page(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
event_ids: [binary],
webhook_ids: [binary],
user: Project.t() | Organization.t()
) ::
{:ok, {binary, [Attempt.t()]}} |
{:error, [%Error{}]}
def page(options \\ []) do
Rest.get_page(resource(), options)
end
@doc """
Same as page(), but it will unwrap the error tuple and raise in case of errors.
"""
@spec page!(
cursor: binary,
limit: integer,
after: Date.t() | binary,
before: Date.t() | binary,
event_ids: [binary],
webhook_ids: [binary],
user: Project.t() | Organization.t()
) :: [Attempt.t()]
def page!(options \\ []) do
Rest.get_page!(resource(), options)
end
@doc false
def resource() do
{
"EventAttempt",
&resource_maker/1
}
end
@doc false
def resource_maker(json) do
%Attempt{
id: json[:id],
code: json[:code],
message: json[:message],
webhook_id: json[:webhook_id],
event_id: json[:event_id],
created: json[:created] |> Check.datetime()
}
end
end
|
lib/event/event_attempt.ex
| 0.875121
| 0.462109
|
event_attempt.ex
|
starcoder
|
defmodule Content.Audio.TrainIsBoarding do
@moduledoc """
The next train to [destination] is now boarding.
"""
require Logger
@enforce_keys [:destination, :route_id, :track_number]
defstruct @enforce_keys ++ [:trip_id]
@type t :: %__MODULE__{
destination: PaEss.destination(),
trip_id: Predictions.Prediction.trip_id() | nil,
route_id: String.t(),
track_number: Content.Utilities.track_number()
}
defimpl Content.Audio do
@the_next "501"
@train_to "507"
@is_now_boarding "544"
@on_track_1 "541"
@on_track_2 "542"
def to_params(audio) do
case PaEss.Utilities.destination_var(audio.destination) do
{:ok, destination_var} ->
do_to_params(audio, destination_var)
{:error, :unknown} ->
case PaEss.Utilities.ad_hoc_trip_description(audio.destination) do
{:ok, trip_description} ->
text =
if audio.track_number do
"The next #{trip_description} is now boarding, on track #{audio.track_number}"
else
"The next #{trip_description} is now boarding"
end
{:ad_hoc, {text, :audio}}
{:error, :unknown} ->
Logger.error("TrainIsBoarding.to_params unknown destination: #{audio.destination}")
nil
end
end
end
defp do_to_params(%{destination: destination, route_id: "Green-" <> _branch}, destination_var)
when destination in [:lechmere, :north_station, :government_center, :park_st, :kenmore] do
vars = [
@the_next,
@train_to,
destination_var,
@is_now_boarding
]
PaEss.Utilities.take_message(vars, :audio)
end
defp do_to_params(
%{destination: destination, route_id: route_id, track_number: track_number},
destination_var
) do
vars =
case {Content.Utilities.route_and_destination_branch_letter(
route_id,
destination
), track_number} do
{nil, nil} ->
[
@the_next,
@train_to,
destination_var,
@is_now_boarding
]
{nil, track_number} ->
[
@the_next,
@train_to,
destination_var,
@is_now_boarding,
track(track_number)
]
{green_line_branch, _track_number} ->
[
@the_next,
PaEss.Utilities.green_line_branch_var(green_line_branch),
@train_to,
destination_var,
@is_now_boarding
]
end
PaEss.Utilities.take_message(vars, :audio)
end
@spec track(Content.Utilities.track_number()) :: String.t()
defp track(1), do: @on_track_1
defp track(2), do: @on_track_2
end
end
|
lib/content/audio/train_is_boarding.ex
| 0.684159
| 0.417093
|
train_is_boarding.ex
|
starcoder
|
defmodule AWS.Kendra do
@moduledoc """
Amazon Kendra is a service for indexing large document sets.
"""
@doc """
Removes one or more documents from an index.
The documents must have been added with the `BatchPutDocument` operation.
The documents are deleted asynchronously. You can see the progress of the
deletion by using AWS CloudWatch. Any error messages related to the processing
of the batch are sent to your CloudWatch log.
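For example, a hedged sketch (the map keys follow the Kendra `BatchDeleteDocument` request shape;
index and document ids are illustrative):

```elixir
AWS.Kendra.batch_delete_document(client, %{
  "IndexId" => "my-index-id",
  "DocumentIdList" => ["doc-1", "doc-2"]
})
```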
"""
def batch_delete_document(client, input, options \\ []) do
request(client, "BatchDeleteDocument", input, options)
end
@doc """
Adds one or more documents to an index.
The `BatchPutDocument` operation enables you to ingest inline documents or a set
of documents stored in an Amazon S3 bucket. Use this operation to ingest your
text and unstructured text into an index, add custom attributes to the
documents, and to attach an access control list to the documents added to the
index.
The documents are indexed asynchronously. You can see the progress of the batch
using AWS CloudWatch. Any error messages related to processing the batch are
sent to your AWS CloudWatch log.
"""
def batch_put_document(client, input, options \\ []) do
request(client, "BatchPutDocument", input, options)
end
@doc """
Creates a data source that you use with an Amazon Kendra index.
You specify a name, data source connector type and description for your data
source. You also specify configuration information such as document metadata
(author, source URI, and so on) and user context information.
`CreateDataSource` is a synchronous operation. The operation returns 200 if the
data source was successfully created. Otherwise, an exception is raised.
"""
def create_data_source(client, input, options \\ []) do
request(client, "CreateDataSource", input, options)
end
@doc """
Creates a new set of frequently asked questions (FAQ) and answers.
"""
def create_faq(client, input, options \\ []) do
request(client, "CreateFaq", input, options)
end
@doc """
Creates a new Amazon Kendra index.
Index creation is an asynchronous operation. To determine if index creation has
completed, check the `Status` field returned from a call to `DescribeIndex`. The `Status` field
is set to `ACTIVE` when the index is ready to use.
Once the index is active you can add documents using the `BatchPutDocument` operation or
using one of the supported data sources.
"""
def create_index(client, input, options \\ []) do
request(client, "CreateIndex", input, options)
end
@doc """
Deletes an Amazon Kendra data source.
An exception is not thrown if the data source is already being deleted. While
the data source is being deleted, the `Status` field returned by a call to the
`DescribeDataSource` operation is set to `DELETING`. For more information, see [Deleting Data Sources](https://docs.aws.amazon.com/kendra/latest/dg/delete-data-source.html).
"""
def delete_data_source(client, input, options \\ []) do
request(client, "DeleteDataSource", input, options)
end
@doc """
Removes an FAQ from an index.
"""
def delete_faq(client, input, options \\ []) do
request(client, "DeleteFaq", input, options)
end
@doc """
Deletes an existing Amazon Kendra index.
An exception is not thrown if the index is already being deleted. While the
index is being deleted, the `Status` field returned by a call to the
`DescribeIndex` operation is set to `DELETING`.
"""
def delete_index(client, input, options \\ []) do
request(client, "DeleteIndex", input, options)
end
@doc """
  Gets information about an Amazon Kendra data source.
"""
def describe_data_source(client, input, options \\ []) do
request(client, "DescribeDataSource", input, options)
end
@doc """
Gets information about an FAQ list.
"""
def describe_faq(client, input, options \\ []) do
request(client, "DescribeFaq", input, options)
end
@doc """
  Describes an existing Amazon Kendra index.
"""
def describe_index(client, input, options \\ []) do
request(client, "DescribeIndex", input, options)
end
@doc """
Gets statistics about synchronizing Amazon Kendra with a data source.
"""
def list_data_source_sync_jobs(client, input, options \\ []) do
request(client, "ListDataSourceSyncJobs", input, options)
end
@doc """
Lists the data sources that you have created.
"""
def list_data_sources(client, input, options \\ []) do
request(client, "ListDataSources", input, options)
end
@doc """
Gets a list of FAQ lists associated with an index.
"""
def list_faqs(client, input, options \\ []) do
request(client, "ListFaqs", input, options)
end
@doc """
Lists the Amazon Kendra indexes that you have created.
"""
def list_indices(client, input, options \\ []) do
request(client, "ListIndices", input, options)
end
@doc """
Gets a list of tags associated with a specified resource.
Indexes, FAQs, and data sources can have tags associated with them.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Searches an active index.
  Use this API to search your documents using a query. The `Query` operation
  enables you to do faceted search and to filter results based on document
  attributes.
It also enables you to provide user context that Amazon Kendra uses to enforce
document access control in the search results.
Amazon Kendra searches your index for text content and question and answer (FAQ)
content. By default the response contains three types of results.
* Relevant passages
* Matching FAQs
* Relevant documents
You can specify that the query return only one type of result using the
`QueryResultTypeConfig` parameter.
Each query returns the 100 most relevant results.
"""
def query(client, input, options \\ []) do
request(client, "Query", input, options)
end
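  # A minimal usage sketch (the client struct fields, index id and query text
  # below are hypothetical placeholders, not values from this module):
  #
  #     client = %AWS.Client{access_key_id: "...", secret_access_key: "...", region: "us-east-1"}
  #     {:ok, result, _http_response} =
  #       AWS.Kendra.query(client, %{"IndexId" => "example-index-id", "QueryText" => "reset password"})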
@doc """
Starts a synchronization job for a data source.
If a synchronization job is already in progress, Amazon Kendra returns a
`ResourceInUseException` exception.
"""
def start_data_source_sync_job(client, input, options \\ []) do
request(client, "StartDataSourceSyncJob", input, options)
end
@doc """
Stops a running synchronization job.
You can't stop a scheduled synchronization job.
"""
def stop_data_source_sync_job(client, input, options \\ []) do
request(client, "StopDataSourceSyncJob", input, options)
end
@doc """
Enables you to provide feedback to Amazon Kendra to improve the performance of
the service.
"""
def submit_feedback(client, input, options \\ []) do
request(client, "SubmitFeedback", input, options)
end
@doc """
Adds the specified tag to the specified index, FAQ, or data source resource.
If the tag already exists, the existing value is replaced with the new value.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes a tag from an index, FAQ, or a data source.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates an existing Amazon Kendra data source.
"""
def update_data_source(client, input, options \\ []) do
request(client, "UpdateDataSource", input, options)
end
@doc """
Updates an existing Amazon Kendra index.
"""
def update_index(client, input, options \\ []) do
request(client, "UpdateIndex", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "kendra"}
host = build_host("kendra", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AWSKendraFrontendService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/kendra.ex
| 0.89268
| 0.668082
|
kendra.ex
|
starcoder
|
defmodule Athel.ModelCase do
@moduledoc """
This module defines the test case to be used by
model tests.
You may define functions here to be used as helpers in
  your model tests. See `error/2`'s definition as reference.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
alias Ecto.Changeset
alias Athel.Repo
using do
quote do
alias Athel.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import Athel.ModelCase
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Athel.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(Athel.Repo, {:shared, self()})
end
:ok
end
@spec error(Changeset.t, atom) :: String.t
def error(changeset, key) do
{actual_message, _} = changeset.errors[key]
actual_message
end
@spec assert_invalid(struct, atom, list | any, String.t) :: nil
def assert_invalid(struct, attr, invalid_values, error_message) when is_list(invalid_values) do
for value <- invalid_values do
assert_invalid(struct, attr, value, error_message)
end
end
def assert_invalid(struct, attr, invalid_value, error_message) do
changeset = struct.__struct__.changeset(struct, %{attr => invalid_value})
assert error(changeset, attr) =~ error_message
end
def assert_invalid_format(module, attr, invalid_values) do
assert_invalid(module, attr, invalid_values, "has invalid format")
end
def assert_too_long(module, attr, invalid_values) do
assert_invalid(module, attr, invalid_values, "should be at most")
end
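  # A usage sketch (the schema, attribute and values below are illustrative;
  # they must match validations defined on the schema under test):
  #
  #     assert_invalid_format(%Athel.Group{}, :name, ["not a valid name!"])
  #     assert_too_long(%Athel.Group{}, :description, [String.duplicate("a", 1_000)])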
@spec setup_models(non_neg_integer) :: Athel.Group.t
def setup_models(article_count \\ 0) do
group = Athel.Repo.insert!(
%Athel.Group{name: "fun.times",
description: "Funners of the world unite",
status: "y",
low_watermark: 0,
high_watermark: 0})
if article_count > 0 do
for index <- 0..(article_count - 1) do
changeset =
%Athel.Article{}
|> Athel.Article.changeset(%{
message_id: "<EMAIL>",
from: "Me",
subject: "Talking to myself",
# second precision causes times to all line up,
# ensure dates differ for ordering and that any articles created
# afterwards have later dates
date: Timex.now() |> DateTime.add(-(article_count * 2) + index),
parent_message_id: nil,
content_type: "text/plain",
headers: %{},
body: "LET'S ROCK OUT FOR JESUS & AMERICA",
status: "active"})
|> Changeset.put_assoc(:groups, [group])
changeset |> Athel.Repo.insert!() |> Repo.preload(:attachments)
end
end
group
end
end
|
test/support/model_case.ex
| 0.806319
| 0.40536
|
model_case.ex
|
starcoder
|
defmodule QbBackend.Posts do
@moduledoc """
  This module provides the Posts context: creating, fetching and updating
  manuals, and managing the comments and images attached to them.
"""
alias QbBackend.{
Repo,
Posts.Manual,
Posts.Comment,
Accounts.Profile,
Posts.Image
}
@doc """
  This function takes a manual id and finds the associated manual.
"""
@spec get_manual(String.t()) :: {:ok, Manual.t()} | {:error, String.t()}
def get_manual(id) do
with %Manual{} = manual <- Repo.get_by(Manual, id: id) do
{:ok, manual}
else
nil -> {:error, "No Manual with id: #{id} on the system"}
end
end
@doc """
  This function takes a map of attributes and saves a manual record with those attributes.
"""
@spec create_manual(Profile.t(), map()) :: {:ok, Manual.t()} | {:error, Ecto.Changeset.t()}
def create_manual(%Profile{} = prof, attrs) do
prof
|> Manual.create_changeset(attrs)
|> Repo.insert()
end
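  # A usage sketch (the profile variable and attribute keys are hypothetical):
  #
  #     {:ok, manual} = QbBackend.Posts.create_manual(profile, %{title: "Onboarding guide"})
  #     {:ok, manual} = QbBackend.Posts.update_manual(manual, %{title: "New title"})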
@doc """
This function takes a manual and a map of update attributes that it uses to
update the manual
"""
@spec update_manual(Manual.t(), map()) :: {:ok, Manual.t()} | {:error, Ecto.Changeset.t()}
def update_manual(%Manual{} = manual, attrs) do
manual
|> Manual.changeset(attrs)
|> Repo.update()
end
@doc """
  This function takes a Profile struct, a Manual struct and a map of params and
  creates a comment linked to the specified Manual.
"""
@spec add_comment(Profile.t(), Manual.t(), map()) ::
{:ok, Comment.t()} | {:error, Ecto.Changeset.t()}
def add_comment(%Profile{} = profile, %Manual{} = manual, params) do
profile
|> Comment.create_changeset(manual, params)
|> Repo.insert()
end
@doc """
  This function takes a Comment struct and a map of params and updates the comment.
"""
@spec edit_comment(Comment.t(), map()) :: {:ok, Comment.t()} | {:error, Ecto.Changeset.t()}
def edit_comment(%Comment{} = comment, params) do
comment
|> Comment.changeset(params)
|> Repo.update()
end
@doc """
  This function takes a Comment struct and deletes it.
"""
@spec delete_comment(Comment.t()) :: {:ok, Comment.t()} | {:error, Ecto.Changeset.t()}
def delete_comment(%Comment{} = comment) do
comment
|> Repo.delete()
end
@doc """
  This function takes a Profile struct, a Manual struct and a map of params and
  attaches an image to the manual.
"""
@spec attach_image(Profile.t(), Manual.t(), map()) ::
{:ok, Image.t()} | {:error, Ecto.Changeset.t()}
def attach_image(%Profile{} = profile, %Manual{} = manual, params) do
profile
|> Image.create_changeset(manual, params)
|> Repo.insert()
end
@doc """
  This function deletes an Image struct from a manual.
"""
@spec delete_image(Image.t()) :: {:ok, Image.t()} | {:error, Ecto.Changeset.t()}
def delete_image(%Image{} = image) do
image
|> Repo.delete()
end
end
|
lib/qb_backend/posts/posts.ex
| 0.620622
| 0.589037
|
posts.ex
|
starcoder
|
defmodule CrossedWires do
@moduledoc """
Day 3 — https://adventofcode.com/2019/day/3
"""
@doc """
iex> ["R8,U5,L5,D3", "U7,R6,D4,L4"] |> CrossedWires.part1()
6
iex> ["R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"]
iex> |> CrossedWires.part1()
159
iex> ["R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
iex> "U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"]
iex> |> CrossedWires.part1()
135
"""
@spec part1(Enumerable.t()) :: integer()
def part1(in_stream) do
[w1, w2] =
in_stream
|> Stream.map(&read_wire/1)
|> Enum.to_list()
intersect_distances(w1, w2)
|> Enum.min()
end
@doc """
iex> ["R8,U5,L5,D3", "U7,R6,D4,L4"] |> CrossedWires.part2()
30
iex> ["R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"]
iex> |> CrossedWires.part2()
610
iex> ["R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
iex> "U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"]
iex> |> CrossedWires.part2()
410
"""
@spec part2(Enumerable.t()) :: integer()
def part2(in_stream) do
[w1, w2] =
in_stream
|> Stream.map(&read_wire/1)
|> Enum.to_list()
Enum.zip(intersect_steps(w1, w2, 0), intersect_steps(w2, w1, 0))
|> Enum.map(fn {{_k1, v1}, {_k2, v2}} -> v1 + v2 end)
|> Enum.min()
end
  @spec read_wire(String.t()) :: [{{integer(), integer()}, {integer(), integer()}}]
defp read_wire(line) do
line
|> String.trim()
|> String.split(",")
|> twists_and_turns(0, 0)
end
  @spec twists_and_turns([String.t()], integer(), integer()) :: [{{integer(), integer()}, {integer(), integer()}}]
defp twists_and_turns([], _, _), do: []
defp twists_and_turns([<<bound::utf8, steps::binary>> | path], x, y) do
{new_x, new_y} = walk(bound, String.to_integer(steps), x, y)
[{{x, y}, {new_x, new_y}} | twists_and_turns(path, new_x, new_y)]
end
@spec walk(char(), integer(), integer(), integer()) :: {integer(), integer()}
defp walk(?U, steps, x, y), do: {x, y + steps}
defp walk(?D, steps, x, y), do: {x, y - steps}
defp walk(?R, steps, x, y), do: {x + steps, y}
defp walk(?L, steps, x, y), do: {x - steps, y}
  @spec intersect_distances(list() | tuple(), list()) :: [integer()]
defp intersect_distances([], _), do: []
defp intersect_distances([segment | w1], w2) do
intersect_distances(segment, w2) ++ intersect_distances(w1, w2)
end
defp intersect_distances(_, []), do: []
defp intersect_distances({a, b}, [{c, d} | w2]) do
if intersect(a, b, c, d) and a != c do
[manhattan_distance(a, b, c, d) | intersect_distances({a, b}, w2)]
else
intersect_distances({a, b}, w2)
end
end
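  # Segment intersection uses the standard orientation test: the three-point
  # intersect/3 classifies a point against a segment (0 = collinear, 1 and 2
  # for the two opposite orientations), and two segments cross when each
  # segment's endpoints lie on opposite sides of the other.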
  @spec intersect({integer(), integer()}, {integer(), integer()}, {integer(), integer()}, {integer(), integer()}) ::
          boolean()
defp intersect(a, b, c, d) do
intersect(a, b, c) != intersect(a, b, d) and
intersect(c, d, a) != intersect(c, d, b)
end
  @spec intersect({integer(), integer()}, {integer(), integer()}, {integer(), integer()}) :: integer()
defp intersect({ax, ay}, {bx, by}, {cx, cy}) do
intersect((by - ay) * (cx - bx) - (bx - ax) * (cy - by))
end
@spec intersect(integer()) :: integer()
defp intersect(0), do: 0
defp intersect(v) when v > 0, do: 1
defp intersect(_), do: 2
  @spec manhattan_distance({integer(), integer()}, {integer(), integer()}, {integer(), integer()}, {integer(), integer()}) ::
          integer()
defp manhattan_distance({x, _}, {x, _}, {_, y}, {_, y}), do: abs(x) + abs(y)
defp manhattan_distance({_, y}, {_, _}, {x, _}, {_, _}), do: abs(x) + abs(y)
  @spec intersect_steps(list() | tuple(), list(), integer()) :: map()
defp intersect_steps([], _, _), do: %{}
defp intersect_steps([segment | w1], w2, steps) do
intersect_steps(segment, w2, steps)
|> Map.merge(intersect_steps(w1, w2, steps + segment_len(segment)))
end
defp intersect_steps(_, [], _), do: %{}
defp intersect_steps({a, b}, [{c, d} | w2], steps) do
if intersect(a, b, c, d) and a != c do
%{intersect_key(a, b, c, d) => steps + segment_len(a, b, c)}
|> Map.merge(intersect_steps({a, b}, w2, steps))
else
intersect_steps({a, b}, w2, steps)
end
end
  @spec segment_len({{integer(), integer()}, {integer(), integer()}}) :: integer()
defp segment_len({{x, ay}, {x, by}}), do: abs(by - ay)
defp segment_len({{ax, y}, {bx, y}}), do: abs(bx - ax)
  @spec segment_len({integer(), integer()}, {integer(), integer()}, {integer(), integer()}) :: integer()
defp segment_len({x, ay}, {x, _}, {_, cy}), do: abs(cy - ay)
defp segment_len({ax, y}, {_, y}, {cx, _}), do: abs(cx - ax)
  @spec intersect_key({integer(), integer()}, {integer(), integer()}, {integer(), integer()}, {integer(), integer()}) ::
          {integer(), integer()}
defp intersect_key({x, _}, {x, _}, {_, y}, {_, y}), do: {x, y}
defp intersect_key({_, y}, {_, _}, {x, _}, {_, _}), do: {x, y}
end
|
lib/advent_of_code_2019/day03.ex
| 0.782579
| 0.625753
|
day03.ex
|
starcoder
|
defmodule Cldr.Number.Parser do
@moduledoc """
Functions for parsing numbers and currencies from
a string.
"""
@number_format "[-+]?[0-9][0-9,_]*\\.?[0-9_]+([eE][-+]?[0-9]+)?"
@doc """
  Scans a string in a locale-aware manner and returns
a list of strings and numbers.
## Arguments
* `string` is any `String.t`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
  meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:backend` is any module that includes `use Cldr`
and is therefore a CLDR backend module. The default
is `Cldr.default_backend/0`.
* `:locale` is any locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag.t`. The default is `options[:backend].get_locale/1`.
## Returns
* A list of strings and numbers
## Notes
Number parsing is performed by `Cldr.Number.Parser.parse/2`
and any options provided are passed to that function.
## Examples
iex> Cldr.Number.Parser.scan("£1_000_000.34")
["£", 1000000.34]
iex> Cldr.Number.Parser.scan("I want £1_000_000 dollars")
["I want £", 1000000, " dollars"]
iex> Cldr.Number.Parser.scan("The prize is 23")
["The prize is ", 23]
iex> Cldr.Number.Parser.scan("The lottery number is 23 for the next draw")
["The lottery number is ", 23, " for the next draw"]
iex> Cldr.Number.Parser.scan("The loss is -1.000 euros", locale: "de", number: :integer)
["The loss is ", -1000, " euros"]
"""
def scan(string, options \\ []) do
{locale, backend} = Cldr.locale_and_backend_from(options)
with {:ok, locale} <- Cldr.validate_locale(locale, backend),
{:ok, symbols} <- Cldr.Number.Symbol.number_symbols_for(locale, backend) do
scanner =
@number_format
|> localize_format_string(locale, backend, symbols)
|> Regex.compile!([:unicode])
scanner
|> Regex.split(string, include_captures: true, trim: true)
|> Enum.map(fn element -> parse(element, options) |> elem(1) end)
end
end
@doc """
  Parse a string in a locale-aware manner and return
a number.
## Arguments
* `string` is any `String.t`
* `options` is a keyword list of options
## Options
* `:number` is one of `:integer`, `:float`,
`:decimal` or `nil`. The default is `nil`
  meaning that the type is auto-detected as either
an `integer` or a `float`.
* `:backend` is any module that includes `use Cldr`
and is therefore a CLDR backend module. The default
is `Cldr.default_backend/0`.
* `:locale` is any locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag.t`. The default is `options[:backend].get_locale/1`.
## Returns
* A number of the requested or default type or
* `{:error, string}` if no number could be determined
## Notes
  This function parses a string to return a number, but
in a locale-aware manner. It will normalise grouping
characters and decimal separators, different forms of
the `+` and `-` symbols that appear in Unicode and
strips any `_` characters that might be used for
formatting in a string. It then parses the number
using the Elixir standard library functions.
## Examples
iex> Cldr.Number.Parser.parse("+1.000,34", locale: "de")
{:ok, 1000.34}
iex> Cldr.Number.Parser.parse("-1_000_000.34")
{:ok, -1000000.34}
iex> Cldr.Number.Parser.parse("1.000", locale: "de", number: :integer)
{:ok, 1000}
iex> Cldr.Number.Parser.parse("+1.000,34", locale: "de", number: :integer)
{:error, "+1.000,34"}
"""
def parse(string, options \\ []) when is_binary(string) and is_list(options) do
{locale, backend} = Cldr.locale_and_backend_from(options)
with {:ok, locale} <- Cldr.validate_locale(locale, backend),
{:ok, symbols} <- Cldr.Number.Symbol.number_symbols_for(locale, backend) do
normalized_string = normalize_number_string(string, locale, backend, symbols)
case parse_number(normalized_string, Keyword.get(options, :number)) do
{:error, _} -> {:error, string}
success -> success
end
end
end
defp parse_number(string, nil) do
with {:error, string} <- parse_number(string, :integer),
{:error, string} <- parse_number(string, :float) do
{:error, string}
end
end
defp parse_number(string, :integer) do
case Integer.parse(String.trim(string)) do
{integer, ""} -> {:ok, integer}
_other -> {:error, string}
end
end
defp parse_number(string, :float) do
case Float.parse(String.trim(string)) do
{float, ""} -> {:ok, float}
_other -> {:error, string}
end
end
defp parse_number(string, :decimal) do
case Decimal.parse(String.trim(string)) do
{:ok, decimal} -> {:ok, decimal}
:error -> {:error, string}
end
end
@doc """
  Resolve currencies from strings within
a list.
## Arguments
* `list` is any list in which currency
names and symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
  This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
  `String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Returns
* An ISO4217 currency code as an atom or
* `{:error, {exception, message}}`
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which also is a known ISO4217 currency
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> Cldr.Number.Parser.scan("100 US dollars")
...> |> Cldr.Number.Parser.resolve_currencies
[100, :USD]
iex> Cldr.Number.Parser.scan("100 eurosports")
...> |> Cldr.Number.Parser.resolve_currencies(fuzzy: 0.8)
[100, :EUR]
iex> Cldr.Number.Parser.scan("100 dollars des États-Unis")
...> |> Cldr.Number.Parser.resolve_currencies(locale: "fr")
[100, :USD]
"""
def resolve_currencies(list, options \\ []) when is_list(list) and is_list(options) do
Enum.map list, fn
string when is_binary(string) ->
case resolve_currency(string, options) do
{:error, _} -> string
currency -> currency
end
other -> other
end
end
@doc """
Resolve a currency from a string
## Arguments
* `list` is any list in which currency
names and symbols are expected
* `options` is a keyword list of options
## Options
* `:backend` is any module() that includes `use Cldr` and therefore
is a `Cldr` backend module(). The default is `Cldr.default_backend/0`
* `:locale` is any valid locale returned by `Cldr.known_locale_names/1`
or a `Cldr.LanguageTag` struct returned by `Cldr.Locale.new!/2`
The default is `options[:backend].get_locale()`
* `:only` is an `atom` or list of `atoms` representing the
currencies or currency types to be considered for a match.
  This equates to a list of acceptable currencies for parsing.
See the notes below for currency types.
* `:except` is an `atom` or list of `atoms` representing the
currencies or currency types to be not considered for a match.
This equates to a list of unacceptable currencies for parsing.
See the notes below for currency types.
* `:fuzzy` is a float greater than `0.0` and less than or
equal to `1.0` which is used as input to
  `String.jaro_distance/2` to determine if the provided
currency string is *close enough* to a known currency
string for it to identify definitively a currency code.
It is recommended to use numbers greater than `0.8` in
order to reduce false positives.
## Returns
  * An ISO4217 currency code as an atom or
* `{:error, {exception, message}}`
## Notes
The `:only` and `:except` options accept a list of
currency codes and/or currency types. The following
types are recognised.
If both `:only` and `:except` are specified,
the `:except` entries take priority - that means
any entries in `:except` are removed from the `:only`
entries.
* `:all`, the default, considers all currencies
* `:current` considers those currencies that have a `:to`
date of nil and which also is a known ISO4217 currency
* `:historic` is the opposite of `:current`
* `:tender` considers currencies that are legal tender
* `:unannotated` considers currencies that don't have
"(some string)" in their names. These are usually
financial instruments.
## Examples
iex> Cldr.Number.Parser.resolve_currency("US dollars")
:USD
iex> Cldr.Number.Parser.resolve_currency("100 eurosports", fuzzy: 0.75)
:EUR
iex> Cldr.Number.Parser.resolve_currency("dollars des États-Unis", locale: "fr")
:USD
iex> Cldr.Number.Parser.resolve_currency("not a known currency", locale: "fr")
{:error,
{Cldr.UnknownCurrencyError,
"The currency \\"not a known currency\\" is unknown or not supported"}}
"""
def resolve_currency(string, options \\ []) do
{locale, backend} = Cldr.locale_and_backend_from(options)
string = String.trim(string)
{only_filter, options} =
Keyword.pop(options, :only, Keyword.get(options, :currency_filter, [:all]))
{except_filter, options} = Keyword.pop(options, :except, [])
{fuzzy, _options} = Keyword.pop(options, :fuzzy, nil)
with {:ok, locale} <- backend.validate_locale(locale),
{:ok, currency_strings} <-
Cldr.Currency.currency_strings(locale, backend, only_filter, except_filter),
{:ok, currency} <-
find_currency(currency_strings, string, fuzzy) do
currency
end
end
# Replace localised symbols with canonical forms
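  # Literal "_" separators are stripped first; the lenient parse maps then
  # normalise the various Unicode minus signs to "_", which the final step
  # rewrites to the canonical "-" (an inference from the parse map keys used
  # in localize_format_string/4 below).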
defp normalize_number_string(string, locale, backend, symbols) do
string
|> String.replace("_", "")
|> backend.normalize_lenient_parse(:number, locale)
|> backend.normalize_lenient_parse(:general, locale)
|> String.replace(symbols.latn.group, "")
|> String.replace(symbols.latn.decimal, ".")
|> String.replace("_", "-")
end
# Replace canonical forms with localised symbols
defp localize_format_string(string, locale, backend, symbols) do
parse_map = backend.lenient_parse_map(:number, locale.cldr_locale_name)
plus_matchers = Map.get(parse_map, "+").source |> String.replace(["[", "]"], "")
minus_matchers = Map.get(parse_map, "_").source |> String.replace(["[", "]"], "")
grouping_matchers = Map.get(parse_map, ",").source |> String.replace(["[", "]"], "")
string
|> String.replace("[-+]", "[" <> plus_matchers <> minus_matchers <> "]")
|> String.replace(",", grouping_matchers <> symbols.latn.group)
|> String.replace("\\.", "\\" <> symbols.latn.decimal)
end
defp find_currency(currency_strings, currency, nil) do
canonical_currency = String.downcase(currency)
case Map.get(currency_strings, canonical_currency) do
nil ->
{:error, unknown_currency_error(currency)}
currency ->
{:ok, currency}
end
end
defp find_currency(currency_strings, currency, fuzzy)
when is_float(fuzzy) and fuzzy > 0.0 and fuzzy <= 1.0 do
canonical_currency = String.downcase(currency)
{distance, currency_code} =
currency_strings
|> Enum.map(fn {k, v} -> {String.jaro_distance(k, canonical_currency), v} end)
|> Enum.sort(fn {k1, _v1}, {k2, _v2} -> k1 > k2 end)
|> hd
if distance >= fuzzy do
{:ok, currency_code}
else
{:error, unknown_currency_error(currency)}
end
end
defp find_currency(_currency_strings, _currency, fuzzy) do
{:error,
{
ArgumentError,
"option :fuzzy must be a number > 0.0 and <= 1.0. Found #{inspect(fuzzy)}"
}}
end
defp unknown_currency_error(currency) do
{Cldr.UnknownCurrencyError, "The currency #{inspect(currency)} is unknown or not supported"}
end
end
|
lib/cldr/number/parse.ex
| 0.905134
| 0.647826
|
parse.ex
|
starcoder
|
defmodule Beamchmark do
@moduledoc """
Top level module providing `Beamchmark.run/2` API.
  `#{inspect(__MODULE__)}` measures EVM performance while it is running a user `#{inspect(__MODULE__)}.Scenario`.
# Metrics being measured
## Scheduler Utilization
At the moment, the main interest of `#{inspect(__MODULE__)}` is scheduler utilization which tells
how much given scheduler was busy.
Scheduler is busy when:
* Executing process code
* Executing linked-in driver or NIF code
* Executing BIFs, or any other runtime handling
* Garbage collecting
* Handling any other memory management
Scheduler utilization is measured using Erlang's [`:scheduler`](`:scheduler`) module which uses `:erlang.statistics/1`
under the hood and it is represented as a floating point value between 0.0 and 1.0 and percent.
`#{inspect(__MODULE__)}` measures following types of scheduler utilization:
  * normal/cpu/io - average utilization of a single scheduler of the given type
  * total normal/cpu/io - average utilization of all schedulers of the given type, e.g. total normal equals 1.0 when
  each of the normal schedulers has been active all the time
* total - average utilization of all schedulers
* weighted - average utilization of all schedulers weighted against maximum amount of available CPU time
For more information please refer to `:erlang.statistics/1` (under `:scheduler_wall_time`) or `:scheduler.utilization/1`.
## Other
Other metrics being measured:
* reductions - total reductions number
* context switches - total context switches number
"""
@default_duration_s 60
@default_cpu_interval_ms 1000
@default_delay_s 0
@default_formatter Beamchmark.Formatters.Console
@default_output_dir Path.join([System.tmp_dir!(), "beamchmark"])
@default_compare true
@typedoc """
Configuration for `#{inspect(__MODULE__)}`.
* `name` - name of the benchmark. It can be used by formatters.
* `duration` - time in seconds `#{inspect(__MODULE__)}` will be benchmarking EVM. Defaults to `#{@default_duration_s}` seconds.
* `cpu_interval` - time in milliseconds `#{inspect(__MODULE__)}` will be benchmarking cpu usage. Defaults to `#{@default_cpu_interval_ms}` milliseconds. Needs to be greater than or equal to `interfere_timeout`.
* `delay` - time in seconds `#{inspect(__MODULE__)}` will wait after running scenario and before starting benchmarking. Defaults to `#{@default_delay_s}` seconds.
* `formatters` - list of formatters that will be applied to the result. By default contains only `#{inspect(@default_formatter)}`.
* `compare?` - boolean indicating whether formatters should compare results for given scenario with the previous one. Defaults to `#{inspect(@default_compare)}.`
* `output_dir` - directory where results of benchmarking will be saved. Defaults to "`beamchmark`" directory under location provided by `System.tmp_dir!/0`.
"""
@type options_t() :: [
name: String.t(),
duration: pos_integer(),
cpu_interval: pos_integer(),
delay: non_neg_integer(),
formatters: [Beamchmark.Formatter.t()],
compare?: boolean(),
output_dir: Path.t()
]
@doc """
Runs scenario and benchmarks EVM performance.
If `compare?` option equals `true`, invocation of this function will also compare new measurements with the last ones.
Measurements will be compared only if they share the same scenario module, delay and duration.
"""
@spec run(Beamchmark.Scenario.t(), options_t()) :: :ok
def run(scenario, opts \\ []) do
config = %Beamchmark.Suite.Configuration{
name: Keyword.get(opts, :name),
duration: Keyword.get(opts, :duration, @default_duration_s),
cpu_interval: Keyword.get(opts, :cpu_interval, @default_cpu_interval_ms),
delay: Keyword.get(opts, :delay, @default_delay_s),
formatters: Keyword.get(opts, :formatters, [@default_formatter]),
compare?: Keyword.get(opts, :compare?, @default_compare),
output_dir: Keyword.get(opts, :output_dir, @default_output_dir) |> Path.expand()
}
scenario
|> Beamchmark.Suite.init(config)
|> Beamchmark.Suite.run()
|> tap(fn suite -> :ok = Beamchmark.Suite.save(suite) end)
|> tap(fn suite -> :ok = Beamchmark.Formatter.output(suite) end)
:ok
end
end
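# A minimal usage sketch (`MyApp.MyScenario` is a hypothetical module that
# implements the `Beamchmark.Scenario` behaviour):
#
#     Beamchmark.run(MyApp.MyScenario, duration: 10, delay: 2)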
|
lib/beamchmark.ex
| 0.892237
| 0.632928
|
beamchmark.ex
|
starcoder
|
defmodule JSONRPC2.Clients.TCP do
@moduledoc """
A client for JSON-RPC 2.0 using a line-based TCP transport.
"""
alias JSONRPC2.Clients.TCP.Protocol
@default_timeout 5_000
@type host :: binary | :inet.socket_address() | :inet.hostname()
@type request_id :: any
@type call_option ::
{:string_id, boolean}
| {:timeout, pos_integer}
@type call_options :: [call_option]
@type cast_options :: [{:string_id, boolean}]
@doc """
Start a client pool named `name`, connected to `host` at `port`.
You can optionally pass `client_opts`, detailed
[here](https://github.com/lpgauth/shackle#client_options), as well as `pool_opts`, detailed
[here](https://github.com/lpgauth/shackle#pool_options).
"""
@spec start(host, :inet.port_number(), atom, Keyword.t(), Keyword.t()) :: :ok
def start(host, port, name, client_opts \\ [], pool_opts \\ []) do
host = if is_binary(host), do: to_charlist(host), else: host
ip =
case host do
host when is_list(host) ->
case :inet.parse_address(host) do
{:ok, ip} -> ip
{:error, :einval} -> host
end
host ->
host
end
client_opts = Keyword.merge([ip: ip, port: port, socket_options: [:binary, packet: :line]], client_opts)
:shackle_pool.start(name, Protocol, client_opts, pool_opts)
end
@doc """
Stop the client pool with name `name`.
"""
@spec stop(atom) :: :ok | {:error, :shackle_not_started | :pool_not_started}
def stop(name) do
:shackle_pool.stop(name)
end
@doc """
Call the given `method` with `params` using the client pool named `name` with `options`.
You can provide the option `string_id: true` for compatibility with pathological implementations,
to force the request ID to be a string.
You can also provide the option `timeout: 5_000` to set the timeout to 5000ms, for instance.
For backwards compatibility reasons, you may also provide a boolean for the `options` parameter,
which will set `string_id` to the given boolean.
"""
@spec call(atom, JSONRPC2.method(), JSONRPC2.params(), boolean | call_options) ::
{:ok, {atom(), reference()}} | {:error, :backlog_full | :pool_not_started | :shackle_not_started}
def call(name, method, params, options \\ [])
def call(name, method, params, string_id) when is_boolean(string_id) do
call(name, method, params, string_id: string_id)
end
def call(name, method, params, options) do
string_id = Keyword.get(options, :string_id, false)
timeout = Keyword.get(options, :timeout, @default_timeout)
:shackle.call(name, {:call, method, params, string_id}, timeout)
end
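  # A usage sketch (the pool name, host, port, method and params below are
  # hypothetical):
  #
  #     :ok = JSONRPC2.Clients.TCP.start("localhost", 8080, :example_pool)
  #     JSONRPC2.Clients.TCP.call(:example_pool, "add", [1, 2], timeout: 1_000)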
@doc """
Asynchronously call the given `method` with `params` using the client pool named `name` with
`options`.
Use `receive_response/1` with the `request_id` to get the response.
You can provide the option `string_id: true` for compatibility with pathological implementations,
to force the request ID to be a string.
You can also provide the option `timeout: 5_000` to set the timeout to 5000ms, for instance.
Additionally, you may provide the option `pid: self()` in order to specify which process should
be sent the message which is returned by `receive_response/1`.
For backwards compatibility reasons, you may also provide a boolean for the `options` parameter,
which will set `string_id` to the given boolean.
"""
@spec cast(atom, JSONRPC2.method(), JSONRPC2.params(), boolean | cast_options) ::
{:ok, {atom(), reference()}} | {:error, :backlog_full | :pool_not_started | :shackle_not_started}
def cast(name, method, params, options \\ [])
def cast(name, method, params, string_id) when is_boolean(string_id) do
cast(name, method, params, string_id: string_id)
end
def cast(name, method, params, options) do
string_id = Keyword.get(options, :string_id, false)
timeout = Keyword.get(options, :timeout, @default_timeout)
pid = Keyword.get(options, :pid, self())
:shackle.cast(name, {:call, method, params, string_id}, pid, timeout)
end
@doc """
Receive the response for a previous `cast/3` which returned a `request_id`.
"""
@spec receive_response(request_id) :: {:error, any}
def receive_response(request_id) do
:shackle.receive_response(request_id)
end
@doc """
Send a notification with the given `method` and `params` using the client pool named `name`.
This function returns a `request_id`, but it should not be used with `receive_response/1`.
"""
@spec notify(atom, JSONRPC2.method(), JSONRPC2.params()) ::
{:ok, {atom(), reference()}} | {:error, :backlog_full | :pool_not_started | :shackle_not_started}
def notify(name, method, params) do
# Spawn a dead process so responses go to /dev/null
pid = spawn(fn -> :ok end)
:shackle.cast(name, {:notify, method, params}, pid, 0)
end
end
|
lib/jsonrpc2/clients/tcp.ex
| 0.909219
| 0.420153
|
tcp.ex
|
starcoder
|
defmodule Clickhousex.Codec.RowBinary do
@moduledoc """
  A codec that speaks ClickHouse's RowBinary format.
  To use this codec, set the `:clickhousex` application's `:codec` variable:
config :clickhousex, codec: Clickhousex.Codec.RowBinary
"""
alias Clickhousex.{Codec, Codec.Binary.Extractor, Codec.RowBinary.Utils}
import Utils
use Extractor
require Record
@behaviour Codec
Record.defrecord(:state, column_count: 0, column_names: [], column_types: [], rows: [], count: 0)
@impl Codec
def response_format do
"RowBinaryWithNamesAndTypes"
end
@impl Codec
def request_format do
"Values"
end
@impl Codec
def encode(query, replacements, params) do
params =
Enum.map(params, fn
%DateTime{} = dt -> DateTime.to_unix(dt)
other -> other
end)
Codec.Values.encode(query, replacements, params)
end
@impl Codec
def decode(state(column_names: column_names, rows: rows, count: count)) do
{:ok, %{column_names: column_names, rows: Enum.reverse(rows), count: count}}
end
def decode(nil) do
decode(state())
end
@impl Codec
def new do
nil
end
@impl Codec
def append(nil, data) do
extract_column_count(data, state())
end
def append(state() = state, data) do
extract_rows(data, state)
end
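  # When a value is split across response chunks, decoding suspends by
  # returning {:resume, fun}; the clause below feeds the next chunk to that
  # continuation so extraction picks up exactly where it stopped.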
def append({:resume, resumer}, data) do
case resumer.(data) do
{:resume, _} = resumer -> resumer
state() = state -> state
end
end
@extract column_count: :varint
defp extract_column_count(<<data::binary>>, column_count, state) do
extract_column_names(data, column_count, state(state, column_count: column_count))
end
defp extract_column_names(
<<data::binary>>,
0,
state(column_count: column_count, column_names: column_names) = state
) do
new_state = state(state, column_names: Enum.reverse(column_names))
extract_column_types(data, column_count, [], new_state)
end
defp extract_column_names(<<data::binary>>, remaining, state) do
extract_column_name(data, remaining, state)
end
@extract column_name: :string
defp extract_column_name(<<data::binary>>, remaining, column_name, state) do
column_names = state(state, :column_names)
extract_column_names(
data,
remaining - 1,
state(state, column_names: [column_name | column_names])
)
end
defp extract_column_types(<<data::binary>>, 0, column_types, state) do
column_types = Enum.reverse(column_types)
new_state = state(state, column_types: column_types)
extract_rows(data, new_state)
end
defp extract_column_types(<<data::binary>>, remaining, column_types, state) do
extract_column_type(data, remaining, column_types, state)
end
@extract column_type: :string
defp extract_column_type(<<data::binary>>, remaining, column_type, column_types, state) do
column_type = parse_type(column_type)
extract_column_types(data, remaining - 1, [column_type | column_types], state)
end
defp extract_rows(<<>>, state() = state) do
state
end
defp extract_rows(<<data::binary>>, state(column_types: column_types) = state) do
extract_row(data, column_types, [], state)
end
defp extract_row(<<data::binary>>, [], row_data, state(rows: rows, count: count) = state) do
row = row_data |> Enum.reverse() |> List.to_tuple()
new_state = state(state, rows: [row | rows], count: count + 1)
extract_rows(data, new_state)
end
defp extract_row(<<data::binary>>, [type | types], row, state) do
extract_field(data, type, types, row, state)
end
defp extract_field(<<>>, type, types, row, state) do
{:resume, &extract_field(&1, type, types, row, state)}
end
defp extract_field(<<data::binary>>, {:fixed_string, length} = fixed_string, types, row, state) do
case data do
<<value::binary-size(length), rest::binary>> ->
extract_row(rest, types, [value | row], state)
_ ->
{:resume, &extract_field(data <> &1, fixed_string, types, row, state)}
end
end
@scalar_types [
:i64,
:i32,
:i16,
:i8,
:u64,
:u32,
:u16,
:u8,
:f64,
:f32,
:boolean,
:string,
:date,
:datetime
]
@all_types @scalar_types
|> Enum.flat_map(&type_permutations/1)
|> Enum.sort()
# Build all permutations of extract_field/5
for type <- @all_types do
defp extract_field(<<data::binary>>, unquote(type), types, row, state) do
unquote(extractor_name(type))(data, types, row, state)
end
end
# Build all specific typed extractors, e.g. extract_u8/5
for type <- @all_types do
@extract field_value: type
defp unquote(extractor_name(type))(<<data::binary>>, field_value, types, row, state) do
extract_row(data, types, [field_value | row], state)
end
end
defp parse_type(<<"Nullable(", type::binary>>) do
rest_type =
type
|> String.replace_suffix(")", "")
|> parse_type()
{:nullable, rest_type}
end
defp parse_type(<<"FixedString(", rest::binary>>) do
case Integer.parse(rest) do
{length, rest} ->
rest
|> String.replace_suffix(")", "")
{:fixed_string, length}
end
end
defp parse_type(<<"Array(", type::binary>>) do
rest_type =
type
|> String.replace_suffix(")", "")
|> parse_type()
{:array, rest_type}
end
# Boolean isn't represented below because clickhouse has no concept
# of booleans.
@clickhouse_mappings [
{"Int64", :i64},
{"Int32", :i32},
{"Int16", :i16},
{"Int8", :i8},
{"UInt64", :u64},
{"UInt32", :u32},
{"UInt16", :u16},
{"UInt8", :u8},
{"Float64", :f64},
{"Float32", :f32},
{"Float16", :f16},
{"Float8", :f8},
{"String", :string},
{"Date", :date},
{"DateTime", :datetime}
]
for {clickhouse_type, local_type} <- @clickhouse_mappings do
defp parse_type(unquote(clickhouse_type)) do
unquote(local_type)
end
end
end
|
lib/clickhousex/codec/row_binary.ex
| 0.733643
| 0.517449
|
row_binary.ex
|
starcoder
|
defmodule Relay.Certs do
@moduledoc """
Utilities for working with PEM-encoded certificates.
"""
@typep pem_entry :: :public_key.pem_entry()
# This is a somewhat loose regex designed to exclude things that obviously
# aren't hostnames. It will allow some non-hostnames, because full validation
# would be a lot more complex.
@hostname_regex ~r/^[a-zA-Z0-9.-]+$/
@key_types [:RSAPrivateKey, :DSAPrivateKey, :ECPrivateKey]
@doc """
Encodes the given PEM data (either a single entry, a list of entries, or an
already-encoded binary) in PEM format.
"""
@spec pem_encode(binary | pem_entry | [pem_entry]) :: binary
def pem_encode(pem_data) when is_binary(pem_data), do: pem_data
def pem_encode({_, _, _} = pem_data), do: pem_encode([pem_data])
def pem_encode(pem_data), do: :public_key.pem_encode(pem_data)
@doc """
Extracts the subject CNs and SAN DNS names from the given certificate to
determine which SNI hostnames to serve it for.
"""
@spec get_hostnames(:relay_pk_utils.cert()) :: [String.t()]
def get_hostnames(cert) do
cert
|> :relay_pk_utils.get_cert_names()
|> Enum.map(&to_string/1)
|> Enum.filter(&String.match?(&1, @hostname_regex))
|> Enum.uniq()
end
@doc """
Returns a list of all certificates in the given PEM data.
"""
@spec get_certs(binary | [pem_entry]) :: [pem_entry]
def get_certs(pem_data) do
pem_data
|> pem_decode()
|> Enum.filter(fn {pem_type, _, _} -> pem_type == :Certificate end)
end
@doc """
Gets the first key in the given PEM data. Returns `{:ok, key}` if there is at
least one key, otherwise `:error`.
"""
@spec get_key(binary | [pem_entry]) :: {:ok, pem_entry} | :error
def get_key(pem_data) do
pem_data
|> pem_decode()
|> Enum.filter(fn {pem_type, _, _} -> pem_type in @key_types end)
|> Enum.fetch(0)
end
defp pem_decode(pem_data) when is_list(pem_data), do: pem_data
defp pem_decode(pem_data), do: :public_key.pem_decode(pem_data)
@doc """
Extracts hostnames from all end-entity certs in the given PEM data. In order
to support self-signed certs (which may look a lot like CA certs), we assume
that if there's only one cert in the PEM data it's the one we want.
"""
@spec get_end_entity_hostnames(binary | [pem_entry]) :: [String.t()]
def get_end_entity_hostnames(pem_data) do
pem_data
|> get_certs()
|> get_end_entity_certs()
|> Enum.flat_map(&get_hostnames/1)
|> Enum.uniq()
end
defp get_end_entity_certs([cert]), do: [cert]
defp get_end_entity_certs(certs), do: :relay_pk_utils.get_end_entity_certs(certs)
end
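# A usage sketch (the file path and resulting hostnames are illustrative):
#
#     pem = File.read!("certs/site.pem")
#     Relay.Certs.get_end_entity_hostnames(pem)
#     #=> ["example.com", "www.example.com"]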
|
lib/relay/certs.ex
| 0.745861
| 0.444083
|
certs.ex
|
starcoder
|
defmodule Recurly.Coupon do
@moduledoc """
Module for handling coupons in Recurly.
See the [developer docs on coupons](https://dev.recurly.com/docs/list-active-coupons)
for more details
"""
use Recurly.Resource
alias Recurly.{Resource,Coupon,Money}
@endpoint "/coupons"
schema :coupon do
field :applies_to_all_plans, :boolean
field :applies_to_non_plan_charges, :boolean
field :coupon_code, :string
field :coupon_type, :string
field :created_at, :date_time, read_only: true
field :deleted_at, :date_time, read_only: true
field :description, :string
field :discount_type, :string
field :discount_in_cents, Money
field :discount_percent, :integer
field :duration, :string
field :invoice_description, :string
field :max_redemptions, :integer
field :max_redemptions_per_account, :integer
field :name, :string
field :plan_codes, list: true
field :redeem_by_date, :date_time
field :redemption_resource, :string
field :state, :string
field :temporal_amount, :integer
field :temporal_unit, :string
field :unique_code_template, :string
field :updated_at, :date_time, read_only: true
end
@doc """
Creates a stream of coupons given some options.
## Parameters
- `options` Keyword list of the request options. See options in the
[coupon list section](https://dev.recurly.com/docs/list-active-coupons)
of the docs.
## Examples
See `Recurly.Resource.stream/3` for more detailed examples of
working with resource streams.
```
# stream of active coupons sorted from most recently
# updated to least recently updated
stream = Recurly.Coupon.stream(state: :active, sort: :updated_at)
```
"""
def stream(options \\ []) do
Resource.stream(Coupon, @endpoint, options)
end
@doc """
  Lists all the coupons. See [the coupons dev docs](https://dev.recurly.com/docs/list-active-coupons) for more details.
## Parameters
- `options` Keyword list of GET params
## Examples
```
case Recurly.Coupon.list(state: "redeemable") do
{:ok, coupons} ->
# list of redeemable coupons
{:error, error} ->
# error happened
end
```
"""
def list(options \\ []) do
Resource.list(%Coupon{}, @endpoint, options)
end
@doc """
Finds a coupon given a coupon code. Returns the coupon or an error.
## Parameters
- `coupon_code` String coupon code
## Examples
```
alias Recurly.NotFoundError
case Recurly.Coupon.find("mycouponcode") do
{:ok, coupon} ->
# Found the coupon
{:error, %NotFoundError{}} ->
# 404 coupon was not found
end
```
"""
def find(coupon_code) do
Resource.find(%Coupon{}, path(coupon_code))
end
@doc """
Creates a coupon from a changeset.
## Parameters
- `changeset` Keyword list changeset
## Examples
```
alias Recurly.ValidationError
changeset = [
coupon_code: "mycouponcode",
name: "<NAME>",
discount_type: "dollars",
discount_in_cents: [
USD: 1000
],
duration: "single_use"
]
case Recurly.Coupon.create(changeset) do
{:ok, coupon} ->
# created the coupon
{:error, %ValidationError{errors: errors}} ->
# will give you a list of validation errors
end
```
"""
def create(changeset) do
Resource.create(%Coupon{}, changeset, @endpoint)
end
@doc """
Generates the path to a coupon given the coupon code
## Parameters
- `coupon_code` String coupon code
"""
def path(coupon_code) do
Path.join(@endpoint, coupon_code)
end
end
|
lib/recurly/coupon.ex
| 0.838217
| 0.725454
|
coupon.ex
|
starcoder
|
defmodule Robotica.Scheduler.Schedule do
@moduledoc """
Process a schedule entry
"""
alias Robotica.Config.Loader
@timezone Application.compile_env(:robotica, :timezone)
if Application.compile_env(:robotica_common, :compile_config_files) do
@filename Application.compile_env(:robotica, :schedule_file)
@external_resource @filename
@data Loader.schedule(@filename)
defp get_data, do: @data
else
defp get_data do
filename = Application.get_env(:robotica, :schedule_file)
Loader.schedule(filename)
end
end
defp convert_time_to_utc(date, time) do
{:ok, naive_date_time} = NaiveDateTime.new(date, time)
{:ok, date_time} = DateTime.from_naive(naive_date_time, @timezone)
{:ok, utc_date_time} = DateTime.shift_zone(date_time, "UTC")
utc_date_time
end
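  # Schedule actions are strings of the form "name" or "name(opt1,opt2)".
  # For example (a hypothetical entry), parse_action("lights(dim,slow)")
  # returns {:ok, "lights", MapSet.new(["dim", "slow"])}.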
defp parse_action(action) do
{name, remaining} =
case String.split(action, "(", parts: 2) do
[name] -> {name, ")"}
[name, remaining] -> {name, remaining}
end
case String.split(remaining, ")", parts: 2) do
[""] -> {:error, "Right bracket not found"}
["", _] -> {:ok, name, MapSet.new()}
[options, ""] -> {:ok, name, String.split(options, ",") |> MapSet.new()}
[_, extra] -> {:error, "Extra text found #{extra}"}
end
end
defp add_schedule(expanded_schedule, date, schedule, classification) do
schedule
|> Map.get(classification, %{})
|> Enum.map(fn {time, actions} -> {convert_time_to_utc(date, time), actions} end)
|> Enum.reduce(expanded_schedule, fn {datetime, actions}, acc ->
Enum.reduce(actions, acc, fn action, acc ->
{:ok, name, options} = parse_action(action)
Map.put(acc, name, {datetime, options})
end)
end)
end
def get_schedule(classifications, date) do
s = get_data()
expanded_schedule = add_schedule(%{}, date, s, "*")
expanded_schedule =
classifications
|> Enum.reduce(expanded_schedule, fn c, acc -> add_schedule(acc, date, s, c) end)
|> Enum.reduce(%{}, fn {name, {datetime, options}}, acc ->
action = {name, options}
Map.update(acc, datetime, [action], &[action | &1])
end)
|> Map.to_list()
|> Enum.sort(fn x, y -> DateTime.compare(elem(x, 0), elem(y, 0)) == :lt end)
expanded_schedule
end
end
|
robotica/lib/robotica/scheduler/schedule.ex
| 0.727201
| 0.438785
|
schedule.ex
|
starcoder
|
defmodule BridgeEx.Graphql.LanguageConventions do
@moduledoc """
This defines an adapter that supports GraphQL query documents in their
conventional (in JS) camelcase notation, while allowing the schema to be
defined using conventional (in Elixir) underscore (snakecase) notation, and
  transforming the names as needed for lookups, results, and error messages.
For example, this document:
```
{
myUser: createUser(userId: 2) {
firstName
lastName
}
}
```
Would map to an internal schema that used the following names:
* `create_user` instead of `createUser`
* `user_id` instead of `userId`
* `first_name` instead of `firstName`
* `last_name` instead of `lastName`
Likewise, the result of executing this (camelcase) query document against our
(snakecase) schema would have its names transformed back into camelcase on the
way out:
```
%{
data: %{
"myUser" => %{
"firstName" => "Joe",
"lastName" => "Black"
}
}
}
```
Note variables are a client-facing concern (they may be provided as
parameters), so variable names should match the convention of the query
document (eg, camelCase).
"""
use Absinthe.Adapter
@doc """
Converts a camelCase to snake_case
iex> to_internal_name("test", :read)
"test"
iex> to_internal_name("testTTT", :read)
"test_t_t_t"
iex> to_internal_name("testTest", :read)
"test_test"
iex> to_internal_name("testTest1", :read)
"test_test_1"
iex> to_internal_name("testTest11", :read)
"test_test_11"
iex> to_internal_name("testTest11Pippo", :read)
"test_test_11_pippo"
iex> to_internal_name("camelCase23Snake4344", :read)
"camel_case_23_snake_4344"
"""
def to_internal_name(nil, _role) do
nil
end
def to_internal_name("__" <> camelized_name, role) do
"__" <> to_internal_name(camelized_name, role)
end
def to_internal_name(camelized_name, :operation) do
camelized_name
end
def to_internal_name(camelized_name, _role) do
~r/([A-Z]|\d+)/
|> Regex.replace(camelized_name, "_\\1")
|> String.downcase()
end
defdelegate to_external_name(underscored_name, role), to: Absinthe.Adapter.LanguageConventions
end
|
lib/graphql/language_conventions.ex
| 0.857604
| 0.869991
|
language_conventions.ex
|
starcoder
|
defmodule Gradient.ExprData do
require Gradient.Debug
import Gradient.Debug, only: [elixir_to_ast: 1]
def all_basic_pp_test_data() do
[
value_test_data(),
list_test_data(),
call_test_data(),
variable_test_data(),
exception_test_data(),
block_test_data(),
binary_test_data(),
map_test_data(),
function_ref_test_data(),
sigil_test_data()
]
|> List.flatten()
end
def value_test_data() do
[
{"geric atom", {:atom, 0, :fjdksaose}, ~s(:"fjdksaose")},
{"module atom", {:atom, 0, Gradient.ElixirExpr}, "Gradient.ElixirExpr"},
{"nil atom", {:atom, 0, nil}, "nil"},
{"true atom", {:atom, 0, true}, "true"},
{"false atom", {:atom, 0, false}, "false"},
{"char", {:char, 0, ?c}, "?c"},
{"float", {:float, 0, 12.0}, "12.0"},
{"integer", {:integer, 0, 1}, "1"},
{"erlang string", {:string, 0, 'ala ma kota'}, ~s('ala ma kota')},
{"remote name", {:remote, 7, {:atom, 7, Exception}, {:atom, 7, :normalize}},
"Exception.normalize"}
]
end
def list_test_data() do
[
{"charlist",
{:cons, 0, {:integer, 0, 97},
{:cons, 0, {:integer, 0, 108}, {:cons, 0, {:integer, 0, 97}, {nil, 0}}}}, ~s('ala')},
{"int list",
{:cons, 0, {:integer, 0, 0},
{:cons, 0, {:integer, 0, 1}, {:cons, 0, {:integer, 0, 2}, {nil, 0}}}}, "[0, 1, 2]"},
{"mixed list",
{:cons, 0, {:integer, 0, 0},
{:cons, 0, {:atom, 0, :ok}, {:cons, 0, {:integer, 0, 2}, {nil, 0}}}}, ~s([0, :"ok", 2])},
{"var in list", {:cons, 0, {:integer, 0, 0}, {:cons, 0, {:var, 0, :a}, {nil, 0}}},
"[0, a]"},
{"list tail pm", elixir_to_ast([a | t] = [12, 13, 14]), "[a | t] = [12, 13, 14]"},
{"empty list", elixir_to_ast([] = []), "[] = []"}
]
end
def call_test_data() do
[
{"call", {:call, 0, {:atom, 0, :my_func}, []}, "my_func()"},
{"remote call", {:call, 0, {:remote, 0, {:atom, 0, MyModule}, {:atom, 0, :my_func}}, []},
"MyModule.my_func()"},
{"erl remote call", {:call, 0, {:remote, 0, {:atom, 0, :erlang}, {:atom, 0, :my_func}}, []},
":erlang.my_func()"}
]
end
def variable_test_data() do
[
{"variable", {:var, 0, :abbc}, "abbc"},
{"underscore variable", {:var, 0, :_}, "_"},
{"ast underscore variable", {:var, 0, :_@1}, "_"},
{"ast variable", {:var, 0, :_val@1}, "val"}
]
end
def exception_test_data() do
[
{"throw", elixir_to_ast(throw({:ok, 12})), ~s(throw {:"ok", 12})},
{"raise/1", elixir_to_ast(raise "test error"), ~s(raise "test error")},
{"raise/1 without msg", elixir_to_ast(raise RuntimeError), "raise RuntimeError"},
{"raise/2", elixir_to_ast(raise RuntimeError, "test error"), ~s(raise "test error")},
{"custom raise", elixir_to_ast(raise ArithmeticError, "only odd numbers"),
~s(raise ArithmeticError, "only odd numbers")}
]
end
def block_test_data() do
simple_block =
elixir_to_ast do
a = 1
a + 1
end
[
{"block", simple_block, "a = 1; a + 1"}
]
end
def map_test_data do
[
{"string map", elixir_to_ast(%{"a" => 12}), ~s(%{"a" => 12})},
{"map pm", elixir_to_ast(%{a: a} = %{a: 12}), ~s(%{"a": a} = %{"a": 12})},
{"update map", elixir_to_ast(%{%{} | a: 1}), ~s(%{%{} | "a": 1})},
{"struct expr", elixir_to_ast(%{__struct__: TestStruct, name: "John"}),
~s(%TestStruct{"name": "John"})}
]
end
def function_ref_test_data() do
[
{"&fun/arity", {:fun, 0, {:function, :my_fun, 0}}, "&my_fun/0"},
{"&Mod.fun/arity", elixir_to_ast(&MyMod.my_fun/1), "&MyMod.my_fun/1"}
]
end
def sigil_test_data() do
[
{"regex", elixir_to_ast(~r/foo|bar/), regex_exp()},
{"string ~s", elixir_to_ast(~s(this is a string with "double" quotes, not 'single' ones)),
"\"this is a string with \"double\" quotes, not 'single' ones\""},
{"string ~S", elixir_to_ast(~S(String without escape codes \x26 without #{interpolation})),
"\"String without escape codes \\x26 without \#{interpolation}\""},
{"char lists", elixir_to_ast(~c(this is a char list containing 'single quotes')),
"'this is a char list containing \\'single quotes\\''"},
{"word list", elixir_to_ast(~w(foo bar bat)), ~s(["foo", "bar", "bat"])},
{"word list atom", elixir_to_ast(~w(foo bar bat)a), ~s([:"foo", :"bar", :"bat"])},
{"date", elixir_to_ast(~D[2019-10-31]),
~s(%Date{"calendar": Calendar.ISO, "year": 2019, "month": 10, "day": 31})},
{"time", elixir_to_ast(~T[23:00:07.0]),
~s(%Time{"calendar": Calendar.ISO, "hour": 23, "minute": 0, "second": 7, "microsecond": {0, 1}})},
{"naive date time", elixir_to_ast(~N[2019-10-31 23:00:07]),
~s(%NaiveDateTime{"calendar": Calendar.ISO, "year": 2019, "month": 10, "day": 31, "hour": 23, "minute": 0, "second": 7, "microsecond": {0, 0}})},
{"date time", elixir_to_ast(~U[2019-10-31 19:59:03Z]),
~s(%DateTime{"calendar": Calendar.ISO, "year": 2019, "month": 10, "day": 31, "hour": 19, "minute": 59, "second": 3, "microsecond": {0, 0}, "time_zone": "Etc/UTC", "zone_abbr": "UTC", "utc_offset": 0, "std_offset": 0})}
]
end
def binary_test_data do
[
bin_pm_bin_var(),
bin_joining_syntax(),
bin_with_bin_var(),
bin_with_pp_int_size(),
bin_with_pp_and_bitstring_size(),
{"bin float", elixir_to_ast(<<4.3::float>>), "<<4.3::float>>"}
]
end
defp bin_pm_bin_var do
ast =
elixir_to_ast do
<<a::8, _rest::binary>> = <<1, 2, 3, 4>>
end
{"bin pattern matching with bin var", ast, "<<a::8, _rest::binary>> = <<1, 2, 3, 4>>"}
end
defp bin_joining_syntax do
ast =
elixir_to_ast do
x = "b"
"a" <> x
end
{"binary <> joining", ast, ~s(x = "b"; <<"a", x::binary>>)}
end
defp bin_with_bin_var do
ast =
elixir_to_ast do
x = "b"
<<"a", "b", x::binary>>
end
{"binary with bin var", ast, ~s(x = "b"; <<"a", "b", x::binary>>)}
end
defp bin_with_pp_int_size do
ast =
elixir_to_ast do
<<a::16>> = <<"abcd">>
end
{"binary with int size", ast, ~s(<<a::16>> = "abcd")}
end
defp bin_with_pp_and_bitstring_size do
ast =
elixir_to_ast do
<<header::8, length::32, message::bitstring-size(144)>> =
<<1, 2, 3, 4, 5, 101, 114, 97, 115, 101, 32, 116, 104, 101, 32, 101, 118, 105, 100, 101,
110, 99, 101>>
end
expected =
"<<header::8, length::32, message::bitstring-size(144)>> = <<1, 2, 3, 4, 5, 101, 114, 97, 115, 101, 32, 116, 104, 101, 32, 101, 118, 105, 100, 101, 110, 99, 101>>"
{"binary with patter matching and bitstring-size", ast, expected}
end
defp regex_exp() do
<<37, 82, 101, 103, 101, 120, 123, 34, 111, 112, 116, 115, 34, 58, 32, 60, 60, 62, 62, 44, 32,
34, 114, 101, 95, 112, 97, 116, 116, 101, 114, 110, 34, 58, 32, 123, 58, 34, 114, 101, 95,
112, 97, 116, 116, 101, 114, 110, 34, 44, 32, 48, 44, 32, 48, 44, 32, 48, 44, 32, 34, 69,
82, 67, 80, 86, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 195, 191, 195, 191, 195, 191, 195, 191,
195, 191, 195, 191, 195, 191, 195, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 194, 131, 0, 9, 29,
102, 29, 111, 29, 111, 119, 0, 9, 29, 98, 29, 97, 29, 114, 120, 0, 18, 0, 34, 125, 44, 32,
34, 114, 101, 95, 118, 101, 114, 115, 105, 111, 110, 34, 58, 32, 123, 34, 56, 46, 52, 52,
32, 50, 48, 50, 48, 45, 48, 50, 45, 49, 50, 34, 44, 32, 58, 34, 108, 105, 116, 116, 108,
101, 34, 125, 44, 32, 34, 115, 111, 117, 114, 99, 101, 34, 58, 32, 34, 102, 111, 111, 124,
98, 97, 114, 34, 125>>
end
end
|
test/support/expr_data.ex
| 0.623835
| 0.474631
|
expr_data.ex
|
starcoder
|
defmodule BroadwaySQS.Producer do
@moduledoc """
A GenStage producer that continuously polls messages from an SQS queue and
acknowledges them after they have been successfully processed.
By default this producer uses `BroadwaySQS.ExAwsClient` to talk to SQS, but
you can provide your own client by implementing the `BroadwaySQS.SQSClient`
behaviour.
For a quick getting started on using Broadway with Amazon SQS, please see
the [Amazon SQS Guide](https://hexdocs.pm/broadway/amazon-sqs.html).
## Options for `BroadwaySQS.ExAwsClient`
* `:queue_name` - Required. The name of the queue.
* `:max_number_of_messages` - Optional. The maximum number of messages to be fetched
per request. This value must be between `1` and `10`, which is the maximum number
allowed by AWS. Default is `10`.
* `:wait_time_seconds` - Optional. The duration (in seconds) for which the call waits
for a message to arrive in the queue before returning.
* `:visibility_timeout` - Optional. The time period (in seconds) that a message will
remain _invisible_ to other consumers whilst still on the queue and not acknowledged.
This is passed to SQS when the message (or messages) are read.
This value must be between 0 and 43200 (12 hours).
* `:attribute_names` - A list containing the names of attributes that should be
attached to the response and appended to the `metadata` field of the message.
Supported values are `:sender_id`, `:sent_timestamp`, `:approximate_receive_count`,
`:approximate_first_receive_timestamp`, `:wait_time_seconds` and
`:receive_message_wait_time_seconds`. You can also use `:all` instead of the list
if you want to retrieve all attributes.
* `:message_attribute_names` - A list containing the names of custom message attributes
that should be attached to the response and appended to the `metadata` field of the
message. You can also use `:all` instead of the list if you want to retrieve all
attributes.
* `:config` - Optional. A set of options that overrides the default ExAws configuration
options. The most commonly used options are: `:access_key_id`, `:secret_access_key`,
`:scheme`, `:region` and `:port`. For a complete list of configuration options and
their default values, please see the `ExAws` documentation.
## Producer Options
These options apply to all producers, regardless of the client implementation:
* `:receive_interval` - Optional. The duration (in milliseconds) for which the producer
waits before making a request for more messages. Default is 5000.
* `:sqs_client` - Optional. A module that implements the `BroadwaySQS.SQSClient`
behaviour. This module is responsible for fetching and acknowledging the
messages. Pay attention that all options passed to the producer will be forwarded
to the client. It's up to the client to normalize the options it needs. Default
is `BroadwaySQS.ExAwsClient`.
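A minimal sketch of a custom client (hedged: it implements only the two
callbacks this producer actually invokes, `init/1` and `receive_messages/2`;
check `BroadwaySQS.SQSClient` for the full behaviour):

    defmodule MyApp.FakeSQSClient do
      @behaviour BroadwaySQS.SQSClient

      @impl true
      def init(opts), do: {:ok, opts}

      # Returns no messages; a real client would fetch up to `demand` messages.
      @impl true
      def receive_messages(_demand, _opts), do: []
    end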
## Acknowledgments
In case of successful processing, the message is properly acknowledged to SQS.
In case of failures, no message is acknowledged, which means Amazon SQS will
eventually redeliver the message or remove it based on the "Visibility Timeout"
and "Max Receive Count" configurations. For more information, see:
* ["Visibility Timeout" page on Amazon SQS](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
* ["Dead Letter Queue" page on Amazon SQS](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
### Batching
Even if you are not interested in working with Broadway batches via the
`handle_batch/3` callback, we recommend all Broadway pipelines with SQS
producers to define a default batcher with `batch_size` set to 10, so
messages can be acknowledged in batches, which improves the performance
and reduces the costs of integrating with SQS.
## Example
Broadway.start_link(MyBroadway,
name: MyBroadway,
producers: [
default: [
module: {BroadwaySQS.Producer,
queue_name: "my_queue",
config: [
access_key_id: "YOUR_AWS_ACCESS_KEY_ID",
secret_access_key: "YOUR_AWS_SECRET_ACCESS_KEY",
region: "us-east-2"
]
}
]
],
processors: [
default: []
],
batchers: [
default: [
batch_size: 10,
batch_timeout: 2000
]
]
)
The above configuration will set up a producer that continuously receives
messages from `"my_queue"` and sends them downstream.
## Retrieving Metadata
By default the following information is added to the `metadata` field in the
`%Message{}` struct:
* `message_id` - The message id received when the message was sent to the queue
* `receipt_handle` - The receipt handle
* `md5_of_body` - An MD5 digest of the message body
You can access any of that information directly while processing the message:
def handle_message(_, message, _) do
receipt = %{
id: message.metadata.message_id,
receipt_handle: message.metadata.receipt_handle
}
# Do something with the receipt
end
If you want to retrieve `attributes` or `message_attributes`, you need to
configure the `:attribute_names` and `:message_attribute_names` options
accordingly; otherwise, attributes will not be attached to the response and
will not be available in the `metadata` field:
producers: [
default: [
module: {BroadwaySQS.Producer,
queue_name: "my_queue",
# Define which attributes/message_attributes you want to be attached
attribute_names: [:approximate_receive_count],
message_attribute_names: ["SomeAttribute"],
}
]
]
and then in `handle_message`:
def handle_message(_, message, _) do
approximate_receive_count = message.metadata.attributes["approximate_receive_count"]
some_attribute = message.metadata.message_attributes["SomeAttribute"]
# Do something with the attributes
end
For more information on the `:attribute_names` and `:message_attribute_names`
options, see the "Options for `BroadwaySQS.ExAwsClient`" section above.
"""
use GenStage
@default_receive_interval 5000
@impl true
def init(opts) do
client = opts[:sqs_client] || BroadwaySQS.ExAwsClient
receive_interval = opts[:receive_interval] || @default_receive_interval
case client.init(opts) do
{:error, message} ->
raise ArgumentError, "invalid options given to #{inspect(client)}.init/1, " <> message
{:ok, opts} ->
{:producer,
%{
demand: 0,
receive_timer: nil,
receive_interval: receive_interval,
sqs_client: {client, opts}
}}
end
end
@impl true
def handle_demand(incoming_demand, %{demand: demand} = state) do
handle_receive_messages(%{state | demand: demand + incoming_demand})
end
@impl true
def handle_info(:receive_messages, state) do
handle_receive_messages(%{state | receive_timer: nil})
end
@impl true
def handle_info(_, state) do
{:noreply, [], state}
end
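# With outstanding demand, fetch from the client: if no messages arrived,
# poll again after `receive_interval`; if the demand was fully satisfied,
# wait for new demand; otherwise schedule an immediate retry.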
defp handle_receive_messages(%{receive_timer: nil, demand: demand} = state) when demand > 0 do
messages = receive_messages_from_sqs(state, demand)
new_demand = demand - length(messages)
receive_timer =
case {messages, new_demand} do
{[], _} -> schedule_receive_messages(state.receive_interval)
{_, 0} -> nil
_ -> schedule_receive_messages(0)
end
{:noreply, messages, %{state | demand: new_demand, receive_timer: receive_timer}}
end
defp handle_receive_messages(state) do
{:noreply, [], state}
end
defp receive_messages_from_sqs(state, total_demand) do
%{sqs_client: {client, opts}} = state
client.receive_messages(total_demand, opts)
end
defp schedule_receive_messages(interval) do
Process.send_after(self(), :receive_messages, interval)
end
end
|
lib/broadway_sqs/producer.ex
| 0.903189
| 0.522689
|
producer.ex
|
starcoder
|
defmodule AWS.PI do
@moduledoc """
AWS Performance Insights enables you to monitor and explore different
dimensions of database load based on data captured from a running RDS
instance. The guide provides detailed information about Performance
Insights data types, parameters and errors. For more information about
Performance Insights capabilities see [Using Amazon RDS Performance
Insights
](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html)
in the *Amazon RDS User Guide*.
The AWS Performance Insights API provides visibility into the performance
of your RDS instance, when Performance Insights is enabled for supported
engine types. While Amazon CloudWatch provides the authoritative source for
AWS service vended monitoring metrics, AWS Performance Insights offers a
domain-specific view of database load measured as Average Active Sessions
and provided to API consumers as a 2-dimensional time-series dataset. The
time dimension of the data provides DB load data for each time point in the
queried time range, and each time point decomposes overall load in relation
to the requested dimensions, such as SQL, Wait-event, User or Host,
measured at that time point.
"""
@doc """
For a specific time period, retrieve the top `N` dimension keys for a
metric.
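A hedged usage sketch (the `%AWS.Client{}` fields shown are the ones
`request/4` below relies on, and the input keys follow the Performance
Insights API; both are illustrative, not a verified, exhaustive set):

    client = %AWS.Client{
      access_key_id: "AKIA...",
      secret_access_key: "...",
      region: "us-east-1",
      endpoint: "amazonaws.com",
      proto: "https",
      port: 443
    }

    input = %{
      "ServiceType" => "RDS",
      "Identifier" => "db-EXAMPLE",
      "Metric" => "db.load.avg",
      "StartTime" => 1_565_000_000,
      "EndTime" => 1_565_003_600
    }

    {:ok, result, _response} = AWS.PI.describe_dimension_keys(client, input)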
"""
def describe_dimension_keys(client, input, options \\ []) do
request(client, "DescribeDimensionKeys", input, options)
end
@doc """
Retrieve Performance Insights metrics for a set of data sources, over a
time period. You can provide specific dimension groups and dimensions, and
provide aggregation and filtering criteria for each group.
"""
def get_resource_metrics(client, input, options \\ []) do
request(client, "GetResourceMetrics", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "pi"}
host = build_host("pi", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "PerformanceInsightsv20180227.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/pi.ex
| 0.825414
| 0.608478
|
pi.ex
|
starcoder
|
defmodule Lin.Ldf do
require Logger
@moduledoc """
# LIN .ldf file parser.
## Output
A `%Lin.Ldf{}` struct containing the fields parsed from a `.ldf` file.
Top level fields are:
* `master`
* `slaves`
* `signals`
* `frames`
* `scheduling`
* `signal_encoding_type`
* `signal_representation`
* `diagnostic_signals`
* `diagnostic_frames`
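A minimal usage sketch (the path and signal name are illustrative):

    ldf = Lin.Ldf.parse_file("priv/my_network.ldf")
    frame_id = Lin.Ldf.get_signal_id(ldf.frames, "MySignal")
    size = Lin.Ldf.get_signal_size(ldf.signals, "MySignal")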
"""
# Structure representing the data in a .ldf file
defstruct [
master: [],
slaves: [],
signals: [],
frames: [],
scheduling: [],
signal_encoding_type: [],
signal_representation: [],
diagnostic_signals: [],
diagnostic_frames: []
]
# Structures representing .ldf fields
defmodule Signal do
defstruct [
name: "",
size: 0, # Can be bit(0..7), byte(8), integer(16), Array(16..64)
initial_value: 0,
publisher: "",
subscribers: [],
]
end
defmodule DiagnosticSignal do
defstruct [
name: "",
size: 0, # Can be bit(0..7), byte(8), integer(16), Array(16..64)
initial_value: 0,
]
end
defmodule Frame do
defstruct [
name: "",
id: nil,
publisher: "",
frame_size: 0,
signals: [],
]
end
defmodule DiagnosticFrame do
defstruct [
name: "",
id: nil,
signals: [],
]
end
defmodule FrameSignals do
defstruct [
signal_name: [],
signal_offset: [],
]
end
defmodule SchedulingTable do
defstruct [
table_name: "",
frame_schedules: [],
]
end
defmodule SignalEncodingType do
defstruct [
signal_name: "",
encodings: [],
]
end
defmodule SignalEncoding do
defstruct [
physical: [],
logical: [],
]
end
defmodule Logical do
defstruct [
name: [],
value: [],
]
end
defmodule Physical do
defstruct [
min: [],
max: [],
scale: [],
offset: [],
text: [],
]
end
defmodule SignalRepresentation do
defstruct [
encoding_type_name: "",
signals: [],
]
end
@regex_nodes ~r/\s*(\w+)\:\s+([A-Z,\s]*)/
@regex_section_recursives ~r/\s*(\w+|.*)\s*\{((?>[^{}]|(?R))*)\}/
@regex_signal_line ~r/\s*(\w+)\:\s+(\d+)\,\s+(-?\d+)\,\s*((?:\w+,?\s*)*);/
@regex_frame ~r/\s*(\w+)\:\s+(\w+)\,\s+(\w+)\,\s+(\d+)\s+\{\s*((?:\w+\,\s*\d*;\s*)*)\}/
@regex_scheduling ~r/\s*(\w+)\s+\{\s*((?:\w+\s+\w+\s+(?:\d+\.\d+)\s+\w+;\s*)*)\}/
@regex_signal_encoding ~r/\s*(\w+)\s+\{\s*((?:.*;\s*)*)\}/
@regex_signal_representations ~r/\s*(\w+)\s*\:\s*((?:.|\n)+);/
@regex_diagnostic_signals ~r/\s*(\w+)\:\s+(\d+)\,\s+(\d+);/
@regex_diagnostic_frame ~r/\s*(\w+)\:\s+(\d+)\s+\{\s*((?:\w+\,\s*\d*;\s*)*)\}/
def parse_file(path) do
path
|> File.read!()
|> parse_data()
end
def parse_data(data) do
Regex.scan(@regex_section_recursives, data)
|> Enum.reduce(%Lin.Ldf{}, fn(match, acc) ->
[_, section_name, section_content] = match
case section_name do
"Nodes" ->
Regex.scan(@regex_nodes, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, master_or_slave, nodes] = match
case master_or_slave do
"Master" ->
%Lin.Ldf{acc| master: Enum.map(String.split(nodes), fn(entry) -> String.trim(String.trim(entry), ",") end)}
"Slaves" ->
%Lin.Ldf{acc| slaves: Enum.map(String.split(nodes), fn(entry) -> String.trim(String.trim(entry), ",") end)}
end
end)
"Signals" ->
Regex.scan(@regex_signal_line, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, signal_name, size, initial_value, pubsubs] = match
[publisher | subscribers] =
pubsubs
|> String.split(",", trim: true)
|> Enum.map(&String.trim/1)
add = %Signal{
name: signal_name,
size: elem(Integer.parse(size), 0),
initial_value: elem(Integer.parse(initial_value), 0),
publisher: publisher,
subscribers: subscribers,
}
%Lin.Ldf{acc| signals: [add | acc.signals]}
end)
"Diagnostic_signals" ->
Regex.scan(@regex_diagnostic_signals, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, signal_name, size, initial_value] = match
add = %Signal{
name: signal_name,
size: elem(Integer.parse(size), 0),
initial_value: elem(Integer.parse(initial_value), 0),
publisher: nil,
subscribers: nil
}
%Lin.Ldf{acc| signals: [add | acc.signals]}
end)
"Frames" ->
Regex.scan(@regex_frame, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, frame_name, frame_id, publisher, frame_size, frame_signals] = match
[_ | tail] = frame_signals
|> String.split(";", trim: true)
|> Enum.map(&String.trim/1)
|> Enum.reverse()
signals = Enum.reverse(tail)
|> Enum.map(fn(x) ->
String.split(x, ",", trim: true)
|> Enum.map(&String.trim/1)
end) |> Enum.reduce(%FrameSignals{}, fn(signals, acc) ->
%FrameSignals{signal_name: [Enum.at(signals, 0) | acc.signal_name], signal_offset: [elem(Integer.parse(Enum.at(signals, 1)), 0) | acc.signal_offset]}
end)
{frame_dec, _rest} = Integer.parse(String.slice(frame_id, 2..-1), 16)
add = %Frame{
name: frame_name,
id: frame_dec,
publisher: publisher,
frame_size: elem(Integer.parse(frame_size), 0),
signals: signals,
}
#IO.inspect add
%Lin.Ldf{acc | frames: [add | acc.frames]}
end)
"Diagnostic_frames" ->
Regex.scan(@regex_diagnostic_frame, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, frame_name, frame_id, frame_signals] = match
[_ | tail] = frame_signals
|> String.split(";", trim: true)
|> Enum.map(&String.trim/1)
|> Enum.reverse()
signals = Enum.reverse(tail)
|> Enum.map(fn(x) ->
String.split(x, ",", trim: true)
|> Enum.map(&String.trim/1)
end) |> Enum.reduce(%FrameSignals{}, fn(signals, acc) ->
%FrameSignals{signal_name: [Enum.at(signals, 0) | acc.signal_name], signal_offset: [elem(Integer.parse(Enum.at(signals, 1)), 0) | acc.signal_offset]}
end)
{frame_dec, _rest} = Integer.parse(frame_id)
add = %Frame{
name: frame_name,
id: frame_dec,
publisher: "CCM",
frame_size: 8,
signals: signals,
}
#IO.inspect add
%Lin.Ldf{acc | frames: [add | acc.frames]}
end)
"Schedule_tables" ->
Regex.scan(@regex_scheduling, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, schedule_table_name, frame_schedule] = match
[_ | tail] = frame_schedule
|> String.split(";", trim: true)
|> Enum.map(&String.trim/1)
|> Enum.reverse()
#IO.inspect tail
# TODO: fix this, doesn't need to be list
schedules = Enum.reverse(tail)
|> Enum.map(fn(x) ->
String.split(x, " ", trim: true)
|> Enum.map(&String.trim/1)
end) |> Enum.map(fn(schedules) ->
%{frame_name: Enum.at(schedules, 0), frame_delay: Enum.at(schedules, 2)}
end)
add = %SchedulingTable{
table_name: schedule_table_name,
frame_schedules: schedules,
}
%Lin.Ldf{acc | scheduling: [add | acc.scheduling]}
end)
"Signal_encoding_types" ->
Regex.scan(@regex_signal_encoding, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, signal_name, signal_values] = match
[_ | tail] = signal_values
|> String.split(";", trim: true)
|> Enum.map(&String.trim/1)
|> Enum.reverse()
#IO.inspect tail
encodings = Enum.reverse(tail)
|> Enum.map(fn(x) ->
String.split(x, ",", trim: true)
|> Enum.map(&String.trim/1)
end) |> Enum.reduce(%SignalEncoding{}, fn(encodings, acc) ->
#IO.inspect encodings
if (Enum.at(encodings, 0) == "logical_value") do
add = %Logical{
name: Enum.at(encodings, 2),
value: elem(Integer.parse(Enum.at(encodings, 1)), 0),}
%SignalEncoding{logical: [add | acc.logical]}
else
add = %Physical{
min: elem(Float.parse(Enum.at(encodings, 1)), 0),
max: elem(Float.parse(Enum.at(encodings, 2)), 0),
scale: elem(Float.parse(Enum.at(encodings, 3)), 0),
offset: elem(Float.parse(Enum.at(encodings, 4)), 0),
text: Enum.at(encodings, 5),
}
%SignalEncoding{physical: [add | acc.physical]}
end
end)
add = %SignalEncodingType{
signal_name: signal_name,
encodings: encodings,
}
#IO.inspect add
%Lin.Ldf{acc | signal_encoding_type: [add | acc.signal_encoding_type]}
end)
"Signal_representation" ->
Regex.scan(@regex_signal_representations, section_content)
|> Enum.reduce(acc, fn(match, acc) ->
[_, encoding_type_name, signals_] = match
signals = signals_
|> String.split(",", trim: true)
|> Enum.map(&String.trim/1)
add = %SignalRepresentation{
encoding_type_name: encoding_type_name,
signals: signals,
}
#IO.inspect add
%Lin.Ldf{acc | signal_representation: [add | acc.signal_representation]}
end)
_ ->
#IO.warn("Unknown section: #{inspect section_name}")
:ok
acc
end
end)
end
def get_signal_id(ldf_frames, name) do
[signal] =
Enum.map(ldf_frames, fn(frame) ->
Enum.map(frame.signals.signal_name, fn(signal_name) ->
case signal_name do
^name -> frame.id
_ -> :no_match
end
end)
end)
|> Enum.flat_map(fn (x) -> x end)
|> Enum.filter(fn(entry) -> entry != :no_match end)
# Logger.debug "match #{inspect signal}"
signal
end
def get_signal_start_bit(ldf_frames, name) do
[signal] =
Enum.map(ldf_frames, fn(frame) ->
# Logger.debug ("stuff #{inspect frame.signals.signal_name}")
zipped = Enum.zip(frame.signals.signal_name, frame.signals.signal_offset)
Enum.map(zipped, fn({signal_name, signal_offset}) ->
case signal_name do
^name -> signal_offset
_ -> :no_match
end
end)
end)
|> Enum.flat_map(fn (x) -> x end)
|> Enum.filter(fn(entry) -> entry != :no_match end)
# Logger.debug "startbit is: #{inspect signal}"
signal
end
def get_signal_size(signals, name) do
[size] =
Enum.map(signals, fn(signal) ->
case signal.name == name do
true -> signal.size
false -> :no_match
end
end)
|> Enum.filter(fn(entry) -> entry != :no_match end)
# Logger.debug "size is #{inspect size}"
size
end
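# Resolve a physical encoding property (e.g. :scale or :offset) for a signal
# by following signal -> signal_representation -> signal_encoding_type,
# falling back to `default` when the .ldf file lacks the information.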
def get_signal_scale(ldf_data, name, key, default) do
value =
try do
[encoding_type_name] =
Enum.map(ldf_data.signal_representation, fn(signal_repr) ->
case Enum.member?(signal_repr.signals, name) do
true -> signal_repr.encoding_type_name
false -> :no_match
end
end)
|> Enum.filter(fn(name) -> name != :no_match end)
# Logger.debug ("encoding type is: #{inspect encoding_type_name}")
[signal_encoding] =
Enum.map(ldf_data.signal_encoding_type, fn(encoding) ->
case encoding.signal_name == encoding_type_name do
true -> encoding.encodings
false -> :no_match
end
end)
|> Enum.filter(fn(encodings) -> encodings != :no_match end)
# Logger.debug ("signal_encoding is: #{inspect signal_encoding}")
_value =
case signal_encoding.physical do
[physical] -> Map.get(physical, key)
[] -> default
end
rescue
_ ->
Logger.warn ("bad ldf file. Cannot find #{inspect key} for signal #{inspect name} returning default value which is #{inspect default}")
default
end
# Logger.debug ("value_is: #{inspect value}")
value
end
# Find the length of the frame that a specific signal belongs to.
def get_frame_of_signal_length(ldf_frames, name) do
[signal] =
Enum.map(ldf_frames, fn(frame) ->
Enum.map(frame.signals.signal_name, fn(signal_name) ->
case signal_name do
^name -> frame.frame_size
_ -> :no_match
end
end)
end)
|> Enum.flat_map(fn (x) -> x end)
|> Enum.filter(fn(entry) -> entry != :no_match end)
# Logger.debug "match #{inspect signal}"
signal
end
def write_arb_helper do
write_arbitration_frame(Payload.Name.generate_name_from_namespace("Lin", :server),
Payload.Name.generate_name_from_namespace("Lin", :desc),
"CCMLIN18Fr03")
end
def write_arbitration_frame(lin_server, _lin_desc, frame) do
GenServer.cast(lin_server, {:write_arbitration_frame, frame.id, div(frame.length, 8)})
end
end
|
apps/app_lin/lib/ldf.ex
| 0.587115
| 0.543106
|
ldf.ex
|
starcoder
|
defmodule WeChat.Work do
@moduledoc """
WeChat Work (企业微信)
```elixir
use WeChat.Work,
corp_id: "corp_id",
agents: [
contacts_agent(secret: "your_contacts_secret"),
%WeChat.Work.Agent{name: :agent_name, id: 10000, secret: "your_secret"},
...
]
```
"""
import WeChat.Utils, only: [work_doc_link_prefix: 0]
alias WeChat.Work
@doc_link "#{work_doc_link_prefix()}/90000/90135"
@type client :: module()
@typedoc """
Every enterprise owns a unique corpid -
[official docs](#{@doc_link}/90665#corpid)
It can be found in the admin console under "My Company" -> "Company Info" as the "Company ID" (administrator permission required).
"""
@type corp_id :: String.t()
@typedoc """
Every application has a unique agentid -
[official docs](#{@doc_link}/90665#agentid)
In the admin console, go to "Apps & Mini Programs" -> "Apps" and open an application to see its agentid.
"""
@type agent_id :: Work.Agent.agent_id()
@type agent_name :: Work.Agent.agent_name()
@type agent :: agent_name | agent_id
@type agents :: [Work.Agent.t(), ...]
@typedoc """
The secret is the "key" an enterprise application uses to protect its data -
[official docs](#{@doc_link}/90665#secret)
Each application has its own independent access secret; to keep data safe, the secret must never be leaked.
Currently the following kinds of `secret` exist:
- self-built application `secret`
In the admin console, go to "Apps & Mini Programs" -> "Apps" -> "Self-built" and open an application to see it.
- built-in application `secret`
Some built-in applications (such as "Approval" and "Check-in") can be operated through the API. In the admin console, go to "Apps & Mini Programs" -> "Apps" -> "Built-in", open an application, and click the small "API" button to see it.
- contacts management `secret`
Found under "Management Tools" -> "Contacts Sync" (requires enabling "API sync");
- external contact management `secret`
In the "Customer Contact" section, click the small "API" button to see it.
"""
@type secret :: Work.Agent.secret()
@typedoc """
Options
## Option descriptions
- `corp_id`: `t:corp_id/0` - required
- `agents`: list of agents - `t:agents/0` - required & at least one
- `server_role`: `t:WeChat.server_role/0`
- `storage`: `t:WeChat.Storage.Adapter.t/0`
- `requester`: the request client - `t:module/0`
## Defaults:
- `server_role`: `:client`
- `storage`: `WeChat.Storage.File`
- `requester`: `WeChat.WorkRequester`
- all other options are optional
"""
@type options :: [
corp_id: corp_id,
agents: agents,
server_role: WeChat.server_role(),
storage: WeChat.Storage.Adapter.t(),
requester: module
]
@typedoc """
The access_token is the essential credential the enterprise backend carries when fetching information from the WeChat Work backend -
[official docs](#{@doc_link}/90665#access_token)
It is derived from the `corpid` and `secret`. Every API call must carry it so the access permission of the interface can be verified.
"""
@type access_token :: String.t()
@doc false
defmacro __using__(options \\ []) do
quote do
import WeChat.Work.Agent
use WeChat.WorkBuilder, unquote(options)
end
end
@doc "动态构建 client"
@spec build_client(client, options) :: {:ok, client}
def build_client(client, options) do
with {:module, module, _binary, _term} <-
Module.create(
client,
quote do
@moduledoc false
use WeChat.WorkBuilder, unquote(Macro.escape(options))
end,
Macro.Env.location(__ENV__)
) do
{:ok, module}
end
end
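# A hedged usage sketch of `build_client/2` (module name and agent options
# are illustrative):
#
#     agent = %WeChat.Work.Agent{name: :my_agent, id: 10000, secret: "secret"}
#     {:ok, MyWork} =
#       WeChat.Work.build_client(MyWork, corp_id: "corp_id", agents: [agent])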
@doc """
Get the access_token - [official docs](#{@doc_link}/91039){:target="_blank"}
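A hedged usage sketch (assuming `MyWorkClient` was defined via
`use WeChat.Work` with an agent named `:contacts`):

    WeChat.Work.get_access_token(MyWorkClient, :contacts)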
"""
@spec get_access_token(client, agent) :: WeChat.response()
def get_access_token(client, agent) do
corp_secret = client.agent_secret(agent)
client.get("/cgi-bin/gettoken",
query: [corpid: client.appid(), corpsecret: corp_secret]
)
end
end
|
lib/wechat/work/work.ex
| 0.52342
| 0.573559
|
work.ex
|
starcoder
|
defmodule Plug.Adapters.Cowboy2 do
@moduledoc """
Adapter interface to the Cowboy2 webserver.
## Options
* `:ip` - the ip to bind the server to.
Must be either a tuple in the format `{a, b, c, d}` with each value in `0..255` for IPv4
or a tuple in the format `{a, b, c, d, e, f, g, h}` with each value in `0..65535` for IPv6.
* `:port` - the port to run the server.
Defaults to 4000 (http) and 4040 (https).
* `:acceptors` - the number of acceptors for the listener.
Defaults to 100.
* `:max_connections` - max number of connections supported.
Defaults to `16_384`.
* `:dispatch` - manually configure Cowboy's dispatch.
If this option is used, the given plug won't be initialized
nor dispatched to (and doing so becomes the user's responsibility).
* `:ref` - the reference name to be used.
Defaults to `plug.HTTP` (http) and `plug.HTTPS` (https).
This is the value that needs to be given on shutdown.
* `:compress` - Cowboy will attempt to compress the response body.
Defaults to false.
* `:timeout` - Time in ms with no requests before Cowboy closes the connection.
Defaults to 5000ms.
* `:protocol_options` - Specifies remaining protocol options,
see [Cowboy docs](https://ninenines.eu/docs/en/cowboy/2.0/manual/cowboy_http/).
All other options are given to the underlying transport.
"""
require Logger
# Made public with @doc false for testing.
@doc false
def args(scheme, plug, plug_opts, cowboy_options) do
{cowboy_options, non_keyword_options} =
enum_split_with(cowboy_options, &(is_tuple(&1) and tuple_size(&1) == 2))
cowboy_options
|> Keyword.put_new(:max_connections, 16_384)
|> set_compress()
|> normalize_cowboy_options(scheme)
|> to_args(scheme, plug, plug_opts, non_keyword_options)
end
@doc """
Runs cowboy under http.
## Example
# Starts a new interface
Plug.Adapters.Cowboy2.http MyPlug, [], port: 80
# The interface above can be shutdown with
Plug.Adapters.Cowboy2.shutdown MyPlug.HTTP
"""
@spec http(module(), Keyword.t(), Keyword.t()) ::
{:ok, pid} | {:error, :eaddrinuse} | {:error, term}
def http(plug, opts, cowboy_options \\ []) do
run(:http, plug, opts, cowboy_options)
end
@doc """
Runs cowboy under https.
Besides the options described in the module documentation,
this module also accepts all options defined in
[the `ssl` Erlang module](http://www.erlang.org/doc/man/ssl.html),
such as keyfile, certfile, cacertfile, dhfile and others.
The certificate files can be given as a relative path.
For such, the `:otp_app` option must also be given and
certificates will be looked from the priv directory of
the given application.
## Example
# Starts a new interface
Plug.Adapters.Cowboy2.https MyPlug, [],
port: 443,
password: "<PASSWORD>",
otp_app: :my_app,
keyfile: "priv/ssl/key.pem",
certfile: "priv/ssl/cert.pem",
dhfile: "priv/ssl/dhparam.pem"
# The interface above can be shutdown with
Plug.Adapters.Cowboy2.shutdown MyPlug.HTTPS
"""
@spec https(module(), Keyword.t(), Keyword.t()) ::
{:ok, pid} | {:error, :eaddrinuse} | {:error, term}
def https(plug, opts, cowboy_options \\ []) do
Application.ensure_all_started(:ssl)
run(:https, plug, opts, cowboy_options)
end
@doc """
Shuts down the given reference.
"""
def shutdown(ref) do
:cowboy.stop_listener(ref)
end
@doc """
A function for starting a Cowboy2 server under Elixir v1.5 supervisors.
It expects three options:
* `:scheme` - either `:http` or `:https`
* `:plug` - such as MyPlug or {MyPlug, plug_opts}
* `:options` - the server options as specified in the module documentation
## Examples
Assuming your Plug module is named `MyApp` you can add it to your
supervision tree by using this function:
children = [
{Plug.Adapters.Cowboy2, scheme: :http, plug: MyApp, options: [port: 4040]}
]
Supervisor.start_link(children, strategy: :one_for_one)
"""
def child_spec(opts) do
:ok = verify_cowboy_version()
scheme = Keyword.fetch!(opts, :scheme)
cowboy_opts = Keyword.get(opts, :options, [])
{plug, plug_opts} =
case Keyword.fetch!(opts, :plug) do
{_, _} = tuple -> tuple
plug -> {plug, []}
end
cowboy_args = args(scheme, plug, plug_opts, cowboy_opts)
[ref, transport_opts, proto_opts] = cowboy_args
{ranch_module, cowboy_protocol, transport_opts} =
case scheme do
:http ->
{:ranch_tcp, :cowboy_clear, transport_opts}
:https ->
transport_opts =
transport_opts
|> Keyword.put_new(:next_protocols_advertised, ["h2", "http/1.1"])
|> Keyword.put_new(:alpn_preferred_protocols, ["h2", "http/1.1"])
{:ranch_ssl, :cowboy_tls, transport_opts}
end
num_acceptors = Keyword.get(transport_opts, :num_acceptors, 100)
%{
id: {:ranch_listener_sup, ref},
start:
{:ranch_listener_sup, :start_link,
[
ref,
num_acceptors,
ranch_module,
transport_opts,
cowboy_protocol,
proto_opts
]},
restart: :permanent,
shutdown: :infinity,
type: :supervisor,
modules: [:ranch_listener_sup]
}
end
## Helpers
@protocol_options [:timeout, :compress, :stream_handlers]
defp run(scheme, plug, opts, cowboy_options) do
case Application.ensure_all_started(:cowboy) do
{:ok, _} ->
verify_cowboy_version()
{:error, {:cowboy, _}} ->
raise "could not start the Cowboy application. Please ensure it is listed as a dependency in your mix.exs"
end
start =
case scheme do
:http -> :start_clear
:https -> :start_tls
other -> :erlang.error({:badarg, [other]})
end
apply(:cowboy, start, args(scheme, plug, opts, cowboy_options))
end
@default_stream_handlers [Plug.Adapters.Cowboy2.Stream]
defp set_compress(cowboy_options) do
compress = Keyword.get(cowboy_options, :compress)
stream_handlers = Keyword.get(cowboy_options, :stream_handlers)
case {compress, stream_handlers} do
{true, nil} ->
Keyword.put_new(cowboy_options, :stream_handlers, [
:cowboy_compress_h | @default_stream_handlers
])
{true, _} ->
raise "cannot set both compress and stream_handlers at once. " <>
"If you wish to set compress, please add `:cowboy_compress_h` to your stream handlers."
_ ->
cowboy_options
end
end
defp normalize_cowboy_options(cowboy_options, :http) do
Keyword.put_new(cowboy_options, :port, 4000)
end
defp normalize_cowboy_options(cowboy_options, :https) do
assert_ssl_options(cowboy_options)
cowboy_options = Keyword.put_new(cowboy_options, :port, 4040)
ssl_opts = [:keyfile, :certfile, :cacertfile, :dhfile]
cowboy_options = Enum.reduce(ssl_opts, cowboy_options, &normalize_ssl_file(&1, &2))
Enum.reduce([:password], cowboy_options, &to_charlist(&2, &1))
end
defp to_args(opts, scheme, plug, plug_opts, non_keyword_opts) do
opts = Keyword.delete(opts, :otp_app)
{ref, opts} = Keyword.pop(opts, :ref)
{dispatch, opts} = Keyword.pop(opts, :dispatch)
{num_acceptors, opts} = Keyword.pop(opts, :acceptors, 100)
{protocol_options, opts} = Keyword.pop(opts, :protocol_options, [])
dispatch = :cowboy_router.compile(dispatch || dispatch_for(plug, plug_opts))
{extra_options, transport_options} = Keyword.split(opts, @protocol_options)
extra_options = Keyword.put_new(extra_options, :stream_handlers, @default_stream_handlers)
protocol_and_extra_options = :maps.from_list(protocol_options ++ extra_options)
protocol_options = Map.merge(%{env: %{dispatch: dispatch}}, protocol_and_extra_options)
transport_options = Keyword.put_new(transport_options, :num_acceptors, num_acceptors)
[ref || build_ref(plug, scheme), non_keyword_opts ++ transport_options, protocol_options]
end
defp build_ref(plug, scheme) do
Module.concat(plug, scheme |> to_string |> String.upcase())
end
defp dispatch_for(plug, opts) do
opts = plug.init(opts)
[{:_, [{:_, Plug.Adapters.Cowboy2.Handler, {plug, opts}}]}]
end
defp normalize_ssl_file(key, cowboy_options) do
value = cowboy_options[key]
cond do
is_nil(value) ->
cowboy_options
Path.type(value) == :absolute ->
put_ssl_file(cowboy_options, key, value)
true ->
put_ssl_file(cowboy_options, key, Path.expand(value, otp_app(cowboy_options)))
end
end
defp assert_ssl_options(cowboy_options) do
has_sni? =
Keyword.has_key?(cowboy_options, :sni_hosts) or Keyword.has_key?(cowboy_options, :sni_fun)
has_key? =
Keyword.has_key?(cowboy_options, :key) or Keyword.has_key?(cowboy_options, :keyfile)
has_cert? =
Keyword.has_key?(cowboy_options, :cert) or Keyword.has_key?(cowboy_options, :certfile)
cond do
has_sni? -> :ok
!has_key? -> fail("missing option :key/:keyfile")
!has_cert? -> fail("missing option :cert/:certfile")
true -> :ok
end
end
defp put_ssl_file(cowboy_options, key, value) do
value = to_charlist(value)
unless File.exists?(value) do
fail(
"the file #{value} required by SSL's #{inspect(key)} either does not exist, " <>
"or the application does not have permission to access it"
)
end
Keyword.put(cowboy_options, key, value)
end
defp otp_app(cowboy_options) do
if app = cowboy_options[:otp_app] do
Application.app_dir(app)
else
fail(
"to use a relative certificate with https, the :otp_app " <>
"option needs to be given to the adapter"
)
end
end
defp to_charlist(cowboy_options, key) do
if value = cowboy_options[key] do
Keyword.put(cowboy_options, key, to_charlist(value))
else
cowboy_options
end
end
defp fail(message) do
raise ArgumentError, message: "could not start Cowboy2 adapter, " <> message
end
defp verify_cowboy_version do
case Application.spec(:cowboy, :vsn) do
'2.' ++ _ ->
:ok
vsn ->
raise "you are using Plug.Adapters.Cowboy (for Cowboy 1) but your current Cowboy " <>
"version is #{vsn}. Please update your mix.exs file accordingly"
end
end
# TODO: Remove once we depend on Elixir ~> 1.4.
Code.ensure_loaded(Enum)
split_with = if function_exported?(Enum, :split_with, 2), do: :split_with, else: :partition
defp enum_split_with(enum, fun), do: apply(Enum, unquote(split_with), [enum, fun])
end
|
lib/plug/adapters/cowboy2.ex
| 0.881806
| 0.547041
|
cowboy2.ex
|
starcoder
|
defmodule Backoff do
@moduledoc """
Functions to decrease the rate of some process.
A **Backoff** algorithm is commonly used to space out repeated retransmissions
of the same block of data, avoiding congestion.
This module provides a data structure, [Backoff](#t:t/0), that holds the state
and the configuration of the backoff algorithm. Then, we can use function
`step/1` to get the time to wait for repeating a process and a new state of
the backoff algorithm.
## Example
iex> backoff = Backoff.new(kind: :exp)
#Backoff<kind: exp, min: 200, max: 15000>
iex> {200, next_backoff} = Backoff.step(backoff)
iex> {400, _next_backoff} = Backoff.step(next_backoff)
"""
use Bitwise, only_operators: true
@default_kind :rand_exp
@default_min 200
@default_max 15_000
@typedoc """
The implementation used to provide the [Backoff](#t:t/0) behaviour.
There are different ways to provide [Backoff](#t:t/0) behaviour:
* `rand`: on every step, the delay time is computed randomly between
two values, *min* and *max*.
* `exp`: every step the delay is increased exponentially.
* `rand_exp`: a combination of the previous two.
"""
@type kind :: :rand | :exp | :rand_exp
@typedoc ~s"""
Available options to configure a [Backoff](#t:t/0).
* `kind`: the implementation to be used. Can be any of the available
`t:kind/0`s. Defaults to `#{@default_kind}`.
* `min`: the minimum value that can return a *Backoff*. Defaults to
#{@default_min}.
* `max`: the maximum value that can return a *Backoff*. Defaults to
#{@default_max}.
"""
@type option() ::
{:kind, kind()}
| {:min, non_neg_integer()}
| {:max, non_neg_integer()}
@typedoc "A list of `t:option/0`."
@type options() :: [option()]
@typedoc """
A *Backoff* state.
An opaque data structure that holds the state and the configuration of a
*Backoff* algorithm. Can be created with `new/0` or `new/1`.
"""
@opaque t() :: %__MODULE__{
kind: kind(),
min: non_neg_integer(),
max: non_neg_integer(),
state: term()
}
defstruct [:kind, :min, :max, :state]
@doc """
Creates a new [Backoff](#t:t/0).
Returns a new [Backoff](#t:t/0) configured by `t:options/0`.
"""
@spec new() :: t()
@spec new(options()) :: t()
def new(opts \\ []) do
kind = Keyword.get(opts, :kind, @default_kind)
{min, max} = min_max(opts)
do_new(kind, min, max)
end
@doc """
Sets a [Backoff](#t:t/0) to the initial state.
Given a [Backoff](#t:t/0), sets its state back to the initial value. Note that for
the `rand` implementation this function has no effect.
"""
@spec reset(t()) :: t()
def reset(backoff = %__MODULE__{kind: :exp, min: min}), do: %{backoff | state: min}
def reset(backoff = %__MODULE__{kind: :rand_exp, min: min, state: {_last, seed}}) do
%__MODULE__{backoff | state: {min, seed}}
end
def reset(backoff = %__MODULE__{kind: :rand}), do: backoff
@doc """
Computes the current delay.
Given a [Backoff](#t:t/0), returns the current delay time and the next state of
the [Backoff](#t:t/0).
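A sketch of a typical retry loop; the state returned by `step/1` must be
threaded into the next call:

    backoff = Backoff.new(kind: :exp)
    {delay, backoff} = Backoff.step(backoff)
    Process.sleep(delay)
    {_next_delay, _backoff} = Backoff.step(backoff)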
"""
@spec step(t()) :: {non_neg_integer(), t()}
def step(backoff = %__MODULE__{kind: :rand, min: min, max: max, state: seed}) do
{diff, next_seed} = :rand.uniform_s(max - min + 1, seed)
{diff + min - 1, %{backoff | state: next_seed}}
end
def step(backoff = %__MODULE__{kind: :exp, max: max, state: state}) do
{state, %{backoff | state: min(max(state, 1) <<< 1, max)}}
end
def step(backoff = %__MODULE__{kind: :rand_exp, max: max, state: {last, seed}}) do
upper_bound = max(last, 1) <<< 1
{diff, next_seed} = :rand.uniform_s(upper_bound + 1, seed)
next_value = min(last + diff, max)
{next_value, %{backoff | state: {next_value, next_seed}}}
end
defimpl Inspect do
import Inspect.Algebra
def inspect(%{kind: kind, min: min, max: max}, inspect_opts) do
fields = [{"kind:", kind}, {"min:", min}, {"max:", max}]
fields_doc =
container_doc("<", fields, ">", inspect_opts, fn {key, value}, _opts ->
glue(key, to_string(value))
end)
concat("#Backoff", fields_doc)
end
end
@spec min_max(options()) :: {number(), number()}
defp min_max(opts) do
case {Keyword.get(opts, :min), Keyword.get(opts, :max)} do
{nil, nil} -> {@default_min, @default_max}
{nil, max} -> {min(@default_min, max), max}
{min, nil} -> {min, max(min, @default_max)}
{min, max} -> {min, max}
end
end
@spec do_new(kind(), number(), number()) :: t() | no_return()
defp do_new(_, min, _) when not (is_integer(min) and min >= 0) do
raise ArgumentError, "minimum #{inspect(min)} not 0 or a positive integer"
end
defp do_new(_, _, max) when not (is_integer(max) and max >= 0) do
raise ArgumentError, "maximum #{inspect(max)} not 0 or a positive integer"
end
defp do_new(_, min, max) when min > max do
raise ArgumentError, "minimum #{min} is greater than maximum #{max}"
end
defp do_new(:rand, min, max) do
seed = :rand.seed_s(:exsplus)
%__MODULE__{kind: :rand, min: min, max: max, state: seed}
end
defp do_new(:exp, min, max) do
%__MODULE__{kind: :exp, min: min, max: max, state: min}
end
defp do_new(:rand_exp, min, max) do
seed = :rand.seed_s(:exsplus)
%__MODULE__{kind: :rand_exp, min: min, max: max, state: {min, seed}}
end
defp do_new(kind, _min, _max) do
raise ArgumentError, "unknown kind #{inspect(kind)}"
end
end
|
lib/backoff.ex
| 0.908176
| 0.843444
|
backoff.ex
|
starcoder
|
defmodule FishermanServer.Sorts do
@moduledoc """
Provides handy algorithm helpers
"""
@doc """
Sort shell records by relative time interval. This is accomplished
by the following process:
1. Separate each shell record into two bounds (start and end)
2. Sort all bounds by timestamp and record their sorted order
3. Split up the processed bounds into two data structures:
- A list of start-boundaries
- A map consisting of command_id -> end-boundary order lookups
4. Iterate over each start-boundary and lookup its corresponding
end boundary by shared id.
5. Merge each corresponding boundary into a structure %{id, start, end}
- id = string identifier for shell record
- start = integer denoting start boundary's relative order to
other boundaries.
- end = integer denoting end boundary's relative order to
other boundaries.
6. Return the list of merged boundary structures
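As an illustration, for two records A and B where A starts, B starts, A ends,
B ends (in that time order), the result would be (ids are illustrative):

    [%{id: "A", start: 0, end: 2}, %{id: "B", start: 1, end: 3}]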
"""
def interval_sort(intervals) do
%{
starts: starts,
ends: ends
} =
intervals
|> Enum.reduce([], fn %FishermanServer.ShellRecord{
uuid: id,
command_timestamp: sm,
error_timestamp: em
},
acc ->
[
%{ts: DateTime.to_unix(sm, :millisecond), id: id, bound: 0},
%{ts: DateTime.to_unix(em, :millisecond), id: id, bound: 1}
| acc
]
end)
|> Enum.sort(&(&1.ts <= &2.ts))
|> Enum.with_index()
|> Enum.reduce(%{starts: [], ends: %{}}, &split_bounds(&1, &2))
starts
|> Enum.reverse()
|> Enum.map(fn {%{id: id}, idx} ->
%{id: id, start: idx, end: Map.get(ends, id)}
end)
end
# Bucket the bound into its appropriate container
defp split_bounds({%{bound: 0}, _rel_order} = boundary, acc) do
Map.update!(acc, :starts, &[boundary | &1])
end
defp split_bounds({%{bound: 1, id: id}, rel_order} = _boundary, %{ends: ends} = acc) do
Map.put(acc, :ends, Map.put(ends, id, rel_order))
end
@doc """
Build a 2D map of content by row for UI table
"""
def build_table_matrix(records, pids) do
# Build lookup of record uuid -> record
record_lookup = Enum.reduce(records, %{}, &Map.put(&2, &1.uuid, &1))
# Maintain a set of occupied bounds for each pid
pids_map =
pids
|> Enum.reduce(%{}, &Map.put(&2, &1, %{}))
# Populate occupied bounds for each pid
matrix =
records
|> interval_sort()
|> Enum.reduce(pids_map, &add_cell_info(&1, &2, record_lookup))
{matrix, record_lookup}
end
defp add_cell_info(
%{id: id, start: start_idx, end: end_idx},
pids_map,
record_lookup
) do
# Fetch whole shell record object from lookup map
%{
pid: pid,
uuid: record_id
} = Map.get(record_lookup, id)
# Place in map with identifier to denote the start of a block
# as well as how many spaces it will take up
fill_size = abs(end_idx - start_idx)
cell_info = %{
fill_size: fill_size,
record_id: record_id
}
# rel_order_idx maps to cell info
idx_map_for_pid = Map.get(pids_map, pid, %{})
pid_map = Map.put(idx_map_for_pid, start_idx, {:start, cell_info})
# Generate :fill records for cells that should be skipped akin
# to how <td> cells are skipped by rowspan/colspan
pid_map =
if fill_size > 1 do
(start_idx + 1)..(end_idx - 1)
|> Enum.reduce(pid_map, &Map.put(&2, &1, :fill))
else
pid_map
end
Map.put(pids_map, pid, pid_map)
end
# Builds a mapping from rel_order_idx -> record_id for interval bounds
# NOTE Not used currently
defp build_order_idx_lookup(intervals) do
lookup_by_order_idx =
Enum.reduce(intervals, %{}, fn item, acc ->
acc
|> Map.put(item.start, item.id)
|> Map.put(item.end, item.id)
end)
end
end
|
fisherman_server/lib/fisherman_server/sorts.ex
| 0.844297
| 0.557183
|
sorts.ex
|
starcoder
|
defmodule Translecto.Query do
@moduledoc """
Provides convenient functionality for querying translatable models.
"""
defmacro __using__(_options) do
quote do
import Translecto.Query
import Ecto.Query, except: [from: 1, from: 2]
end
end
defp get_table({ :in, _, [{ ref, _, _ }, data] }), do: { ref, data }
defp expand_translate_query(kw, tables, locale \\ 1), do: Enum.reverse(expand_translate_query(kw, tables, locale, []))
defp expand_translate_query([], _, _, acc), do: acc
defp expand_translate_query([{ :translate, { :in, _, [name, { { :., _, [table_name = { table, _, _ }, field] }, _, _ }] } }|kw], tables, locale = { :locale, locale_id }, acc) do
expand_translate_query(kw, tables, locale, [quote do
{ :on, unquote(table_name).unquote(field) == unquote(name).translate_id and unquote(name).locale_id == unquote(locale_id) }
end, quote do
{ :left_join, unquote(name) in ^unquote(tables[table]).get_translation(unquote(field)) }
end|acc])
end
defp expand_translate_query([{ :translate, { :in, _, [name, { { :., _, [table_name = { table, _, _ }, field] }, _, _ }] } }|kw], tables, locale = { :locales, locale_ids }, acc) do
expand_translate_query(kw, tables, locale, [quote do
{ :on, unquote(table_name).unquote(field) == unquote(name).translate_id and unquote(name).locale_id in unquote(locale_ids) }
end, quote do
{ :left_join, unquote(name) in ^unquote(tables[table]).get_translation(unquote(field)) }
end|acc])
end
defp expand_translate_query([{ :must_translate, { :in, _, [name, { { :., _, [table_name = { table, _, _ }, field] }, _, _ }] } }|kw], tables, locale = { :locale, locale_id }, acc) do
expand_translate_query(kw, tables, locale, [quote do
{ :on, unquote(table_name).unquote(field) == unquote(name).translate_id and unquote(name).locale_id == unquote(locale_id) }
end, quote do
{ :join, unquote(name) in ^unquote(tables[table]).get_translation(unquote(field)) }
end|acc])
end
defp expand_translate_query([{ :must_translate, { :in, _, [name, { { :., _, [table_name = { table, _, _ }, field] }, _, _ }] } }|kw], tables, locale = { :locales, locale_ids }, acc) do
expand_translate_query(kw, tables, locale, [quote do
{ :on, unquote(table_name).unquote(field) == unquote(name).translate_id and unquote(name).locale_id in unquote(locale_ids) }
end, quote do
{ :join, unquote(name) in ^unquote(tables[table]).get_translation(unquote(field)) }
end|acc])
end
defp expand_translate_query([expr = { join, table }|kw], tables, locale, acc) when join in [:join, :inner_join, :left_join, :right_join, :cross_join, :full_join, :inner_lateral_join, :left_lateral_join] do
expand_translate_query(kw, [get_table(table)|tables], locale, [expr|acc])
end
defp expand_translate_query([{ :locale_match, translations }|kw], tables, locale = { :locale, locale_id }, acc) do
expand_translate_query(kw, tables, locale, build_locale_matcher(translations, [locale_id], acc))
end
defp expand_translate_query([{ :locale_match, translations }|kw], tables, locale = { :locales, locale_ids }, acc) do
expand_translate_query(kw, tables, locale, build_locale_matcher(translations, locale_ids, acc))
end
defp expand_translate_query([locale = { :locale, _ }|kw], tables, _, acc), do: expand_translate_query(kw, tables, locale, acc)
defp expand_translate_query([locale = { :locales, _ }|kw], tables, _, acc), do: expand_translate_query(kw, tables, locale, acc)
defp expand_translate_query([expr|kw], tables, locale, acc), do: expand_translate_query(kw, tables, locale, [expr|acc])
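# Enumerates all assignments of length (length(list) - reductions) drawn from
# `list` with repetition, applying `fun` to each complete assignment. Used by
# `build_locale_matcher/3` below to generate candidate table/locale matches.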
@spec permutate(list, integer, (([any] -> [any]))) :: [any]
defp permutate(list, reductions \\ 0, fun \\ fn e -> e end) when reductions <= length(list), do: permutate(list, [], Enum.count(list) - reductions, list, [], fun)
defp permutate(_, acc, 0, _, _, fun), do: fun.(acc)
defp permutate([h|t], acc, n, list, gacc, fun) do
[permutate(list, [h|acc], n - 1, list, gacc, fun)|permutate(t, acc, n, list, gacc, fun)]
end
defp permutate([], _, _, _, gacc, _), do: gacc
defp valid?([nil|t], [used|list]), do: if(Enum.all?(t, &(&1 != used)), do: valid?(t, list), else: false)
defp valid?([h|t], [_|list]), do: if(h not in list, do: valid?(t, list), else: false)
defp valid?(_, _), do: true
defp flatten(list, level \\ 0), do: Enum.reverse(flatten(list, [], Enum.count(list), level))
defp flatten([], list, _, _), do: list
defp flatten(e, list, n, n), do: [e|list]
defp flatten([h|t], list, n, level), do: flatten(t, flatten(h, list, n - 1, level), n, level)
defp get_field(nil, _), do: nil
defp get_field(name, field) do
quote do
unquote(name).unquote(field)
end
end
defp build_locale_matcher(tables, { :^, _, _ }, acc) do
[{ :where, quote do
unquote(Enum.map(tables, &get_field(&1, :locale_id))) in unquote(permutate([nil|tables], 1, fn e ->
Enum.uniq(e)
|> Enum.filter(&(&1 != nil))
|> Enum.count
|> case do
1 -> if(valid?(e, tables), do: e, else: [])
_ -> []
end
end)
|> flatten(1)
|> Enum.map(fn match ->
quote do
unquote(Enum.map(match, &get_field(&1, :locale_id)))
end
end))
end }|acc]
end
defp build_locale_matcher(tables, locales, acc) do
[{ :where, Enum.map(locales, fn locale ->
Enum.map(tables, fn table ->
quote do
unquote(table).locale_id in [unquote(locale), nil]
end
end)
|> Enum.chunk_every(2)
|> Enum.map(fn
[match] -> match
matches -> { :and, [], matches }
end)
|> Enum.reduce(fn match, acc ->
{ :and, [], [acc, match]}
end)
end)
|> Enum.chunk_every(2)
|> Enum.map(fn
[match] -> match
matches -> { :or, [], matches }
end)
|> Enum.reduce(fn match, acc ->
{ :or, [], [acc, match]}
end) }|acc]
end
@doc """
Create a query.
It allows for the standard [`Ecto.Query.from/2`](https://hexdocs.pm/ecto/Ecto.Query.html#from/2)
query syntax and functionality to be used. But adds support for some new expressions aimed at
simplifying the creation of translatable queries. Such as `locale`, `locales`, `translate`,
`must_translate`, `locale_match`.
A translatable query is structured as follows:
\# Get the English names for all ingredients.
from ingredient in Model.Ingredient,
locale: ^en.id,
translate: name in ingredient.name,
select: name.term
\# Get only the ingredients which have English names.
from ingredient in Model.Ingredient,
locale: ^en.id,
must_translate: name in ingredient.name,
select: name.term
\# Get the English and French names for all ingredients.
from ingredient in Model.Ingredient,
locales: ^[en.id, fr.id],
translate: name in ingredient.name,
select: name.term
\# Get the English and French names and types for all ingredients (results won't have mixed locales)
from ingredient in Model.Ingredient,
locales: ^[en.id, fr.id],
translate: info in ingredient.info,
select: { info.name, info.type }
\# Get the English and French names and types for all ingredients (results may have mixed locales)
from ingredient in Model.Ingredient,
locales: ^[en.id, fr.id],
translate: name in ingredient.name,
translate: type in ingredient.type,
select: { name.term, type.term }
\# Get the English and French names and types for all ingredients (results won't have mixed locales)
from ingredient in Model.Ingredient,
locales: ^[en.id, fr.id],
translate: name in ingredient.name,
translate: type in ingredient.type,
locale_match: [name, type],
select: { name.term, type.term }
A translatable query requires a locale to be set using the `:locale` keyword. This value should be
the locale value that will be matched in the translation model's for `:locale_id` field. Alternatively
a list of locales can be matched against using the keyword `:locales`, where a list of locale values
is provided.
The `:translate` keyword is used to create access to any translatable terms, if those terms are not
available it will return null instead. While `:must_translate` is an alternative keyword that enforces
a translation exists. These take the form of an `in` expression where the left argument is the named
reference to that translation, and the right argument is the translatable field (field marked as
`Translecto.Schema.Translatable.translatable/3`).
After using translate the translatable term(s) for that field are now available throughout the query,
in the given locale specified.
\# Get the ingredient whose English name matches "orange"
from ingredient in Model.Ingredient,
locale: ^en.id,
translate: name in ingredient.name, where: name.term == "orange",
select: ingredient
Multiple translates can be used together in the same expression to translate as many fields of
the translatable fields as needed.
The `:locale_match` keyword is used to enforce the specified translatable fields are all of the
same locale (if the field was successfully retrieved). This keyword takes a list of translatable
fields.
"""
@spec from(any, keyword()) :: Macro.t
defmacro from(expr, kw \\ []) do
quote do
Ecto.Query.from(unquote(expr), unquote(expand_translate_query(kw, [get_table(expr)])))
end
end
end
|
lib/translecto/query.ex
| 0.604282
| 0.478712
|
query.ex
|
starcoder
|
defmodule Membrane.Core.Element.DemandController do
@moduledoc false
# Module handling demands incoming through output pads.
alias Membrane.{Core, Element}
alias Core.CallbackHandler
alias Element.{CallbackContext, Pad}
alias Core.Element.{ActionHandler, PadModel, State}
require CallbackContext.Demand
require PadModel
use Core.Element.Log
use Bunch
@doc """
Handles demand coming in on an output pad. Updates the demand value and executes the `handle_demand` callback.
"""
@spec handle_demand(Pad.ref_t(), non_neg_integer, State.t()) :: State.stateful_try_t()
def handle_demand(pad_ref, size, state) do
PadModel.assert_data(pad_ref, %{direction: :output}, state)
{total_size, state} =
PadModel.get_and_update_data!(
pad_ref,
:demand,
fn demand -> (demand + size) ~> {&1, &1} end,
state
)
if exec_handle_demand?(pad_ref, state) do
%{other_demand_unit: unit} = PadModel.get_data!(pad_ref, state)
context = CallbackContext.Demand.from_state(state, incoming_demand: size)
CallbackHandler.exec_and_handle_callback(
:handle_demand,
ActionHandler,
%{split_cont_f: &exec_handle_demand?(pad_ref, &1)},
[pad_ref, total_size, unit, context],
state
)
|> or_warn_error("""
Demand arrived from pad #{inspect(pad_ref)}, but error happened while
handling it.
""")
else
{:ok, state}
end
end
@spec exec_handle_demand?(Pad.ref_t(), State.t()) :: boolean
defp exec_handle_demand?(pad_ref, state) do
case PadModel.get_data!(pad_ref, state) do
%{end_of_stream?: true} ->
debug(
"""
Demand controller: not executing handle_demand as EndOfStream has already been sent
""",
state
)
false
%{demand: demand} when demand <= 0 ->
debug(
"""
Demand controller: not executing handle_demand as demand is not greater than 0,
demand: #{inspect(demand)}
""",
state
)
false
_ ->
true
end
end
end
|
lib/membrane/core/element/demand_controller.ex
| 0.788624
| 0.416025
|
demand_controller.ex
|
starcoder
|
defmodule AOC.Day5 do
@moduledoc """
Solution to Day 5 of the Advent of Code 2021
https://adventofcode.com/2021/day/5
"""
@doc """
Read the input file
"""
@spec get_inputs(Path.t()) :: [[[integer()]]]
def get_inputs(f \\ "lib/inputs/day5.txt") do
File.read!(f)
|> String.trim()
|> String.split("\n")
|> Enum.map(fn s ->
String.split(s, "->")
|> Enum.map(&String.trim/1)
|> Enum.map(fn s -> String.split(s, ",") |> Enum.map(&String.to_integer/1) end)
end)
end
@doc """
Keep only straight lines from the given segments
"""
def filter_for_straight_lines(segments \\ get_inputs()) do
segments |> Enum.filter(fn [[sx, sy], [ex, ey]] -> sx == ex || sy == ey end)
end
@doc """
For vertical lines, return the list of all intermediate points,
e.g. 0,7 -> 0,10 = [{0,7}, {0,8}, {0,9}, {0,10}].
For horizontal lines, return the list of all intermediate points,
e.g. 7,0 -> 10,0 = [{7,0}, {8,0}, {9,0}, {10,0}].
For diagonals, return the points at 45 degrees, i.e. the points we get by
incrementing x and y by 1.
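A worked example of the diagonal case:

    iex> AOC.Day5.explode_segment([[1, 1], [3, 3]])
    [{1, 1}, {2, 2}, {3, 3}]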
"""
def explode_segment([[x, y1], [x, y2]]), do: for(i <- y1..y2, do: {x, i})
def explode_segment([[x1, y], [x2, y]]), do: for(i <- x1..x2, do: {i, y})
def explode_segment([[x1, y1], [x2, y2]]),
do: Enum.zip(Enum.to_list(x1..x2), Enum.to_list(y1..y2))
@doc """
To avoid the most dangerous areas, you need to determine the number of points where at least two lines overlap.
In the above example, this is anywhere in the diagram with a 2 or larger - a total of 5 points.
Consider only horizontal and vertical lines. At how many points do at least two lines overlap?
"""
def part1() do
"lib/inputs/day5.txt"
|> get_inputs
|> filter_for_straight_lines
|> Enum.map(&explode_segment/1)
|> List.flatten()
|> Enum.frequencies()
|> Enum.filter(fn {_, c} -> c >= 2 end)
|> Enum.count()
end
@doc """
Consider all of the lines. At how many points do at least two lines overlap?
"""
def part2() do
"lib/inputs/day5.txt"
|> get_inputs
|> Enum.map(&explode_segment/1)
|> List.flatten()
|> Enum.frequencies()
|> Enum.filter(fn {_, c} -> c >= 2 end)
|> Enum.count()
end
end
|
elixir/advent_of_code/lib/2021/day5.ex
| 0.802594
| 0.548915
|
day5.ex
|
starcoder
|
defmodule ExJsonSchema.Validator.Type do
@moduledoc """
`ExJsonSchema.Validator` implementation for `"type"` attributes.
See:
https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.2
https://tools.ietf.org/html/draft-wright-json-schema-validation-01#section-6.25
https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
"""
alias ExJsonSchema.Validator
alias ExJsonSchema.Validator.Error
@behaviour ExJsonSchema.Validator
@impl ExJsonSchema.Validator
def validate(%{version: version}, _, {"type", type}, data, _) do
do_validate(version, type, data)
end
def validate(_, _, _, _, _) do
[]
end
@spec do_validate(
version :: non_neg_integer,
type :: ExJsonSchema.data(),
data :: ExJsonSchema.data()
) :: Validator.errors()
defp do_validate(version, type, data) do
if valid?(version, type, data) do
[]
else
[%Error{error: %Error.Type{expected: List.wrap(type), actual: data_type(data)}}]
end
end
defp valid?(_, "number", data), do: is_number(data)
defp valid?(_, "array", data), do: is_list(data)
defp valid?(_, "object", data), do: is_map(data)
defp valid?(_, "null", data), do: is_nil(data)
defp valid?(_, "boolean", data), do: is_boolean(data)
defp valid?(_, "string", data), do: is_binary(data)
defp valid?(_, "integer", data) when is_integer(data), do: true
defp valid?(4, "integer", _), do: false
defp valid?(version, "integer", data) when version >= 6 do
is_float(data) and Float.round(data) == data
end
defp valid?(version, type, data) when is_list(type) do
Enum.any?(type, &valid?(version, &1, data))
end
defp data_type(nil), do: "null"
defp data_type(data) when is_binary(data), do: "string"
defp data_type(data) when is_boolean(data), do: "boolean"
defp data_type(data) when is_integer(data), do: "integer"
defp data_type(data) when is_list(data), do: "array"
defp data_type(data) when is_map(data), do: "object"
defp data_type(data) when is_number(data), do: "number"
defp data_type(_), do: "unknown"
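  # Usage sketch (assumes a resolved root map carrying a :version key, e.g. draft 7):
  #
  #     ExJsonSchema.Validator.Type.validate(%{version: 7}, nil, {"type", "integer"}, 1.0, nil)
  #     #=> [] (from draft 6 onwards an integral float counts as an integer)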
end
|
lib/ex_json_schema/validator/type.ex
| 0.707607
| 0.502991
|
type.ex
|
starcoder
|
defmodule Nx.Defn.Compiler do
@moduledoc """
The specification and helper functions for custom `defn` compilers.
"""
@aot_version 1
@doc """
Callback for async execution (on top of JIT compilation).
It receives the same arguments as `c:__jit__/4` but must return
a struct that implements the `Nx.Async` protocol.
"""
@callback __async__(key :: term, vars :: [Nx.t()], ([Nx.t()] -> Nx.t()), opts :: keyword) ::
Nx.Async.t()
@doc """
Callback for JIT compilation.
It receives an opaque `key`, often used for caching, the function
`vars`, the function which builds an expression, and the compiler
options.
It must call `fun` with the vars as a list of arguments.
The callback uses double underscores so it can be defined
at root modules without affecting the module's main API.
"""
@callback __jit__(key :: term, vars :: [Nx.t()], ([Nx.t()] -> Nx.t()), opts :: keyword) ::
Nx.t() | tuple()
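  # A minimal sketch of a compiler implementing this callback (hypothetical
  # module; it skips caching and simply evaluates the expression eagerly):
  #
  #     defmodule MyEagerCompiler do
  #       @behaviour Nx.Defn.Compiler
  #
  #       @impl true
  #       def __jit__(_key, vars, fun, _opts), do: fun.(vars)
  #
  #       @impl true
  #       def __async__(_key, _vars, _fun, _opts),
  #         do: raise("async execution is not supported by this sketch")
  #     end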
@doc """
Callback for AOT compilation.
It compiles the given functions to NIFs.
It receives the output directory for compiled artifacts, the module
the NIFs belong to, the function definitions, alongside the options
to customize the AOT compilation.
The function definitions are four element tuples containing the function
name, a function that builds the tensor expression, the tensor expression
arguments as a list, and the definition options. The compilation of the
tensor expression should behave as close to the JIT compilation as possible,
except that each tuple is compiled to a NIF. The NIF will receive the
binaries equivalent to each tensor expression argument and it must return
`{:ok, list_of_binaries}`, where `list_of_binaries` represents each tensor
on the output, where composite types are flattened. Or it may return
`{:error, charlist}`.
It must return `{:ok, results, nif_path}`, where results is the result
of each anonymous function call, and `nif_path` is the path the compiled
NIF artifact was written to. It may also return `{:error, Exception.t}`
in case of errors.
This callback is optional.
"""
@callback __aot__(output_dir :: binary, module :: atom, [def], aot_opts :: keyword) ::
{:ok, [Nx.t()], nif_path :: binary} | {:error, Exception.t()}
when def: {function_name :: atom, ([Nx.t()] -> Nx.t()), [Nx.t()], opts :: keyword}
@optional_callbacks __aot__: 4
# These operations do not have valid meaning for Nx.Defn.Expr
@forbidden_ops [:backend_copy, :backend_deallocate, :backend_transfer] ++
[:to_binary, :to_scalar, :to_flat_list, :to_heatmap, :to_batched_list]
# These operations wrap a tensor in their original backend
@tensor_ops [:tensor, :from_binary]
defguardp is_var(var)
when is_tuple(var) and tuple_size(var) == 3 and is_atom(elem(var, 0)) and
is_atom(elem(var, 2))
defguardp is_underscore(var)
when is_tuple(var) and tuple_size(var) == 3 and elem(var, 0) == :_ and
is_atom(elem(var, 2))
## AOT
@doc false
def __export_aot__(output_dir, module, tuples, compiler, aot_opts) do
{export_tuples, compiler_tuples} =
tuples
|> Enum.map(fn {name, fun, args, opts} ->
tensors = Nx.Defn.Tree.from_nested_args(args)
templates = Nx.Defn.Tree.to_nested_templates(args, tensors)
export_tuple = {name, templates}
runtime_fun = &runtime_fun(&1, fun, args, compiler)
compiler_tuple = {aot_name(name, args), runtime_fun, tensors, opts}
{export_tuple, compiler_tuple}
end)
|> Enum.unzip()
_ = Code.ensure_compiled(compiler)
unless function_exported?(compiler, :__aot__, 4) do
raise ArgumentError, "AOT compilation is not available to the #{inspect(compiler)} compiler"
end
File.mkdir_p!(output_dir)
case compiler.__aot__(output_dir, module, compiler_tuples, aot_opts) do
{:ok, results, nif} ->
tensors = Nx.Defn.Tree.from_nested_args(results)
results = Nx.Defn.Tree.to_nested_templates(results, tensors)
# TODO: Use Enum.zip_with on Elixir v1.12
{export_tuples, []} =
Enum.map_reduce(export_tuples, results, fn {name, arity}, [result | results] ->
{{name, arity, result}, results}
end)
path = Path.join(output_dir, "#{module}.nx.aot")
export = {Path.extname(nif), export_tuples}
File.write!(path, :erlang.term_to_binary({@aot_version, export}))
:ok
{:error, exception} ->
{:error, exception}
end
end
@doc false
def __import_aot__(output_dir, module, external_resources?) do
export_path = Path.join(output_dir, "#{module}.nx.aot")
{nif_extension, export_tuples} =
case File.read(export_path) do
{:ok, binary} ->
try do
:erlang.binary_to_term(binary)
rescue
_ ->
raise ArgumentError,
"could not decode AOT export for #{inspect(module)} at #{output_dir}"
else
{@aot_version, export_tuples} ->
export_tuples
other ->
raise ArgumentError,
"incompatible version #{elem(other, 0)} for AOT export for #{inspect(module)} " <>
"at #{output_dir}, expected v#{@aot_version}. Please make sure the Nx version" <>
"used for the export matches the one in the import"
end
{:error, _} ->
raise ArgumentError, "could not find AOT export for #{inspect(module)} at #{output_dir}"
end
nif_path = output_dir |> Path.join(Atom.to_string(module)) |> String.to_charlist()
nif_ext_path = Path.join(output_dir, "#{module}.#{nif_extension}")
funs =
for {name, args, result} <- export_tuples do
aot_name = aot_name(name, args)
{args, vars_and_templates} = aot_args(args)
vars = Enum.map(vars_and_templates, &elem(&1, 0))
templates = Enum.map(vars_and_templates, fn {v, t} -> {v, Macro.escape(t)} end)
quote do
def unquote(name)(unquote_splicing(args)) do
unquote(vars) = __nx_input__(unquote(templates))
__nx_output__(
unquote(Macro.escape(result)),
unquote(aot_name)(unquote_splicing(vars))
)
end
defp unquote(aot_name)(unquote_splicing(vars)) do
:erlang.nif_error(:undef)
end
end
end
body =
quote do
if unquote(external_resources?) do
@external_resource unquote(export_path)
@external_resource unquote(nif_ext_path)
end
@on_load :__on_load__
def __on_load__, do: :erlang.load_nif(unquote(nif_path), 0)
@compile {:inline, __nx_input__: 1, __nx_output__: 2}
defp __nx_input__(vars_and_templates) do
for {var, template} <- vars_and_templates do
tensor = Nx.Defn.Tree.from_arg(var)
unless Nx.compatible?(tensor, template) do
raise ArgumentError, """
Nx AOT-compiled function expected a tensor of type, shape, and names:
#{inspect(template)}
But got tensor:
#{inspect(tensor)}
"""
end
Nx.to_binary(tensor)
end
end
defp __nx_output__(result, {:ok, list}) do
{result, []} =
Nx.Defn.Tree.composite(result, list, fn
%Nx.Tensor{} = t, [binary | list] when is_binary(binary) ->
{%{t | data: %Nx.BinaryBackend{state: binary}}, list}
end)
result
end
defp __nx_output__(_result, {:error, reason}) do
raise "Nx AOT-compiled function failed with reason: #{inspect(reason)}"
end
unquote(funs)
end
Module.eval_quoted(module, body, [], line: __ENV__.line, file: __ENV__.file)
:ok
end
# We need to include the actual arity in the name because
# defn foo({a, b}) and defn foo(a, b) compile to the same
# name+arity at the AOT level.
defp aot_name(name, args), do: :"__aot_#{name}_#{length(args)}"
defp aot_args(args) do
{args, {vars, _}} =
Enum.map_reduce(args, {[], 0}, fn arg, {acc, i} ->
Nx.Defn.Tree.composite(arg, {acc, i}, fn template, {acc, i} ->
var = Macro.var(:"arg#{i}", __MODULE__)
{var, {[{var, template} | acc], i + 1}}
end)
end)
{args, Enum.reverse(vars)}
end
## JIT/Async/Stream
@doc false
def __jit__(fun, args, compiler, opts) do
runtime(:__jit__, fun, args, compiler, opts)
end
@doc false
def __async__(fun, args, compiler, opts) do
runtime(:__async__, fun, args, compiler, opts)
end
defp runtime(callback, fun, args, compiler, opts) do
tensors = Nx.Defn.Tree.from_nested_args(args)
runtime_fun = &runtime_fun(&1, fun, args, compiler)
Kernel.apply(compiler, callback, [fun, tensors, runtime_fun, opts])
end
defp runtime_fun(tensors, fun, args, compiler) do
if Process.get(Nx.Defn.Compiler) do
raise "cannot trigger JIT compilation when there is already a JIT compilation happening"
end
Process.put(Nx.Defn.Compiler, compiler)
try do
args = Nx.Defn.Tree.to_nested_params(args, tensors)
fun
|> apply(args)
|> Nx.Defn.Tree.to_result()
after
Process.delete(Nx.Defn.Compiler)
end
end
## Compiler
@doc false
def __remote__(module, function, defn, args) do
try do
apply(module, defn, args)
catch
:error, :undef ->
stack =
case __STACKTRACE__ do
[{^module, ^defn, args_or_arity, info} | stack] ->
[{module, function, args_or_arity, info} | stack]
stack ->
stack
end
:erlang.raise(:error, :undef, stack)
end
end
@doc false
def __compile__(%Macro.Env{module: module, file: file, line: line}, exports) do
state = %{
module: module,
file: file,
line: line,
function: nil,
exports: exports,
rewrite_underscore?: false
}
quoted = Enum.map(exports, &compile_each(&1, state))
{:__block__, [], quoted}
end
defp compile_each({{name, _arity} = def, def_meta}, state) do
{{kind, _meta, args, ast}, state} = get_and_normalize_definition(def, state)
{nx_args, cache_args, cache_vars} = split_args(args, 0, def_meta.defaults, [], [], [])
flat_args = collect_vars(nx_args)
{def_module, def_opts} = def_meta.compiler
defn_name = defn_name(name)
cache =
quote do
fn unquote_splicing(cache_vars) -> unquote(defn_name)(unquote_splicing(cache_args)) end
end
quote line: state.line do
Nx.Defn.Module.delete_definition(__MODULE__, unquote(def))
Kernel.unquote(kind)(unquote(name)(unquote_splicing(args))) do
if Process.get(Nx.Defn.Compiler) do
unquote(defn_name)(unquote_splicing(args))
else
unquote(def_module).__jit__(
unquote(cache),
Nx.Defn.Tree.from_flat_args(unquote(flat_args)),
fn unquote(flat_args) ->
Process.put(Nx.Defn.Compiler, unquote(def_module))
try do
unquote(flat_args) = Nx.Defn.Tree.to_flat_params(unquote(flat_args))
Nx.Defn.Tree.to_result(unquote(defn_name)(unquote_splicing(args)))
after
Process.delete(Nx.Defn.Compiler)
end
end,
unquote(Macro.escape(def_opts))
)
end
end
Kernel.unquote(kind)(unquote(defn_name)(unquote_splicing(args)), do: unquote(ast))
end
end
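  # Splits the defn arguments by position: arguments listed in `defaults` are
  # passed through unchanged (cache call only), while every other argument is
  # replaced by a fresh `argN` var. Returns {tensor_args, cache_args, fresh_vars}.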
defp split_args([arg | args], i, defaults, nx, cache, vars) do
if i in defaults do
split_args(args, i + 1, defaults, nx, [arg | cache], vars)
else
var = Macro.var(:"arg#{i}", __MODULE__)
split_args(args, i + 1, defaults, [arg | nx], [var | cache], [var | vars])
end
end
defp split_args([], _, _, nx, cache, vars),
do: {Enum.reverse(nx), Enum.reverse(cache), Enum.reverse(vars)}
defp collect_vars(args) do
{_, vars} =
Macro.prewalk(args, [], fn
var, acc when is_var(var) and not is_underscore(var) ->
{var, [var | acc]}
node, acc ->
{node, acc}
end)
Enum.reverse(vars)
end
defp get_and_normalize_definition(def, state) do
{:v1, kind, meta, clauses} = Nx.Defn.Module.get_definition(state.module, def)
state = %{state | function: def, line: meta[:line] || state.line, rewrite_underscore?: true}
case clauses do
[] ->
compile_error!(meta, state, "cannot have #{kind}n without clauses")
[{meta, args, [], ast}] ->
{args, state} = normalize_args(args, meta, state)
{ast, state} = normalize(ast, %{state | rewrite_underscore?: false})
case extract_assigns(args, state) do
{_, []} ->
{{kind, meta, args, ast}, state}
{args, assigns} ->
{{kind, meta, args, {:__block__, meta, assigns ++ [ast]}}, state}
end
[_, _ | _] ->
compile_error!(meta, state, "cannot compile #{kind}n with multiple clauses")
end
end
## Normalization
defp normalize({special_form, meta, args}, state)
when special_form in [:{}, :%{}, :__block__] do
{args, state} = normalize_list(args, state)
{{special_form, meta, args}, state}
end
defp normalize({:=, meta, [left, right]}, state) do
{left, state} = normalize(left, state)
assert_uniq_vars!(left, state)
{right, state} = normalize(right, state)
{{:=, meta, [left, right]}, state}
end
defp normalize({:fn, meta, clauses}, state) do
unless match?([_], clauses) do
compile_error!(meta, state, "only a single clause is allowed inside fn")
end
{clauses, state} =
Enum.map_reduce(clauses, state, fn {:->, clause_meta, [args, body]}, state ->
{args, state} = normalize_args(args, meta, state)
{body, state} = normalize(body, state)
{{:->, clause_meta, [args, body]}, state}
end)
{{:fn, meta, clauses}, state}
end
defp normalize({:cond, meta, [[do: clauses]]}, state) do
{[{last_meta, {last_condition, last_expr}} | rest], state} =
Enum.reduce(clauses, {[], state}, fn {:->, meta, [[condition], expr]}, {acc, state} ->
{condition, state} = normalize(condition, state)
{expr, state} = normalize(expr, state)
{[{meta, {condition, expr}} | acc], state}
end)
if rest == [] do
compile_error!(meta, state, "cond must have at least 2 clauses, got 1")
end
if not is_atom(last_condition) or last_condition == nil or last_condition == false do
compile_error!(
last_meta,
state,
"expected the last clause of cond to match on an atom, " <>
"such as true or :otherwise, got: #{Macro.to_string(last_condition)}"
)
end
ast =
quote do
Nx.Defn.Expr.cond(unquote(state.file), unquote(Enum.reverse(rest)), unquote(last_expr))
end
{ast, state}
end
defp normalize({name, meta, args} = expr, state) when is_atom(name) and is_list(args) do
pair = {name, length(args)}
case state.exports do
%{^pair => _} ->
{args, state} = normalize_list(args, state)
{{defn_name(name), meta, args}, state}
%{} ->
invalid_numerical_expression!(expr, state)
end
end
defp normalize(underscore, state) when is_underscore(underscore) do
{underscore, state}
end
defp normalize({name, meta, ctx} = var, state) when is_var(var) do
{version, meta} = Keyword.pop!(meta, :version)
{{name, [counter: version, generated: true] ++ meta, ctx}, state}
end
defp normalize({{:., dot_meta, [Nx, name]}, meta, args}, state) do
if name in @forbidden_ops do
compile_error!(meta, state, "Nx.#{name}/#{length(args)} is not allowed inside defn")
end
{args, state} = normalize_list(args, state)
args = rewrite_args(name, args)
call = {{:., dot_meta, [Nx, name]}, meta, args}
if name in @tensor_ops do
{{{:., dot_meta, [Nx.Defn.Expr, :tensor]}, dot_meta, [call]}, state}
else
{call, state}
end
end
defp normalize({{:., _, [Nx.Defn.Kernel, name]} = call, meta, args}, state) do
{args, state} =
case args do
[ast, fun] when name == :transform ->
{ast, state} = normalize(ast, state)
{[ast, fun], state}
_ ->
normalize_list(args, state)
end
{{call, meta, args}, state}
end
defp normalize({{:., _, [Access, :get]} = call, meta, args}, state) do
{args, state} = normalize_list(args, state)
{{call, meta, args}, state}
end
defp normalize({{:., dot_meta, [remote, name]}, meta, args}, state)
when is_atom(remote) and is_atom(name) do
{args, state} = normalize_list(args, state)
{{{:., dot_meta, [__MODULE__, :__remote__]}, meta, [remote, name, defn_name(name), args]},
state}
end
defp normalize({left, right}, state) do
{left, state} = normalize(left, state)
{right, state} = normalize(right, state)
{{left, right}, state}
end
defp normalize(list, state) when is_list(list) do
normalize_list(list, state)
end
defp normalize(literal, state)
when is_number(literal) or is_atom(literal) or is_binary(literal) do
{literal, state}
end
defp normalize(expr, state) do
invalid_numerical_expression!(expr, state)
end
defp normalize_list(list, state) do
Enum.map_reduce(list, state, &normalize/2)
end
defp invalid_numerical_expression!(expr, state) do
string = expr |> Macro.to_string() |> String.replace("\n", "\n ")
compile_error!(
maybe_meta(expr),
state,
"invalid numerical expression:\n\n #{string}\n"
)
end
## Rewrite args
defp rewrite_args(:iota, [t]), do: [t, add_backend([])]
defp rewrite_args(:iota, [t, opts]), do: [t, add_backend(opts)]
defp rewrite_args(:eye, [n]), do: [n, add_backend([])]
defp rewrite_args(:eye, [n, opts]), do: [n, add_backend(opts)]
defp rewrite_args(:random_uniform, [t]), do: [t, add_backend([])]
defp rewrite_args(:random_uniform, [t, opts]), do: [t, add_backend(opts)]
defp rewrite_args(:random_uniform, [t, min, max]), do: [t, min, max, add_backend([])]
defp rewrite_args(:random_uniform, [t, min, max, opts]), do: [t, min, max, add_backend(opts)]
defp rewrite_args(:random_normal, [t]), do: [t, add_backend([])]
defp rewrite_args(:random_normal, [t, opts]), do: [t, add_backend(opts)]
defp rewrite_args(:random_normal, [t, mu, sigma]), do: [t, mu, sigma, add_backend([])]
defp rewrite_args(:random_normal, [t, mu, sigma, opts]), do: [t, mu, sigma, add_backend(opts)]
defp rewrite_args(_name, args), do: args
defp add_backend(list) when is_list(list), do: [backend: Nx.Defn.Expr] ++ list
defp add_backend(expr), do: quote(do: Keyword.put(unquote(expr), :backend, Nx.Defn.Expr))
## Normalize args
defp normalize_args(args, meta, state) when is_list(args) do
{args, state} = Enum.map_reduce(args, state, &normalize_arg(&1, meta, &2))
assert_uniq_vars!(args, state)
{args, state}
end
defp normalize_arg(var, _meta, state) when is_var(var) do
if state.rewrite_underscore? and is_underscore(var) do
# TODO: Use Macro.unique_var on Elixir v1.12
{{:arg, [counter: :elixir_module.next_counter(state.module)], state.module}, state}
else
normalize(var, state)
end
end
defp normalize_arg({op, meta, args}, _meta, state) when op in [:{}, :=] do
{args, state} = Enum.map_reduce(args, state, &normalize_arg(&1, meta, &2))
{{op, meta, args}, state}
end
defp normalize_arg({left, right}, meta, state) do
{left, state} = normalize_arg(left, meta, state)
{right, state} = normalize_arg(right, meta, state)
{{:{}, meta, [left, right]}, state}
end
defp normalize_arg(expr, meta, state) do
compile_error!(
meta,
state,
"only variables and tuples are allowed as arguments in defn, got: #{Macro.to_string(expr)}"
)
end
defp assert_uniq_vars!(ast, state) do
Macro.prewalk(ast, %{}, fn
var, acc when is_var(var) and not is_underscore(var) ->
meta = elem(var, 1)
counter = Keyword.fetch!(meta, :counter)
case acc do
%{^counter => var} ->
compile_error!(
meta,
state,
"variable \"#{Macro.to_string(var)}\" appears twice in pattern " <>
Macro.to_string(ast)
)
%{} ->
{var, Map.put(acc, counter, var)}
end
node, acc ->
{node, acc}
end)
:ok
end
defp extract_assigns(args, state) do
Macro.prewalk(args, [], fn
{:=, meta, [left, right]} = expr, acc ->
cond do
is_var(left) ->
{right, [{:=, meta, [left, right]} | acc]}
is_var(right) ->
{left, [{:=, meta, [right, left]} | acc]}
true ->
compile_error!(
meta,
state,
"using = in arguments expects at least one of the sides to be a variable, " <>
"got: #{Macro.to_string(expr)}"
)
end
node, acc ->
{node, acc}
end)
end
## Helpers
defp maybe_meta({_, meta, _}), do: meta
defp maybe_meta(_), do: []
defp compile_error!(meta, state, description) do
line = meta[:line] || state.line
raise CompileError, line: line, file: state.file, description: description
end
defp defn_name(name), do: :"__defn:#{name}__"
end
|
lib/nx/defn/compiler.ex
| 0.754644
| 0.539226
|
compiler.ex
|
starcoder
|
defmodule Example_Ke do
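  @moduledoc """
  Worked examples for the standard-library `Keyword` module, one call per function.
  """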
def start do
Keyword.delete([a: 1, b: 2], :a)
end
def start2 do
Keyword.drop([a: 1, b: 2, b: 3, c: 3, a: 5], [:b, :d])
end
def start3 do
Keyword.equal?([a: 1, b: 2, a: 3], [b: 2, a: 3, a: 1])
end
def start4 do
Keyword.from_keys([:foo, :bar, :baz], :atom)
end
def start5 do
Keyword.get([a: 1], :b, 3)
end
def start6 do
Keyword.get_and_update([a: 1], :a, fn current_value ->
{current_value, "new_value!"}
end)
end
def start7 do
Keyword.get_and_update([a: 1], :b, fn current_value ->
{current_value, "new_value!"}
end)
end
def start8 do
keyword = [a: 1]
fun = fn ->
13
end
Keyword.get_lazy(keyword, :a, fun)
end
def start9 do
Keyword.get_values([a: 1, a: 3, a: 6], :a)
end
def start10 do
Keyword.merge([a: 1, b: 2], [a: 3, d: 4, a: 5], fn :a, v1, v2 ->
v1 + v2
end)
end
def start11 do
Keyword.new([{:a, 3}, {:c, 5}])
end
def start12 do
keyword = [a: 1]
fun = fn ->
13
end
Keyword.pop_lazy(keyword, :a, fun)
end
def start13 do
Keyword.pop_values([a: 1, a: 2, a: 3], :a)
end
def start14 do
Keyword.put([a: 1, b: 2], :a, 3)
end
def start15 do
Keyword.replace([a: 1, b: 2, a: 4], :a, 3)
end
def start16 do
Keyword.replace_lazy([a: 2, b: 2, a: 2], :a, fn v -> v * 4 end)
end
def start17 do
Keyword.split([a: 1, b: 2, c: 3, a: 4], [:a, :c, :e])
end
def start18 do
Keyword.take([a: 1, b: 2, c: 3, a: 5], [:a, :c, :e])
end
def start19 do
Keyword.to_list(a: 2)
end
def start20 do
Keyword.update([a: 12], :b, 15, fn value -> value * 5 end)
end
def start21 do
{:ok, result} = Keyword.validate([], [one: 1, two: 2])
Enum.sort(result)
end
def start22 do
Keyword.validate([three: 3, four: 4], [one: 1, two: 2])
end
def start23 do
Keyword.validate!([one: 1], [:one, two: 2]) |> Enum.sort()
end
def start24 do
Keyword.values(a: 1, b: 2, a: 3)
end
end
|
lib/beam/keyword/keyword.ex
| 0.667256
| 0.852383
|
keyword.ex
|
starcoder
|
defmodule Elixirdo.Instance.MonadTrans.State do
alias Elixirdo.Instance.MonadTrans.State, as: StateT
use Elixirdo.Base
use Elixirdo.Typeclass.Monad.Trans, import_typeclasses: true
use Elixirdo.Typeclass.Monad.State, import_monad_state: true
defstruct [:data]
defmodule State do
defstruct [:input, :pos, :user]
end
defmodule Pos do
defstruct [:file, :line, :column]
end
deftype state_t(s, m, a) :: %StateT{data: (s -> m({a, s}))}
def new(data) do
%StateT{data: data}
end
def run(%StateT{data: inner}) do
inner
end
definstance functor(state_t(s, m), m: functor) do
def fmap(f, state_t_a) do
map(
fn functor_a ->
Functor.fmap(fn {a, state} -> {f.(a), state} end, functor_a, m)
end,
state_t_a
)
end
end
definstance applicative(state_t(s, m), m: monad) do
def pure(a) do
do_state(fn s -> {a, s} end, m)
end
def ap(state_t_f, state_t_a) do
new(fn s ->
monad m do
{f, ns} <- run(state_t_f, s)
{a, nns} <- run(state_t_a, ns)
Monad.return({f.(a), nns}, m)
end
end)
end
end
definstance monad(state_t(s, m), m: monad) do
def bind(state_t_a, afb) do
new(fn s ->
monad m do
{a, ns} <- run(state_t_a, s)
run(afb.(a), ns)
end
end)
end
end
definstance monad_trans(state_t(s, m), m: monad) do
def lift(monad_a) do
new(fn s ->
Monad.lift_m(fn a -> {a, s} end, monad_a, m)
end)
end
end
definstance monad_state(state_t(s, m), m: monad) do
def state(f) do
do_state(f, m)
end
end
def run(state_t_a, state) do
run(state_t_a).(state)
end
def map(f, state_t_a) do
new(fn state -> f.(run(state_t_a, state)) end)
end
def eval(state_t_a, state, m \\ :monad) do
fn {a, _} -> a end |> Monad.lift_m(run(state_t_a, state), m)
end
def exec(state_t_a, state, m \\ :monad) do
fn {_, s} -> s end |> Monad.lift_m(run(state_t_a, state), m)
end
defp do_state(f, m) do
new(fn state ->
Monad.return(f.(state), m)
end)
end
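  # Usage sketch (the inner "monad" here is just an :ok-tagged tuple for
  # illustration; only new/1 and run/2 from this module are exercised):
  #
  #     inc = StateT.new(fn s -> {:ok, {s, s + 1}} end)
  #     StateT.run(inc, 41)
  #     #=> {:ok, {41, 42}}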
end
|
lib/elixirdo/instance/monad_trans/state.ex
| 0.528777
| 0.565089
|
state.ex
|
starcoder
|
defmodule ApiWeb.Params do
@moduledoc """
Parses request params into domain datastructures.
"""
## Defaults
@max_limit 100
@default_params ~w(include sort page filter fields api_key)
@doc """
Returns a Keyword list of options from JSONAPI query params.
Supported options:
  * `:offset` - a `"page"` key with a map containing `"offset"`.
  Resulting options include `:offset`.
  * `:limit` - a `"page"` key with a map containing `"limit"`.
  Resulting options include `:limit`.
  * `:distance` - `"latitude"` and `"longitude"` keys, either at the top level
  or nested under `"filter"`.
  Resulting options include `:latitude` and `:longitude`.
  * `:order_by` - a `"sort"` key containing a field to sort by. The key may
  be optionally prefixed with a `-`, such as "-name" for descending order;
  otherwise ascending order is assumed.
  Resulting options include `:order_by`, for example `{:id, :asc}`.
## Examples
iex> ApiWeb.Params.filter_opts(%{"page" => %{"offset" => 0}}, [:offset], build_conn())
%{offset: 0}
iex> ApiWeb.Params.filter_opts(%{"page" => %{"limit" => 10}}, [:limit], build_conn())
%{limit: 10}
iex> ApiWeb.Params.filter_opts(%{"sort" => "name"}, [:order_by], build_conn())
%{order_by: [{:name, :asc}]}
iex> ApiWeb.Params.filter_opts(%{"sort" => "-name,value"}, [:order_by], build_conn())
%{order_by: [{:name, :desc}, {:value, :asc}]}
iex> ApiWeb.Params.filter_opts(%{"sort" => "-name"}, [:order_by], build_conn(), order_by: [name: :asc])
%{order_by: [name: :desc]}
"""
def filter_opts(params, options, conn, acc \\ %{}) do
Enum.reduce(options, Map.new(acc), fn opt, acc ->
filter_opt(opt, params, conn, acc)
end)
end
defp filter_opt(:offset, %{"page" => %{"offset" => offset}}, _conn, acc) do
case parse_int(offset) do
{:ok, offset} when offset >= 0 -> Map.put(acc, :offset, offset)
_ -> acc
end
end
defp filter_opt(:offset, _params, _conn, acc), do: acc
defp filter_opt(:limit, %{"page" => %{"limit" => limit}}, _conn, acc) do
case parse_int(limit) do
{:ok, limit} when limit > 0 and limit <= @max_limit ->
Map.put(acc, :limit, limit)
_ ->
acc
end
end
defp filter_opt(:limit, _params, _conn, acc), do: acc
defp filter_opt(:distance, %{"filter" => %{"latitude" => lat, "longitude" => lng}}, _conn, acc),
do: Map.merge(acc, %{latitude: lat, longitude: lng})
defp filter_opt(:distance, %{"filter" => %{"latitude" => lat}, "longitude" => lng}, _conn, acc),
do: Map.merge(acc, %{latitude: lat, longitude: lng})
defp filter_opt(:distance, %{"filter" => %{"longitude" => lng}, "latitude" => lat}, _conn, acc),
do: Map.merge(acc, %{latitude: lat, longitude: lng})
defp filter_opt(:distance, %{"longitude" => lng, "latitude" => lat}, _conn, acc),
do: Map.merge(acc, %{latitude: lat, longitude: lng})
defp filter_opt(:distance, _params, _conn, acc), do: acc
defp filter_opt(:order_by, %{"sort" => fields}, conn, acc) do
order_by =
for field <- split_on_comma(fields) do
case field do
"-" <> desc_field ->
{String.to_existing_atom(desc_field), :desc}
asc_field ->
{String.to_existing_atom(asc_field), :asc}
end
end
Map.put(acc, :order_by, order_by)
rescue
ArgumentError ->
if conn.assigns.api_version >= "2019-07-01" do
Map.put(acc, :order_by, [{:invalid, :asc}])
else
acc
end
end
defp filter_opt(:order_by, _params, _conn, acc), do: acc
@doc """
  Converts a comma-delimited string into a list of integer values, skipping
  invalid entries and duplicates.
## Examples
iex> ApiWeb.Params.integer_values("1,2,3")
[1, 2, 3]
iex> ApiWeb.Params.integer_values("1,not_number,1")
[1]
"""
def integer_values(""), do: []
def integer_values(str) do
str
|> String.split(",")
|> Stream.map(&int(&1))
|> Stream.filter(& &1)
|> Enum.uniq()
end
@doc """
Fetches and casts latitude, longitude, and optional radius from params.
## Examples
iex> ApiWeb.Params.fetch_coords(%{"latitude" => "1.0", "longitude" => "-2.0"})
{:ok, {1.0, -2.0, 0.01}}
iex> ApiWeb.Params.fetch_coords(%{"latitude" => "1.0", "longitude" => "-2.0", "radius" => "5"})
{:ok, {1.0, -2.0, 5.0}}
iex> ApiWeb.Params.fetch_coords(%{"latitude" => "1.0", "longitude" => "nope"})
:error
iex> ApiWeb.Params.fetch_coords(%{})
:error
"""
def fetch_coords(%{"latitude" => lat, "longitude" => long} = params) do
with {parsed_lat, ""} <- Float.parse(lat),
{parsed_long, ""} <- Float.parse(long),
{radius, ""} <- Float.parse(Map.get(params, "radius", "0.01")) do
{:ok, {parsed_lat, parsed_long, radius}}
else
_ -> :error
end
end
def fetch_coords(%{}), do: :error
@doc """
Splits a param key by comma into a list of values.
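  ## Examples
  iex> ApiWeb.Params.split_on_comma(%{"types" => "bus,rail"}, "types")
  ["bus", "rail"]
  iex> ApiWeb.Params.split_on_comma(%{}, "types")
  []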
"""
@spec split_on_comma(%{any => String.t()}, any) :: [String.t()]
def split_on_comma(params, name) do
case Map.fetch(params, name) do
{:ok, value} -> split_on_comma(value)
:error -> []
end
end
@doc """
Splits a string on comma, filtering blank values and duplicates.
## Examples
iex> ApiWeb.Params.split_on_comma("a,b,c")
["a", "b", "c"]
iex> ApiWeb.Params.split_on_comma("dup,,dup")
["dup"]
iex> ApiWeb.Params.split_on_comma(nil)
[]
"""
@spec split_on_comma(String.t() | nil) :: [String.t()]
def split_on_comma(str) when is_binary(str) and str != "" do
str
|> String.splitter(",", trim: true)
|> Enum.uniq()
end
def split_on_comma(_) do
[]
end
@doc """
Parses the direction_id out of a parameter map.
"""
@spec direction_id(%{String.t() => String.t()}) :: Model.Direction.id() | nil
def direction_id(params)
def direction_id(%{"direction_id" => "0"}), do: 0
def direction_id(%{"direction_id" => "1"}), do: 1
def direction_id(_), do: nil
@doc """
Parses a list of route types out of a parameter map
"""
@spec route_types(%{String.t() => String.t()}) :: [Model.Route.route_type()]
def route_types(%{"route_type" => route_types}), do: integer_values(route_types)
def route_types(_), do: []
@doc """
  Parses an integer value from params.
## Examples
iex> ApiWeb.Params.parse_int("123")
{:ok, 123}
iex> ApiWeb.Params.parse_int("123.4")
:error
iex> ApiWeb.Params.parse_int(123)
{:ok, 123}
iex> ApiWeb.Params.parse_int(nil)
:error
"""
def parse_int(num) when is_integer(num), do: {:ok, num}
def parse_int(str) when is_binary(str) do
case Integer.parse(str) do
{num, ""} -> {:ok, num}
_ -> :error
end
end
def parse_int(_val), do: :error
@doc """
  Returns an integer value from params, or `nil` if invalid or missing.
## Examples
iex> ApiWeb.Params.int("123")
123
iex> ApiWeb.Params.int("123.4")
nil
iex> ApiWeb.Params.int(123)
123
iex> ApiWeb.Params.int(nil)
nil
"""
def int(val) do
case parse_int(val) do
{:ok, val} -> val
:error -> nil
end
end
@doc """
Returns a flattened map of filtered JSON-API query params.
Query params that are in the `filter` namespace have priority over duplicate
query params.
"""
@spec filter_params(map, [String.t()], Plug.Conn.t()) ::
{:ok, map} | {:error, atom, [String.t()]}
def filter_params(params, keys, conn) do
with top_level_params <- Map.drop(params, @default_params),
{:ok, filtered1} <- validate_filters(top_level_params, keys, conn),
{:ok, filtered2} <- validate_filters(Map.get(params, "filter"), keys, conn) do
{:ok, Map.merge(filtered1, filtered2)}
else
{:error, _, _} = error -> error
end
end
@spec validate_filters(map, [String.t()], Plug.Conn.t()) ::
{:ok, map} | {:error, atom, [String.t()]}
def validate_filters(nil, _keys, _conn), do: {:ok, %{}}
def validate_filters(params, keys, conn) do
case params do
filter when is_map(filter) ->
bad_filters = Map.keys(filter) -- keys
if conn.assigns.api_version < "2019-04-05" or bad_filters == [] do
{:ok, Map.take(filter, keys)}
else
{:error, :bad_filter, bad_filters}
end
_ ->
{:ok, %{}}
end
end
@spec validate_includes(map, [String.t()], Plug.Conn.t()) :: :ok | {:error, atom, [String.t()]}
def validate_includes(_params, _includes, %{assigns: %{api_version: version}})
when version < "2019-04-05",
do: :ok
def validate_includes(%{"include" => values}, includes, _conn) when is_binary(values) do
split =
values
|> String.split(",", trim: true)
|> Enum.map(&(&1 |> String.split(".") |> List.first()))
includes_set = MapSet.new(includes)
bad_includes = Enum.filter(split, fn el -> el not in includes_set end)
if bad_includes == [] do
:ok
else
{:error, :bad_include, bad_includes}
end
end
def validate_includes(%{"include" => values}, _includes, _conn) when is_map(values) do
{:error, :bad_include, Map.keys(values)}
end
def validate_includes(_params, _includes, _conn), do: :ok
@spec validate_show_params(map, Plug.Conn.t()) :: :ok | {:error, atom, [String.t()]}
def validate_show_params(params, conn)
def validate_show_params(params, %{assigns: %{api_version: version}})
when version >= "2019-04-05" do
bad_query_params =
params
|> Map.drop(["id", "filter", "include", "fields"])
|> Map.keys()
bad_filters =
case Map.get(params, "filter") do
%{} = filters ->
Map.keys(filters)
_ ->
[]
end
case {bad_query_params, bad_filters} do
{[], []} -> :ok
{a, b} -> {:error, :bad_filter, a ++ b}
end
end
def validate_show_params(_params, _conn) do
:ok
end
end
|
apps/api_web/lib/api_web/params.ex
| 0.887296
| 0.403802
|
params.ex
|
starcoder
|
defmodule WebDriver do
use Application
@moduledoc """
Version: #{ WebDriver.Mixfile.project[:version] }
This is the Elixir WebDriver application. It can be used to drive a
WebDriver enabled browser via Elixir code.
The current version supports PhantomJS, ChromeDriver, FireFox and remote
Web Driver servers (e.g. Selenium).
All drivers except 'remote' manage running the browser in the application
supervision tree.
This code is written to be very similar to the Selenium bindings for other
languages.
"""
defmodule Config do
defstruct browser: :phantomjs, name: nil, root_url: ""
@moduledoc """
Configuration for a WebDriver browser instance.
Note that this record will have fields added as development of
the library progresses.
* `browser` - The type of browser, :phantomjs or :firefox
* `name` - An atom to refer to the browser for later calls.
* `root_url` - Used only for the Remote driver. This is the base url
of the webdriver server. It must be a complete URL eg: "http://localhost:5555/wd/hub"
This value is ignored for the other drivers, which set up their own root_url.
"""
end
@doc """
Start the application. This is a callback called by
:application.start :webdriver
and should probably not be called directly.
"""
def start :normal, config do
WebDriver.Supervisor.start_link config
end
@doc """
Callback to clean up on exit. Currently does nothing much.
"""
def stop _config do
:ok
end
@doc """
Start a browser with the given configuration.
The _config_ parameter is a WebDriver.Config struct defined as
``` defstruct browser: :phantomjs, name: nil, root_url: "" ```
  Currently Config is very minimal; future versions will add to this.
  Browser can be either :phantomjs or :firefox.
*Note that at the moment Firefox support is highly experimental.*
The name parameter is an atom with which you can reference the browser
process for further calls.
Returns ```{:ok, pid}``` or ```{:error, reason}```
Example:
iex> config = %WebDriver.Config{browser: :phantomjs, name: :test_browser}
iex> WebDriver.start_browser config
Starting phantomjs
Phantom js started
  {:ok, #PID<0.235.0>}
"""
def start_browser config do
WebDriver.Supervisor.start_browser config
end
@doc """
Stop the web browser referred to by name. You can also use the pid of the
process if you wish.
This causes the browser and all associated sessions to be terminated.
"""
def stop_browser name do
WebDriver.Supervisor.stop_browser name
end
@doc """
Stops all browsers and sessions that are running.
"""
def stop_all_browsers do
Enum.each browsers, fn(browser) -> stop_browser(browser) end
end
@doc """
Returns a list of the process names of all browsers that are running.
"""
def browsers do
children = :supervisor.which_children :webdriver
Enum.map children, fn(child) -> get_browser_name(child) end
end
@doc """
Start a session on the specified browser.
You must specify a name for the session in order to refer to it
for further calls to the Session module.
Returns ```{:ok, pid}``` or ```{:error, reason}```
"""
def start_session browser, session_name do
:gen_server.call browser, {:start_session, session_name}
end
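  # Usage sketch (assumes a browser named :test_browser was already started
  # via start_browser/1 above):
  #
  #     {:ok, _pid} = WebDriver.start_session :test_browser, :my_session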
@doc """
Stop session. Kill a session on the current browser.
This will attempt to terminate the session in the browser then it will
stop the process running the session in the VM.
"""
def stop_session session_name do
WebDriver.Session.stop session_name
end
@doc """
Returns a list of all the process names of all sessions that are running.
"""
def sessions do
:lists.flatten(Enum.map browsers, fn(browser) -> sessions(browser) end)
end
@doc """
Returns a list of all the process names of sessions running on the specified browser.
"""
def sessions browser do
{:ok, s} = :gen_server.call browser, :sessions
s
end
defp get_browser_name {name, _pid, :worker, _mods} do
name
end
end
|
lib/webdriver.ex
| 0.79854
| 0.659609
|
webdriver.ex
|
starcoder
|
defmodule Salemove.HttpClient.Middleware.Logger do
@behaviour Tesla.Middleware
@moduledoc """
Log requests as single line.
Logs request method, url, response status and time taken in milliseconds.
### Example usage
```
defmodule MyClient do
use Tesla
plug Salemove.HttpClient.Middleware.Logger,
level: %{
100..399 => :info,
422 => :info,
400..499 => :warn,
500..600 => :error
}
end
```
### Logger output
```
2017-09-30 13:39:06.663 [info] GET http://example.com -> 200 (736.988 ms)
```
"""
require Logger
def call(env, next, opts) do
time_start = System.monotonic_time()
with {:ok, env} <- Tesla.run(env, next) do
_ = log(env, elapsed_ms(time_start), opts)
{:ok, env}
else
{:error, error} ->
log(env, error, elapsed_ms(time_start), opts)
{:error, error}
end
end
defp log(env, %Tesla.Error{reason: :timeout}, elapsed_ms, _opts) do
message = "#{normalize_method(env)} #{env.url} -> :timeout (#{elapsed_ms} ms)"
Logger.log(:warn, message)
end
defp log(env, %Tesla.Error{reason: reason}, elapsed_ms, opts) do
log_status(0, "#{normalize_method(env)} #{env.url} -> #{inspect(reason)} (#{elapsed_ms} ms)", opts)
end
defp log(env, error, elapsed_ms, opts) do
log_status(0, "#{normalize_method(env)} #{env.url} -> #{inspect(error)} (#{elapsed_ms} ms)", opts)
end
defp log(env, elapsed_ms, opts) do
message = "#{normalize_method(env)} #{env.url} -> #{env.status} (#{elapsed_ms} ms)"
log_status(env.status, message, opts)
end
defp normalize_method(env) do
env.method |> to_string() |> String.upcase()
end
defp elapsed_ms(from) do
now = System.monotonic_time()
us = System.convert_time_unit(now - from, :native, :microsecond)
:io_lib.format("~.3f", [us / 1000])
end
defp log_status(status, message, opts) do
levels = Keyword.get(opts || [], :level)
status
|> status_to_level(levels)
|> Logger.log(message)
end
defp status_to_level(status, levels) when is_map(levels) do
case levels do
%{^status => level} -> level
levels -> find_matching_level(levels, status) || status_to_level(status, nil)
end
end
defp status_to_level(status, _) do
cond do
status >= 400 || status == 0 -> :error
status >= 300 -> :warn
true -> :info
end
end
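  # For example, with `level: %{100..399 => :info, 500..600 => :error}` a 404
  # response misses both the exact-key lookup and the range scan in
  # find_matching_level/2, so it falls back to the default clause above
  # (status >= 400 -> :error).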
defp find_matching_level(levels, status) do
Enum.find_value(levels, fn
{%Range{} = range, level} -> if status in range, do: level
{^status, level} -> level
_ -> false
end)
end
end
|
lib/salemove/http_client/middleware/logger.ex
| 0.809765
| 0.646767
|
logger.ex
|
starcoder
|
defmodule RigCloudEvents.Parser.PartialParser do
@moduledoc """
Error-tolerant reader for JSON encoded CloudEvents.
Interprets the passed data structure as little as possible. The idea comes from the
CloudEvents spec that states that JSON payloads ("data") are encoded along with the
envelope ("context attributes"). This parser only interprets fields that are
required for RIG to operate and skips the (potentially large) data payload.
"""
@behaviour RigCloudEvents.Parser
alias RigCloudEvents.Parser
alias Jaxon.Event, as: JaxonToken
alias Jaxon.Parser, as: JaxonParser
@type t :: [JaxonToken.t()]
@impl true
@spec parse(Parser.json_string()) :: t
defdelegate parse(json), to: JaxonParser
# ---
@impl true
@spec context_attribute(t, Parser.attribute()) ::
{:ok, value :: any}
| {:error, {:not_found, Parser.attribute(), t}}
| {:error, {:non_scalar_value, Parser.attribute(), t}}
| {:error, any}
def context_attribute(tokens, attr_name) do
value(tokens, attr_name)
end
# ---
@impl true
@spec extension_attribute(
t,
Parser.extension(),
Parser.attribute()
) ::
{:ok, value :: any}
| {:error, {:not_found, Parser.attribute(), t}}
| {:error, {:not_an_object | :non_scalar_value, Parser.attribute(), t}}
| {:error, any}
def extension_attribute(tokens, extension_name, attr_name) do
case apply_lens(tokens, extension_name) do
[] -> {:error, {:not_found, extension_name, tokens}}
[:start_object | _] = extension -> value(extension, attr_name)
tokens -> {:error, {:not_an_object, extension_name, tokens}}
end
end
# ---
@impl true
@spec find_value(t, Parser.json_pointer()) ::
{:ok, value :: any}
| {:error, {:not_found, location :: String.t(), t}}
| {:error, {:non_scalar_value, location :: String.t(), t}}
| {:error, any}
def find_value(json_tokens, "/" <> json_pointer) do
# See https://tools.ietf.org/html/rfc6901#section-4
reference_tokens =
for token <- String.split(json_pointer, "/"),
do: token |> String.replace("~1", "/") |> String.replace("~0", "~")
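    # e.g. the pointer "/data/foo~1bar" yields ["data", "foo/bar"] (RFC 6901 unescaping)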
    # We can't do much if the pointer goes into data while data is still encoded.
if points_into_encoded_data(json_tokens, reference_tokens) do
{:error, :cannot_extract_from_encoded_data}
else
do_find_value(json_tokens, reference_tokens)
end
end
def find_value(tokens, "" = _the_whole_document),
do: {:error, {:non_scalar_value, "", tokens}}
def find_value(_tokens, "#" <> _),
do: raise("The URI fragment identifier representation is not supported.")
# ---
defp points_into_encoded_data(json_tokens, reference_tokens)
defp points_into_encoded_data(json_tokens, [ref_token | rest])
when ref_token == "data" and rest != [] do
# `data` is encoded if, and only if, `contenttype` is set.
case value(json_tokens, "contenttype") do
{:error, {:not_found, _, _}} ->
# `contenttype` is not set, so `data` must be already parsed.
false
{:ok, _} ->
# `contenttype` is set, so `data` is still encoded.
true
end
end
defp points_into_encoded_data(_, _), do: false
# ---
defp do_find_value(json_tokens, reference_tokens)
defp do_find_value(json_tokens, [ref_token | []]),
do: value(json_tokens, ref_token)
defp do_find_value(json_tokens, [ref_token | remaining_ref_tokens]),
do: apply_lens(json_tokens, ref_token) |> do_find_value(remaining_ref_tokens)
# ---
defp value(tokens, prop) do
case apply_lens(tokens, prop) do
[] -> {:error, {:not_found, prop, tokens}}
[{:error, error} | _] -> {:error, error}
[{_, value}] -> {:ok, value}
[nil] -> {:ok, nil}
[:start_object | _] = tokens -> {:error, {:non_scalar_value, prop, tokens}}
[:start_array | _] = tokens -> {:error, {:non_scalar_value, prop, tokens}}
end
end
def apply_lens(tokens, attr_name) do
case tokens do
[{:string, ^attr_name} | [:colon | tokens]] -> read_val(tokens)
[{:string, _key} | [:colon | tokens]] -> skip_val(tokens) |> apply_lens(attr_name)
[:start_object | tokens] -> apply_lens(tokens, attr_name)
[:end_object] -> []
[] -> []
[{:error, _} | _] -> tokens
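      # Note: arrays at this level are not handled yet; value/2 has no clause
      # for :not_implemented, so callers hitting this will raise a CaseClauseError.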
[:start_array | _] -> :not_implemented
end
end
# ---
defp read_val(tokens), do: do_read_val(tokens, tokens)
# Assumes tokens is right after a colon.
defp do_read_val(all_tokens, remaining_tokens, n_processed \\ 0, obj_depth \\ 0, arr_depth \\ 0)
# The exit condition: comma or end of input at the root level:
defp do_read_val(tokens, [:comma | _], n_processed, 0, 0), do: Enum.take(tokens, n_processed)
defp do_read_val(tokens, [:end_object], n_processed, 0, 0), do: Enum.take(tokens, n_processed)
defp do_read_val(tokens, [], n_processed, 0, 0), do: Enum.take(tokens, n_processed)
defp do_read_val(tokens, [:start_object | rest], n_processed, obj_depth, arr_depth),
do: do_read_val(tokens, rest, n_processed + 1, obj_depth + 1, arr_depth)
defp do_read_val(tokens, [:end_object | rest], n_processed, obj_depth, arr_depth)
when obj_depth > 0,
do: do_read_val(tokens, rest, n_processed + 1, obj_depth - 1, arr_depth)
defp do_read_val(tokens, [:start_array | rest], n_processed, obj_depth, arr_depth),
do: do_read_val(tokens, rest, n_processed + 1, obj_depth, arr_depth + 1)
defp do_read_val(tokens, [:end_array | rest], n_processed, obj_depth, arr_depth)
when arr_depth > 0,
do: do_read_val(tokens, rest, n_processed + 1, obj_depth, arr_depth - 1)
# "Skip" all other tokens:
defp do_read_val(tokens, [_ | rest], n_processed, obj_depth, arr_depth),
do: do_read_val(tokens, rest, n_processed + 1, obj_depth, arr_depth)
# ---
# Assumes tokens is right after a colon and skips until right before the next key or the end.
defp skip_val(tokens, obj_depth \\ 0, arr_depth \\ 0)
defp skip_val([:start_object | tokens], obj_depth, arr_depth) do
skip_val(tokens, obj_depth + 1, arr_depth)
end
defp skip_val([:end_object | tokens], obj_depth, arr_depth) when obj_depth > 0 do
skip_val(tokens, obj_depth - 1, arr_depth)
end
defp skip_val([:start_array | tokens], obj_depth, arr_depth) do
skip_val(tokens, obj_depth, arr_depth + 1)
end
defp skip_val([:end_array | tokens], obj_depth, arr_depth) when arr_depth > 0 do
skip_val(tokens, obj_depth, arr_depth - 1)
end
defp skip_val([_ | tokens], obj_depth, arr_depth) when obj_depth > 0 or arr_depth > 0 do
skip_val(tokens, obj_depth, arr_depth)
end
defp skip_val([{_, _} | tokens], 0, 0), do: skip_val(tokens, 0, 0)
defp skip_val([nil | tokens], 0, 0), do: skip_val(tokens, 0, 0)
defp skip_val([:comma | tokens], 0, 0), do: tokens
# The root object:
defp skip_val([:end_object], 0, 0), do: []
defp skip_val([], _, _), do: []
end
|
apps/rig_cloud_events/lib/rig_cloud_events/parser/partial_parser.ex
| 0.858199
| 0.489626
|
partial_parser.ex
|
starcoder
|
defmodule ForgeAbi.AccountState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
balance: ForgeAbi.BigUint.t() | nil,
nonce: non_neg_integer,
num_txs: non_neg_integer,
address: String.t(),
pk: binary,
type: ForgeAbi.WalletType.t() | nil,
moniker: String.t(),
context: ForgeAbi.StateContext.t() | nil,
issuer: String.t(),
gas_balance: ForgeAbi.BigUint.t() | nil,
migrated_to: [String.t()],
migrated_from: [String.t()],
num_assets: non_neg_integer,
stake: ForgeAbi.StakeContext.t() | nil,
pinned_files: ForgeAbi.CircularQueue.t() | nil,
poke: ForgeAbi.PokeInfo.t() | nil,
deposit_received: ForgeAbi.BigUint.t() | nil,
withdraw_items: ForgeAbi.CircularQueue.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [
:balance,
:nonce,
:num_txs,
:address,
:pk,
:type,
:moniker,
:context,
:issuer,
:gas_balance,
:migrated_to,
:migrated_from,
:num_assets,
:stake,
:pinned_files,
:poke,
:deposit_received,
:withdraw_items,
:data
]
field :balance, 1, type: ForgeAbi.BigUint
field :nonce, 2, type: :uint64
field :num_txs, 3, type: :uint64
field :address, 4, type: :string
field :pk, 5, type: :bytes
field :type, 6, type: ForgeAbi.WalletType, deprecated: true
field :moniker, 7, type: :string
field :context, 8, type: ForgeAbi.StateContext
field :issuer, 9, type: :string
field :gas_balance, 10, type: ForgeAbi.BigUint
field :migrated_to, 13, repeated: true, type: :string
field :migrated_from, 14, repeated: true, type: :string
field :num_assets, 15, type: :uint64
field :stake, 16, type: ForgeAbi.StakeContext
field :pinned_files, 17, type: ForgeAbi.CircularQueue
field :poke, 18, type: ForgeAbi.PokeInfo
field :deposit_received, 19, type: ForgeAbi.BigUint
field :withdraw_items, 20, type: ForgeAbi.CircularQueue
field :data, 50, type: Google.Protobuf.Any
end
defmodule ForgeAbi.AssetState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
owner: String.t(),
moniker: String.t(),
readonly: boolean,
transferrable: boolean,
ttl: non_neg_integer,
consumed_time: Google.Protobuf.Timestamp.t() | nil,
issuer: String.t(),
parent: String.t(),
stake: ForgeAbi.StakeContext.t() | nil,
context: ForgeAbi.StateContext.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [
:address,
:owner,
:moniker,
:readonly,
:transferrable,
:ttl,
:consumed_time,
:issuer,
:parent,
:stake,
:context,
:data
]
field :address, 1, type: :string
field :owner, 2, type: :string
field :moniker, 3, type: :string
field :readonly, 4, type: :bool
field :transferrable, 5, type: :bool
field :ttl, 6, type: :uint32
field :consumed_time, 7, type: Google.Protobuf.Timestamp
field :issuer, 8, type: :string
field :parent, 9, type: :string
field :stake, 13, type: ForgeAbi.StakeContext
field :context, 14, type: ForgeAbi.StateContext
field :data, 50, type: Google.Protobuf.Any
end
defmodule ForgeAbi.CoreProtocol do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
address: String.t()
}
defstruct [:name, :address]
field :name, 1, type: :string
field :address, 2, type: :string
end
defmodule ForgeAbi.ForgeState.TasksEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: non_neg_integer,
value: ForgeAbi.UpgradeTasks.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :uint64
field :value, 2, type: ForgeAbi.UpgradeTasks
end
defmodule ForgeAbi.ForgeState.StakeSummaryEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: non_neg_integer,
value: ForgeAbi.StakeSummary.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :uint32
field :value, 2, type: ForgeAbi.StakeSummary
end
defmodule ForgeAbi.ForgeState.GasEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: non_neg_integer
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: :uint32
end
defmodule ForgeAbi.ForgeState.AccountConfigEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: ForgeAbi.AccountConfig.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: ForgeAbi.AccountConfig
end
defmodule ForgeAbi.ForgeState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
consensus: ForgeAbi.ConsensusParams.t() | nil,
tasks: %{non_neg_integer => ForgeAbi.UpgradeTasks.t() | nil},
stake_summary: %{non_neg_integer => ForgeAbi.StakeSummary.t() | nil},
version: String.t(),
token: ForgeAbi.ForgeToken.t() | nil,
tx_config: ForgeAbi.TransactionConfig.t() | nil,
protocols: [ForgeAbi.CoreProtocol.t()],
gas: %{String.t() => non_neg_integer},
upgrade_info: ForgeAbi.UpgradeInfo.t() | nil,
account_config: %{String.t() => ForgeAbi.AccountConfig.t() | nil},
token_swap_config: ForgeAbi.TokenSwapConfig.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [
:address,
:consensus,
:tasks,
:stake_summary,
:version,
:token,
:tx_config,
:protocols,
:gas,
:upgrade_info,
:account_config,
:token_swap_config,
:data
]
field :address, 1, type: :string
field :consensus, 2, type: ForgeAbi.ConsensusParams
field :tasks, 3, repeated: true, type: ForgeAbi.ForgeState.TasksEntry, map: true
field :stake_summary, 4, repeated: true, type: ForgeAbi.ForgeState.StakeSummaryEntry, map: true
field :version, 5, type: :string
field :token, 8, type: ForgeAbi.ForgeToken
field :tx_config, 9, type: ForgeAbi.TransactionConfig
field :protocols, 12, repeated: true, type: ForgeAbi.CoreProtocol
field :gas, 13, repeated: true, type: ForgeAbi.ForgeState.GasEntry, map: true
field :upgrade_info, 14, type: ForgeAbi.UpgradeInfo
field :account_config, 16,
repeated: true,
type: ForgeAbi.ForgeState.AccountConfigEntry,
map: true
field :token_swap_config, 17, type: ForgeAbi.TokenSwapConfig
field :data, 2047, type: Google.Protobuf.Any
end
defmodule ForgeAbi.RootState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
account: binary,
asset: binary,
receipt: binary,
protocol: binary,
governance: binary,
custom: binary
}
defstruct [:address, :account, :asset, :receipt, :protocol, :governance, :custom]
field :address, 1, type: :string
field :account, 2, type: :bytes
field :asset, 3, type: :bytes
field :receipt, 4, type: :bytes
field :protocol, 5, type: :bytes
field :governance, 6, type: :bytes
field :custom, 7, type: :bytes
end
defmodule ForgeAbi.StakeState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
from: String.t(),
to: String.t(),
balance: ForgeAbi.BigUint.t() | nil,
message: String.t(),
context: ForgeAbi.StateContext.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [:address, :from, :to, :balance, :message, :context, :data]
field :address, 1, type: :string
field :from, 2, type: :string
field :to, 3, type: :string
field :balance, 4, type: ForgeAbi.BigUint
field :message, 5, type: :string
field :context, 14, type: ForgeAbi.StateContext
field :data, 15, type: Google.Protobuf.Any
end
defmodule ForgeAbi.StatisticsState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
num_blocks: non_neg_integer,
num_txs: non_neg_integer,
num_stakes: ForgeAbi.BigUint.t() | nil,
num_validators: non_neg_integer,
tx_statistics: ForgeAbi.TxStatistics.t() | nil
}
defstruct [:address, :num_blocks, :num_txs, :num_stakes, :num_validators, :tx_statistics]
field :address, 1, type: :string
field :num_blocks, 2, type: :uint64
field :num_txs, 3, type: :uint64
field :num_stakes, 4, type: ForgeAbi.BigUint
field :num_validators, 5, type: :uint32
field :tx_statistics, 6, type: ForgeAbi.TxStatistics
end
defmodule ForgeAbi.BlacklistState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: [String.t()]
}
defstruct [:address]
field :address, 1, repeated: true, type: :string
end
defmodule ForgeAbi.ProtocolState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
itx: ForgeAbi.DeployProtocolTx.t() | nil,
root_hash: binary,
status: ForgeAbi.ProtocolStatus.t(),
migrated_to: [String.t()],
migrated_from: [String.t()],
context: ForgeAbi.StateContext.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [:address, :itx, :root_hash, :status, :migrated_to, :migrated_from, :context, :data]
field :address, 1, type: :string
field :itx, 2, type: ForgeAbi.DeployProtocolTx
field :root_hash, 3, type: :bytes
field :status, 4, type: ForgeAbi.ProtocolStatus, enum: true
field :migrated_to, 12, repeated: true, type: :string
field :migrated_from, 13, repeated: true, type: :string
field :context, 14, type: ForgeAbi.StateContext
field :data, 15, type: Google.Protobuf.Any
end
defmodule ForgeAbi.SwapState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
hash: String.t(),
address: String.t(),
hashkey: binary,
sender: String.t(),
receiver: String.t(),
value: ForgeAbi.BigUint.t() | nil,
assets: [String.t()],
locktime: non_neg_integer,
hashlock: binary,
context: ForgeAbi.StateContext.t() | nil
}
defstruct [
:hash,
:address,
:hashkey,
:sender,
:receiver,
:value,
:assets,
:locktime,
:hashlock,
:context
]
field :hash, 1, type: :string
field :address, 2, type: :string
field :hashkey, 3, type: :bytes
field :sender, 4, type: :string
field :receiver, 5, type: :string
field :value, 6, type: ForgeAbi.BigUint
field :assets, 7, repeated: true, type: :string
field :locktime, 8, type: :uint32
field :hashlock, 9, type: :bytes
field :context, 10, type: ForgeAbi.StateContext
end
defmodule ForgeAbi.SwapStatistics do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
locked_value_out: ForgeAbi.BigUint.t() | nil,
locked_value_in: ForgeAbi.BigUint.t() | nil,
locked_assets_out: non_neg_integer,
locked_assets_in: non_neg_integer
}
defstruct [:address, :locked_value_out, :locked_value_in, :locked_assets_out, :locked_assets_in]
field :address, 1, type: :string
field :locked_value_out, 2, type: ForgeAbi.BigUint
field :locked_value_in, 3, type: ForgeAbi.BigUint
field :locked_assets_out, 4, type: :uint32
field :locked_assets_in, 5, type: :uint32
end
defmodule ForgeAbi.DelegateOpState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
rule: String.t(),
num_txs: non_neg_integer,
num_txs_delta: non_neg_integer,
balance: ForgeAbi.BigUint.t() | nil,
balance_delta: ForgeAbi.BigUint.t() | nil
}
defstruct [:rule, :num_txs, :num_txs_delta, :balance, :balance_delta]
field :rule, 1, type: :string
field :num_txs, 2, type: :uint64
field :num_txs_delta, 3, type: :uint64
field :balance, 4, type: ForgeAbi.BigUint
field :balance_delta, 5, type: ForgeAbi.BigUint
end
defmodule ForgeAbi.DelegateState.OpsEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: ForgeAbi.DelegateOpState.t() | nil
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: ForgeAbi.DelegateOpState
end
defmodule ForgeAbi.DelegateState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
address: String.t(),
ops: %{String.t() => ForgeAbi.DelegateOpState.t() | nil},
context: ForgeAbi.StateContext.t() | nil,
data: Google.Protobuf.Any.t() | nil
}
defstruct [:address, :ops, :context, :data]
field :address, 1, type: :string
field :ops, 2, repeated: true, type: ForgeAbi.DelegateState.OpsEntry, map: true
field :context, 14, type: ForgeAbi.StateContext
field :data, 15, type: Google.Protobuf.Any
end
defmodule ForgeAbi.AssetFactoryState do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
description: String.t(),
limit: non_neg_integer,
price: ForgeAbi.BigUint.t() | nil,
template: String.t(),
allowed_spec_args: [String.t()],
asset_name: String.t(),
attributes: ForgeAbi.AssetAttributes.t() | nil,
num_created: non_neg_integer,
data: Google.Protobuf.Any.t() | nil
}
defstruct [
:description,
:limit,
:price,
:template,
:allowed_spec_args,
:asset_name,
:attributes,
:num_created,
:data
]
field :description, 1, type: :string
field :limit, 2, type: :uint32
field :price, 3, type: ForgeAbi.BigUint
field :template, 4, type: :string
field :allowed_spec_args, 5, repeated: true, type: :string
field :asset_name, 6, type: :string
field :attributes, 7, type: ForgeAbi.AssetAttributes
field :num_created, 8, type: :uint32
field :data, 15, type: Google.Protobuf.Any
end
|
lib/protobuf/gen/state.pb.ex
| 0.714329
| 0.660857
|
state.pb.ex
|
starcoder
|
defmodule LocalLedger.CachedBalance do
@moduledoc """
This module is an interface to the abstract balances stored in the DB. It is responsible for
caching wallet balances and serves as an interface to retrieve the current balances (which will
either be loaded from a cached balance, computed, or both).
"""
alias LocalLedgerDB.{Wallet, CachedBalance, Entry}
@doc """
Cache all the wallets' balances using a batch stream mechanism for retrieval (1000 at a time). This
is meant to be used in some kind of scheduler, but can also be run manually.
"""
@spec cache_all() :: :ok
def cache_all do
Wallet.stream_all(fn wallet ->
{:ok, calculate_with_strategy(wallet)}
end)
end
@doc """
Get all the balances for the given wallet.
"""
@spec all(%Wallet{}) :: {:ok, map()}
def all(wallet) do
{:ok, get_amounts(wallet)}
end
@doc """
Get the balance for the specified token (token_id) and
the given wallet.
"""
@spec get(%Wallet{}, String.t()) :: {:ok, map()}
def get(wallet, token_id) do
amounts = get_amounts(wallet)
{:ok, %{token_id => amounts[token_id] || 0}}
end
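# Usage sketch (illustrative only - `wallet` is an existing %Wallet{} row and
# the token id is hypothetical):
#
#   {:ok, balances} = LocalLedger.CachedBalance.all(wallet)
#   {:ok, %{"tok_123" => _amount}} = LocalLedger.CachedBalance.get(wallet, "tok_123")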
defp get_amounts(wallet) do
wallet.address
|> CachedBalance.get()
|> calculate_amounts(wallet)
end
defp calculate_amounts(nil, wallet), do: calculate_from_beginning_and_insert(wallet)
defp calculate_amounts(computed_balance, wallet) do
wallet.address
|> Entry.calculate_all_balances(%{
since: computed_balance.computed_at
})
|> add_amounts(computed_balance.amounts)
end
defp add_amounts(amounts_1, amounts_2) do
(Map.keys(amounts_1) ++ Map.keys(amounts_2))
|> Enum.map(fn token_id ->
{token_id, (amounts_1[token_id] || 0) + (amounts_2[token_id] || 0)}
end)
|> Enum.into(%{})
end
defp calculate_with_strategy(wallet) do
:local_ledger
|> Application.get_env(:balance_caching_strategy)
|> calculate_with_strategy(wallet)
end
defp calculate_with_strategy("since_last_cached", wallet) do
case CachedBalance.get(wallet.address) do
nil -> calculate_from_beginning_and_insert(wallet)
computed_balance -> calculate_from_cached_and_insert(wallet, computed_balance)
end
end
defp calculate_with_strategy("since_beginning", wallet) do
calculate_from_beginning_and_insert(wallet)
end
defp calculate_with_strategy(_, wallet) do
calculate_with_strategy("since_beginning", wallet)
end
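# Strategy sketch (hedged - the config key matches the Application.get_env/2
# call above; the value shown is one of the two recognised strategies):
#
#   config :local_ledger, :balance_caching_strategy, "since_last_cached"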
defp calculate_from_beginning_and_insert(wallet) do
computed_at = NaiveDateTime.utc_now()
wallet.address
|> Entry.calculate_all_balances(%{upto: computed_at})
|> insert(wallet, computed_at)
end
defp calculate_from_cached_and_insert(wallet, computed_balance) do
computed_at = NaiveDateTime.utc_now()
wallet.address
|> Entry.calculate_all_balances(%{
since: computed_balance.computed_at,
upto: computed_at
})
|> add_amounts(computed_balance.amounts)
|> insert(wallet, computed_at)
end
defp insert(amounts, wallet, computed_at) do
_ =
if Enum.any?(amounts, fn {_token, amount} -> amount > 0 end) do
{:ok, _} =
CachedBalance.insert(%{
amounts: amounts,
wallet_address: wallet.address,
computed_at: computed_at
})
end
amounts
end
end
|
apps/local_ledger/lib/local_ledger/cached_balance.ex
| 0.796332
| 0.529385
|
cached_balance.ex
|
starcoder
|
defmodule Day7 do
def read_file(path) do
File.stream!(path)
|> parse_input
end
def parse_input(input) do
input
|> Stream.map(&String.split/1)
|> Stream.filter(fn x -> !Enum.empty? x end)
|> Enum.to_list
|> Enum.map(&row/1)
|> Map.new(fn item -> {item[:name], item} end)
end
def row(list) do
list
|> Enum.filter(fn item -> item != "->" end)
|> Enum.map(fn item -> String.trim(item, ",") end)
|> Enum.reduce(%{}, &populate_map/2)
end
def populate_map(item, map) do
cond do
!Map.has_key?(map, :name) -> Map.put(map, :name, item)
!Map.has_key?(map, :weight) -> Map.put(map, :weight, item |> String.trim("(") |> String.trim(")") |> String.to_integer)
true -> Map.update(map, :children, [item], fn children -> children ++ [item |> String.trim(",")] end)
end
end
def find_root(discs) do
find_root(discs, discs |> Map.values |> hd)
end
def find_root(discs, disc) do
parent = parent(discs, disc)
if parent == nil do
disc
else
find_root(discs, parent)
end
end
def parent(discs, disc) do
discs |> Map.values |> Enum.find(fn d -> Enum.find(Map.get(d, :children, []), fn child -> child == disc[:name] end) end)
end
def children(discs, disc) do
Map.get(disc, :children, []) |> Enum.map(fn name -> discs[name] end)
end
def tree_weight(discs, disc) do
disc[:weight] + (subtree_weights(discs, disc) |> Enum.sum())
end
def subtree_weight(discs, disc) do
children(discs, disc) |> Enum.reduce(0, fn child, acc -> acc + child[:weight] + subtree_weight(discs, child) end)
end
def subtree_weights(discs, disc) do
children(discs, disc) |> Enum.map(fn disc -> disc[:weight] + subtree_weight(discs, disc) end)
end
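# Worked example (hypothetical tower, not puzzle input): if sibling subtree
# weights are [251, 251, 243], the lone 243 subtree holds the wrong disc and
# its disc weight must rise by 251 - 243 = 8, which is exactly
# wrong[:weight] - (tree_weight(wrong) - tree_weight(correct)) below.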
def find_unbalanced(discs) do
find_unbalanced(discs, find_root(discs))
end
def find_unbalanced(discs, disc) when is_map(disc) do
children = children(discs, disc)
if is_balanced?(discs, disc) do
0
else
# The imbalance is at this level when every child subtree is itself balanced:
# group the children by total weight and find the odd one out.
if Enum.all?(children, fn child -> is_balanced?(discs, child) end) do
groups = children |> Enum.group_by(fn child -> tree_weight(discs, child) end)
{_, [wrong]} = groups |> Enum.find(fn {_, list} -> length(list) == 1 end)
{_, [correct | _]} = groups |> Enum.find(fn {_, list} -> length(list) > 1 end)
wrong[:weight] - (tree_weight(discs, wrong) - tree_weight(discs, correct))
else
Map.get(disc, :children, []) |> Enum.map(fn child -> find_unbalanced(discs, discs[child]) end) |> Enum.max
end
end
end
def is_balanced?(discs, disc) do
is_balanced?(subtree_weights(discs, disc))
end
def is_balanced?(weights) do
MapSet.new(weights) |> MapSet.size == 1
end
end
|
lib/day7.ex
| 0.541894
| 0.458894
|
day7.ex
|
starcoder
|
defmodule Graphvix.HTMLRecord do
@moduledoc """
Models a graph vertex that uses HTML to generate a table-shaped record.
# Table structure
The Graphviz API allows the basic table-related HTML elements:
* `<table>`
* `<tr>`
* `<td>`
and the `Graphvix` API provides the parallel functions:
* `new/2`
* `tr/1`
* `td/2`
## Example
iex> import Graphvix.HTMLRecord, only: [tr: 1, td: 1, td: 2]
iex> record = HTMLRecord.new([
...> tr([
...> td("a"),
...> td("b")
...> ]),
...> tr([
...> td("c"),
...> td("d")
...> ]),
...> ])
iex> HTMLRecord.to_label(record)
~S(<table>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td>c</td>
<td>d</td>
</tr>
</table>)
# Ports
As with `Graphvix.Record` vertices, port names can be attached to cells. With
`HTMLRecord` vertices, this is done by passing a `:port` key as one of the
attributes in the second argument keyword list for `td/2`.
iex> import Graphvix.HTMLRecord, only: [tr: 1, td: 1, td: 2]
iex> record = HTMLRecord.new([
iex> tr([td("a"), td("b")]),
...> tr([td("c", port: "port_c"), td("d")]),
...> ])
iex> HTMLRecord.to_label(record)
~S(<table>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td port="port_c">c</td>
<td>d</td>
</tr>
</table>)
In addition to `:port`, values for existing HTML keys
* `border`
* `cellpadding`
* `cellspacing`
can be added to cells, and
* `border`
* `cellborder`
* `cellpadding`
* `cellspacing`
can be added to the table at the top-level to style the table and cells.
# Text formatting
Aside from structuring the table, two elements are available for formatting
the content of the cells
* `<font>`
* `<br/>`
with corresponding `Graphvix.HTMLRecord` functions
* `font/2`
* `br/0`
In addition to contents as its first argument, `font/2` can take a keyword list
of properties as its optional second argument.
iex> import Graphvix.HTMLRecord, only: [tr: 1, td: 1, td: 2, br: 0, font: 2]
iex> record = HTMLRecord.new([
iex> tr([td("a"), td(["b", br(), font("B", color: "red", point_size: 100)])]),
...> tr([td("c"), td("d")]),
...> ])
iex> HTMLRecord.to_label(record)
~S(<table>
<tr>
<td>a</td>
<td>b<br/><font color="red" point-size="100">B</font></td>
</tr>
<tr>
<td>c</td>
<td>d</td>
</tr>
</table>)
While maintaining proper nesting (each element contains both opening and closing
tags within its enclosing element), these elements may be nested as desired,
including nesting entire tables inside of cells.
"""
defstruct [
rows: [],
attributes: []
]
@type t :: %__MODULE__{
rows: [__MODULE__.tr],
attributes: keyword()
}
@type tr :: %{cells: __MODULE__.cells}
@type br :: %{tag: :br}
@type font :: %{tag: :font, cell: __MODULE__.one_or_more_cells, attributes: keyword()}
@type td :: %{label: __MODULE__.one_or_more_cells, attributes: keyword()}
@type cell :: String.t |
__MODULE__.br |
__MODULE__.font |
__MODULE__.td |
__MODULE__.t
@type cells :: [__MODULE__.cell]
@type one_or_more_cells :: __MODULE__.cell | [__MODULE__.cell]
alias Graphvix.HTMLRecord
import Graphvix.DotHelpers, only: [indent: 1]
@doc """
Returns a new `HTMLRecord` which can be turned into an HTML table vertex.
It takes two arguments. The first is a list of table rows all returned from
the `tr/1` function.
The second is an optional keyword list of attributes to apply to the table as
a whole. Valid keys for this list are:
* `align`
* `bgcolor`
* `border`
* `cellborder`
* `cellpadding`
* `cellspacing`
* `color`
* `columns`
* `fixedsize`
* `gradientangle`
* `height`
* `href`
* `id`
* `port`
* `rows`
* `sides`
* `style`
* `target`
* `title`
* `tooltip`
* `valign`
* `width`
## Example
iex> import HTMLRecord, only: [tr: 1, td: 1]
iex> HTMLRecord.new([
...> tr([
...> td("a"),
...> td("b")
...> ]),
...> tr([
...> td("c"),
...> td("d")
...> ])
...> ], border: 1, cellspacing: 0, cellborder: 1)
%HTMLRecord{
rows: [
%{cells: [
%{label: "a", attributes: []},
%{label: "b", attributes: []},
]},
%{cells: [
%{label: "c", attributes: []},
%{label: "d", attributes: []},
]}
],
attributes: [
border: 1,
cellspacing: 0,
cellborder: 1
]
}
"""
def new(rows, attributes \\ []) when is_list(rows) do
%__MODULE__{rows: rows, attributes: attributes}
end
@doc """
A helper method to generate a row of a table.
It takes a single argument, which is a list of cells returned by the `td/2`
helper function.
"""
def tr(cells) when is_list(cells) do
%{cells: cells}
end
@doc """
A helper method to generate a single cell of a table.
The first argument is the contents of the cell. It can be a plain string or
a list of other elements.
The second argument is an optional keyword list of attributes to apply to the
cell. Valid keys include:
* `align`
* `balign`
* `bgcolor`
* `border`
* `cellpadding`
* `cellspacing`
* `color`
* `colspan`
* `fixedsize`
* `gradientangle`
* `height`
* `href`
* `id`
* `port`
* `rowspan`
* `sides`
* `style`
* `target`
* `title`
* `tooltip`
* `valign`
* `width`
See the module documentation for `Graphvix.HTMLRecord` for usage examples in context.
"""
def td(label, attributes \\ []) do
%{label: label, attributes: attributes}
end
@doc """
Creates a `<br/>` element as part of a cell in an `HTMLRecord`
A helper method that creates a `<br/>` HTML element as part of a table cell.
See the module documentation for `Graphvix.HTMLRecord` for usage examples in context.
"""
def br, do: %{tag: :br}
@doc """
Creates a `<font>` element as part of a cell in an `HTMLRecord`
A helper method that creates a `<font>` HTML element as part of a table cell.
The first argument to `font/2` is the contents of the cell, which can itself
be a plain string or a list of nested element functions.
The second, optional argument is a keyword list of attributes to determine
the formatting of the contents of the `<font>` tag. Valid keys for this list are
* `color`
* `face`
* `point_size`
## Example
iex> HTMLRecord.font("a", color: "blue", face: "Arial", point_size: 10)
%{tag: :font, cell: "a", attributes: [color: "blue", face: "Arial", point_size: 10]}
"""
def font(cell, attributes \\ []) do
%{tag: :font, cell: cell, attributes: attributes}
end
@doc """
Converts an `HTMLRecord` struct into a valid HTML-like string.
The resulting string can be passed to `Graphvix.Graph.add_vertex/3` as a label
for a vertex.
## Example
iex> import HTMLRecord, only: [tr: 1, td: 1]
iex> record = HTMLRecord.new([
...> tr([
...> td("a"),
...> td("b")
...> ]),
...> tr([
...> td("c"),
...> td("d")
...> ])
...> ], border: 1, cellspacing: 0, cellborder: 1)
iex> HTMLRecord.to_label(record)
~S(<table border="1" cellspacing="0" cellborder="1">
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td>c</td>
<td>d</td>
</tr>
</table>)
"""
def to_label(%__MODULE__{rows: rows, attributes: attributes}) do
[
"<table#{attributes_for_label(attributes)}>",
Enum.map(rows, &tr_to_label/1),
"</table>"
] |> List.flatten |> Enum.join("\n")
end
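# Usage sketch (hedged - the moduledoc notes the label can be passed to
# `Graphvix.Graph.add_vertex/3`; `graph` is a hypothetical existing graph):
#
#   label = HTMLRecord.to_label(record)
#   {graph, v_id} = Graphvix.Graph.add_vertex(graph, label, shape: "plaintext")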
## Private
defp tr_to_label(%{cells: cells}) do
[
"<tr>",
Enum.map(cells, &td_to_label/1),
"</tr>"
] |> List.flatten |> Enum.join("\n") |> indent
end
defp td_to_label(%{label: label, attributes: attributes}) do
[
"<td#{attributes_for_label(attributes)}>",
label_to_string(label),
"</td>"
] |> Enum.join("") |> indent()
end
defp attributes_for_label(attributes) do
case attributes do
[] -> ""
attrs ->
" " <> (attrs |> Enum.map(fn {k, v} ->
~s(#{hyphenize(k)}="#{v}")
end) |> Enum.join(" "))
end
end
defp hyphenize(name) do
name |> to_string |> String.replace("_", "-")
end
defp label_to_string(list) when is_list(list) do
list |> Enum.map(&label_to_string/1) |> Enum.join("")
end
defp label_to_string(%{tag: :br}), do: "<br/>"
defp label_to_string(%{tag: :font, cell: cell, attributes: attributes}) do
[
"<font#{attributes_for_label(attributes)}>",
label_to_string(cell),
"</font>"
] |> Enum.join("")
end
defp label_to_string(table = %HTMLRecord{}) do
HTMLRecord.to_label(table)
end
defp label_to_string(string) when is_bitstring(string), do: string
end
|
lib/graphvix/html_record.ex
| 0.895377
| 0.733422
|
html_record.ex
|
starcoder
|
defmodule Sourceror.LinesCorrector do
@moduledoc false
import Sourceror, only: [get_line: 1, correct_lines: 2]
@doc """
Corrects the line numbers of AST nodes such that they are correctly ordered.
* If a node has no line number, it's assumed to be in the same line as the previous one.
* If a node has a line number higher than the one before, it's kept as is.
* If a node has a line number lower than the one before, it's incremented to be one line higher than its predecessor
* If a node has leading comments, its line number is incremented by the length of the comments list
* If a node has trailing comments, its end_of_expression and end line metadata are set to the line of their last child plus the trailing comments list length
"""
def correct(quoted) do
{ast, _} = Macro.traverse(quoted, %{last_line: 1}, &pre_correct/2, &post_correct/2)
ast
end
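# Sketch (hedged - `Sourceror.parse_string!/1` is part of the same library):
#
#   "foo(1)\nbar(2)"
#   |> Sourceror.parse_string!()
#   |> Sourceror.LinesCorrector.correct()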
defp pre_correct({form, meta, args} = quoted, state) do
{quoted, state} =
cond do
is_nil(meta[:line]) ->
meta = Keyword.put(meta, :line, state.last_line)
{{form, meta, args}, state}
get_line(quoted) < state.last_line ->
correction = state.last_line + 1 - get_line(quoted)
quoted = recursive_correct_lines(quoted, correction)
{quoted, %{state | last_line: get_line(quoted)}}
true ->
{quoted, %{state | last_line: get_line(quoted)}}
end
if has_leading_comments?(quoted) do
leading_comments = length(meta[:leading_comments])
quoted = recursive_correct_lines(quoted, leading_comments + 1)
{quoted, %{state | last_line: meta[:line]}}
else
{quoted, state}
end
end
defp pre_correct(quoted, state) do
{quoted, state}
end
defp post_correct({_, meta, _} = quoted, state) do
last_line = Sourceror.get_end_line(quoted, state.last_line)
last_line =
if has_trailing_comments?(quoted) do
last_line + length(meta[:trailing_comments] || []) + 1
else
last_line
end
quoted =
quoted
|> maybe_correct_end_of_expression(last_line)
|> maybe_correct_end(last_line)
|> maybe_correct_closing(last_line)
{quoted, %{state | last_line: last_line}}
end
defp post_correct(quoted, state) do
{quoted, state}
end
defp maybe_correct_end_of_expression({form, meta, args} = quoted, last_line) do
meta =
if meta[:end_of_expression] || has_trailing_comments?(quoted) do
eoe = meta[:end_of_expression] || []
eoe = Keyword.put(eoe, :line, last_line)
Keyword.put(meta, :end_of_expression, eoe)
else
meta
end
{form, meta, args}
end
defp maybe_correct_end({form, meta, args}, last_line) do
meta =
if meta[:end] do
put_in(meta, [:end, :line], last_line)
else
meta
end
{form, meta, args}
end
defp maybe_correct_closing({form, meta, args}, last_line) do
meta =
cond do
meta[:do] ->
meta
meta[:closing] ->
put_in(meta, [:closing, :line], last_line)
true ->
meta
end
{form, meta, args}
end
def has_comments?(quoted) do
has_leading_comments?(quoted) or has_trailing_comments?(quoted)
end
def has_leading_comments?({_, meta, _}) do
match?([_ | _], meta[:leading_comments])
end
def has_trailing_comments?({_, meta, _}) do
match?([_ | _], meta[:trailing_comments])
end
defp recursive_correct_lines(ast, line_correction) do
Macro.postwalk(ast, fn
{_, _, _} = ast ->
correct_lines(ast, line_correction)
ast ->
ast
end)
end
end
|
lib/sourceror/lines_corrector.ex
| 0.658637
| 0.567937
|
lines_corrector.ex
|
starcoder
|
defmodule Tensorflow.Eager.Operation.Input do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
item: {atom, any}
}
defstruct [:item]
oneof(:item, 0)
field(:remote_handle, 1, type: Tensorflow.Eager.RemoteTensorHandle, oneof: 0)
field(:tensor, 2, type: Tensorflow.TensorProto, oneof: 0)
end
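# Oneof sketch (hedged): with the elixir `protobuf` library, a oneof member is
# set as a tagged tuple under the oneof's field name:
#
#   %Tensorflow.Eager.Operation.Input{item: {:tensor, %Tensorflow.TensorProto{}}}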
defmodule Tensorflow.Eager.Operation.AttrsEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: Tensorflow.AttrValue.t() | nil
}
defstruct [:key, :value]
field(:key, 1, type: :string)
field(:value, 2, type: Tensorflow.AttrValue)
end
defmodule Tensorflow.Eager.Operation do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
id: integer,
name: String.t(),
op_inputs: [Tensorflow.Eager.Operation.Input.t()],
control_op_ids: [integer],
attrs: %{String.t() => Tensorflow.AttrValue.t() | nil},
device: String.t(),
is_component_function: boolean,
func_step_id: integer,
is_function: boolean
}
defstruct [
:id,
:name,
:op_inputs,
:control_op_ids,
:attrs,
:device,
:is_component_function,
:func_step_id,
:is_function
]
field(:id, 1, type: :int64)
field(:name, 2, type: :string)
field(:op_inputs, 10, repeated: true, type: Tensorflow.Eager.Operation.Input)
field(:control_op_ids, 4, repeated: true, type: :int64)
field(:attrs, 5,
repeated: true,
type: Tensorflow.Eager.Operation.AttrsEntry,
map: true
)
field(:device, 6, type: :string)
field(:is_component_function, 7, type: :bool)
field(:func_step_id, 8, type: :int64)
field(:is_function, 9, type: :bool)
end
defmodule Tensorflow.Eager.QueueItem do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
item: {atom, any}
}
defstruct [:item]
oneof(:item, 0)
field(:handle_to_decref, 1,
type: Tensorflow.Eager.RemoteTensorHandle,
oneof: 0
)
field(:operation, 2, type: Tensorflow.Eager.Operation, oneof: 0)
field(:send_tensor, 3, type: Tensorflow.Eager.SendTensorOp, oneof: 0)
field(:register_function, 4,
type: Tensorflow.Eager.RegisterFunctionOp,
oneof: 0
)
field(:cleanup_function, 5,
type: Tensorflow.Eager.CleanupFunctionOp,
oneof: 0
)
field(:sync_remote_executor_for_stream, 6,
type: Tensorflow.Eager.SyncRemoteExecutorForStream,
oneof: 0
)
field(:send_packed_handle, 7,
type: Tensorflow.Eager.SendPackedHandleOp,
oneof: 0
)
end
defmodule Tensorflow.Eager.QueueResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
shape: [Tensorflow.TensorShapeProto.t()],
device: [String.t()],
tensor: [Tensorflow.TensorProto.t()]
}
defstruct [:shape, :device, :tensor]
field(:shape, 1, repeated: true, type: Tensorflow.TensorShapeProto)
field(:device, 3, repeated: true, type: :string)
field(:tensor, 2, repeated: true, type: Tensorflow.TensorProto)
end
defmodule Tensorflow.Eager.CreateContextRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
server_def: Tensorflow.ServerDef.t() | nil,
async: boolean,
keep_alive_secs: integer,
version_def: Tensorflow.VersionDef.t() | nil,
cluster_device_attributes: [Tensorflow.DeviceAttributes.t()],
context_id: non_neg_integer,
context_view_id: non_neg_integer,
lazy_copy_remote_function_inputs: boolean
}
defstruct [
:server_def,
:async,
:keep_alive_secs,
:version_def,
:cluster_device_attributes,
:context_id,
:context_view_id,
:lazy_copy_remote_function_inputs
]
field(:server_def, 1, type: Tensorflow.ServerDef)
field(:async, 2, type: :bool)
field(:keep_alive_secs, 3, type: :int64)
field(:version_def, 4, type: Tensorflow.VersionDef)
field(:cluster_device_attributes, 6,
repeated: true,
type: Tensorflow.DeviceAttributes
)
field(:context_id, 7, type: :fixed64)
field(:context_view_id, 8, type: :fixed64)
field(:lazy_copy_remote_function_inputs, 9, type: :bool)
end
defmodule Tensorflow.Eager.CreateContextResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
device_attributes: [Tensorflow.DeviceAttributes.t()]
}
defstruct [:device_attributes]
field(:device_attributes, 2,
repeated: true,
type: Tensorflow.DeviceAttributes
)
end
defmodule Tensorflow.Eager.UpdateContextRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
server_def: Tensorflow.ServerDef.t() | nil,
cluster_device_attributes: [Tensorflow.DeviceAttributes.t()],
context_id: non_neg_integer,
context_view_id: non_neg_integer
}
defstruct [
:server_def,
:cluster_device_attributes,
:context_id,
:context_view_id
]
field(:server_def, 1, type: Tensorflow.ServerDef)
field(:cluster_device_attributes, 2,
repeated: true,
type: Tensorflow.DeviceAttributes
)
field(:context_id, 3, type: :fixed64)
field(:context_view_id, 4, type: :fixed64)
end
defmodule Tensorflow.Eager.UpdateContextResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
device_attributes: [Tensorflow.DeviceAttributes.t()]
}
defstruct [:device_attributes]
field(:device_attributes, 1,
repeated: true,
type: Tensorflow.DeviceAttributes
)
end
defmodule Tensorflow.Eager.EnqueueRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_id: non_neg_integer,
queue: [Tensorflow.Eager.QueueItem.t()]
}
defstruct [:context_id, :queue]
field(:context_id, 1, type: :fixed64)
field(:queue, 3, repeated: true, type: Tensorflow.Eager.QueueItem)
end
defmodule Tensorflow.Eager.EnqueueResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
queue_response: [Tensorflow.Eager.QueueResponse.t()]
}
defstruct [:queue_response]
field(:queue_response, 1,
repeated: true,
type: Tensorflow.Eager.QueueResponse
)
end
defmodule Tensorflow.Eager.WaitQueueDoneRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_id: non_neg_integer,
op_id: [integer]
}
defstruct [:context_id, :op_id]
field(:context_id, 1, type: :fixed64)
field(:op_id, 2, repeated: true, type: :int64)
end
defmodule Tensorflow.Eager.WaitQueueDoneResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.Eager.RunComponentFunctionRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_id: non_neg_integer,
operation: Tensorflow.Eager.Operation.t() | nil,
output_num: [integer]
}
defstruct [:context_id, :operation, :output_num]
field(:context_id, 1, type: :fixed64)
field(:operation, 2, type: Tensorflow.Eager.Operation)
field(:output_num, 3, repeated: true, type: :int32)
end
defmodule Tensorflow.Eager.RunComponentFunctionResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
shape: [Tensorflow.TensorShapeProto.t()],
tensor: [Tensorflow.TensorProto.t()]
}
defstruct [:shape, :tensor]
field(:shape, 1, repeated: true, type: Tensorflow.TensorShapeProto)
field(:tensor, 2, repeated: true, type: Tensorflow.TensorProto)
end
defmodule Tensorflow.Eager.KeepAliveRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_id: non_neg_integer
}
defstruct [:context_id]
field(:context_id, 1, type: :fixed64)
end
defmodule Tensorflow.Eager.KeepAliveResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_view_id: non_neg_integer
}
defstruct [:context_view_id]
field(:context_view_id, 1, type: :fixed64)
end
defmodule Tensorflow.Eager.CloseContextRequest do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
context_id: non_neg_integer,
context_view_id: non_neg_integer
}
defstruct [:context_id, :context_view_id]
field(:context_id, 1, type: :fixed64)
field(:context_view_id, 2, type: :fixed64)
end
defmodule Tensorflow.Eager.CloseContextResponse do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.Eager.RegisterFunctionOp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
function_def: Tensorflow.FunctionDef.t() | nil,
is_component_function: boolean,
library: Tensorflow.FunctionDefLibrary.t() | nil
}
defstruct [:function_def, :is_component_function, :library]
field(:function_def, 1, type: Tensorflow.FunctionDef)
field(:is_component_function, 2, type: :bool)
field(:library, 3, type: Tensorflow.FunctionDefLibrary)
end
defmodule Tensorflow.Eager.CleanupFunctionOp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
step_id: integer
}
defstruct [:step_id]
field(:step_id, 1, type: :int64)
end
defmodule Tensorflow.Eager.SyncRemoteExecutorForStream do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{}
defstruct []
end
defmodule Tensorflow.Eager.SendTensorOp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
op_id: integer,
tensors: [Tensorflow.TensorProto.t()],
device_name: String.t()
}
defstruct [:op_id, :tensors, :device_name]
field(:op_id, 1, type: :int64)
field(:tensors, 2, repeated: true, type: Tensorflow.TensorProto)
field(:device_name, 3, type: :string)
end
defmodule Tensorflow.Eager.SendPackedHandleOp.LocalTensorHandle do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
tensor: Tensorflow.TensorProto.t() | nil,
device: String.t()
}
defstruct [:tensor, :device]
field(:tensor, 1, type: Tensorflow.TensorProto)
field(:device, 2, type: :string)
end
defmodule Tensorflow.Eager.SendPackedHandleOp.Handle do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
item: {atom, any}
}
defstruct [:item]
oneof(:item, 0)
field(:local_handle, 1,
type: Tensorflow.Eager.SendPackedHandleOp.LocalTensorHandle,
oneof: 0
)
field(:remote_handle, 2, type: Tensorflow.Eager.RemoteTensorHandle, oneof: 0)
end
defmodule Tensorflow.Eager.SendPackedHandleOp do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
op_id: integer,
handles: [Tensorflow.Eager.SendPackedHandleOp.Handle.t()],
device_name: String.t()
}
defstruct [:op_id, :handles, :device_name]
field(:op_id, 1, type: :int64)
field(:handles, 2,
repeated: true,
type: Tensorflow.Eager.SendPackedHandleOp.Handle
)
field(:device_name, 3, type: :string)
end
|
lib/tensorflow/core/protobuf/eager_service.pb.ex
| 0.818374
| 0.678533
|
eager_service.pb.ex
|
starcoder
|
defmodule Configure.Persist do
@moduledoc """
Persist and broadcast changes to settings.
Implemented as a simple GenServer that reads and caches state. For this purpose it does not
really matter that the process is a bottleneck - highly concurrent access is not needed in the
way it would be on an internet-facing server. The bottleneck (for setting values) at least
ensures that the file is only written to sequentially.
"""
use GenServer
@behaviour Configure.Settings
alias Configure.{Events, Settings}
require Logger
keys = [:filename, :update_topic, :settings]
@enforce_keys keys
defstruct keys
@type t :: %__MODULE__{
filename: String.t(),
update_topic: String.t(),
settings: map()
}
def start_link({_filename, _update_topic, name} = args) do
opts =
if name do
[name: name]
else
[]
end
GenServer.start_link(__MODULE__, args, opts)
end
def init({filename, update_topic, _}) do
{:ok,
%__MODULE__{
filename: filename,
update_topic: update_topic,
settings: read_settings(filename)
}}
end
def get(server, key) do
GenServer.call(server, {:get, key})
end
def all_settings(server) do
GenServer.call(server, :all_settings)
end
def set(server, key, value) do
GenServer.cast(server, {:set, key, value})
end
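# Usage sketch (all names illustrative): start a named server, then set and
# read a value through it.
#
#   {:ok, _pid} =
#     Configure.Persist.start_link({"/tmp/settings.bin", "settings:updated", MyApp.Settings})
#   :ok = Configure.Persist.set(MyApp.Settings, :theme, :dark)
#   Configure.Persist.get(MyApp.Settings, :theme) #=> :dark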
def handle_call({:get, key}, _, %{settings: settings} = state) do
{:reply, Map.get(settings, key), state}
end
def handle_call(:all_settings, _, %{settings: settings} = state) do
{:reply, settings, state}
end
def handle_cast(
{:set, key, value},
%{filename: filename, settings: settings, update_topic: update_topic} = state
) do
Events.broadcast(update_topic, {:updated_config, key, value})
updated_settings = %{settings | key => value}
File.write(filename, :erlang.term_to_binary(updated_settings))
{:noreply, %{state | settings: updated_settings}}
end
defp read_settings(file) do
with {:ok, binary} <- File.read(file),
%{} = settings <- decode_file_contents(binary) do
Map.merge(default_settings(), settings)
else
{:error, :enoent} ->
# Not been set - it's fine
default_settings()
other ->
Logger.error("Unexpected problem reading configure settings file: #{inspect(other)}")
default_settings()
end
end
defp default_settings do
Settings.default_settings()
end
defp decode_file_contents(binary) do
try do
:erlang.binary_to_term(binary)
rescue
ArgumentError ->
"Corrupt file"
end
end
end
|
apps/configure/lib/configure/persist.ex
| 0.63307
| 0.435001
|
persist.ex
|
starcoder
|
defmodule Plymio.Vekil.Form do
@moduledoc ~S"""
This module implements the `Plymio.Vekil` protocol using a `Map` where the
*proxies* (`keys`) are atoms and the *foroms* (`values`) hold quoted forms.
The default when creating a **form** *vekil* is to create
`Plymio.Vekil.Forom.Form` *forom* but any *vekil* can hold any
*forom*.
See `Plymio.Vekil` for the definitions of the protocol functions.
## Module State
See `Plymio.Vekil` for the common fields.
The module's state is held in a `struct` with the following field(s):
| Field | Aliases | Purpose |
| :--- | :--- | :--- |
| `:dict` | *:d* | *holds the map of proxies v forom* |
| `:forom_normalise` | | *see field description* |
| `:proxy_normalise` | | *see field description* |
### Module State Field: `:forom_normalise`
The `:forom_normalise` field holds an arity 1 or 2 function.
If it is arity 2, it is passed the same arguments
as the call to the *vekil's* `forom_normalise/2` function and must return `{:ok, {forom, vekil}}`.
If it is arity 1, just the second argument from the call to the
*vekil's* `forom_normalise/2` function is passed and must return
`{:ok, forom}`.
The default for this *vekil* is `Plymio.Vekil.Forom.Form.normalise/1`.
### Module State Field: `:proxy_normalise`
The `:proxy_normalise` field holds an arity 1 function that is
usually passed the second argument from the call to the *vekil's*
`proxy_normalise/2` function.
The default for this *vekil* is `Plymio.Vekil.PVO.pvo_validate_atom_proxy/1`.
## Test Environment
See also notes in `Plymio.Vekil`.
The vekil created in the example below of `new/1` is returned by
`vekil_helper_form_vekil_example1/0`.
iex> {:ok, vekil} = new()
...> dict = [
...> x_add_1: quote(do: x = x + 1),
...> x_mul_x: quote(do: x = x * x),
...> x_sub_1: quote(do: x = x - 1),
...> x_funs: [:x_add_1, :x_mul_x, :x_sub_1],
...> x_loop: [:x_add_1, :x_loop, :x_sub_1]
...> ]
...> {:ok, vekil} = vekil |> update(dict: dict)
...> match?(%VEKILFORM{}, vekil)
true
"""
require Plymio.Fontais.Option
require Plymio.Fontais.Guard
require Plymio.Fontais.Vekil.ProxyForomDict, as: PROXYFOROMDICT
use Plymio.Fontais.Attribute
use Plymio.Vekil.Attribute
@type t :: %__MODULE__{}
@type form :: Plymio.Fontais.form()
@type forms :: Plymio.Fontais.forms()
@type proxy :: Plymio.Fontais.key()
@type proxies :: Plymio.Fontais.keys()
@type forom :: struct
@type opts :: Plymio.Fontais.opts()
@type error :: Plymio.Fontais.error()
@type kv :: Plymio.Fontais.kv()
@type product :: Plymio.Fontais.product()
import Plymio.Fontais.Error,
only: [
new_error_result: 1
],
warn: false
import Plymio.Fontais.Option,
only: [
opts_create_aliases_dict: 1,
opts_canonical_keys: 2
]
@plymio_fontais_vekil_kvs_aliases [
# struct
@plymio_vekil_field_alias_dict,
@plymio_vekil_field_alias_proxy_normalise,
@plymio_vekil_field_alias_forom_normalise,
@plymio_fontais_field_alias_protocol_name,
@plymio_fontais_field_alias_protocol_impl
]
@plymio_fontais_vekil_dict_aliases @plymio_fontais_vekil_kvs_aliases
|> opts_create_aliases_dict
@doc false
def update_canonical_opts(opts, dict \\ @plymio_fontais_vekil_dict_aliases) do
opts |> opts_canonical_keys(dict)
end
@plymio_fontais_vekil_defstruct [
{@plymio_vekil_field_dict, @plymio_fontais_the_unset_value},
{@plymio_vekil_field_forom_normalise, &Plymio.Vekil.Forom.Form.normalise/1},
{@plymio_vekil_field_proxy_normalise, &Plymio.Vekil.PVO.pvo_validate_atom_proxy/1},
{@plymio_fontais_field_protocol_name, Plymio.Vekil},
{@plymio_fontais_field_protocol_impl, __MODULE__}
]
defstruct @plymio_fontais_vekil_defstruct
@doc_new ~S"""
`new/1` takes an optional *opts* and creates a new **form** *vekil*
returning `{:ok, vekil}`.
## Examples
iex> {:ok, vekil} = new()
...> match?(%VEKILFORM{}, vekil)
true
`Plymio.Vekil.Utility.vekil?/1` returns `true` if the value implements `Plymio.Vekil`
iex> {:ok, vekil} = new()
...> vekil |> Plymio.Vekil.Utility.vekil?
true
The vekil dictionary can be supplied as a `Map` or `Keyword`. It
will be validated to ensure all the *proxies* are atoms and all the
*forom* are valid *forms*.
iex> {:ok, vekil} = [dict: [
...> x_add_1: quote(do: x = x + 1),
...> x_mul_x: quote(do: x = x * x),
...> x_sub_1: quote(do: x = x - 1),
...> x_funs: [:x_add_1, :x_mul_x, :x_sub_1]
...> ]] |> new()
...> match?(%VEKILFORM{}, vekil)
true
In the above example the `:x_funs` *proxy* in the *vekil* dictionary
was given a list of 3 atoms: `[:x_add_1, :x_mul_x, :x_sub_1]`. Each
of the atoms is treated as a reference to another *proxy* in the
*vekil* and are normalised into instances of
`Plymio.Vekil.Forom.Proxy` (a *proxy forom*).
List of atoms provide a simple way of defining a composite *forom*
out of constituent *forom*.
See the e.g. `proxy_fetch/2` examples for how this works in practice.
"""
@doc_update ~S"""
`update/2` takes a *vekil* and *opts* and update the field(s) in the
*vekil* from the `{field,value}` typles in the *opts*.
## Examples
iex> {:ok, vekil} = new()
...> dict = [
...> x_add_1: quote(do: x = x + 1),
...> x_mul_x: quote(do: x = x * x),
...> x_sub_1: quote(do: x = x - 1),
...> x_funs: [:x_add_1, :x_mul_x, :x_sub_1]
...> ]
...> {:ok, vekil} = vekil |> update(dict: dict)
...> match?(%VEKILFORM{}, vekil)
true
"""
@doc_proxy_get2 ~S"""
See `Plymio.Vekil.proxy_get/2`
## Examples
A single known, *proxy* is requested with no default
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get(:x_add_1)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{8, ["x = x + 1"]}
Two known *proxies* are requested:
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get([:x_mul_x, :x_add_1])
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{50, ["x = x * x", "x = x + 1"]}
A single unknown, *proxy* is requested with no default. Note the use of
`:realise_default` to provide a backstop default.
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get(:not_a_proxy)
...> {:ok, {forms, _}} = forom
...> |> FOROMPROT.realise(realise_default: quote(do: x = x + 35))
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{42, ["x = x + 35"]}
"""
@doc_proxy_get3 ~S"""
See `Plymio.Vekil.proxy_get/3`
## Examples
A single unknown *proxy* is requested with a default:
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get(:not_a_proxy, quote(do: x = x * x * x))
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{343, ["x = x * x * x"]}
A mix of known and unknown *proxies*, together with a default:
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get([:missing_proxy, :x_sub_1, :not_a_proxy], quote(do: x = x * x * x))
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 2])
{343, ["x = x * x * x", "x = x - 1", "x = x * x * x"]}
The default is not a valid *form(s)*
iex> {:error, error} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_get(:not_a_proxy, %{a: 1})
...> error |> Exception.message
"default invalid, got: %{a: 1}"
"""
@doc_proxy_fetch ~S"""
See `Plymio.Vekil.proxy_fetch/2`.
## Examples
A single *proxy* is fetched:
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch(:x_add_1)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{8, ["x = x + 1"]}
Two *proxies* are fetched:
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch([:x_mul_x, :x_add_1])
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{50, ["x = x * x", "x = x + 1"]}
*proxies* is nil / empty. Note the use and override of `:realise_default`.
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch(nil)
...> {:ok, {value, _}} = forom |> FOROMPROT.realise
...> value |> Plymio.Fontais.Guard.is_value_unset
true
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch([])
...> {:ok, {forms, _}} = forom
...> |> FOROMPROT.realise(realise_default: quote(do: x = x + 35))
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{42, ["x = x + 35"]}
One or more *proxies* not found
iex> {:error, error} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch(:not_a_proxy)
...> error |> Exception.message
"proxy invalid, got: :not_a_proxy"
iex> {:error, error} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch([:missing_proxy, :x_sub_1, :not_a_proxy])
...> error |> Exception.message
"proxies invalid, got: [:missing_proxy, :not_a_proxy]"
Proxy loops are caught:
iex> {:ok, %VEKILFORM{} = vekil} = [dict: [
...> x_add_1: quote(do: x = x + 1),
...> x_mul_x: quote(do: x = x * x),
...> x_sub_1: quote(do: x = x - 1),
...> x_loopa: [:x_add_1, :x_loopb, :x_sub_1],
...> x_loopb: [:x_add_1, :x_sub_1, :x_loopc],
...> x_loopc: [:x_loopa, :x_add_1, :x_sub_1],
...> ]] |> VEKILFORM.new()
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.proxy_fetch(:x_loopa)
...> {:error, error} = forom |> FOROMPROT.realise
...> error |> Exception.message
"proxy seen before, got: :x_loopa"
In this example the *proxy* is `:x_funs` which was defined in the *vekil*
dictionary as a list of 3 atoms: `[:x_add_1, :x_mul_x, :x_sub_1]`.
Each of the atoms is treated as a reference to another *proxy* in
the *vekil* and are normalised into instances of
`Plymio.Vekil.Forom.Proxy` (a *proxy forom*). Since the fetch must
return a *forom*, the 3 *proxy foroms* are normalised to a
`Plymio.Vekil.Forom.List` for the result.
When a *proxy forom* is realised, the original atom *proxy*
(e.g. `:x_sub_1`) is used in a `proxy_fetch/2` and the *forom*
returned by the fetch is realised.
iex> {:ok, {forom, %VEKILFORM{}}} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_fetch(:x_funs)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{63, ["x = x + 1", "x = x * x", "x = x - 1"]}
"""
@doc_proxy_put2 ~S"""
See `Plymio.Vekil.proxy_put/2`
## Examples
A list of `{proxy,form}` tuples can be given. Since a **form** *vekil's* *proxy* is
an atom, `Keyword` syntax can be used here:
iex> {:ok, %VEKILFORM{} = vekil} = VEKILFORM.new()
...> {:ok, %VEKILFORM{} = vekil} = vekil |> VEKILPROT.proxy_put(
...> x_add_1: quote(do: x = x + 1),
...> x_mul_x: quote(do: x = x * x),
...> x_sub_1: quote(do: x = x - 1),
...> x_funs: [:x_add_1, :x_mul_x, :x_sub_1])
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.proxy_fetch(:x_funs)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{63, ["x = x + 1", "x = x * x", "x = x - 1"]}
"""
@doc_proxy_put3 ~S"""
See `Plymio.Vekil.proxy_put/3`
## Examples
This example puts a *proxy* into an empty *vekil* and then fetches it.
iex> {:ok, %VEKILFORM{} = vekil} = VEKILFORM.new()
...> {:ok, %VEKILFORM{} = vekil} = vekil
...> |> VEKILPROT.proxy_put(:x_add_1, quote(do: x = x + 1))
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.proxy_fetch(:x_add_1)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{8, ["x = x + 1"]}
The *proxy* can have/put multiple forms:
iex> {:ok, %VEKILFORM{} = vekil} = VEKILFORM.new()
...> {:ok, %VEKILFORM{} = vekil} = vekil |> VEKILPROT.proxy_put(:x_add_mul_sub,
...> [quote(do: x = x + 1), quote(do: x = x * x), quote(do: x = x - 1)])
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.proxy_fetch(:x_add_mul_sub)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{63, ["x = x + 1", "x = x * x", "x = x - 1"]}
"""
@doc_proxy_delete ~S"""
See `Plymio.Vekil.proxy_delete/2`
Note proxies are normalised.
## Examples
Here a known *proxy* is deleted and then fetched, causing an error:
iex> {:ok, %VEKILFORM{} = vekil} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_delete(:x_sub_1)
...> {:error, error} = vekil |> VEKILPROT.proxy_fetch([:x_add_1, :x_sub_1])
...> error |> Exception.message
"proxy invalid, got: :x_sub_1"
This example deletes `:x_mul_x` and but provides `quote(do: x = x *
x * x)` as the default in the following get:
iex> {:ok, %VEKILFORM{} = vekil} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_delete(:x_mul_x)
...> {:ok, {forom, %VEKILFORM{}}} = vekil
...> |> VEKILPROT.proxy_get([:x_add_1, :x_mul_x, :x_sub_1], quote(do: x = x * x * x))
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{511, ["x = x + 1", "x = x * x * x", "x = x - 1"]}
Deleting unknown *proxies* does not cause an error:
iex> {:ok, %VEKILFORM{} = vekil} = vekil_helper_form_vekil_example1()
...> |> VEKILPROT.proxy_delete([:x_sub_1, :not_a_proxy, :x_mul_x])
...> vekil |> Plymio.Vekil.Utility.vekil?
true
"""
@doc_has_proxy? ~S"""
See `Plymio.Vekil.has_proxy?/2`
Note: the *proxy* is not normalised in any way.
## Examples
Here a known *proxy* is tested for:
iex> vekil_helper_form_vekil_example1()
...> |> VEKILPROT.has_proxy?(:x_sub_1)
true
An unknown *proxy* returns `false`
iex> vekil_helper_form_vekil_example1()
...> |> VEKILPROT.has_proxy?(:not_a_proxy)
false
iex> vekil_helper_form_vekil_example1()
...> |> VEKILPROT.has_proxy?(%{a: 1})
false
"""
@doc_forom_normalise ~S"""
See `Plymio.Vekil.forom_normalise/2`
The default action is to create a `Plymio.Vekil.Forom.Form` forom,
## Examples
Here the value being normalised is a simple statement:
iex> %VEKILFORM{} = vekil = vekil_helper_form_vekil_example1()
...> value = quote(do: x = x + 1)
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.forom_normalise(value)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 2])
{3, ["x = x + 1"]}
An existing *forom* is returned unchanged.
iex> %VEKILFORM{} = vekil = vekil_helper_form_vekil_example1()
...> {:ok, %Plymio.Vekil.Forom.Form{} = forom} = quote(do: x = x + 1)
...> |> Plymio.Vekil.Forom.Form.normalise
...> {:ok, {forom, %VEKILFORM{}}} = vekil |> VEKILPROT.forom_normalise(forom)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 2])
{3, ["x = x + 1"]}
In this example the value is `:x_funs` which means it is a proxy and
is normalised into a *proxy forom*. When the *proxy forom* is
realised, the original atom (i.e. `:x_funs`) is used in a
`proxy_fetch/2` and the *forom* from the fetch realised.
iex> %VEKILFORM{} = vekil = vekil_helper_form_vekil_example1()
...> value = :x_funs
...> {:ok, {%Plymio.Vekil.Forom.Proxy{} = forom, %VEKILFORM{}}} = vekil
...> |> VEKILPROT.forom_normalise(value)
...> {:ok, {forms, _}} = forom |> FOROMPROT.realise
...> forms |> harnais_helper_test_forms!(binding: [x: 7])
{63, ["x = x + 1", "x = x * x", "x = x - 1"]}
"""
@vekil [
Plymio.Vekil.Codi.Dict.__vekil__(),
# overrides to the defaults
%{
state_def_new_doc: quote(do: @doc(unquote(@doc_new))),
state_def_update_doc: quote(do: @doc(unquote(@doc_update))),
# protocol function docs
vekil_def_proxy_get2_doc: quote(do: @doc(unquote(@doc_proxy_get2))),
vekil_def_proxy_get3_doc: quote(do: @doc(unquote(@doc_proxy_get3))),
vekil_def_proxy_fetch_doc: quote(do: @doc(unquote(@doc_proxy_fetch))),
vekil_def_proxy_put2_doc: quote(do: @doc(unquote(@doc_proxy_put2))),
vekil_def_proxy_put3_doc: quote(do: @doc(unquote(@doc_proxy_put3))),
vekil_def_proxy_delete_doc: quote(do: @doc(unquote(@doc_proxy_delete))),
vekil_def_has_proxy_doc?: quote(do: @doc(unquote(@doc_has_proxy?))),
vekil_def_forom_normalise_doc: quote(do: @doc(unquote(@doc_forom_normalise)))
}
]
|> PROXYFOROMDICT.create_proxy_forom_dict!()
@vekil_proxies [
:state_base_package,
:state_defp_update_field_header,
:state_vekil_dict_defp_update_field_dict_normalise_vekil_dict,
:state_vekil_dict_defp_update_field_normalise_proxy_or_normalise_forom,
:vekil_dict_defp_normalise_simple_dict,
:vekil_dict_defp_reduce_gather_opts,
:vekil_proxy_def_proxy_normalise,
:vekil_proxy_def_proxies_normalise,
:vekil_defp_forom_value_normalise,
# protocol functions
:vekil_dict_def_proxy_get,
:vekil_dict_def_proxy_fetch,
:vekil_dict_def_proxy_put,
:vekil_dict_def_proxy_delete,
:vekil_dict_def_has_proxy?,
:vekil_dict_form_def_forom_normalise
]
@codi_opts [
{@plymio_fontais_key_dict, @vekil}
]
@vekil_proxies
|> PROXYFOROMDICT.reify_proxies(@codi_opts)
end
defimpl Plymio.Vekil, for: Plymio.Vekil.Form do
@funs :functions
|> @protocol.__info__
|> Keyword.drop([:__protocol__, :impl_for, :impl_for!])
for {fun, arity} <- @funs do
defdelegate unquote(fun)(unquote_splicing(Macro.generate_arguments(arity, nil))), to: @for
end
end
defimpl Inspect, for: Plymio.Vekil.Form do
use Plymio.Vekil.Attribute
import Plymio.Fontais.Guard,
only: [
is_value_unset_or_nil: 1
]
def inspect(%Plymio.Vekil.Form{@plymio_vekil_field_dict => dict}, _opts) do
dict_telltale =
dict
|> case do
x when is_value_unset_or_nil(x) -> nil
x when is_map(x) -> "D=#{inspect(map_size(x))}"
_ -> "D=?"
end
keys_telltale =
dict
|> case do
x when is_map(x) ->
case x |> map_size do
0 -> nil
n when n in [1, 2, 3, 4, 5] -> x |> Map.keys() |> Enum.join("/")
_ -> nil
end
_ ->
nil
end
vekil_telltale =
[
dict_telltale,
keys_telltale
]
|> List.flatten()
|> Enum.reject(&is_nil/1)
|> Enum.join("; ")
"VEKILForm(#{vekil_telltale})"
end
end
|
lib/vekil/concrete/vekil/form.ex
| 0.870803
| 0.705075
|
form.ex
|
starcoder
|
defmodule AWS.CloudFront do
@moduledoc """
Amazon CloudFront
This is the *Amazon CloudFront API Reference*. This guide is for developers
who need detailed information about CloudFront API actions, data types, and
errors. For detailed information about CloudFront features, see the *Amazon
CloudFront Developer Guide*.
"""
@doc """
Creates a cache policy.
After you create a cache policy, you can attach it to one or more cache
behaviors. When it’s attached to a cache behavior, the cache policy
determines the following:
<ul> <li> The values that CloudFront includes in the *cache key*. These
values can include HTTP headers, cookies, and URL query strings. CloudFront
uses the cache key to find an object in its cache that it can return to the
viewer.
</li> <li> The default, minimum, and maximum time to live (TTL) values that
you want objects to stay in the CloudFront cache.
</li> </ul> The headers, cookies, and query strings that are included in
the cache key are automatically included in requests that CloudFront sends
to the origin. CloudFront sends a request when it can’t find an object in
its cache that matches the request’s cache key. If you want to send values
to the origin but *not* include them in the cache key, use
`CreateOriginRequestPolicy`.
For more information about cache policies, see [Controlling the cache
key](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_cache_policy(client, input, options \\ []) do
path_ = "/2020-05-31/cache-policy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
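# Call sketch (hedged): `client` is the AWS client struct used throughout this
# module; the input key mirrors the CachePolicyConfig element named in the
# docs, and `cache_policy_config` is a hypothetical map:
#
#   {:ok, body, _resp} =
#     AWS.CloudFront.create_cache_policy(client, %{"CachePolicyConfig" => cache_policy_config})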
@doc """
Creates a new origin access identity. If you're using Amazon S3 for your
origin, you can use an origin access identity to require users to access
your content using a CloudFront URL instead of the Amazon S3 URL. For more
information about how to use origin access identities, see [Serving Private
Content through
CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_cloud_front_origin_access_identity(client, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new web distribution. You create a CloudFront distribution to
tell CloudFront where you want content to be delivered from, and the
details about how to track and manage content delivery. Send a `POST`
request to the `/*CloudFront API version*/distribution`/`distribution ID`
resource.
<important> When you update a distribution, there are more required fields
than when you create a distribution. When you update your distribution by
using
[UpdateDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html),
follow the steps included in the documentation to get the current
configuration and then make your updates. This helps to make sure that you
include all of the required fields. To view a summary, see [Required Fields
for Create Distribution and Update
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html)
in the *Amazon CloudFront Developer Guide*.
</important>
"""
def create_distribution(client, input, options \\ []) do
path_ = "/2020-05-31/distribution"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new distribution with tags.
"""
def create_distribution_with_tags(client, input, options \\ []) do
path_ = "/2020-05-31/distribution?WithTags"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new field-level encryption configuration.
"""
def create_field_level_encryption_config(client, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a field-level encryption profile.
"""
def create_field_level_encryption_profile(client, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new invalidation.
"""
def create_invalidation(client, distribution_id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates an origin request policy.
After you create an origin request policy, you can attach it to one or more
cache behaviors. When it’s attached to a cache behavior, the origin request
policy determines the values that CloudFront includes in requests that it
sends to the origin. Each request that CloudFront sends to the origin
includes the following:
<ul> <li> The request body and the URL path (without the domain name) from
the viewer request.
</li> <li> The headers that CloudFront automatically includes in every
origin request, including `Host`, `User-Agent`, and `X-Amz-Cf-Id`.
</li> <li> All HTTP headers, cookies, and URL query strings that are
specified in the cache policy or the origin request policy. These can
include items from the viewer request and, in the case of headers,
additional ones that are added by CloudFront.
</li> </ul> CloudFront sends a request when it can’t find a valid object in
its cache that matches the request. If you want to send values to the
origin and also include them in the cache key, use `CreateCachePolicy`.
For more information about origin request policies, see [Controlling origin
requests](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html)
in the *Amazon CloudFront Developer Guide*.
"""
def create_origin_request_policy(client, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Add a new public key to CloudFront to use, for example, for field-level
encryption. You can add a maximum of 10 public keys with one AWS account.
"""
def create_public_key(client, input, options \\ []) do
path_ = "/2020-05-31/public-key"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Creates a new RTMP distribution. An RTMP distribution is similar to a web
distribution, but an RTMP distribution streams media files using the Adobe
Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP.
To create a new streaming distribution, submit a `POST` request to the
*CloudFront API version*/streaming-distribution resource. The request body
must include a
document with a *StreamingDistributionConfig* element. The response echoes
the `StreamingDistributionConfig` element and returns other information
about the RTMP distribution.
To get the status of your request, use the *GET StreamingDistribution* API
action. When the value of `Enabled` is `true` and the value of `Status` is
`Deployed`, your distribution is ready. A distribution usually deploys in
less than 15 minutes.
For more information about RTMP distributions, see [Working with RTMP
Distributions](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html)
in the *Amazon CloudFront Developer Guide*.
<important> Beginning with the 2012-05-05 version of the CloudFront API, we
made substantial changes to the format of the XML document that you include
in the request body when you create or update a web distribution or an RTMP
distribution, and when you invalidate objects. With previous versions of
the API, we discovered that it was too easy to accidentally delete one or
more values for an element that accepts multiple values, for example,
CNAMEs and trusted signers. Our changes for the 2012-05-05 release are
intended to prevent these accidental deletions and to notify you when
there's a mismatch between the number of values you say you're specifying
in the `Quantity` element and the number of values specified.
</important>
"""
def create_streaming_distribution(client, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Create a new streaming distribution with tags.
"""
def create_streaming_distribution_with_tags(client, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution?WithTags"
headers = []
query_ = []
case request(client, :post, path_, query_, headers, input, options, 201) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
{"Location", "Location"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Deletes a cache policy.
You cannot delete a cache policy if it’s attached to a cache behavior.
First update your distributions to remove the cache policy from all cache
behaviors, then delete the cache policy.
To delete a cache policy, you must provide the policy’s identifier and
version. To get these values, you can use `ListCachePolicies` or
`GetCachePolicy`.
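A minimal sketch, assuming this module compiles as `AWS.CloudFront` (the
`ETag` value is merged into the body by `get_cache_policy/3`, as its
implementation below shows):

```elixir
{:ok, policy, _response} = AWS.CloudFront.get_cache_policy(client, id)

{:ok, _body, _response} =
  AWS.CloudFront.delete_cache_policy(client, id, %{"IfMatch" => policy["ETag"]})
```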
"""
def delete_cache_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Delete an origin access identity.
"""
def delete_cloud_front_origin_access_identity(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Delete a distribution.
"""
def delete_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a field-level encryption configuration.
"""
def delete_field_level_encryption_config(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a field-level encryption profile.
"""
def delete_field_level_encryption_profile(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Deletes an origin request policy.
You cannot delete an origin request policy if it’s attached to any cache
behaviors. First update your distributions to remove the origin request
policy from all cache behaviors, then delete the origin request policy.
To delete an origin request policy, you must provide the policy’s
identifier and version. To get the identifier, you can use
`ListOriginRequestPolicies` or `GetOriginRequestPolicy`.
"""
def delete_origin_request_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Remove a public key you previously added to CloudFront.
"""
def delete_public_key(client, id, input, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Delete a streaming distribution. To delete an RTMP distribution using the
CloudFront API, perform the following steps.
**To delete an RTMP distribution using the CloudFront API**:
<ol> <li> Disable the RTMP distribution.
</li> <li> Submit a `GET Streaming Distribution Config` request to get the
current configuration and the `Etag` header for the distribution.
</li> <li> Update the XML document that was returned in the response to
your `GET Streaming Distribution Config` request to change the value of
`Enabled` to `false`.
</li> <li> Submit a `PUT Streaming Distribution Config` request to update
the configuration for your distribution. In the request body, include the
XML document that you updated in Step 3. Then set the value of the HTTP
`If-Match` header to the value of the `ETag` header that CloudFront
returned when you submitted the `GET Streaming Distribution Config` request
in Step 2.
</li> <li> Review the response to the `PUT Streaming Distribution Config`
request to confirm that the distribution was successfully disabled.
</li> <li> Submit a `GET Streaming Distribution Config` request to confirm
that your changes have propagated. When propagation is complete, the value
of `Status` is `Deployed`.
</li> <li> Submit a `DELETE Streaming Distribution` request. Set the value
of the HTTP `If-Match` header to the value of the `ETag` header that
CloudFront returned when you submitted the `GET Streaming Distribution
Config` request in Step 2.
</li> <li> Review the response to your `DELETE Streaming Distribution`
request to confirm that the distribution was successfully deleted.
</li> </ol> For information about deleting a distribution using the
CloudFront console, see [Deleting a
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html)
in the *Amazon CloudFront Developer Guide*.
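A minimal sketch of these steps using the functions in this module (assuming
it compiles as `AWS.CloudFront`; the decoded body keys other than `"ETag"`
are assumptions about the XML responses):

```elixir
{:ok, body, _} = AWS.CloudFront.get_streaming_distribution_config(client, id)

disabled = Map.put(body["StreamingDistributionConfig"], "Enabled", "false")

{:ok, updated, _} =
  AWS.CloudFront.update_streaming_distribution(client, id, %{
    "StreamingDistributionConfig" => disabled,
    "IfMatch" => body["ETag"]
  })

# Once the distribution reports Status "Deployed", delete it using the
# latest ETag value.
{:ok, _, _} =
  AWS.CloudFront.delete_streaming_distribution(client, id, %{
    "IfMatch" => updated["ETag"]
  })
```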
"""
def delete_streaming_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
request(client, :delete, path_, query_, headers, input, options, 204)
end
@doc """
Gets a cache policy, including the following metadata:
<ul> <li> The policy’s identifier.
</li> <li> The date and time when the policy was last modified.
</li> </ul> To get a cache policy, you must provide the policy’s
identifier. If the cache policy is attached to a distribution’s cache
behavior, you can get the policy’s identifier using `ListDistributions` or
`GetDistribution`. If the cache policy is not attached to a cache behavior,
you can get the identifier using `ListCachePolicies`.
"""
def get_cache_policy(client, id, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a cache policy configuration.
To get a cache policy configuration, you must provide the policy’s
identifier. If the cache policy is attached to a distribution’s cache
behavior, you can get the policy’s identifier using `ListDistributions` or
`GetDistribution`. If the cache policy is not attached to a cache behavior,
you can get the identifier using `ListCachePolicies`.
"""
def get_cache_policy_config(client, id, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about an origin access identity.
"""
def get_cloud_front_origin_access_identity(client, id, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about an origin access identity.
"""
def get_cloud_front_origin_access_identity_config(client, id, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about a distribution.
"""
def get_distribution(client, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about a distribution.
"""
def get_distribution_config(client, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption configuration information.
"""
def get_field_level_encryption(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption configuration information.
"""
def get_field_level_encryption_config(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption profile information.
"""
def get_field_level_encryption_profile(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the field-level encryption profile configuration information.
"""
def get_field_level_encryption_profile_config(client, id, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the information about an invalidation.
"""
def get_invalidation(client, distribution_id, id, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation/#{URI.encode(id)}"
headers = []
query_ = []
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets an origin request policy, including the following metadata:
<ul> <li> The policy’s identifier.
</li> <li> The date and time when the policy was last modified.
</li> </ul> To get an origin request policy, you must provide the policy’s
identifier. If the origin request policy is attached to a distribution’s
cache behavior, you can get the policy’s identifier using
`ListDistributions` or `GetDistribution`. If the origin request policy is
not attached to a cache behavior, you can get the identifier using
`ListOriginRequestPolicies`.
"""
def get_origin_request_policy(client, id, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets an origin request policy configuration.
To get an origin request policy configuration, you must provide the
policy’s identifier. If the origin request policy is attached to a
distribution’s cache behavior, you can get the policy’s identifier using
`ListDistributions` or `GetDistribution`. If the origin request policy is
not attached to a cache behavior, you can get the identifier using
`ListOriginRequestPolicies`.
"""
def get_origin_request_policy_config(client, id, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the public key information.
"""
def get_public_key(client, id, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Returns the public key configuration information.
"""
def get_public_key_config(client, id, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets information about a specified RTMP distribution, including the
distribution configuration.
"""
def get_streaming_distribution(client, id, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Get the configuration information about a streaming distribution.
"""
def get_streaming_distribution_config(client, id, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}/config"
headers = []
query_ = []
case request(client, :get, path_, query_, headers, nil, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Gets a list of cache policies.
You can optionally apply a filter to return only the managed policies
created by AWS, or only the custom policies created in your AWS account.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
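A hypothetical pager over this list (the `"CachePolicyList"`, `"Items"`, and
`"NextMarker"` keys are assumptions about the decoded XML shape; the
`AWS.CloudFront` module name is assumed):

```elixir
def all_cache_policies(client, marker \\ nil, acc \\ []) do
  {:ok, body, _} = AWS.CloudFront.list_cache_policies(client, marker, "100")
  list = body["CachePolicyList"]
  acc = acc ++ List.wrap(list["Items"])

  case list["NextMarker"] do
    nil -> acc
    next -> all_cache_policies(client, next, acc)
  end
end
```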
"""
def list_cache_policies(client, marker \\ nil, max_items \\ nil, type \\ nil, options \\ []) do
path_ = "/2020-05-31/cache-policy"
headers = []
query_ = []
query_ = if !is_nil(type) do
[{"Type", type} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists origin access identities.
"""
def list_cloud_front_origin_access_identities(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List CloudFront distributions.
"""
def list_distributions(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distribution"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distribution IDs for distributions that have a cache
behavior that’s associated with the specified cache policy.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_cache_policy_id(client, cache_policy_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByCachePolicyId/#{URI.encode(cache_policy_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of distribution IDs for distributions that have a cache
behavior that’s associated with the specified origin request policy.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_distributions_by_origin_request_policy_id(client, origin_request_policy_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByOriginRequestPolicyId/#{URI.encode(origin_request_policy_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List the distributions that are associated with a specified AWS WAF web
ACL.
"""
def list_distributions_by_web_a_c_l_id(client, web_a_c_l_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distributionsByWebACLId/#{URI.encode(web_a_c_l_id)}"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all field-level encryption configurations that have been created in
CloudFront for this account.
"""
def list_field_level_encryption_configs(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/field-level-encryption"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Request a list of field-level encryption profiles that have been created in
CloudFront for this account.
"""
def list_field_level_encryption_profiles(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Lists invalidation batches.
"""
def list_invalidations(client, distribution_id, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(distribution_id)}/invalidation"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Gets a list of origin request policies.
You can optionally apply a filter to return only the managed policies
created by AWS, or only the custom policies created in your AWS account.
You can optionally specify the maximum number of items to receive in the
response. If the total number of items in the list exceeds the maximum that
you specify, or the default maximum, the response is paginated. To get the
next page of items, send a subsequent request that specifies the
`NextMarker` value from the current response as the `Marker` value in the
subsequent request.
"""
def list_origin_request_policies(client, marker \\ nil, max_items \\ nil, type \\ nil, options \\ []) do
path_ = "/2020-05-31/origin-request-policy"
headers = []
query_ = []
query_ = if !is_nil(type) do
[{"Type", type} | query_]
else
query_
end
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List all public keys that have been added to CloudFront for this account.
"""
def list_public_keys(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/public-key"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List streaming distributions.
"""
def list_streaming_distributions(client, marker \\ nil, max_items \\ nil, options \\ []) do
path_ = "/2020-05-31/streaming-distribution"
headers = []
query_ = []
query_ = if !is_nil(max_items) do
[{"MaxItems", max_items} | query_]
else
query_
end
query_ = if !is_nil(marker) do
[{"Marker", marker} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
List tags for a CloudFront resource.
"""
def list_tags_for_resource(client, resource, options \\ []) do
path_ = "/2020-05-31/tagging"
headers = []
query_ = []
query_ = if !is_nil(resource) do
[{"Resource", resource} | query_]
else
query_
end
request(client, :get, path_, query_, headers, nil, options, nil)
end
@doc """
Add tags to a CloudFront resource.
"""
def tag_resource(client, input, options \\ []) do
path_ = "/2020-05-31/tagging?Operation=Tag"
headers = []
{query_, input} =
[
{"Resource", "Resource"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Remove tags from a CloudFront resource.
"""
def untag_resource(client, input, options \\ []) do
path_ = "/2020-05-31/tagging?Operation=Untag"
headers = []
{query_, input} =
[
{"Resource", "Resource"},
]
|> AWS.Request.build_params(input)
request(client, :post, path_, query_, headers, input, options, 204)
end
@doc """
Updates a cache policy configuration.
When you update a cache policy configuration, all the fields are updated
with the values provided in the request. You cannot update some fields
independent of others. To update a cache policy configuration:
<ol> <li> Use `GetCachePolicyConfig` to get the current configuration.
</li> <li> Locally modify the fields in the cache policy configuration that
you want to update.
</li> <li> Call `UpdateCachePolicy` by providing the entire cache policy
configuration, including the fields that you modified and those that you
didn’t.
</li> </ol>
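A minimal sketch of this read-modify-write flow (the `"CachePolicyConfig"`
body key is an assumption about the decoded XML; `"ETag"` is merged into the
body by `get_cache_policy_config/3`, and the `AWS.CloudFront` module name is
assumed):

```elixir
{:ok, current, _} = AWS.CloudFront.get_cache_policy_config(client, id)

config = Map.put(current["CachePolicyConfig"], "Comment", "updated comment")

{:ok, _updated, _} =
  AWS.CloudFront.update_cache_policy(client, id, %{
    "CachePolicyConfig" => config,
    "IfMatch" => current["ETag"]
  })
```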
"""
def update_cache_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/cache-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update an origin access identity.
"""
def update_cloud_front_origin_access_identity(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-access-identity/cloudfront/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates the configuration for a web distribution.
<important> When you update a distribution, there are more required fields
than when you create a distribution. When you update your distribution by
using this API action, follow the steps here to get the current
configuration and then make your updates, to make sure that you include all
of the required fields. To view a summary, see [Required Fields for Create
Distribution and Update
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html)
in the *Amazon CloudFront Developer Guide*.
</important> The update process includes getting the current distribution
configuration, updating the XML document that is returned to make your
changes, and then submitting an `UpdateDistribution` request to make the
updates.
For information about updating a distribution using the CloudFront console
instead, see [Creating a
Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html)
in the *Amazon CloudFront Developer Guide*.
**To update a web distribution using the CloudFront API**
<ol> <li> Submit a
[GetDistributionConfig](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html)
request to get the current configuration and an `Etag` header for the
distribution.
<note> If you update the distribution again, you must get a new `Etag`
header.
</note> </li> <li> Update the XML document that was returned in the
response to your `GetDistributionConfig` request to include your changes.
<important> When you edit the XML file, be aware of the following:
<ul> <li> You must strip out the ETag parameter that is returned.
</li> <li> Additional fields are required when you update a distribution.
There may be fields included in the XML file for features that you haven't
configured for your distribution. This is expected and required to
successfully update the distribution.
</li> <li> You can't change the value of `CallerReference`. If you try to
change this value, CloudFront returns an `IllegalUpdate` error.
</li> <li> The new configuration replaces the existing configuration; the
values that you specify in an `UpdateDistribution` request are not merged
into your existing configuration. When you add, delete, or replace values
in an element that allows multiple values (for example, `CNAME`), you must
specify all of the values that you want to appear in the updated
distribution. In addition, you must update the corresponding `Quantity`
element.
</li> </ul> </important> </li> <li> Submit an `UpdateDistribution` request
to update the configuration for your distribution:
<ul> <li> In the request body, include the XML document that you updated in
Step 2. The request body must include an XML document with a
`DistributionConfig` element.
</li> <li> Set the value of the HTTP `If-Match` header to the value of the
`ETag` header that CloudFront returned when you submitted the
`GetDistributionConfig` request in Step 1.
</li> </ul> </li> <li> Review the response to the `UpdateDistribution`
request to confirm that the configuration was successfully updated.
</li> <li> Optional: Submit a
[GetDistribution](https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html)
request to confirm that your changes have propagated. When propagation is
complete, the value of `Status` is `Deployed`.
</li> </ol>
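The same read-modify-write flow sketched for `update_cache_policy/4` applies
here; a compressed sketch (the `"DistributionConfig"` body key and the
`AWS.CloudFront` module name are assumptions):

```elixir
{:ok, current, _} = AWS.CloudFront.get_distribution_config(client, id)

# Modify only what you need, but send back the full configuration,
# including fields you did not touch; "CallerReference" must stay unchanged.
config =
  current["DistributionConfig"]
  |> Map.put("Comment", "updated")

{:ok, _body, _} =
  AWS.CloudFront.update_distribution(client, id, %{
    "DistributionConfig" => config,
    "IfMatch" => current["ETag"]
  })
```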
"""
def update_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/distribution/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update a field-level encryption configuration.
"""
def update_field_level_encryption_config(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update a field-level encryption profile.
"""
def update_field_level_encryption_profile(client, id, input, options \\ []) do
path_ = "/2020-05-31/field-level-encryption-profile/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Updates an origin request policy configuration.
When you update an origin request policy configuration, all the fields are
updated with the values provided in the request. You cannot update some
fields independent of others. To update an origin request policy
configuration:
<ol> <li> Use `GetOriginRequestPolicyConfig` to get the current
configuration.
</li> <li> Locally modify the fields in the origin request policy
configuration that you want to update.
</li> <li> Call `UpdateOriginRequestPolicy` by providing the entire origin
request policy configuration, including the fields that you modified and
those that you didn’t.
</li> </ol>
"""
def update_origin_request_policy(client, id, input, options \\ []) do
path_ = "/2020-05-31/origin-request-policy/#{URI.encode(id)}"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update public key information. Note that the only value you can change is
the comment.
"""
def update_public_key(client, id, input, options \\ []) do
path_ = "/2020-05-31/public-key/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@doc """
Update a streaming distribution.
"""
def update_streaming_distribution(client, id, input, options \\ []) do
path_ = "/2020-05-31/streaming-distribution/#{URI.encode(id)}/config"
{headers, input} =
[
{"IfMatch", "If-Match"},
]
|> AWS.Request.build_params(input)
query_ = []
case request(client, :put, path_, query_, headers, input, options, nil) do
{:ok, body, response} ->
body =
[
{"ETag", "ETag"},
]
|> Enum.reduce(body, fn {header_name, key}, acc ->
case List.keyfind(response.headers, header_name, 0) do
nil -> acc
{_header_name, value} -> Map.put(acc, key, value)
end
end)
{:ok, body, response}
result ->
result
end
end
@spec request(AWS.Client.t(), atom(), binary(), list(), list(), map() | nil, list(), pos_integer() | nil) ::
{:ok, map(), HTTPoison.Response.t()}
| {:ok, HTTPoison.Response.t()}
| {:error, map()}
| {:error, HTTPoison.Error.t()}
defp request(client, method, path, query, headers, input, options, success_status_code) do
client = %{client | service: "cloudfront",
region: "us-east-1"}
host = build_host("cloudfront", client)
url = host
|> build_url(path, client)
|> add_query(query)
additional_headers = [{"Host", host}, {"Content-Type", "text/xml"}]
headers = AWS.Request.add_headers(additional_headers, headers)
payload = encode_payload(input)
headers = AWS.Request.sign_v4(client, method, url, headers, payload)
perform_request(method, url, payload, headers, options, success_status_code)
end
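# When no explicit success status code is expected (the GET-style calls above
# pass `nil`), any of 200/202/204 is treated as success; an empty 200 body
# short-circuits to the bare response.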
defp perform_request(method, url, payload, headers, options, nil) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, response}
{:ok, %HTTPoison.Response{status_code: status_code, body: body} = response}
when status_code == 200 or status_code == 202 or status_code == 204 ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp perform_request(method, url, payload, headers, options, success_status_code) do
case HTTPoison.request(method, url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: ""} = response} ->
{:ok, %{}, response}
{:ok, %HTTPoison.Response{status_code: ^success_status_code, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{endpoint: endpoint}) do
"#{endpoint_prefix}.#{endpoint}"
end
defp build_url(host, path, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}#{path}"
end
defp add_query(url, []) do
url
end
defp add_query(url, query) do
querystring = AWS.Util.encode_query(query)
"#{url}?#{querystring}"
end
defp encode_payload(input) do
if input != nil, do: AWS.Util.encode_xml(input), else: ""
end
end
|
lib/aws/cloud_front.ex
| 0.884389
| 0.512449
|
cloud_front.ex
|
starcoder
|
defmodule Combinatorics do
@moduledoc """
Utility for generating combinatorics.
Extracted from the [implementation in CouchDB](https://github.com/apache/couchdb/blob/master/src/couch_tests/src/couch_tests_combinatorics.erl).
"""
@doc """
Generate the powerset of a given list.
Returns a list of lists.
## Examples
iex> Combinatorics.powerset([:foo, :bar, :baz])
[
[:foo],
[:foo,:baz],
[:foo,:bar,:baz],
[:foo,:bar],
[:bar],
[:bar,:baz],
[:baz],
[]
]
"""
@spec powerset(list()) :: [list()]
def powerset([]), do: [[]]
def powerset([h | t]) do
pt = powerset(t)
do_powerset(h, pt, pt)
end
@doc """
Generate all permutations of a given list.
Returns a list of lists.
## Examples
iex> Combinatorics.permutations([:foo, :bar, :baz])
[
[:foo, :bar, :baz],
[:foo, :baz, :bar],
[:bar, :foo, :baz],
[:bar, :baz, :foo],
[:baz, :foo, :bar],
[:baz, :bar, :foo]
]
"""
@spec permutations(list()) :: [list()]
def permutations([]), do: [[]]
def permutations(list) do
for h <- list, t <- permutations(list -- [h]) do
[h | t]
end
end
@doc """
Generate the Cartesian product of a given list of lists.
Returns a list of lists.
## Examples
iex> Combinatorics.product([ [:foo, :bar], [1, 2, 3] ])
[
[:foo, 1],
[:foo, 2],
[:foo, 3],
[:bar, 1],
[:bar, 2],
[:bar, 3]
]
"""
@spec product([list()]) :: [list()]
def product([]), do: []
def product([h]) do
for x <- h, do: [x]
end
def product([h | t]) do
for a <- h, b <- product(t) do
[a | b]
end
end
@doc """
Generate all combinations of `true` or `false` for a given number of bits.
Returns a list of lists.
## Examples
iex> Combinatorics.binary_combinations(3)
[
[ false , false , false ],
[ false , false , true ],
[ false , true , false ],
[ false , true , true ],
[ true , false , false ],
[ true , false , true ],
[ true , true , false ],
[ true , true , true ]
]
"""
@spec binary_combinations(pos_integer()) :: [list(boolean())]
def binary_combinations(n) do
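# Each of the `n` positions contributes `[false, true]`; the Cartesian
# product enumerates all 2^n boolean combinations.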
List.duplicate([false, true], n) |> product()
end
@doc """
Generate all combinations of `n` items from the given list.
Returns a list of lists, each with `n` elements.
## Examples
iex> Combinatorics.n_combinations(2, [:mon, :tue, :wed, :thu, :fri])
[
[:mon, :tue],
[:mon, :wed],
[:mon, :thu],
[:mon, :fri],
[:tue, :wed],
[:tue, :thu],
[:tue, :fri],
[:wed, :thu],
[:wed, :fri],
[:thu, :fri]
]
"""
@spec n_combinations(non_neg_integer(), list()) :: [list()]
def n_combinations(0, _list), do: [[]]
def n_combinations(_, []), do: []
def n_combinations(n, [h | t]) do
list = for l <- n_combinations(n - 1, t), do: [h | l]
list ++ n_combinations(n, t)
end
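# Prepends `x` to each subset in the powerset of the tail, accumulating the
# new subsets in front of the subsets that omit `x`.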
defp do_powerset(_, [], acc), do: acc
defp do_powerset(x, [h | t], acc) do
do_powerset(x, t, [[x | h] | acc])
end
end
|
lib/combinatorics.ex
| 0.893366
| 0.648383
|
combinatorics.ex
|
starcoder
|
defmodule Cloak.Cipher do
@moduledoc """
A behaviour for encryption/decryption modules. You can rely on this behaviour
to create your own Cloak-compatible cipher modules.
## Example
We will create a cipher that simply prepends `"Hello, "` to any given
plaintext on encryption, and removes the prefix on decryption.
First, define your own cipher module, and specify the `Cloak.Cipher`
behaviour.
defmodule MyApp.PrefixCipher do
@behaviour Cloak.Cipher
end
Add some configuration to your vault for this new cipher:
config :my_app, MyApp.Vault,
ciphers: [
prefix: {MyApp.PrefixCipher, prefix: "Hello, "}
]
The keyword list containing the `:prefix` will be passed in as `opts`
to our cipher callbacks. You should specify any options your cipher will
need for encryption/decryption here, such as the key.
Next, define the `can_decrypt?/2` callback:
@impl true
def can_decrypt?(ciphertext, opts) do
String.starts_with?(ciphertext, opts[:prefix])
end
If the ciphertext starts with `"Hello, "`, we know it was encrypted with this
cipher and we can proceed. Finally, define the `encrypt` and `decrypt`
functions:
@impl true
def encrypt(plaintext, opts) do
{:ok, opts[:prefix] <> plaintext}
end
@impl true
def decrypt(ciphertext, opts) do
{:ok, String.replace(ciphertext, opts[:prefix], "")}
end
You can now use your cipher with your vault!
MyApp.Vault.encrypt!("World!", :prefix)
# => "Hello, World!"
MyApp.Vault.decrypt!("Hello, World!")
# => "World!"
"""
@type plaintext :: binary
@type ciphertext :: binary
@type opts :: Keyword.t()
@doc """
Encrypt a value, using the given keyword list of options. These options
derive from the cipher configuration, like so:
config :my_app, MyApp.Vault,
ciphers: [
default: {Cloak.Ciphers.AES.GCM, tag: "AES.GCM.V1", key: <<1, 0, ...>>}
]
The above configuration will result in the following `opts` being passed
to this function:
[tag: "AES.GCM.V1", key: <<1, 0, ...>>]
Your implementation **must** include any information it will need for
decryption in the generated ciphertext.
"""
@callback encrypt(plaintext, opts) :: {:ok, binary} | :error
@doc """
Decrypt a value, using the given opts. Options are derived from the cipher
configuration. See `encrypt/2`.
"""
@callback decrypt(ciphertext, opts) :: {:ok, binary} | :error
@doc """
Determines if a given ciphertext can be decrypted by this cipher. Options
are derived from the cipher configuration. See `encrypt/2`.
"""
@callback can_decrypt?(ciphertext, opts) :: boolean
end
|
lib/cloak/cipher.ex
| 0.905573
| 0.465752
|
cipher.ex
|
starcoder
|
defmodule Yggdrasil.Adapter.Icon do
@moduledoc """
This module defines an `Yggdrasil` adapter for ICON 2.0.
## Overview
With this adapter we can subscribe to the ICON 2.0 websocket to get block
updates, event log updates, or both in real time. It leverages `Yggdrasil` (built
over `Phoenix.PubSub`) for handling incoming messages.
The channel has two main fields:
- `adapter` - For this adapter, it should be set to `:icon`.
- `name` - Where we'll find information of the websocket connection. For this
adapter, it is a map with the following keys:
+ `source` - Whether `:block` or `:event` (required).
+ `identity` - `Icon.RPC.Identity` instance pointed to the right network. It
defaults to Mainnet if no identity is provided.
+ `from_height` - Block height from which we should start receiving messages.
It defaults to `:latest`.
+ `data` - It varies depending on the `source` chosen (see next sections for
more information).
> **Important**: We need to be careful when using `from_height` in the channel
> because `Yggdrasil` will restart the synchronization process from the
> chosen height if the process crashes.
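For example, a channel that replays block ticks from a specific height (the
height below is just illustrative) would look like this:
```elixir
iex> channel = [
...>   adapter: :icon,
...>   name: %{source: :block, from_height: 4_000_000}
...> ]
iex> Yggdrasil.subscribe(channel)
:ok
```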
In general, we can subscribe any process using the function
`Yggdrasil.subscribe/1` and unsubscribe using `Yggdrasil.unsubscribe/1` e.g.
for subscribing to every block tick we would do the following:
```elixir
iex> channel = [name: %{source: :block}, adapter: :icon]
iex> Yggdrasil.subscribe(channel)
:ok
```
and we'll find our messages in the mailbox of our process:
```elixir
iex> flush()
{:Y_CONNECTED, %Yggdrasil.Channel{adapter: :icon, ...}}
{:Y_EVENT, %Yggdrasil.Channel{adapter: :icon, ...}, %Icon.Schema.Types.Block.Tick{height: 42, hash: "0xc71303ef8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"}}
...
```
When we're done, we can unsubscribe using the following:
```elixir
iex> Yggdrasil.unsubscribe(channel)
:ok
```
## Subscribing to Blocks
When subscribing to blocks, we can subscribe to:
- Only block ticks.
- Or block ticks and event logs.
The previous section showed how to subscribe to block ticks. However, if we
want to subscribe to specific events as well, we can list them the `data` e.g.
let's say we want to subscribe to the event:
- `Transfer(Address,Address,int)`
- For the contract `cxb0776ee37f5b45bfaea8cff1d8232fbb6122ec32`
- When the first address is `hxbe258ceb872e08851f1f59694dac2558708ece11`
then we would do the following:
```elixir
iex> channel = [
...> adapter: :icon,
...> name: %{
...> source: :block,
...> data: [
...> %{
...> event: "Transfer(Address,Address,int)",
...> addr: "cxb0776ee37f5b45bfaea8cff1d8232fbb6122ec32",
...> indexed: [
...> "hxbe258ceb872e08851f1f59694dac2558708ece11",
...> nil
...> ],
...> data: [
...> nil
...> ]
...> }
...> ]
...> }
...> ]
iex> Yggdrasil.subscribe(channel)
:ok
```
then we will start receiving both block ticks and event logs related to that
event when they occur:
```elixir
iex> flush()
{:Y_CONNECTED, %Yggdrasil.Channel{...}}
{:Y_EVENT, %Yggdrasil.Channel{...}, %Icon.Schema.Types.Block.Tick{height: 42, ...}}
{:Y_EVENT, %Yggdrasil.Channel{...}, %Icon.Schema.Types.Block.Tick{height: 43, ...}}
{:Y_EVENT, %Yggdrasil.Channel{...}, %Icon.Schema.Types.Block.Tick{height: 44, ...}}
{:Y_EVENT, %Yggdrasil.Channel{...}, %Icon.Schema.Types.EventLog{header: "Transfer(Address,Address,int)", ...}}
...
```
## Subscribing to Events
We can also subscribe directly to events if we don't care much about the
current block. In this case the `data` would not be a list of events, but a
single event e.g. if we apply the same example we've seen in the previous
section, this is what it would look like:
```elixir
iex> channel = [
...> adapter: :icon,
...> name: %{
...> source: :event,
...> data: %{
...> event: "Transfer(Address,Address,int)",
...> addr: "cxb0776ee37f5b45bfaea8cff1d8232fbb6122ec32",
...> indexed: [
...> "hxbe258ceb872e08851f1f59694dac2558708ece11",
...> nil
...> ],
...> data: [
...> nil
...> ]
...> }
...> }
...> ]
iex> Yggdrasil.subscribe(channel)
:ok
```
then we will start receiving the event logs related to that event when they
occur:
```elixir
iex> flush()
{:Y_CONNECTED, %Yggdrasil.Channel{...}}
{:Y_EVENT, %Yggdrasil.Channel{...}, %Icon.Schema.Types.EventLog{header: "Transfer(Address,Address,int)", ...}}
...
```
## Further Reading
For more information, check out the following modules:
- Publisher adapter: `Yggdrasil.Publisher.Adapter.Icon`
- Subscriber adapter: `Yggdrasil.Subscriber.Adapter.Icon`
"""
use Yggdrasil.Adapter, name: :icon
end
|
lib/yggdrasil/adapter/icon.ex
| 0.876588
| 0.949482
|
icon.ex
|
starcoder
|
defmodule Nosedrum.ApplicationCommand do
@moduledoc """
The application command behaviour specifies the interface that a slash, user,
or message command module should implement.
Like regular commands, application command modules are stateless on their own. Implementations of the callbacks
defined by this behaviour are invoked from other modules/functions, notably a `Nosedrum.Interactor`.
The types defined in this module reflect the official
[Application Command docs](https://discord.com/developers/docs/interactions/application-commands).
## Example Slash Command
This command echos the passed message back to the user.
```elixir
# In your application command module file, e.g. ./lib/my_app/commands/echo.ex
defmodule MyApp.Commands.Echo do
@behaviour Nosedrum.ApplicationCommand
@impl true
def description() do
"Echos a message."
end
@impl true
def command(interaction) do
[%{name: "message", value: message}] = interaction.data.options
[
content: message,
ephemeral?: true
]
end
@impl true
def type() do
:slash
end
@impl true
def options() do
[
%{
type: :string,
name: "message",
description: "The message for the bot to echo.",
required: true
}
]
end
end
```
```elixir
# In your Nostrum.Consumer file, e.g. ./lib/my_app/consumer.ex
defmodule MyApp.Consumer do
use Nostrum.Consumer
# ...
# You may use `:global` instead of a guild id at GUILD_ID_HERE, but note
# that global commands could take up to an hour to become available.
def handle_event({:READY, _data, _ws_state}) do
case Nosedrum.Interactor.Dispatcher.add_command("echo", MyApp.Commands.Echo, GUILD_ID_HERE) do
{:ok, _} -> IO.puts("Registered Echo command.")
e -> IO.inspect(e, label: "An error occurred registering the Echo command")
end
end
def handle_event({:INTERACTION_CREATE, interaction, _ws_state}) do
Nosedrum.Interactor.Dispatcher.handle_interaction(interaction)
end
end
```
You will also need to start the `Nosedrum.Interactor.Dispatcher` as part of
your supervision tree, for example, by adding this to your application start
function:
```elixir
# ./lib/my_app/application.ex
defmodule MyApp.Application do
# ...
def start(type, args) do
children = [
# ...
{Nosedrum.Interactor.Dispatcher, name: Nosedrum.Interactor.Dispatcher},
]
options = [strategy: :rest_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, options)
end
end
```
"""
@moduledoc since: "0.4.0"
@type response_type ::
:channel_message_with_source
| :deferred_channel_message_with_source
| :deferred_update_message
| :pong
| :update_message
@typedoc """
A field in a keyword list interaction response.
Special notes:
- `:type` is required, unless `:content` or `:embeds` is present, in which case it defaults to
`:channel_message_with_source`.
- `:allowed_mentions` is a list that should contain "users", "roles", and/or "everyone", or be empty.
"""
@type response_field ::
{:type, response_type}
| {:content, String.t()}
| {:embeds, [Embed.t()]}
| {:components, [map()]}
| {:ephemeral?, boolean()}
| {:tts?, boolean()}
| {:allowed_mentions, [String.t()] | []}
@typedoc """
A keyword list of fields to include in the interaction response, after running the `c:command/1` callback.
If `:type` is not specified, it will default to `:channel_message_with_source`, though one of
either `:embeds` or `:content` must be present.
## Example
```elixir
def command(interaction) do
# Since `:content` is included, Nosedrum will infer `type: :channel_message_with_source`
[
content: "Hello, world!",
ephemeral?: true,
allowed_mentions: ["users", "roles"]
]
end
```
"""
@type response :: [response_field]
@typedoc """
An option (argument) for an application command.
See callback `c:options/0` documentation for examples.
"""
@type option :: %{
optional(:required) => true | false,
optional(:choices) => [choice],
optional(:options) => [option],
type:
:sub_command
| :sub_command_group
| :string
| :integer
| :boolean
| :user
| :channel
| :role
| :mentionable
| :number,
name: String.t(),
description: String.t()
}
@typedoc """
A choice for an option.
See callback `c:options/0` documentation for examples.
"""
@type choice :: %{
name: String.t(),
value: String.t() | number()
}
@doc """
Returns one of `:slash`, `:message`, or `:user`, indicating what kind of application command this module represents.
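## Example
```elixir
def type, do: :slash
```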
"""
@callback type() :: :slash | :message | :user
@doc """
Returns a description of the command. Used when registering the command with Discord. This is what the user will see
in the autofill command-selection menu.
## Example
```elixir
def description, do: "This is a command description."
```
"""
@callback description() :: String.t()
@doc """
An optional callback that returns a list of options (arguments) that the
command takes. Used when registering the command with Discord. Only valid for
CHAT_INPUT application commands, aka slash commands.
Read more in the official
[Application Command documentation](https://discord.com/developers/docs/interactions/application-commands#application-command-object-application-command-option-structure).
## Example options callback for a "/role" command
```elixir
def options, do:
[
%{
type: :user,
name: "user",
description: "The user to assign the role to.",
required: true # Defaults to false if not specified.
},
%{
type: :role,
name: "role",
description: "The role to be assigned.",
required: true,
choices: [
%{
name: "Event Notifications",
value: 123456789123456789 # A role ID, passed to your `command/1` callback via the Interaction struct.
},
%{
name: "Announcements",
value: 123456789123456790
}
]
}
]
```
"""
@callback options() :: [option]
@doc """
Execute the command invoked by the given `t:Nostrum.Struct.Interaction.t/0`. Returns a `t:response/0`.
## Example
```elixir
defmodule MyApp.MyCommand do
@behaviour Nosedrum.ApplicationCommand
# ...
@impl true
def command(interaction) do
%{name: opt_name} = List.first(interaction.data.options)
[content: "Hello World \#{opt_name}!"]
end
end
```
"""
@callback command(interaction :: Interaction.t()) :: response
@optional_callbacks [options: 0]
end
|
lib/nosedrum/application_command.ex
| 0.939927
| 0.691953
|
application_command.ex
|
starcoder
|
defmodule Logi.Filter do
@moduledoc """
Log Message Filter Behaviour.
A filter decides whether to allow a message to be sent to the target channel.
## Note
A filter should not raise exceptions when its `c:filter/2` callback is called.
If any exception is raised, the invocation of the log function will be aborted and
the exception will be propagated to the caller process.
## Examples
```elixir
iex> require Logi
iex> write = fn (_, format, data) -> :io.format format <> "\\n", data end
iex> {:ok, _} = Logi.Channel.install_sink(Logi.BuiltIn.Sink.Fun.new(:foo, write), :info)
iex> filter = fn (context) -> not Map.get(Logi.Context.get_metadata(context), :discard, false) end
iex> logger = Logi.new([filter: Logi.BuiltIn.Filter.Fun.new(filter)])
iex> Logi.save_as_default logger
iex> Logi.info "hello world"
#OUTPUT# hello world
iex> Logi.info "hello world", [], [metadata: %{:discard => true}]
# No output: the log message was discarded by the filter
```
"""
@typedoc """
An instance of `Logi.Filter` behaviour implementation module.
"""
@opaque filter :: {callback_module, state} | callback_module
@typedoc """
A module that implements the `Logi.Filter` behaviour.
"""
@type callback_module :: module
@typedoc """
The value of the second argument of the `c:filter/2` callback function.
If a `filter()` does not have an explicit `state()`, `nil` will be passed instead.
"""
@type state :: any
@doc """
Log messages filtering function.
This function decides whether to allow a message be sent to the target channel.
If it returns `false` (or `{false, state}`), the message will be dropped.
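## Example
A minimal sketch of a callback module (module name illustrative):
```elixir
defmodule MyApp.DiscardFilter do
@behaviour Logi.Filter
# Drop messages whose metadata sets `:discard`; the state is unchanged.
def filter(context, _state) do
not Map.get(Logi.Context.get_metadata(context), :discard, false)
end
end
```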
"""
@callback filter(Logi.Context.context, state) :: boolean | {boolean, state}
@doc """
Creates a new filter instance.
"""
@spec new(callback_module, state) :: filter
def new(callback_module, state \\ nil) do
:logi_filter.new callback_module, state
end
@doc """
Returns `true` if `x` is a `t:filter/0` value, `false` otherwise.
"""
@spec filter?(any) :: boolean
def filter?(x) do
:logi_filter.is_filter x
end
@doc "Gets the module of `filter`."
@spec get_module(filter) :: callback_module()
def get_module(filter) do
:logi_filter.get_module filter
end
@doc "Gets the state of `filter`."
@spec get_state(filter) :: state()
def get_state(filter) do
:logi_filter.get_state filter
end
@doc """
Applies `filter`.
This function returns `do_allow` if the state of `filter` is not changed, `{do_allow, new_filter}` otherwise.
"""
@spec apply(Logi.Context.context, filter) :: do_allow | {do_allow, new_filter} when
do_allow: boolean,
new_filter: filter
def apply(context, filter) do
:logi_filter.apply context, filter
end
end
|
lib/logi/filter.ex
| 0.846038
| 0.751671
|
filter.ex
|
starcoder
|
defmodule JSONAPI.View do
@moduledoc """
A View is simply a module that defines certain callbacks to configure proper
rendering of your JSONAPI documents.
defmodule PostView do
use JSONAPI.View
def fields, do: [:id, :text, :body]
def type, do: "post"
def relationships do
[author: UserView,
comments: CommentView]
end
end
defmodule UserView do
use JSONAPI.View
def fields, do: [:id, :username]
def type, do: "user"
def relationships, do: []
end
defmodule CommentView do
use JSONAPI.View
def fields, do: [:id, :text]
def type, do: "comment"
def relationships do
[user: {UserView, :include}]
end
end
defmodule DogView do
use JSONAPI.View, namespace: "/pupperz-api"
end
You can now call `UserView.show(user, conn, conn.params)` and it will render
a valid jsonapi doc.
## Fields
By default, the resulting JSON document consists of fields, defined in the `fields/0`
function. You can define custom fields or override current fields by defining a
2-arity function inside the view that takes `data` and `conn` as arguments and has
the same name as the field it will be producing. Refer to our `fullname/2` example below.
defmodule UserView do
use JSONAPI.View
def fullname(data, conn), do: "fullname"
def fields, do: [:id, :username, :fullname]
def type, do: "user"
def relationships, do: []
end
Fields may be omitted manually using the `hidden/1` function.
defmodule UserView do
use JSONAPI.View
def fields, do: [:id, :username, :email]
def type, do: "user"
def hidden(_data) do
[:email] # will be removed from the response
end
end
In order to use [sparse fieldsets](https://jsonapi.org/format/#fetching-sparse-fieldsets)
you must include the `JSONAPI.QueryParser` plug.
## Relationships
Currently the `relationships/0` callback expects a keyword list to be returned
configuring the information you will need. If you have the following Ecto
Model setup
defmodule User do
schema "users" do
field :username
has_many :posts
has_one :image
end
end
and the relationships setup from above, then if your Post has loaded the author and the
query asks for it, the author will be included.
So for example:
`GET /posts?include=post.author` if the author record is loaded on the Post, and you are using
the `JSONAPI.QueryParser` it will be included in the `includes` section of the JSONAPI document.
If you always want to include a relationship, first make sure it is always preloaded,
and then use the `[user: {UserView, :include}]` syntax in your `relationships` function. This tells
the serializer to *always* include it if it's loaded.
## Options
* `:host` (binary) - Allows the `host` to be overridden for generated URLs. Defaults to `host` of the supplied `conn`.
* `:scheme` (atom) - Enables configuration of the HTTP scheme for generated URLs. Defaults to `scheme` from the provided `conn`.
* `:namespace` (binary) - Allows setting the namespace of a given resource. This may be
configured globally or overridden on the View itself. Note that if you have
a globally defined namespace and need to *remove* the namespace for a
resource, set the namespace to a blank String.
The default behaviour for `host` and `scheme` is to derive it from the `conn` provided, while the
default style for presentation in names is to be underscored and not dashed.
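For example, these may be set globally in config (illustrative values):
config :jsonapi,
host: "api.example.com",
scheme: "https",
namespace: "/api"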
"""
alias JSONAPI.{Paginator, Utils}
alias Plug.Conn
@type t :: module()
@type data :: any()
@type field :: atom()
@type links :: %{atom() => String.t()}
@type meta :: %{atom() => String.t()}
@type options :: keyword()
@type resource_id :: String.t()
@type resource_type :: String.t()
@callback attributes(data(), Conn.t() | nil) :: map()
@callback id(data()) :: resource_id() | nil
@callback fields() :: [field()]
@callback hidden(data()) :: [field()]
@callback links(data(), Conn.t()) :: links()
@callback meta(data(), Conn.t()) :: meta() | nil
@callback namespace() :: String.t()
@callback pagination_links(data(), Conn.t(), Paginator.page(), Paginator.options()) ::
Paginator.links()
@callback path() :: String.t() | nil
@callback relationships() :: [{atom(), t() | {t(), :include}}]
@callback type() :: resource_type()
@callback url_for(data(), Conn.t() | nil) :: String.t()
@callback url_for_pagination(data(), Conn.t(), Paginator.params()) :: String.t()
@callback url_for_rel(term(), String.t(), Conn.t() | nil) :: String.t()
@callback visible_fields(data(), Conn.t() | nil) :: list(atom)
defmacro __using__(opts \\ []) do
{type, opts} = Keyword.pop(opts, :type)
{namespace, opts} = Keyword.pop(opts, :namespace)
{path, opts} = Keyword.pop(opts, :path)
{paginator, _opts} = Keyword.pop(opts, :paginator)
quote do
alias JSONAPI.{Serializer, View}
@behaviour View
@resource_type unquote(type)
@namespace unquote(namespace)
@path unquote(path)
@paginator unquote(paginator)
@impl View
def id(nil), do: nil
def id(%{__struct__: Ecto.Association.NotLoaded}), do: nil
def id(%{id: id}), do: to_string(id)
@impl View
def attributes(data, conn) do
visible_fields = View.visible_fields(__MODULE__, data, conn)
Enum.reduce(visible_fields, %{}, fn field, intermediate_map ->
value =
case function_exported?(__MODULE__, field, 2) do
true -> apply(__MODULE__, field, [data, conn])
false -> Map.get(data, field)
end
Map.put(intermediate_map, field, value)
end)
end
@impl View
def fields, do: raise("Need to implement fields/0")
@impl View
def hidden(_data), do: []
@impl View
def links(_data, _conn), do: %{}
@impl View
def meta(_data, _conn), do: nil
@impl View
if @namespace do
def namespace, do: @namespace
else
def namespace, do: Application.get_env(:jsonapi, :namespace, "")
end
@impl View
def pagination_links(data, conn, page, options) do
paginator = Application.get_env(:jsonapi, :paginator, @paginator)
if Code.ensure_loaded?(paginator) && function_exported?(paginator, :paginate, 5) do
paginator.paginate(data, __MODULE__, conn, page, options)
else
%{}
end
end
@impl View
def path, do: @path
@impl View
def relationships, do: []
@impl View
if @resource_type do
def type, do: @resource_type
else
def type, do: raise("Need to implement type/0")
end
@impl View
def url_for(data, conn),
do: View.url_for(__MODULE__, data, conn)
@impl View
def url_for_pagination(data, conn, pagination_params),
do: View.url_for_pagination(__MODULE__, data, conn, pagination_params)
@impl View
def url_for_rel(data, rel_type, conn),
do: View.url_for_rel(__MODULE__, data, rel_type, conn)
@impl View
def visible_fields(data, conn),
do: View.visible_fields(__MODULE__, data, conn)
defoverridable View
def index(models, conn, _params, meta \\ nil, options \\ []),
do: Serializer.serialize(__MODULE__, models, conn, meta, options)
def show(model, conn, _params, meta \\ nil, options \\ []),
do: Serializer.serialize(__MODULE__, model, conn, meta, options)
if Code.ensure_loaded?(Phoenix) do
def render("show.json", %{data: data, conn: conn, meta: meta, options: options}),
do: Serializer.serialize(__MODULE__, data, conn, meta, options)
def render("show.json", %{data: data, conn: conn, meta: meta}),
do: Serializer.serialize(__MODULE__, data, conn, meta)
def render("show.json", %{data: data, conn: conn}),
do: Serializer.serialize(__MODULE__, data, conn)
def render("index.json", %{data: data, conn: conn, meta: meta, options: options}),
do: Serializer.serialize(__MODULE__, data, conn, meta, options)
def render("index.json", %{data: data, conn: conn, meta: meta}),
do: Serializer.serialize(__MODULE__, data, conn, meta)
def render("index.json", %{data: data, conn: conn}),
do: Serializer.serialize(__MODULE__, data, conn)
else
raise ArgumentError,
"Attempted to call function that depends on Phoenix. " <>
"Make sure Phoenix is part of your dependencies"
end
end
end
@spec url_for(t(), term(), Conn.t() | nil) :: String.t()
def url_for(view, data, nil = _conn) when is_nil(data) or is_list(data),
do: URI.to_string(%URI{path: Enum.join([view.namespace(), path_for(view)], "/")})
def url_for(view, data, nil = _conn) do
URI.to_string(%URI{
path: Enum.join([view.namespace(), path_for(view), view.id(data)], "/")
})
end
def url_for(view, data, %Plug.Conn{} = conn) when is_nil(data) or is_list(data) do
URI.to_string(%URI{
scheme: scheme(conn),
host: host(conn),
path: Enum.join([view.namespace(), path_for(view)], "/")
})
end
def url_for(view, data, %Plug.Conn{} = conn) do
URI.to_string(%URI{
scheme: scheme(conn),
host: host(conn),
path: Enum.join([view.namespace(), path_for(view), view.id(data)], "/")
})
end
@spec url_for_rel(t(), data(), resource_type(), Conn.t() | nil) :: String.t()
def url_for_rel(view, data, rel_type, conn) do
"#{url_for(view, data, conn)}/relationships/#{rel_type}"
end
@spec url_for_pagination(t(), data(), Conn.t(), Paginator.params() | nil) :: String.t()
def url_for_pagination(
view,
data,
%{query_params: query_params} = conn,
nil = _pagination_params
) do
query =
query_params
|> Utils.List.to_list_of_query_string_components()
|> URI.encode_query()
prepare_url(view, query, data, conn)
end
def url_for_pagination(view, data, %{query_params: query_params} = conn, pagination_params) do
query_params = Map.put(query_params, "page", pagination_params)
url_for_pagination(view, data, %{conn | query_params: query_params}, nil)
end
@spec visible_fields(t(), data(), Conn.t() | nil) :: list(atom)
def visible_fields(view, data, conn) do
all_fields =
view
|> requested_fields_for_type(conn)
|> net_fields_for_type(view.fields())
hidden_fields = view.hidden(data)
all_fields -- hidden_fields
end
defp net_fields_for_type(requested_fields, fields) when requested_fields in [nil, %{}],
do: fields
defp net_fields_for_type(requested_fields, fields) do
fields
|> MapSet.new()
|> MapSet.intersection(MapSet.new(requested_fields))
|> MapSet.to_list()
end
defp prepare_url(view, "", data, conn), do: url_for(view, data, conn)
defp prepare_url(view, query, data, conn) do
view
|> url_for(data, conn)
|> URI.parse()
|> struct(query: query)
|> URI.to_string()
end
defp requested_fields_for_type(view, %Conn{assigns: %{jsonapi_query: %{fields: fields}}}) do
fields[view.type()]
end
defp requested_fields_for_type(_view, _conn), do: nil
defp host(%Conn{host: host}),
do: Application.get_env(:jsonapi, :host, host)
defp scheme(%Conn{scheme: scheme}),
do: Application.get_env(:jsonapi, :scheme, to_string(scheme))
defp path_for(view), do: view.path() || view.type()
end
|
lib/jsonapi/view.ex
| 0.839306
| 0.500366
|
view.ex
|
starcoder
|
defmodule Finch.Request do
@moduledoc """
A request struct.
"""
@enforce_keys [:scheme, :host, :port, :method, :path, :headers, :body, :query]
defstruct [:scheme, :host, :port, :method, :path, :headers, :body, :query]
@atom_methods [
:get,
:post,
:put,
:patch,
:delete,
:head,
:options
]
@methods [
"GET",
"POST",
"PUT",
"PATCH",
"DELETE",
"HEAD",
"OPTIONS"
]
@atom_to_method Enum.zip(@atom_methods, @methods) |> Enum.into(%{})
@typedoc """
An HTTP request method represented as an `atom()` or a `String.t()`.
The following atom methods are supported: `#{Enum.map_join(@atom_methods, "`, `", &inspect/1)}`.
You can use any arbitrary method by providing it as a `String.t()`.
"""
@type method() :: :get | :post | :head | :patch | :delete | :options | :put | String.t()
@typedoc """
A Uniform Resource Locator, the address of a resource on the Web.
"""
@type url() :: String.t() | URI.t()
@typedoc """
Request headers.
"""
@type headers() :: Mint.Types.headers()
@typedoc """
Optional request body.
"""
@type body() :: iodata() | nil
@type t :: %Finch.Request{}
@doc false
def request_path(%{path: path, query: nil}), do: path
def request_path(%{path: path, query: query}), do: "#{path}?#{query}"
@doc false
def build(method, url, headers, body) do
{scheme, host, port, path, query} = parse_url(url)
%Finch.Request{
scheme: scheme,
host: host,
port: port,
method: build_method(method),
path: path,
headers: headers,
body: body,
query: query
}
end
@doc false
def parse_url(url) when is_binary(url) do
url |> URI.parse() |> parse_url()
end
def parse_url(%URI{} = parsed_uri) do
normalize_uri(parsed_uri)
end
defp normalize_uri(parsed_uri) do
normalized_path = parsed_uri.path || "/"
scheme = normalize_scheme(parsed_uri.scheme)
{scheme, parsed_uri.host, parsed_uri.port, normalized_path, parsed_uri.query}
end
defp build_method(method) when is_binary(method), do: method
defp build_method(method) when method in @atom_methods, do: @atom_to_method[method]
defp build_method(method) do
supported = Enum.map_join(@atom_methods, ", ", &inspect/1)
raise ArgumentError, """
got unsupported atom method #{inspect(method)}.
Only the following methods can be provided as atoms: #{supported}.
Otherwise you must pass a binary.
"""
end
defp normalize_scheme(scheme) do
case scheme do
"https" ->
:https
"http" ->
:http
scheme ->
raise ArgumentError, "invalid scheme #{inspect(scheme)}"
end
end
end
|
lib/finch/request.ex
| 0.883949
| 0.447762
|
request.ex
|
starcoder
|
defmodule DateTimeParser.Formatters do
@moduledoc false
def format_token(tokens, :hour) do
case {find_token(tokens, :hour), tokens |> find_token(:am_pm) |> format()} do
{{:hour, 0}, _} ->
0
{{:hour, 12}, "AM"} ->
0
{{:hour, hour}, "PM"} when hour < 12 ->
hour + 12
{{:hour, hour}, _} ->
hour
_ ->
nil
end
end
def format_token(tokens, :year) do
case tokens |> find_token(:year) |> format() do
nil ->
nil
year ->
year |> to_4_year() |> String.to_integer()
end
end
def format_token(tokens, token) do
tokens |> find_token(token) |> format()
end
defp find_token(tokens, find_me) do
Enum.find(tokens, fn
{token, _} -> token == find_me
_ -> false
end)
end
# If the parsed two-digit year is 00 to 49, then
# - If the last two digits of the current year are 00 to 49, then the returned year has the same
# first two digits as the current year.
# - If the last two digits of the current year are 50 to 99, then the first 2 digits of the
# returned year are 1 greater than the first 2 digits of the current year.
# If the parsed two-digit year is 50 to 99, then
# - If the last two digits of the current year are 00 to 49, then the first 2 digits of the
# returned year are 1 less than the first 2 digits of the current year.
# - If the last two digits of the current year are 50 to 99, then the returned year has the same
# first two digits as the current year.
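# For example, if the current year is 2021 (current two-digit year 21):
#   "49" -> "2049" (both halves < 50, keep the current "20" prefix)
#   "99" -> "1999" (parsed >= 50, current < 50, so the prefix becomes 20 - 1 = 19)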
defp to_4_year(parsed_3yr) when byte_size(parsed_3yr) == 3 do
[current_millenia | _rest] =
DateTime.utc_now()
|> Map.get(:year)
|> Integer.digits()
"#{current_millenia}#{parsed_3yr}"
end
defp to_4_year(parsed_2yr) when byte_size(parsed_2yr) == 2 do
[current_millenia, current_century, current_decade, current_year] =
DateTime.utc_now()
|> Map.get(:year)
|> Integer.digits()
parsed_2yr = String.to_integer(parsed_2yr)
current_2yr = String.to_integer("#{current_decade}#{current_year}")
cond do
parsed_2yr < 50 && current_2yr < 50 ->
"#{current_millenia}#{current_century}#{parsed_2yr}"
parsed_2yr < 50 && current_2yr >= 50 ->
[_parsed_millenia, parsed_century] =
[current_millenia, current_century]
|> Integer.undigits()
|> Kernel.+(1)
|> Integer.digits()
"#{current_millenia}#{parsed_century}#{parsed_2yr}"
parsed_2yr >= 50 && current_2yr < 50 ->
[parsed_millenia, parsed_century] =
[current_millenia, current_century]
|> Integer.undigits()
|> Kernel.-(1)
|> Integer.digits()
"#{parsed_millenia}#{parsed_century}#{parsed_2yr}"
parsed_2yr >= 50 && current_2yr >= 50 ->
"#{current_millenia}#{current_century}#{parsed_2yr}"
end
end
defp to_4_year(parsed_year), do: parsed_year
def format({:microsecond, value}) do
val = value |> to_string |> String.slice(0, 6)
{
val |> String.pad_trailing(6, "0") |> String.to_integer(),
val |> byte_size()
}
end
def format({:zone_abbr, value}), do: String.upcase(value)
def format({:utc_offset, offset}), do: to_string(offset)
def format({:year, value}), do: to_string(value)
def format({:am_pm, value}), do: String.upcase(value)
def format({_, value}) when is_integer(value), do: value
def format({_, value}), do: String.to_integer(value)
def format(_), do: nil
def clean(string) when is_binary(string) do
string
|> String.trim()
|> String.replace(" @ ", "T")
|> String.replace(~r{[[:space:]]+}, " ")
|> String.replace(" - ", "-")
|> String.replace("//", "/")
|> String.replace(~r{=|"|'|,|\\}, "")
|> String.downcase()
end
def clean(%{} = map) do
map
|> Enum.reject(fn {_k, v} -> is_nil(v) end)
|> Enum.into(%{})
end
end
|
lib/formatters.ex
| 0.745213
| 0.619788
|
formatters.ex
|
starcoder
|
defmodule RDF.IRI.InvalidError do
defexception [:message]
end
defmodule RDF.Literal.InvalidError do
defexception [:message]
end
defmodule RDF.Triple.InvalidSubjectError do
defexception [:subject]
def message(%{subject: subject}) do
"'#{inspect(subject)}' is not a valid subject of a RDF.Triple"
end
end
defmodule RDF.Triple.InvalidPredicateError do
defexception [:predicate]
def message(%{predicate: predicate}) do
"'#{inspect(predicate)}' is not a valid predicate of a RDF.Triple"
end
end
defmodule RDF.Quad.InvalidGraphContextError do
defexception [:graph_context]
def message(%{graph_context: graph_context}) do
"'#{inspect(graph_context)}' is not a valid graph context of a RDF.Quad"
end
end
defmodule RDF.Graph.EmptyDescriptionError do
defexception [:subject]
def message(%{subject: subject}) do
"""
RDF.Graph with empty description about '#{inspect(subject)}' detected.
Empty descriptions in a graph lead to inconsistent behaviour. The RDF.Graph API
should ensure that this never happens. So this probably happened by changing the
contents of the RDF.Graph struct directly, which is strongly discouraged.
You should always use the RDF.Graph API to change the content of a graph.
If this happened while using the RDF.Graph API, this is a bug.
Please report this at https://github.com/rdf-elixir/rdf-ex/issues and describe the
circumstances how this happened.
"""
end
end
defmodule RDF.XSD.Datatype.Mismatch do
defexception [:value, :expected_type]
def message(%{value: value, expected_type: expected_type}) do
"'#{inspect(value)}' is not a #{expected_type}"
end
end
defmodule RDF.Namespace.InvalidVocabBaseIRIError do
defexception [:message]
end
defmodule RDF.Namespace.InvalidTermError do
defexception [:message]
end
defmodule RDF.Namespace.InvalidAliasError do
defexception [:message]
end
defmodule RDF.Namespace.UndefinedTermError do
defexception [:message]
end
defmodule RDF.Query.InvalidError do
defexception [:message]
end
defmodule RDF.Resource.Generator.ConfigError do
defexception [:message]
end
|
lib/rdf/exceptions.ex
| 0.625896
| 0.515681
|
exceptions.ex
|
starcoder
|
defmodule Streamex.Activities do
@moduledoc """
The `Streamex.Activities` module defines functions
for working with feed activities.
"""
import Streamex.Request
alias Streamex.{Request, Client, Feed, Activity}
@doc """
Lists the given feed's activities.
Returns `{:ok, activities}`, or `{:error, message}` if something went wrong.
Available options are:
- `limit` - limits the number of results. Defaults to `25`
- `offset` - offsets the results
- `id_gte` - filter the feed on ids greater than or equal to the given value
- `id_gt` - filter the feed on ids greater than the given value
- `id_lte` - filter the feed on ids smaller than or equal to the given value
- `id_lt` - filter the feed on ids smaller than the given value
## Examples
iex> {_, feed} = Streamex.Feed.new("user", "eric")
{:ok, %Streamex.Feed{...}}
iex> Streamex.Activities.get(feed)
{:ok, [%Streamex.Activity{}...]}
"""
def get(feed, opts \\ []) do
Request.new
|> with_method(:get)
|> with_path(endpoint_get(feed))
|> with_token(feed, "feed", "read")
|> with_params(activity_get_params(opts))
|> commit_request
|> handle_response
end
@doc """
Adds activities to the given feed.
Accepts a single `Map` or a `List` of `Map`s.
Returns `{:ok, activity | activities}`, or `{:error, message}`
if something went wrong.
Activities have a number of required fields.
Refer to `Streamex.Activity` for a complete list.
## Examples
iex> {_, feed} = Streamex.Feed.new("user", "eric")
{:ok, %Streamex.Feed{...}}
iex> activity = %{"actor" => "Tony", "verb" => "like", "object" => "Elixir", "foreign_id" => "tony:1"}
%{...}
iex> Streamex.Activities.add(feed, activity)
{:ok, %Streamex.Activity{...}}
iex> activity_b = %{"actor" => "Anna", "verb" => "like", "object" => "Hiking", "foreign_id" => "anna:1"}
%{...}
iex> Streamex.Activities.add(feed, [activity, activity_b])
{:ok, [%Streamex.Activity{...}, %Streamex.Activity{...}]}
"""
def add(feed, %{} = activity) do
case add(feed, [activity]) do
{:ok, nil} -> {:ok, nil}
{:ok, results} -> {:ok, Enum.at(results, 0)}
response -> response
end
end
def add(feed, [%{} | _] = activities) do
Request.new
|> with_method(:post)
|> with_path(endpoint_create(feed))
|> with_token(feed, "feed", "write")
|> with_body(body_create_update_activities(activities))
|> commit_request
|> handle_response
end
@doc """
Adds an activity to a `List` of feeds.
Returns `{:ok, nil}`, or `{:error, message}` if something went wrong.
Activities have a number of required fields.
Refer to `Streamex.Activity` for a complete list.
## Examples
iex> {_, feed} = Streamex.Feed.new("user", "eric")
{:ok, %Streamex.Feed{...}}
iex> {_, feed_b} = Streamex.Feed.new("user", "deborah")
{:ok, %Streamex.Feed{...}}
iex> activity = %{"actor" => "Tony", "verb" => "like", "object" => "Elixir", "foreign_id" => "tony:1"}
%{...}
iex> Streamex.Activities.add_to_many([feed, feed_b], activity)
{:ok, nil}
"""
def add_to_many(feeds, %{} = activity) do
Request.new
|> with_method(:post)
|> with_path(endpoint_add_to_many())
|> with_body(body_create_batch_activities(feeds, activity))
|> commit_request
|> handle_response
end
@doc """
Updates activities. Accepts a single `Map` or a `List` of `Map`s.
Returns `{:ok, nil}`, or `{:error, message}`
Activities have a number of required fields.
Refer to `Streamex.Activity` for a complete list.
## Examples
iex> {_, feed} = Streamex.Feed.new("user", "eric")
{:ok, %Streamex.Feed{...}}
iex> activity = %{"actor" => "Tony", "verb" => "like", "object" => "Elixir", "foreign_id" => "tony:1", "time" => "2016-08-09T19:38:12.241758"}
%{...}
iex> Streamex.Activities.update(feed, activity)
{:ok, nil}
"""
def update(feed, %{} = activity) do
update(feed, [activity])
end
def update(feed, [%{} | _] = activities) do
Request.new
|> with_method(:post)
|> with_path(endpoint_update())
|> with_token(feed, "activities", "write")
|> with_body(body_create_update_activities(activities))
|> commit_request
|> handle_response
end
@doc """
Removes activities.
Accepts an `id` or `foreign_id` string value.
Returns `{:ok, removed_id}`, or `{:error, message}`
Available options are:
- `foreign_id` - if set to true, removes the activity by `foreign_id`
## Examples
iex> {_, feed} = Streamex.Feed.new("user", "eric")
{:ok, %Streamex.Feed{...}}
iex> Streamex.Activities.remove(feed, "d2d6fc2c-5e5a-11e6-8080-80017383369d")
{:ok, "d2d6fc2c-5e5a-11e6-8080-80017383369d"}
iex> Streamex.Activities.remove(feed, "tony:1", foreign_id: true)
{:ok, "tony:1"}
"""
def remove(feed, id, opts \\ []) do
foreign_id = Keyword.get(opts, :foreign_id, false)
params = if foreign_id, do: %{"foreign_id" => 1}, else: %{}
Request.new
|> with_method(:delete)
|> with_path(endpoint_remove(feed, id))
|> with_token(feed, "feed", "delete")
|> with_params(params)
|> commit_request
|> handle_response
end
defp activity_get_params(opts) do
defaults = [limit: 25, offset: 0]
Keyword.merge(defaults, opts) |> Enum.into(%{})
end
defp commit_request(request) do
request
|> Client.prepare_request
|> Client.sign_request
|> Client.execute_request
end
defp handle_response({:ok, nil}), do: {:ok, nil}
defp handle_response({:error, message}), do: {:error, message}
defp handle_response(%{"exception" => exception}), do: {:error, exception}
defp handle_response(%{"results" => results}), do:
{:ok, Enum.reduce(results, [], fn(result, acc) ->
with {:ok, activity} <- handle_response(result), do: acc ++ activity
end)}
defp handle_response(%{"activities" => results}), do:
{:ok, Enum.map(results, &(Activity.to_struct/1))}
defp handle_response(%{"removed" => id}), do: {:ok, id}
defp handle_response(%{"duration" => _}), do: {:ok, nil}
defp endpoint_get(%Feed{} = feed) do
<<"feed/", feed.slug :: binary, "/", feed.user_id :: binary, "/">>
end
defp endpoint_create(%Feed{} = feed) do
endpoint_get(feed)
end
defp endpoint_update(), do: "activities/"
defp endpoint_remove(%Feed{} = feed, id) do
<<endpoint_get(feed) :: binary, id :: binary, "/">>
end
defp endpoint_add_to_many() do
"feed/add_to_many/"
end
defp body_create_batch_activities(feeds, activity) do
feeds = Enum.map(feeds, fn(feed) -> Feed.get_follow_target_string(feed) end)
payload = %{"feeds" => feeds, "activity" => activity}
Poison.encode!(payload)
end
defp body_create_update_activities(activities) do
Poison.encode!(%{"activities" => activities})
end
end
|
lib/streamex/activities.ex
| 0.883989
| 0.525491
|
activities.ex
|
starcoder
|
defmodule Paginator.Ecto.Query do
@moduledoc false
import Ecto.Query
alias Paginator.Config
def paginate(queryable, config \\ [])
def paginate(queryable, %Config{} = config) do
queryable
|> maybe_where(config)
|> limit(^query_limit(config))
end
def paginate(queryable, opts) do
paginate(queryable, Config.new(opts))
end
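# Builds the keyset-pagination filter. For cursor fields [:a, :b] and
# operator :gt, the reduce below yields (roughly):
#   (a == ^a_val and b > ^b_val) or a > ^a_val
# i.e. a strictly-after comparison over the composite cursor.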
defp filter_values(query, cursor_fields, values, operator) do
sorts =
cursor_fields
|> Enum.zip(values)
|> Enum.reject(fn val -> match?({_column, nil}, val) end)
dynamic_sorts =
sorts
|> Enum.with_index()
|> Enum.reduce(true, fn {{column, value}, i}, dynamic_sorts ->
dynamic = true
dynamic =
case operator do
:lt ->
dynamic([q], field(q, ^column) < ^value and ^dynamic)
:gt ->
dynamic([q], field(q, ^column) > ^value and ^dynamic)
end
dynamic =
sorts
|> Enum.take(i)
|> Enum.reduce(dynamic, fn {prev_column, prev_value}, dynamic ->
dynamic([q], field(q, ^prev_column) == ^prev_value and ^dynamic)
end)
if i == 0 do
dynamic([q], ^dynamic and ^dynamic_sorts)
else
dynamic([q], ^dynamic or ^dynamic_sorts)
end
end)
where(query, [q], ^dynamic_sorts)
end
defp maybe_where(query, %Config{
after_values: nil,
before_values: nil,
sort_direction: :asc
}) do
query
end
defp maybe_where(query, %Config{
after_values: after_values,
before: nil,
cursor_fields: cursor_fields,
sort_direction: :asc
}) do
query
|> filter_values(cursor_fields, after_values, :gt)
end
defp maybe_where(query, %Config{
after_values: nil,
before_values: before_values,
cursor_fields: cursor_fields,
sort_direction: :asc
}) do
query
|> filter_values(cursor_fields, before_values, :lt)
|> reverse_order_bys()
end
defp maybe_where(query, %Config{
after_values: after_values,
before_values: before_values,
cursor_fields: cursor_fields,
sort_direction: :asc
}) do
query
|> filter_values(cursor_fields, after_values, :gt)
|> filter_values(cursor_fields, before_values, :lt)
end
defp maybe_where(query, %Config{
after: nil,
before: nil,
sort_direction: :desc
}) do
query
end
defp maybe_where(query, %Config{
after_values: after_values,
before: nil,
cursor_fields: cursor_fields,
sort_direction: :desc
}) do
query
|> filter_values(cursor_fields, after_values, :lt)
end
defp maybe_where(query, %Config{
after: nil,
before_values: before_values,
cursor_fields: cursor_fields,
sort_direction: :desc
}) do
query
|> filter_values(cursor_fields, before_values, :gt)
|> reverse_order_bys()
end
defp maybe_where(query, %Config{
after_values: after_values,
before_values: before_values,
cursor_fields: cursor_fields,
sort_direction: :desc
}) do
query
|> filter_values(cursor_fields, after_values, :lt)
|> filter_values(cursor_fields, before_values, :gt)
end
# In order to return the correct pagination cursors, we need to fetch one more
# record than we actually want to return.
defp query_limit(%Config{limit: limit}) do
limit + 1
end
# This code was taken from https://github.com/elixir-ecto/ecto/blob/v2.1.4/lib/ecto/query.ex#L1212-L1226
defp reverse_order_bys(query) do
update_in(query.order_bys, fn
[] ->
[]
order_bys ->
for %{expr: expr} = order_by <- order_bys do
%{
order_by
| expr:
Enum.map(expr, fn
{:desc, ast} -> {:asc, ast}
{:asc, ast} -> {:desc, ast}
end)
}
end
end)
end
end
|
lib/paginator/ecto/query.ex
| 0.651466
| 0.441011
|
query.ex
|
starcoder
|
defmodule Snowflex do
@moduledoc """
The client interface for connecting to the Snowflake data warehouse.
The main entry point to this module is `Snowflex.sql_query`. This function takes a string containing
a SQL query and returns a list of maps (one per row). NOTE: due to the way Erlang ODBC works, all values come back
as strings. You will need to cast values appropriately.
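## Example
A sketch of typical usage (pool name, query, and timeout are illustrative):
Snowflex.sql_query(:snowflake_pool, "SELECT * FROM users", 60_000)
Snowflex.param_query(
:snowflake_pool,
"SELECT * FROM users WHERE id = ?",
[Snowflex.int_param(42)],
60_000
)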
"""
alias Ecto.Changeset
alias Snowflex.Worker
@type query_param :: {:odbc.odbc_data_type(), list(:odbc.value())}
@type sql_data :: list(%{optional(String.t()) => String.t()})
@spec sql_query(atom(), String.t(), timeout()) ::
sql_data() | {:error, term}
def sql_query(pool_name, query, timeout) do
case :poolboy.transaction(
pool_name,
&Worker.sql_query(&1, query, timeout),
timeout
) do
{:ok, results} -> process_results(results)
err -> err
end
end
@spec param_query(atom(), String.t(), list(query_param()), timeout()) ::
sql_data() | {:error, term}
def param_query(pool_name, query, params \\ [], timeout) do
case :poolboy.transaction(
pool_name,
&Worker.param_query(&1, query, params, timeout),
timeout
) do
{:ok, results} -> process_results(results)
err -> err
end
end
def cast_results(data, schema) do
Enum.map(data, &cast_row(&1, schema))
end
def int_param(val), do: {:sql_integer, val}
def string_param(val, length \\ 250), do: {{:sql_varchar, length}, val}
# Helpers
defp process_results(data) when is_list(data) do
Enum.map(data, &process_results(&1))
end
defp process_results({:selected, headers, rows}) do
bin_headers =
headers
|> Enum.map(fn header -> header |> to_string() |> String.downcase() end)
|> Enum.with_index()
Enum.map(rows, fn row ->
Enum.reduce(bin_headers, %{}, fn {col, index}, map ->
data =
row
|> elem(index)
|> to_string_if_charlist()
Map.put(map, col, data)
end)
end)
end
defp to_string_if_charlist(data) when is_list(data), do: to_string(data)
defp to_string_if_charlist(data), do: data
defp cast_row(row, schema) do
schema
|> struct()
|> Changeset.cast(row, schema.__schema__(:fields))
|> Changeset.apply_changes()
end
end
|
lib/snowflex.ex
| 0.785309
| 0.408513
|
snowflex.ex
|
starcoder
|
defmodule Nacha.Batch do
@moduledoc """
A struct that represents a batch, containing the Batch Header, Batch Control,
and Entry Detail records.
Also includes utility functions for building and managing batches.
"""
import Kernel, except: [to_string: 1]
alias Nacha.Entry
alias Nacha.Records.BatchHeader, as: Header
alias Nacha.Records.BatchControl, as: Control
@credit_codes ["22", "32"]
@debit_codes ["27", "37"]
@service_class_codes %{mixed: 200, credit_only: 220, debit_only: 225}
defstruct [:header_record, :control_record, errors: [], entries: []]
@typep entry_list :: list(Entry.t())
@type t :: %__MODULE__{
header_record: Header.t(),
entries: entry_list,
control_record: Control.t(),
errors: list({atom, String.t()})
}
@doc """
Build a valid batch with necessary generated values.
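Entry count, entry hash, credit/debit totals, and the service class code
are computed from `entries`; the remaining header and control fields come
from `params`. A sketch (field names depend on
`Nacha.Records.BatchHeader`/`Nacha.Records.BatchControl`):
{:ok, batch} = Nacha.Batch.build(entries, %{company_name: "ACME"})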
"""
@spec build(entry_list, %{atom => any}) :: {:ok, t()} | {:error, t()}
def build(entries, params) do
params
|> build_params(entries)
|> do_build
|> validate
end
@spec to_string(__MODULE__.t()) :: String.t()
def to_string(%__MODULE__{} = batch),
do: batch |> to_iolist |> Kernel.to_string()
@spec to_iolist(list(__MODULE__.t())) :: iolist
def to_iolist([%__MODULE__{} | _] = batches),
do: batches |> Stream.map(&to_iolist/1) |> Enum.intersperse("\n")
@spec to_iolist(__MODULE__.t()) :: iolist
def to_iolist(%__MODULE__{} = batch) do
[
Header.to_iolist(batch.header_record),
"\n",
Entry.to_iolist(batch.entries),
"\n",
Control.to_iolist(batch.control_record)
]
end
defp build_params(params, entries) do
{credit_total, debit_total} = totals(entries)
Map.merge(
params,
%{
entries: entries,
entry_count: length(entries),
entry_hash: calculate_hash(entries),
total_credits: credit_total,
total_debits: debit_total,
service_class_code: calculate_scc(credit_total, debit_total)
}
)
end
defp do_build(params) do
%__MODULE__{
header_record: build_header(params),
entries: params.entries,
control_record: build_control(params)
}
end
@spec valid?(__MODULE__.t()) :: boolean
def valid?(batch), do: match?({:ok, _}, validate(batch))
defp build_header(params), do: Header |> struct(params)
defp build_control(params), do: Control |> struct(params)
defp validate(%{header_record: header, control_record: control} = batch) do
case {Header.validate(header), Control.validate(control)} do
{%{valid?: true} = header, %{valid?: true} = control} ->
{:ok, %{batch | header_record: header, control_record: control}}
{header, control} ->
{:error, consolidate_errors(batch, header, control)}
end
end
defp consolidate_errors(batch, header, control) do
%{
batch
| header_record: header,
control_record: control,
errors: Enum.uniq(header.errors ++ control.errors)
}
end
defp totals(entries) do
entries
|> Enum.group_by(&credit_or_debit/1, &get_amount/1)
|> sums()
end
defp calculate_hash(entries) do
entries
|> Stream.map(& &1.record.rdfi_id)
|> Enum.sum()
|> Integer.digits()
|> Enum.take(-10)
|> Integer.undigits()
end
defp calculate_scc(0, debits) when debits > 0,
do: @service_class_codes.debit_only
defp calculate_scc(credits, 0) when credits > 0,
do: @service_class_codes.credit_only
defp calculate_scc(_, _), do: @service_class_codes.mixed
defp credit_or_debit(%{record: %{transaction_code: tx}})
when tx in @credit_codes,
do: :credit
defp credit_or_debit(%{record: %{transaction_code: tx}})
when tx in @debit_codes,
do: :debit
defp credit_or_debit(_), do: :error
defp get_amount(%{record: %{amount: amount}}), do: amount
defp sums(amounts), do: {sum(amounts, :credit), sum(amounts, :debit)}
defp sum(amounts, type), do: amounts |> Map.get(type, []) |> Enum.sum()
end
|
lib/nacha/batch.ex
| 0.825379
| 0.53868
|
batch.ex
|
starcoder
|
defmodule Surface.Components.Utils do
@moduledoc false
import Surface, only: [event_to_opts: 2]
@valid_uri_schemes [
"http:",
"https:",
"ftp:",
"ftps:",
"mailto:",
"news:",
"irc:",
"gopher:",
"nntp:",
"feed:",
"telnet:",
"mms:",
"rtsp:",
"svn:",
"tel:",
"fax:",
"xmpp:"
]
def valid_destination!(%URI{} = uri, context) do
valid_destination!(URI.to_string(uri), context)
end
def valid_destination!({:safe, to}, context) do
{:safe, valid_string_destination!(IO.iodata_to_binary(to), context)}
end
def valid_destination!({other, to}, _context) when is_atom(other) do
[Atom.to_string(other), ?:, to]
end
def valid_destination!(to, context) do
valid_string_destination!(IO.iodata_to_binary(to), context)
end
for scheme <- @valid_uri_schemes do
def valid_string_destination!(unquote(scheme) <> _ = string, _context), do: string
end
def valid_string_destination!(to, context) do
if not match?("/" <> _, to) and String.contains?(to, ":") do
raise ArgumentError, """
unsupported scheme given to #{context}. In case you want to link to an
unknown or unsafe scheme, such as javascript, use a tuple: {:javascript, rest}
"""
else
to
end
end
def csrf_data(to, opts) do
case Keyword.pop(opts, :csrf_token, true) do
{csrf, opts} when is_binary(csrf) ->
{[csrf: csrf], opts}
{true, opts} ->
{[csrf: csrf_token(to)], opts}
{false, opts} ->
{[], opts}
end
end
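# The token reader is configured as an MFA tuple; the destination is
# prepended to `args` when it is applied. A typical setup (illustrative):
#
#   config :surface, :csrf_token_reader,
#     {Plug.CSRFProtection, :get_csrf_token_for, []}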
defp csrf_token(to) do
{mod, fun, args} = Application.fetch_env!(:surface, :csrf_token_reader)
apply(mod, fun, [to | args])
end
def skip_csrf(opts) do
Keyword.delete(opts, :csrf_token)
end
def opts_to_phx_opts(opts) do
for {key, value} <- opts do
case key do
:trigger_action -> {:phx_trigger_action, value}
_ -> {key, value}
end
end
end
def events_to_opts(assigns) do
[
event_to_opts(assigns.capture_click, :phx_capture_click),
event_to_opts(assigns.click, :phx_click),
event_to_opts(assigns.window_focus, :phx_window_focus),
event_to_opts(assigns.window_blur, :phx_window_blur),
event_to_opts(assigns.focus, :phx_focus),
event_to_opts(assigns.blur, :phx_blur),
event_to_opts(assigns.window_keyup, :phx_window_keyup),
event_to_opts(assigns.window_keydown, :phx_window_keydown),
event_to_opts(assigns.keyup, :phx_keyup),
event_to_opts(assigns.keydown, :phx_keydown),
values_to_opts(assigns.values)
]
|> List.flatten()
end
defp values_to_opts([]) do
[]
end
defp values_to_opts(values) when is_list(values) do
values_to_attrs(values)
end
defp values_to_opts(_values) do
[]
end
defp values_to_attrs(values) when is_list(values) do
for {key, value} <- values do
{:"phx-value-#{key}", value}
end
end
end
|
lib/surface/components/utils.ex
| 0.565659
| 0.500732
|
utils.ex
|
starcoder
|
defmodule Ecto.Adapters.SQL.Sandbox do
@moduledoc ~S"""
A pool for concurrent transactional tests.
The sandbox pool is implemented on top of an ownership mechanism.
When started, the pool is in automatic mode, which means the
repository will automatically check connections out as with any
other pool.
The `mode/2` function can be used to change the pool mode from
automatic to either manual or shared. In the latter two modes,
the connection must be explicitly checked out before use.
When explicit checkouts are made, the sandbox will wrap the
connection in a transaction by default and control who has
access to it. This means developers have a safe mechanism for
running concurrent tests against the database.
## Database support
While both PostgreSQL and MySQL support SQL Sandbox, only PostgreSQL
supports concurrent tests while running the SQL Sandbox. Therefore, do
not run concurrent tests with MySQL as you may run into deadlocks due to
its transaction implementation.
## Example
The first step is to configure your database to use the
`Ecto.Adapters.SQL.Sandbox` pool. You set those options in your
`config/config.exs` (or preferably `config/test.exs`) if you
haven't yet:
config :my_app, Repo,
pool: Ecto.Adapters.SQL.Sandbox
Now with the test database properly configured, you can write
transactional tests:
# At the end of your test_helper.exs
# Set the pool mode to manual for explicit checkouts
Ecto.Adapters.SQL.Sandbox.mode(Repo, :manual)
defmodule PostTest do
# Once the mode is manual, tests can also be async
use ExUnit.Case, async: true
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo)
end
test "create post" do
# Use the repository as usual
assert %Post{} = Repo.insert!(%Post{})
end
end
## Collaborating processes
The example above is straight-forward because we have only
a single process using the database connection. However,
sometimes a test may need to interact with multiple processes,
all using the same connection so they all belong to the same
transaction.
Before we discuss solutions, let's see what happens if we try
to use a connection from a new process without explicitly
checking it out first:
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo)
end
test "calls worker that runs a query" do
GenServer.call(MyApp.Worker, :run_query)
end
The test above will fail with an error similar to:
** (DBConnection.OwnershipError) cannot find ownership process for #PID<0.35.0>
That's because the `setup` block is checking out the connection only
for the test process. Once the worker attempts to perform a query,
there is no connection assigned to it and it will fail.
The sandbox module provides two ways of doing so, via allowances or
by running in shared mode.
### Allowances
The idea behind allowances is that you can explicitly tell a process
which checked out connection it should use, allowing multiple processes
to collaborate over the same connection. Let's give it a try:
test "calls worker that runs a query" do
allow = Process.whereis(MyApp.Worker)
Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), allow)
GenServer.call(MyApp.Worker, :run_query)
end
And that's it, by calling `allow/3`, we are explicitly assigning
the parent's connection (i.e. the test process' connection) to
the task.
Because allowances use an explicit mechanism, their advantage
is that you can still run your tests in async mode. The downside
is that you need to explicitly control and allow every single
process. This is not always possible. In such cases, you will
want to use shared mode.
### Shared mode
Shared mode allows a process to share its connection with any other
process automatically, without relying on explicit allowances.
Let's change the example above to use shared mode:
setup do
# Explicitly get a connection before each test
:ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo)
# Setting the shared mode must be done only after checkout
Ecto.Adapters.SQL.Sandbox.mode(Repo, {:shared, self()})
end
test "calls worker that runs a query" do
GenServer.call(MyApp.Worker, :run_query)
end
By calling `mode({:shared, self()})`, any process that needs
to talk to the database will now use the same connection as the
one checked out by the test process during the `setup` block.
Make sure to always check a connection out before setting the mode
to `{:shared, self()}`.
The advantage of shared mode is that by calling a single function,
you will ensure all upcoming processes and operations will use that
shared connection, without a need to explicitly allow them. The
downside is that tests can no longer run concurrently in shared mode.
Also, beware that if the test process terminates while the worker is
using the connection, the connection will be taken away from the worker,
which will error. Therefore it is important to guarantee the work is done
before the test concludes. In the example above, we are using a `call`,
which is synchronous, avoiding the problem, but you may need to explicitly
flush the worker or terminate it under such scenarios in your tests.
### Summing up
There are two mechanisms for explicit ownerships:
* Using allowances - requires explicit allowances via `allow/3`.
Tests may run concurrently.
* Using shared mode - does not require explicit allowances.
Tests cannot run concurrently.
## FAQ
When running the sandbox mode concurrently, developers may run into
issues we explore in the upcoming sections.
### "owner exited"
In some situations, you may see error reports similar to the one below:
23:59:59.999 [error] Postgrex.Protocol (#PID<>) disconnected:
** (DBConnection.Error) owner #PID<> exited
Client #PID<> is still using a connection from owner
Such errors are usually followed by another error report from another
process that failed while executing a database query.
To understand the failure, we need to answer the question: who are the
owner and client processes? The owner process is the one that checks
out the connection, which, in the majority of cases, is the test process,
the one running your tests. In other words, the error happens because
the test process has finished, either because the test succeeded or
because it failed, while the client process was trying to get information
from the database. Since the owner process, the one that owns the
connection, no longer exists, Ecto will check the connection back in
and notify the client process using the connection that the connection
owner is no longer available.
This can happen in different situations. For example, imagine you query
a GenServer in your test that is using a database connection:
test "gets results from GenServer" do
{:ok, pid} = MyAppServer.start_link()
Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid)
assert MyAppServer.get_my_data_fast(timeout: 1000) == [...]
end
In the test above, we spawn the server and allow it to perform database
queries using the connection owned by the test process. Since we gave
a timeout of 1 second, in case the database takes longer than one second
to reply, the test process will fail, due to the timeout, making the
"owner down" message to be printed because the server process is still
waiting on a connection reply.
In some situations, such failures may be intermittent. Imagine that you
allow a process that queries the database every half second:
test "queries periodically" do
{:ok, pid} = PeriodicServer.start_link()
Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid)
# more tests
end
Because the server is querying the database from time to time, there is
a chance that, when the test exits, the periodic process may be querying
the database, regardless of test success or failure.
### "owner timed out because it owned the connection for longer than Nms"
In some situations, you may see error reports similar to the one below:
09:56:43.081 [error] Postgrex.Protocol (#PID<>) disconnected:
** (DBConnection.ConnectionError) owner #PID<> timed out
because it owned the connection for longer than 120000ms
If you have a long running test (or you're debugging with IEx.pry),
the timeout for the connection ownership may be too short. You can
increase the timeout by setting the `:ownership_timeout` options for
your repo config in `config/config.exs` (or preferably in `config/test.exs`):
config :my_app, MyApp.Repo,
ownership_timeout: NEW_TIMEOUT_IN_MILLISECONDS
The `:ownership_timeout` option is part of `DBConnection.Ownership`
and defaults to 120000ms. Timeouts are given as integers in milliseconds.
Alternately, if this is an issue for only a handful of long-running tests,
you can pass an `:ownership_timeout` option when calling
`Ecto.Adapters.SQL.Sandbox.checkout/2` instead of setting a longer timeout
globally in your config.
### Deferred constraints
Some databases allow deferring constraint validation to the transaction
commit time, instead of the particular statement execution time. This
feature, for instance, allows for a cyclic foreign key referencing.
Since the SQL Sandbox mode rolls back transactions, tests might report
false positives because deferred constraints are never checked by the
database. To manually force deferred constraints validation when using
PostgreSQL use the following line right at the end of your test case:
Repo.query!("SET CONSTRAINTS ALL IMMEDIATE")
### Database locks and deadlocks
Since the sandbox relies on concurrent transactional tests, there is
a chance your tests may trigger deadlocks in your database. This is
especially true with MySQL, where the solutions presented here are not
enough to avoid deadlocks, which is why concurrent tests with MySQL are
prohibited.
However, even on databases like PostgreSQL, performance degradations or
deadlocks may still occur. For example, imagine multiple tests are
trying to insert the same user to the database. They will attempt to
retrieve the same database lock, causing only one test to succeed and
run while all other tests wait for the lock.
In other situations, two different tests may proceed in a way that
each test retrieves locks desired by the other, leading to a situation
that cannot be resolved, a deadlock. For instance:
Transaction 1: Transaction 2:
begin
begin
update posts where id = 1
update posts where id = 2
update posts where id = 1
update posts where id = 2
**deadlock**
There are different ways to avoid such problems. One of them is
to make sure your tests work on distinct data. Regardless of
your choice between using fixtures or factories for test data,
make sure you get a new set of data per test. This is especially
important for data that is meant to be unique like user emails.
For example, instead of:
def insert_user do
Repo.insert! %User{email: "<EMAIL>"}
end
prefer:
def insert_user do
Repo.insert! %User{email: "<EMAIL>-#{counter()}<EMAIL>"}
end
defp counter do
System.unique_integer [:positive]
end
In fact, avoiding unique emails like above can also have a positive
impact on the test suite performance, as it reduces contention and
wait between concurrent tests. We have heard reports where using
dynamic values for uniquely indexed columns, as we did for e-mail
above, made a test suite run between 2x to 3x faster.
Deadlocks may happen in other circumstances. If you believe you
are hitting a scenario that has not been described here, please
report an issue so we can improve our examples. As a last resort,
you can always disable the test triggering the deadlock from
running asynchronously by setting "async: false".
"""
defmodule Connection do
@moduledoc false
if Code.ensure_loaded?(DBConnection) do
@behaviour DBConnection
end
def connect(_opts) do
raise "should never be invoked"
end
def disconnect(err, {conn_mod, state, _in_transaction?}) do
conn_mod.disconnect(err, state)
end
def checkout(state), do: proxy(:checkout, state, [])
def checkin(state), do: proxy(:checkin, state, [])
def ping(state), do: proxy(:ping, state, [])
def handle_begin(opts, {conn_mod, state, false}) do
opts = [mode: :savepoint] ++ opts
case conn_mod.handle_begin(opts, state) do
{:ok, value, state} ->
{:ok, value, {conn_mod, state, true}}
{kind, err, state} ->
{kind, err, {conn_mod, state, false}}
end
end
def handle_commit(opts, {conn_mod, state, true}) do
opts = [mode: :savepoint] ++ opts
proxy(:handle_commit, {conn_mod, state, false}, [opts])
end
def handle_rollback(opts, {conn_mod, state, _}) do
opts = [mode: :savepoint] ++ opts
proxy(:handle_rollback, {conn_mod, state, false}, [opts])
end
def handle_status(opts, state),
do: proxy(:handle_status, state, [maybe_savepoint(opts, state)])
def handle_prepare(query, opts, state),
do: proxy(:handle_prepare, state, [query, maybe_savepoint(opts, state)])
def handle_execute(query, params, opts, state),
do: proxy(:handle_execute, state, [query, params, maybe_savepoint(opts, state)])
def handle_close(query, opts, state),
do: proxy(:handle_close, state, [query, maybe_savepoint(opts, state)])
def handle_declare(query, params, opts, state),
do: proxy(:handle_declare, state, [query, params, maybe_savepoint(opts, state)])
def handle_fetch(query, cursor, opts, state),
do: proxy(:handle_fetch, state, [query, cursor, maybe_savepoint(opts, state)])
def handle_deallocate(query, cursor, opts, state),
do: proxy(:handle_deallocate, state, [query, cursor, maybe_savepoint(opts, state)])
defp maybe_savepoint(opts, {_, _, in_transaction?}) do
if not in_transaction? and Keyword.get(opts, :sandbox_subtransaction, true) do
[mode: :savepoint] ++ opts
else
opts
end
end
defp proxy(fun, {conn_mod, state, in_transaction?}, args) do
result = apply(conn_mod, fun, args ++ [state])
pos = :erlang.tuple_size(result)
:erlang.setelement(pos, result, {conn_mod, :erlang.element(pos, result), in_transaction?})
end
end
@doc """
Starts a process that owns the connection and returns its pid.
The owner process is not linked to the caller, it is your responsibility to
ensure it will be stopped. In tests, this is done by terminating the pool
in an `ExUnit.Callbacks.on_exit/2` callback:
setup tags do
pid = Ecto.Adapters.SQL.Sandbox.start_owner!(MyApp.Repo, shared: not tags[:async])
on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end)
:ok
end
## Options
* `:shared` - if `true`, the pool runs in the shared mode. Defaults to `false`
The remaining options are passed to `checkout/2`.
"""
@doc since: "3.4.4"
def start_owner!(repo, opts \\ []) do
parent = self()
{:ok, pid} =
Agent.start(fn ->
{shared, opts} = Keyword.pop(opts, :shared, false)
:ok = checkout(repo, opts)
if shared do
:ok = mode(repo, {:shared, self()})
else
:ok = allow(repo, self(), parent)
end
end)
pid
end
@doc """
Stops an owner process started by `start_owner!/2`.
"""
@doc since: "3.4.4"
@spec stop_owner(pid()) :: :ok
def stop_owner(pid) do
GenServer.stop(pid)
end
@doc """
Sets the mode for the `repo` pool.
The modes can be:
* `:auto` - this is the default mode. When trying to use the repository,
processes can automatically checkout a connection without calling
`checkout/2` or `start_owner/2` before. This is the mode you will run
in before your test suite starts
* `:manual` - in this mode, the connection always has to be explicitly
checked before used. Other processes are allowed to use the same
connection if they are explicitly allowed via `allow/4`. You usually
set the mode to manual at the end of your `test/test_helper.exs` file.
This is also the mode you will run your async tests in
* `{:shared, pid}` - after checking out a connection in manual mode,
you can change the mode to `{:shared, pid}`, where pid is the process
that owns the connection, most often `{:shared, self()}`. This makes it
so all processes can use the same connection as the one owned by the
current process. This is the mode you will run your sync tests in
Whenever you change the mode to `:manual` or `:auto`, all existing
connections are checked in. Therefore, it is recommended to set those
modes before your test suite starts, as otherwise you will check in
connections being used in any other test running concurrently.
"""
def mode(repo, mode)
when (is_atom(repo) or is_pid(repo)) and mode in [:auto, :manual]
when (is_atom(repo) or is_pid(repo)) and elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do
%{pid: pool, opts: opts} = lookup_meta!(repo)
DBConnection.Ownership.ownership_mode(pool, mode, opts)
end
@doc """
Checks a connection out for the given `repo`.
The process calling `checkout/2` will own the connection
until it calls `checkin/2` or until it crashes in which case
the connection will be automatically reclaimed by the pool.
## Options
* `:sandbox` - when true the connection is wrapped in
a transaction. Defaults to true.
* `:isolation` - set the query to the given isolation level.
* `:ownership_timeout` - limits how long the connection can be
owned. Defaults to the value in your repo config in
`config/config.exs` (or preferably in `config/test.exs`), or
120000 ms if not set. The timeout exists for sanity checking
purposes, to ensure there is no connection leakage, and can
be bumped whenever necessary.
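## Example
Repo name is illustrative:
Ecto.Adapters.SQL.Sandbox.checkout(MyApp.Repo, isolation: "REPEATABLE READ")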
"""
def checkout(repo, opts \\ []) when is_atom(repo) or is_pid(repo) do
%{pid: pool, opts: pool_opts} = lookup_meta!(repo)
pool_opts =
if Keyword.get(opts, :sandbox, true) do
[
post_checkout: &post_checkout(&1, &2, opts),
pre_checkin: &pre_checkin(&1, &2, &3, opts)
] ++ pool_opts
else
pool_opts
end
pool_opts_overrides = Keyword.take(opts, [:ownership_timeout, :isolation_level])
pool_opts = Keyword.merge(pool_opts, pool_opts_overrides)
case DBConnection.Ownership.ownership_checkout(pool, pool_opts) do
:ok ->
if isolation = opts[:isolation] do
set_transaction_isolation_level(repo, isolation)
end
:ok
other ->
other
end
end
defp set_transaction_isolation_level(repo, isolation) do
query = "SET TRANSACTION ISOLATION LEVEL #{isolation}"
case Ecto.Adapters.SQL.query(repo, query, [], sandbox_subtransaction: false) do
{:ok, _} ->
:ok
{:error, error} ->
checkin(repo, [])
raise error
end
end
@doc """
Checks in the connection back into the sandbox pool.
"""
def checkin(repo, _opts \\ []) when is_atom(repo) or is_pid(repo) do
%{pid: pool, opts: opts} = lookup_meta!(repo)
DBConnection.Ownership.ownership_checkin(pool, opts)
end
@doc """
Allows the `allow` process to use the same connection as `parent`.
"""
def allow(repo, parent, allow, _opts \\ []) when is_atom(repo) or is_pid(repo) do
%{pid: pool, opts: opts} = lookup_meta!(repo)
DBConnection.Ownership.ownership_allow(pool, parent, allow, opts)
end
@doc """
Runs a function outside of the sandbox.
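Useful for work that must happen outside the sandbox transaction,
for example (repo and schema names illustrative):
Ecto.Adapters.SQL.Sandbox.unboxed_run(MyApp.Repo, fn ->
MyApp.Repo.insert!(%MyApp.Setting{})
end)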
"""
def unboxed_run(repo, fun) when is_atom(repo) or is_pid(repo) do
checkin(repo)
checkout(repo, sandbox: false)
try do
fun.()
after
checkin(repo)
end
end
defp lookup_meta!(repo) do
%{opts: opts} =
meta =
repo
|> find_repo()
|> Ecto.Adapter.lookup_meta()
if opts[:pool] != DBConnection.Ownership do
raise """
cannot invoke sandbox operation with pool #{inspect(opts[:pool])}.
To use the SQL Sandbox, configure your repository pool as:
pool: #{inspect(__MODULE__)}
"""
end
meta
end
defp find_repo(repo) when is_atom(repo), do: repo.get_dynamic_repo()
defp find_repo(repo), do: repo
defp post_checkout(conn_mod, conn_state, opts) do
case conn_mod.handle_begin([mode: :transaction] ++ opts, conn_state) do
{:ok, _, conn_state} ->
{:ok, Connection, {conn_mod, conn_state, false}}
{_error_or_disconnect, err, conn_state} ->
{:disconnect, err, conn_mod, conn_state}
end
end
defp pre_checkin(:checkin, Connection, {conn_mod, conn_state, _in_transaction?}, opts) do
case conn_mod.handle_rollback([mode: :transaction] ++ opts, conn_state) do
{:ok, _, conn_state} ->
{:ok, conn_mod, conn_state}
{:idle, _conn_state} ->
raise """
Ecto SQL sandbox transaction was already committed/rolled back.
The sandbox works by running each test in a transaction and closing the\
transaction afterwards. However, the transaction has already terminated.\
Your test code is likely committing or rolling back transactions manually,\
either by invoking procedures or running custom SQL commands.
One option is to manually checkout a connection without a sandbox:
Ecto.Adapters.SQL.Sandbox.checkout(repo, sandbox: false)
But remember you will have to undo any database changes performed by such tests.
"""
{_error_or_disconnect, err, conn_state} ->
{:disconnect, err, conn_mod, conn_state}
end
end
defp pre_checkin(_, Connection, {conn_mod, conn_state, _in_transaction?}, _opts) do
{:ok, conn_mod, conn_state}
end
end
|
lib/ecto/adapters/sql/sandbox.ex
| 0.911389
| 0.615261
|
sandbox.ex
|
starcoder
|
defmodule Geocoder.Store do
use GenServer
use Towel
# Public API
def geocode(opts) do
GenServer.call(name(), {:geocode, opts[:address]})
end
def reverse_geocode(opts) do
GenServer.call(name(), {:reverse_geocode, opts[:latlng]})
end
def update(location) do
GenServer.call(name(), {:update, location})
end
def link(from, to) do
GenServer.cast(name(), {:link, from, to})
end
def state do
GenServer.call(name(), :state)
end
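# Usage sketch (hypothetical coordinates; assumes the store has been started
# and a provider result was cached via update/1, which links the normalized
# "city state country" string to the geohash key):
#
#     Geocoder.Store.update(%{lat: 52.52, lon: 13.4, location: %{city: "Berlin", state: nil, country: "Germany"}})
#     Geocoder.Store.geocode(address: "Berlin, Germany")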
# GenServer API
@defaults [precision: 4]
def start_link(opts \\ []) do
opts = Keyword.merge(@defaults, opts)
GenServer.start_link(__MODULE__, {%{}, %{}, opts}, [name: name()])
end
# Fetch geocode
def handle_call({:geocode, location}, _from, {links,store,_} = state) do
key = encode(location)
result = Maybe.wrap(links) |> fmap(&Map.get(&1, key)) |> fmap(&Map.get(store, &1))
{:reply, result, state}
end
# Fetch reverse geocode
def handle_call({:reverse_geocode, latlon}, _from, {_,store,opts} = state) do
key = encode(latlon, opts[:precision])
result = Maybe.wrap(store) |> fmap(&Map.get(&1, key))
{:reply, result, state}
end
# Update store
def handle_call({:update, coords}, _from, {links,store,opts}) do
%{lat: lat, lon: lon} = coords
location =
coords.location
|> Map.take(~w[city state country]a)
|> Enum.filter(&is_binary(elem(&1, 1)))
|> Enum.map(&elem(&1, 1))
|> Enum.join("")
key = encode({lat, lon}, opts[:precision])
link = encode(location)
state = {Map.put(links, link, key), Map.put(store, key, coords), opts}
{:reply, coords, state}
end
# Get the state
def handle_call(:state, _from, state) do
{:reply, state, state}
end
# Link a query to a cached value
def handle_cast({:link, from, %{lat: lat, lon: lon}}, {links, store, opts}) do
key = encode({lat, lon}, opts[:precision])
link = encode(from[:address] || from[:latlng], opts[:precision])
{:noreply, {Map.put(links, link, key), store, opts}}
end
# Private API
defp encode(location, opt \\ nil)
defp encode({lat, lon}, precision) do
Geohash.encode(:erlang.float(lat), :erlang.float(lon), precision)
end
defp encode(location, _) when is_binary(location) do
location
|> String.downcase
|> String.replace(~r/[^\w]/, "")
|> String.trim
|> :base64.encode
end
# Config
@name :geocoder_store
def name, do: @name
end
|
lib/geocoder/store.ex
| 0.721449
| 0.420391
|
store.ex
|
starcoder
|
defmodule RethinkDB.Pseudotypes do
@moduledoc false
defmodule Binary do
@moduledoc false
defstruct data: nil
def parse(%{"$reql_type$" => "BINARY", "data" => data}, opts) do
case Keyword.get(opts, :binary_format) do
:raw ->
%__MODULE__{data: data}
_ ->
:base64.decode(data)
end
end
end
defmodule Geometry do
@moduledoc false
defmodule Point do
@moduledoc false
defstruct coordinates: []
end
defmodule Line do
@moduledoc false
defstruct coordinates: []
end
defmodule Polygon do
@moduledoc false
defstruct coordinates: []
end
def parse(%{"$reql_type$" => "GEOMETRY", "coordinates" => [x, y], "type" => "Point"}) do
%Point{coordinates: {x, y}}
end
def parse(%{"$reql_type$" => "GEOMETRY", "coordinates" => coords, "type" => "LineString"}) do
%Line{coordinates: Enum.map(coords, &List.to_tuple/1)}
end
def parse(%{"$reql_type$" => "GEOMETRY", "coordinates" => coords, "type" => "Polygon"}) do
%Polygon{coordinates: for(points <- coords, do: Enum.map(points, &List.to_tuple/1))}
end
end
defmodule Time do
@moduledoc false
defstruct epoch_time: nil, timezone: nil
def parse(
%{"$reql_type$" => "TIME", "epoch_time" => epoch_time, "timezone" => timezone},
opts
) do
case Keyword.get(opts, :time_format) do
:raw ->
%__MODULE__{epoch_time: epoch_time, timezone: timezone}
_ ->
with <<sign::binary-size(1)>> <> rest = timezone <> ":00",
{:ok, {h, m, _, _}} = Calendar.ISO.parse_time(rest) do
sec = (h * 60 + m) * 60
zone_abbr =
case sec do
0 -> "UTC"
_ -> timezone
end
time_zone =
case {div(sec, 3600), rem(sec, 3600)} do
{0, 0} ->
"Etc/UTC"
{hours, 0} ->
"Etc/GMT" <> sign <> Integer.to_string(hours)
{hours, seconds} ->
"Etc/GMT" <>
sign <>
Integer.to_string(hours) <>
":" <> String.pad_leading(Integer.to_string(seconds), 2, "0")
end
(epoch_time * 1000)
|> trunc
|> DateTime.from_unix!(:millisecond)
|> struct(utc_offset: sec, zone_abbr: zone_abbr, time_zone: time_zone)
end
end
end
end
def convert_reql_pseudotypes(nil, _opts), do: nil
def convert_reql_pseudotypes(%{"$reql_type$" => "BINARY"} = data, opts) do
Binary.parse(data, opts)
end
def convert_reql_pseudotypes(%{"$reql_type$" => "GEOMETRY"} = data, _opts) do
Geometry.parse(data)
end
def convert_reql_pseudotypes(%{"$reql_type$" => "GROUPED_DATA"} = data, _opts) do
parse_grouped_data(data)
end
def convert_reql_pseudotypes(%{"$reql_type$" => "TIME"} = data, opts) do
Time.parse(data, opts)
end
def convert_reql_pseudotypes(list, opts) when is_list(list) do
Enum.map(list, fn data -> convert_reql_pseudotypes(data, opts) end)
end
def convert_reql_pseudotypes(map, opts) when is_map(map) do
Enum.map(map, fn {k, v} ->
{k, convert_reql_pseudotypes(v, opts)}
end)
|> Enum.into(%{})
end
def convert_reql_pseudotypes(string, _opts), do: string
def parse_grouped_data(%{"$reql_type$" => "GROUPED_DATA", "data" => data}) do
Enum.map(data, fn [k, data] ->
{k, data}
end)
|> Enum.into(%{})
end
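# Round-trip sketch (hypothetical payload):
#
#     parse_grouped_data(%{"$reql_type$" => "GROUPED_DATA", "data" => [["a", 1], ["b", 2]]})
#     #=> %{"a" => 1, "b" => 2}
#
#     create_grouped_data(%{"a" => 1, "b" => 2})
#     #=> %{"$reql_type$" => "GROUPED_DATA", "data" => [["a", 1], ["b", 2]]}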
def create_grouped_data(data) when is_map(data) do
data = data |> Enum.map(fn {k, v} -> [k, v] end)
%{"$reql_type$" => "GROUPED_DATA", "data" => data}
end
end
|
lib/rethinkdb/pseudotypes.ex
| 0.632049
| 0.562357
|
pseudotypes.ex
|
starcoder
|
defmodule Commanded.Scheduler do
@moduledoc """
One-off command scheduler for [Commanded][1] CQRS/ES applications.
[1]: https://hex.pm/packages/commanded
- [Getting started](getting-started.html)
- [Usage](usage.html)
- [Testing](testing.html)
"""
alias Commanded.Scheduler.{
ScheduleBatch,
CancelSchedule,
Router,
ScheduleOnce
}
@type schedule_uuid :: String.t()
@type due_at :: DateTime.t() | NaiveDateTime.t()
@doc """
Schedule a uniquely identified one-off job using the given command to dispatch
at the specified date/time.
## Example
Commanded.Scheduler.schedule_once(reservation_id, %TimeoutReservation{..}, ~N[2020-01-01 12:00:00])
Name the scheduled job:
Commanded.Scheduler.schedule_once(reservation_id, %TimeoutReservation{..}, due_at, name: "timeout")
"""
@spec schedule_once(schedule_uuid, struct, due_at, name: String.t()) :: :ok | {:error, term}
def schedule_once(schedule_uuid, command, due_at, opts \\ [])
def schedule_once(schedule_uuid, command, due_at, opts)
when is_bitstring(schedule_uuid) do
schedule_once = %ScheduleOnce{
schedule_uuid: schedule_uuid,
name: name(opts),
command: command,
due_at: due_at
}
Router.dispatch(schedule_once)
end
@doc """
Schedule multiple one-off commands in a single batch.
This guarantees that all, or none, of the commands are scheduled.
## Example
alias Commanded.Scheduler
alias Commanded.Scheduler.Batch
batch =
reservation_id
|> Batch.new()
|> Batch.schedule_once(%TimeoutReservation{..}, timeout_due_at, name: "timeout")
|> Batch.schedule_once(%ReleaseSeat{..}, release_due_at, name: "release")
Scheduler.schedule_batch(batch)
"""
@spec schedule_batch(ScheduleBatch.t()) :: :ok | {:error, term}
def schedule_batch(%ScheduleBatch{} = batch) do
Router.dispatch(batch)
end
@doc """
Cancel a one-off or recurring schedule.
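## Example
Cancel the named job from the batch example above:
    Commanded.Scheduler.cancel_schedule(reservation_id, name: "timeout")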
"""
@spec cancel_schedule(schedule_uuid, name: String.t()) :: :ok | {:error, term}
def cancel_schedule(schedule_uuid, opts \\ [])
def cancel_schedule(schedule_uuid, opts)
when is_bitstring(schedule_uuid) do
cancel_schedule = %CancelSchedule{
schedule_uuid: schedule_uuid,
name: Keyword.get(opts, :name)
}
Router.dispatch(cancel_schedule)
end
defp name(opts), do: Keyword.get(opts, :name)
end
|
lib/commanded/scheduler.ex
| 0.881768
| 0.46563
|
scheduler.ex
|
starcoder
|
defmodule Module.ParallelChecker do
@moduledoc false
@type cache() :: {pid(), :ets.tid()}
@type warning() :: term()
@type kind() :: :def | :defmacro
@doc """
Receives pairs of module maps and BEAM binaries. In parallel it verifies
the modules and adds the ExCk chunk to the binaries. Returns the updated
binaries and a list of warnings from the verification.
"""
@spec verify([{map(), binary()}], [{module(), binary()}], pos_integer()) :: [warning()]
def verify(compiled_modules, runtime_binaries, schedulers \\ nil) do
compiled_maps = Enum.map(compiled_modules, fn {map, _binary} -> {map.module, map} end)
check_modules = compiled_maps ++ runtime_binaries
schedulers = schedulers || max(:erlang.system_info(:schedulers_online), 2)
{:ok, server} = :gen_server.start_link(__MODULE__, [check_modules, self(), schedulers], [])
preload_cache(get_ets(server), check_modules)
start(server)
collect_results(length(check_modules), [])
end
defp collect_results(0, warnings) do
warnings
end
defp collect_results(count, warnings) do
receive do
{__MODULE__, _module, new_warnings} ->
collect_results(count - 1, new_warnings ++ warnings)
end
end
@doc """
Preloads a module into the cache. Call this function before any other
cache lookups for the module.
"""
@spec preload_module(cache(), module()) :: :ok
def preload_module({server, ets}, module) do
case :ets.lookup(ets, {:cached, module}) do
[{_key, _}] -> :ok
[] -> cache_module({server, ets}, module)
end
end
@doc """
Returns the export kind and deprecation reason for the given MFA from
the cache. If the module does not exist return `{:error, :module}`,
or if the function does not exist return `{:error, :function}`.
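A sketch of the expected shapes (illustrative only; `cache` is the
`{server, ets}` pair threaded through this module, and the modules are
assumed to be cached already):
    {:ok, :def, nil} = fetch_export(cache, String, :upcase, 1)
    {:error, :module} = fetch_export(cache, NoSuchModule, :run, 0)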
"""
@spec fetch_export(cache(), module(), atom(), arity()) ::
{:ok, kind(), binary() | nil} | {:error, :function | :module}
def fetch_export({_server, ets}, module, fun, arity) do
case :ets.lookup(ets, {:cached, module}) do
[{_key, true}] ->
case :ets.lookup(ets, {:export, {module, fun, arity}}) do
[{_key, kind, reason}] -> {:ok, kind, reason}
[] -> {:error, :function}
end
[{_key, false}] ->
{:error, :module}
end
end
@doc """
Returns all exported functions and macros for the given module from
the cache.
"""
@spec all_exports(cache(), module()) :: [{atom(), arity()}]
def all_exports({_server, ets}, module) do
# This is only called after we get a deprecation notice
# so we can assume it's a cached module
[{_key, exports}] = :ets.lookup(ets, {:all_exports, module})
exports
|> Enum.map(fn {function, _kind} -> function end)
|> Enum.sort()
end
def init([modules, send_results, schedulers]) do
ets = :ets.new(:checker_cache, [:set, :public, {:read_concurrency, true}])
state = %{
ets: ets,
waiting: %{},
send_results: send_results,
modules: modules,
spawned: 0,
schedulers: schedulers
}
{:ok, state}
end
def handle_call({:lock, module}, from, %{waiting: waiting} = state) do
case waiting do
%{^module => froms} ->
waiting = Map.put(state.waiting, module, [from | froms])
{:noreply, %{state | waiting: waiting}}
%{} ->
waiting = Map.put(state.waiting, module, [])
{:reply, true, %{state | waiting: waiting}}
end
end
def handle_call({:unlock, module}, _from, %{waiting: waiting} = state) do
froms = Map.fetch!(waiting, module)
Enum.each(froms, &:gen_server.reply(&1, false))
waiting = Map.delete(waiting, module)
{:reply, :ok, %{state | waiting: waiting}}
end
def handle_call(:get_ets, _from, %{ets: ets} = state) do
{:reply, ets, state}
end
def handle_cast(:start, %{modules: []} = state) do
{:stop, :normal, state}
end
def handle_cast(:start, state) do
{:noreply, spawn_checkers(state)}
end
def handle_info({__MODULE__, :done}, state) do
state = %{state | spawned: state.spawned - 1}
if state.spawned == 0 and state.modules == [] do
{:stop, :normal, state}
else
state = spawn_checkers(state)
{:noreply, state}
end
end
defp lock(server, module) do
:gen_server.call(server, {:lock, module}, :infinity)
end
defp unlock(server, module) do
:gen_server.call(server, {:unlock, module})
end
defp get_ets(server) do
:gen_server.call(server, :get_ets)
end
defp start(server) do
:gen_server.cast(server, :start)
end
defp preload_cache(ets, modules) do
Enum.each(modules, fn
{_module, map} when is_map(map) -> cache_from_module_map(ets, map)
{module, binary} when is_binary(binary) -> cache_from_chunk(ets, module, binary)
end)
end
defp spawn_checkers(%{modules: []} = state) do
state
end
defp spawn_checkers(%{spawned: spawned, schedulers: schedulers} = state)
when spawned >= schedulers do
state
end
defp spawn_checkers(%{modules: [{module, _} = verify | modules]} = state) do
parent = self()
ets = state.ets
send_results_pid = state.send_results
spawn_link(fn ->
warnings = Module.Checker.verify(verify, {parent, ets})
send(send_results_pid, {__MODULE__, module, warnings})
send(parent, {__MODULE__, :done})
end)
spawn_checkers(%{state | modules: modules, spawned: state.spawned + 1})
end
defp cache_module({server, ets}, module) do
if lock(server, module) do
cache_from_chunk(ets, module) || cache_from_info(ets, module)
unlock(server, module)
end
end
defp cache_from_chunk(ets, module) do
case :code.get_object_code(module) do
{^module, binary, _filename} -> cache_from_chunk(ets, module, binary)
_other -> false
end
end
defp cache_from_chunk(ets, module, binary) do
with {:ok, {_, [{'ExCk', chunk}]}} <- :beam_lib.chunks(binary, ['ExCk']),
{:elixir_checker_v1, contents} <- :erlang.binary_to_term(chunk) do
cache_chunk(ets, module, contents.exports)
true
else
_ -> false
end
end
defp cache_from_module_map(ets, map) do
exports = [{{:__info__, 1}, :def} | definitions_to_exports(map.definitions)]
deprecated = Map.new(map.deprecated)
cache_info(ets, map.module, exports, deprecated)
end
defp cache_from_info(ets, module) do
if Code.ensure_loaded?(module) do
exports = info_exports(module)
deprecated = info_deprecated(module)
cache_info(ets, module, exports, deprecated)
else
:ets.insert(ets, {{:cached, module}, false})
end
end
defp info_exports(module) do
Map.new(
[{{:__info__, 1}, :def}] ++
Enum.map(module.__info__(:macros), &{&1, :defmacro}) ++
Enum.map(module.__info__(:functions), &{&1, :def})
)
rescue
_ -> Map.new(Enum.map(module.module_info(:exports), &{&1, :def}))
end
defp info_deprecated(module) do
Map.new(module.__info__(:deprecated))
rescue
_ -> %{}
end
defp cache_info(ets, module, exports, deprecated) do
exports =
Enum.map(exports, fn {{fun, arity}, kind} ->
reason = Map.get(deprecated, {fun, arity})
:ets.insert(ets, {{:export, {module, fun, arity}}, kind, reason})
{{fun, arity}, kind}
end)
:ets.insert(ets, {{:all_exports, module}, exports})
:ets.insert(ets, {{:cached, module}, true})
end
defp cache_chunk(ets, module, exports) do
exports =
Enum.map(exports, fn {{fun, arity}, %{kind: kind, deprecated_reason: reason}} ->
:ets.insert(ets, {{:export, {module, fun, arity}}, kind, reason})
{{fun, arity}, kind}
end)
:ets.insert(ets, {{:export, {module, :__info__, 1}}, :def, nil})
exports = [{{:__info__, 1}, :def} | exports]
:ets.insert(ets, {{:all_exports, module}, exports})
:ets.insert(ets, {{:cached, module}, true})
end
defp definitions_to_exports(definitions) do
Enum.flat_map(definitions, fn {function, kind, _meta, _clauses} ->
if kind in [:def, :defmacro] do
[{function, kind}]
else
[]
end
end)
end
end
|
lib/elixir/lib/module/parallel_checker.ex
| 0.808899
| 0.444143
|
parallel_checker.ex
|
starcoder
|
defmodule Broadway.Topology.RateLimiter do
@moduledoc false
use GenServer
@atomics_index 1
def start_link(opts) do
case Keyword.fetch!(opts, :rate_limiting) do
# If we don't have rate limiting options, we don't even need to start this rate
# limiter process.
nil ->
:ignore
rate_limiting_opts ->
name = Keyword.fetch!(opts, :name)
producers_names = Keyword.fetch!(opts, :producers_names)
args = {name, rate_limiting_opts, producers_names}
GenServer.start_link(__MODULE__, args, name: rate_limiter_name(name))
end
end
def rate_limit(counter, amount)
when is_reference(counter) and is_integer(amount) and amount > 0 do
:atomics.sub_get(counter, @atomics_index, amount)
end
def get_currently_allowed(counter) when is_reference(counter) do
:atomics.get(counter, @atomics_index)
end
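# Counter semantics in isolation (sketch; no Broadway pipeline required):
#
#     counter = :atomics.new(1, [])
#     :atomics.put(counter, 1, 10)
#     rate_limit(counter, 4)            #=> 6
#     get_currently_allowed(counter)    #=> 6
#
# A negative value means producers overshot the allowance and must wait for
# the next :reset_limit tick.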
def rate_limiter_name(broadway_name) when is_atom(broadway_name) do
Module.concat(broadway_name, RateLimiter)
end
def update_rate_limiting(rate_limiter, opts) do
GenServer.call(rate_limiter, {:update_rate_limiting, opts})
end
def get_rate_limiting(rate_limiter) do
GenServer.call(rate_limiter, :get_rate_limiting)
end
def get_rate_limiter_ref(rate_limiter) do
GenServer.call(rate_limiter, :get_rate_limiter_ref)
end
@impl true
def init({_broadway_name, rate_limiting_opts, producers_names}) do
interval = Keyword.fetch!(rate_limiting_opts, :interval)
allowed = Keyword.fetch!(rate_limiting_opts, :allowed_messages)
counter = :atomics.new(@atomics_index, [])
:atomics.put(counter, @atomics_index, allowed)
_ = schedule_next_reset(interval)
state = %{
interval: interval,
allowed: allowed,
producers_names: producers_names,
counter: counter
}
{:ok, state}
end
@impl true
def handle_call({:update_rate_limiting, opts}, _from, state) do
%{interval: interval, allowed: allowed} = state
state = %{
state
| interval: Keyword.get(opts, :interval, interval),
allowed: Keyword.get(opts, :allowed_messages, allowed)
}
{:reply, :ok, state}
end
def handle_call(:get_rate_limiting, _from, state) do
%{interval: interval, allowed: allowed} = state
{:reply, %{interval: interval, allowed_messages: allowed}, state}
end
def handle_call(:get_rate_limiter_ref, _from, %{counter: counter} = state) do
{:reply, counter, state}
end
@impl true
def handle_info(:reset_limit, state) do
%{producers_names: producers_names, interval: interval, allowed: allowed, counter: counter} =
state
:atomics.put(counter, @atomics_index, allowed)
for name <- producers_names,
pid = Process.whereis(name),
is_pid(pid),
do: send(pid, {__MODULE__, :reset_rate_limiting})
_ = schedule_next_reset(interval)
{:noreply, state}
end
defp schedule_next_reset(interval) do
_ref = Process.send_after(self(), :reset_limit, interval)
end
end
|
lib/broadway/topology/rate_limiter.ex
| 0.789518
| 0.582283
|
rate_limiter.ex
|
starcoder
|
defmodule MeshxRpc.Protocol.Default do
@moduledoc """
RPC protocol default functions.
"""
require Logger
@int32_max round(:math.pow(2, 32) - 1)
@ser_flag_bin 0
@ser_flag_ser 1
@doc """
Calculates checksum for given `data` with `:erlang.crc32/1`.
Function returns checksum as 4 bytes binary big endian unsigned integer.
```elixir
iex(1)> MeshxRpc.Protocol.Default.checksum("test", [])
<<216, 127, 126, 12>>
```
"""
@spec checksum(data :: binary(), _opts :: term()) :: binary()
def checksum(data, _opts), do: :erlang.crc32(data) |> :binary.encode_unsigned()
@doc """
Returns node reference as `Node.self()` converted to string with length limited to 255 characters.
```elixir
iex(1)> MeshxRpc.Protocol.Default.node_ref()
"nonode@nohost"
```
"""
@spec node_ref() :: binary()
def node_ref(), do: Node.self() |> to_string() |> String.slice(0..255)
@doc """
Returns connection reference as 4 bytes random binary.
```elixir
iex(1)> MeshxRpc.Protocol.Default.conn_ref()
<<171, 248, 41, 163>>
iex(2)> MeshxRpc.Protocol.Default.conn_ref() |> Base.encode64(padding: false)
"IKWzCw"
```
"""
@spec conn_ref() :: binary()
def conn_ref(), do: :rand.uniform(@int32_max) |> :binary.encode_unsigned()
@doc """
Serializes given Erlang `term` to binary with `:erlang.term_to_binary/2`.
If successful function returns serialized binary as `result` and `serialization_flag`.
If user provided `term` is of binary type, serialization step is skipped and `serialization_flag` is set to `0`.
Otherwise `:erlang.term_to_binary(term, opts)` is called and `serialization_flag` is set to `1`.
Function argument `opts` is passed as options to `:erlang.term_to_binary/2`. Serialization options can be used to force binary data compression, which by default is disabled.
```elixir
iex(1)> {:ok, bin, ser_flag} = MeshxRpc.Protocol.Default.serialize(%{test_k: "test_v"}, [])
{:ok,
<<131, 116, 0, 0, 0, 1, 100, 0, 6, 116, 101, 115, 116, 95, 107, 109, 0, 0, 0,
6, 116, 101, 115, 116, 95, 118>>, 1}
iex(2)> MeshxRpc.Protocol.Default.deserialize(bin, [], ser_flag)
{:ok, %{test_k: "test_v"}}
iex(3)> {:ok, bin, ser_flag} = MeshxRpc.Protocol.Default.serialize("test", [])
{:ok, "test", 0}
iex(4)> MeshxRpc.Protocol.Default.deserialize(bin, [], ser_flag)
{:ok, "test"}
```
"""
@spec serialize(term :: term(), opts :: Keyword.t()) ::
{:ok, result :: binary(), serialization_flag :: 0..255} | {:error, reason :: term()}
def serialize(term, _opts) when is_binary(term), do: {:ok, term, @ser_flag_bin}
def serialize(term, opts) do
:erlang.term_to_binary(term, opts)
catch
:error, e ->
Logger.error(Exception.format_stacktrace(__STACKTRACE__))
{:error, e}
else
res -> {:ok, res, @ser_flag_ser}
end
@doc """
De-serializes given `bin` to Erlang term with `:erlang.binary_to_term/2`.
Function performs reverse operation to `serialize/2`. If `serialization_flag` is `0` de-serialization step is skipped and function returns `{:ok, bin}`. Otherwise `bin` is de-serialized using `:erlang.binary_to_term/2`.
`opts` is passed as second argument to `:erlang.binary_to_term/2`. `serialize/2` provides usage example.
"""
@spec deserialize(bin :: binary(), opts :: Keyword.t(), serialization_flag :: 0..255) ::
{:ok, result :: term()} | {:error, reason :: term}
def deserialize(bin, opts, ser_flag \\ @ser_flag_ser) do
if ser_flag == @ser_flag_bin do
{:ok, bin}
else
try do
:erlang.binary_to_term(bin, opts)
catch
:error, e ->
Logger.error(Exception.format_stacktrace(__STACKTRACE__))
{:error, e}
else
res -> {:ok, res}
end
end
end
end
|
lib/protocol/default.ex
| 0.854521
| 0.728265
|
default.ex
|
starcoder
|
defmodule CadetWeb.ViewHelper do
@moduledoc """
Helper functions shared throughout views
"""
defp build_staff(user) do
transform_map_for_view(user, [:name, :id])
end
def unsubmitted_by_builder(nil), do: nil
def unsubmitted_by_builder(staff) do
build_staff(staff)
end
def grader_builder(nil), do: nil
def grader_builder(_) do
fn %{grader: grader} -> build_staff(grader) end
end
def graded_at_builder(nil), do: nil
def graded_at_builder(_) do
fn %{updated_at: updated_at} -> format_datetime(updated_at) end
end
def format_datetime(nil), do: nil
def format_datetime(datetime = %DateTime{}) do
datetime
|> DateTime.truncate(:millisecond)
|> Timex.format!("{ISO:Extended}")
end
def format_datetime(datetime = %NaiveDateTime{}) do
datetime
|> Timex.to_datetime()
|> format_datetime()
end
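# Sketch (exact output shape depends on Timex's {ISO:Extended} formatting):
#
#     format_datetime(~N[2020-01-01 12:00:00])
#     #=> "2020-01-01T12:00:00.000+00:00" (approximately)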
@doc """
This function allows you to build a map for a view from a map of transformations or a list of fields.
Given a `key_list`, it is the equivalent of `Map.take(source, key_list)`.
Given a map of `%{view_field: source_field, ...}`, it is the equivalent of `%{view_field: Map.get(source, source_field), ...}`
Given a map of `%{view_field: source_function, ...}`, it is the equivalent of `%{view_field: apply(source_function, source)}`
Examples:
```
source = %{
foofoo: "ho",
barbar: "ha",
foobar: "hoha"
}
field_list = [:foofoo, :barbar]
transform_map_for_view(source, field_list)
> %{
foofoo: "ho",
barbar: "ha"
}
key_transformations = %{
foo: :foofoo,
bar: :barbar
}
transform_map_for_view(source, key_transformations)
> %{
foo: Map.get(source, :foofoo),
bar: Map.get(source, :barbar)
}
function_transformations = %{
foo: fn source -> source.foofoo <> "hoho" end,
bar: fn source -> source.barbar <> "barbar" end
}
transform_map_for_view(source, function_transformations)
> %{
foo: source.foofoo <> "hoho",
bar: source.barbar <> "barbar"
}
```
"""
def transform_map_for_view(source, transformations) when is_map(transformations) do
Enum.reduce(
transformations,
%{},
fn {field_name, transformation}, acc ->
Map.put(acc, field_name, get_value(transformation, source))
end
)
end
def transform_map_for_view(source, fields) when is_list(fields) do
transform_map_for_view(
source,
Enum.reduce(fields, %{}, fn field, acc -> Map.put(acc, field, field) end)
)
end
defp get_value(source_spec, value_store) when is_function(source_spec) do
Kernel.apply(source_spec, [value_store])
end
defp get_value(source_spec, value_store) when is_binary(source_spec) or is_atom(source_spec) do
Map.get(value_store, source_spec)
end
end
|
lib/cadet_web/helpers/view_helper.ex
| 0.87168
| 0.866246
|
view_helper.ex
|
starcoder
|
defmodule NaturalSetDemo do
@moduledoc """
Tests to demonstrate `NaturalSet`
## Bit vector demonstration
iex> NaturalSet.new()
#NaturalSet<[]>
iex> NaturalSet.new().bits
0
iex> NaturalSet.new([0]).bits
1
iex> NaturalSet.new([1]).bits
2
iex> NaturalSet.new([1]).bits |> inspect(base: :binary)
"0b10"
iex> NaturalSet.new([0, 1]).bits |> inspect(base: :binary)
"0b11"
iex> NaturalSet.new(1..5).bits |> inspect(base: :binary)
"0b111110"
iex> NaturalSet.new([1, 3, 5]).bits |> inspect(base: :binary)
"0b101010"
iex> NaturalSet.new([0, 2, 4]).bits |> inspect(base: :binary)
"0b10101"
iex> NaturalSet.new([100, 1, 0, 50]).bits |> inspect(base: :binary)
"0b10000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000011"
iex> NaturalSet.new([100, 1, 0, 50]).bits
1267650600228230527396610048003
iex> NaturalSet.new([100, 1, 0, 50])
#NaturalSet<[0, 1, 50, 100]>
## How to put an element in a `NaturalSet`
iex(1)> use Bitwise
Bitwise
iex(2)> ns = NaturalSet.new([0, 4, 5])
#NaturalSet<[0, 4, 5]>
iex(3)> ns.bits
49
iex(4)> ns.bits |> inspect(base: :binary)
"0b110001"
iex(5)> element = 2
2
iex(6)> 1 <<< element
4
iex(7)> (1 <<< element) |> inspect(base: :binary)
"0b100"
iex(8)> 0b100 ||| ns.bits
53
iex(9)> (0b100 ||| ns.bits) |> inspect(base: :binary)
"0b110101"
iex(10)> %NaturalSet{bits: 0b110101}
#NaturalSet<[0, 2, 4, 5]>
## Protocol `String.Chars`
iex> 65 |> to_string
"65"
iex> [65, 66, 67] |> to_string
"ABC"
iex> :café |> to_string
"café"
iex> MapSet.new |> to_string
** (Protocol.UndefinedError) protocol String.Chars not implemented for #MapSet<[]> of type MapSet (a struct). This protocol is implemented for the following type(s): NaturalSet, Float, DateTime, Time, List, Version.Requirement, Atom, Integer, Version, Date, BitString, NaiveDateTime, URI
## After implementing protocol `String.Chars` for `NaturalSet`:
iex> NaturalSet.new([2, 3, 5, 6]) |> to_string
"0b1101100"
"""
defimpl String.Chars, for: NaturalSet do
def to_string(natural_set), do: natural_set.bits |> inspect(base: :binary)
end
end
|
natural_set_demo/lib/natural_set_demo.ex
| 0.729231
| 0.420332
|
natural_set_demo.ex
|
starcoder
|
defmodule TelemetryMetricsTelegraf.Telegraf.ConfigTemplates do
@moduledoc "Telegraf toml configuration templates."
@type opts :: keyword({:period, String.t()})
@type basicstats_opts :: keyword({:period, String.t()} | {:stats, [atom | String.t()]})
@type histogram_opts ::
keyword(
{:period, String.t()}
| {:histogram_reset, boolean}
| {:histogram_cumulative, boolean()}
)
@spec basicstats_aggeregator(measurements :: [String.t()], basicstats_opts) :: String.t()
def basicstats_aggeregator(measurements, opts) do
~s"""
[[aggregators.basicstats]]
period = "#{opts[:period]}"
drop_original = true#{basicstats_stats_list(opts[:stats])}
namepass = #{render_namepass(measurements)}
"""
end
defp basicstats_stats_list(nil), do: ""
defp basicstats_stats_list(stats) do
"\nstats = " <> toml_list_of_string(stats)
end
@spec final_aggeregator([measurement :: String.t()], opts) :: String.t()
def final_aggeregator(measurements, opts) do
~s"""
[[aggregators.final]]
period = "#{opts[:period]}"
drop_original = true
namepass = #{render_namepass(measurements)}
"""
end
@spec histogram_aggregator(
[{measurement_name :: String.t(), buckets :: [float]}],
histogram_opts
) :: String.t()
def histogram_aggregator(measurements_with_buckets, opts) do
~s"""
[[aggregators.histogram]]
period = "#{opts[:period]}"
drop_original = true
reset = #{opts[:histogram_reset]}
cumulative = #{opts[:histogram_cumulative]}
#{measurements_with_buckets |> Enum.map(&histogram_config/1) |> Enum.join("\n")}
"""
end
@spec histogram_config({measurement_name :: String.t(), buckets :: [float]}) :: String.t()
def histogram_config({measurement_name, buckets}) do
~s"""
[[aggregators.histogram.config]]
buckets = #{"[" <> (buckets |> Enum.map(&to_string/1) |> Enum.join(", ")) <> "]"}
measurement_name = "#{measurement_name}"
"""
end
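# For example (hypothetical measurement and buckets), histogram_config/1
# renders one TOML sub-table:
#
#     histogram_config({"phoenix_endpoint_stop_duration", [0.1, 0.5, 1.0]})
#
# produces:
#
#     [[aggregators.histogram.config]]
#     buckets = [0.1, 0.5, 1.0]
#     measurement_name = "phoenix_endpoint_stop_duration"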
@spec unknown_metric_type(module, [Telemetry.Metrics.t()], keyword()) :: String.t()
def unknown_metric_type(metric_type, metrics, _opts) do
"# renderer for #{Macro.to_string(metric_type)} is not implemented\n# #{inspect(metrics)} will pass unchanged"
end
defp render_namepass(measurements) do
measurements
|> Enum.sort()
|> toml_list_of_string()
end
@inline_toml_list_max_items_length 100
defp toml_list_of_string(list) do
items = Enum.map(list, &~s["#{&1}"])
if Enum.reduce(items, 0, &(&2 + String.length(&1))) > @inline_toml_list_max_items_length do
"[\n" <> Enum.join(items, ",\n") <> "\n]"
else
"[" <> Enum.join(items, ", ") <> "]"
end
end
end
|
lib/telemetry_metrics_telegraf/telegraf/config_templates.ex
| 0.696371
| 0.409368
|
config_templates.ex
|
starcoder
|
defmodule LcdDisplay.HD44780.MCP23008 do
@moduledoc """
Knows how to commuticate with HD44780 type display through the 8-bit I/O expander
[MCP23008](https://ww1.microchip.com/downloads/en/DeviceDoc/MCP23008-MCP23S08-Data-Sheet-20001919F.pdf).
You can turn on/off the backlight.
## Usage
```
iex(2)> Circuits.I2C.detect_devices
Devices on I2C bus "i2c-1":
* 32 (0x20)
1 devices detected on 1 I2C buses
```
```
config = %{
i2c_bus: "i2c-1", # I2C bus name
i2c_address: 0x20, # 7-bit address
rows: 2, # the number of display rows
cols: 16, # the number of display columns
font_size: "5x8" # "5x10" or "5x8"
}
# Start the LCD driver and get the initial display state.
{:ok, display} = LcdDisplay.HD44780.MCP23008.start(config)
# Run a command and the display state will be updated.
{:ok, display} = LcdDisplay.HD44780.MCP23008.execute(display, {:print, "Hello world"})
```
## Pin assignment
This module assumes the following pin assignment:
| MCP23008 | HD44780 |
| ------- | -------------------- |
| GP0 | - |
| GP1 | RS (Register Select) |
| GP2 | E (Enable) |
| GP3 | DB4 (Data Bus 4) |
| GP4 | DB5 (Data Bus 5) |
| GP5 | DB6 (Data Bus 6) |
| GP6 | DB7 (Data Bus 7) |
| GP7 | LED |
"""
use LcdDisplay.HD44780.Driver
@default_i2c_bus "i2c-1"
@default_i2c_address 0x20
@enable_bit 0x04
@backlight_on 0x80
# MCP23008 registers
@mcp23008_iodir 0x00
@mcp23008_gpio 0x09
@type display_driver :: LcdDisplay.HD44780.Driver.t()
@typedoc """
The configuration options.
"""
@type config :: %{
optional(:i2c_bus) => String.t(),
optional(:i2c_address) => byte,
optional(:rows) => pos_integer,
optional(:cols) => pos_integer,
optional(:font_size) => String.t()
}
@doc """
Initializes the LCD driver and returns the initial display state.
"""
@impl LcdDisplay.HD44780.Driver
@spec start(config) :: {:ok, display_driver} | {:error, any()}
def start(config) do
number_of_lines = if config[:rows] == 1, do: @number_of_lines_1, else: @number_of_lines_2
font_size = if config[:font_size] == "5x10", do: @font_size_5x10, else: @font_size_5x8
{:ok,
config
|> initial_state()
|> expander_write(@backlight_on)
|> initialize_display(function_set: @cmd_function_set ||| font_size ||| number_of_lines)}
rescue
e -> {:error, e}
end
@spec initial_state(config) :: display_driver | no_return()
defp initial_state(opts) do
i2c_bus = opts[:i2c_bus] || @default_i2c_bus
i2c_address = opts[:i2c_address] || @default_i2c_address
{:ok, i2c_ref} = initialize_serial_bus(i2c_bus, i2c_address)
%{
driver_module: __MODULE__,
i2c_ref: i2c_ref,
i2c_address: i2c_address,
rows: opts[:rows] || @default_rows,
cols: opts[:cols] || @default_cols,
# Initial values for features that we can change later.
entry_mode: @cmd_entry_mode_set ||| @entry_left,
display_control: @cmd_display_control ||| @display_on,
backlight: true
}
end
@spec initialize_serial_bus(String.t(), byte) :: {:ok, reference} | no_return
defp initialize_serial_bus(i2c_bus, i2c_address) do
{:ok, i2c_ref} = LcdDisplay.I2C.open(i2c_bus)
# Make all the pins be outputs. Please refer to MCP23008 data sheet 1.6.1.
:ok = LcdDisplay.I2C.write(i2c_ref, i2c_address, <<@mcp23008_iodir, 0x00>>)
{:ok, i2c_ref}
end
# Initializes the display for 4-bit interface. See Hitachi HD44780 datasheet page 46 for details.
@spec initialize_display(display_driver, list) :: display_driver | no_return()
defp initialize_display(display, function_set: function_set) do
display
# Function set (8-bit mode; Interface is 8 bits long)
|> write_four_bits(0x03)
|> delay(5)
|> write_four_bits(0x03)
|> delay(5)
|> write_four_bits(0x03)
|> delay(1)
# Function set (4-bit mode; Interface is 8 bits long)
|> write_four_bits(0x02)
# Function set (4-bit mode; Interface is 4 bits long)
# The number of display lines and character font cannot be changed after this point.
|> write_instruction(function_set)
|> write_feature(:display_control)
|> clear()
|> write_feature(:entry_mode)
end
@doc """
Executes the specified command and returns a new display state.
"""
@impl LcdDisplay.HD44780.Driver
def execute(display, :clear), do: {:ok, clear(display)}
def execute(display, :home), do: {:ok, home(display)}
def execute(display, {:print, text}), do: {:ok, print(display, text)}
def execute(display, {:set_cursor, row, col}), do: {:ok, set_cursor(display, row, col)}
def execute(display, {:cursor, on_off_bool}), do: {:ok, set_display_control_flag(display, @cursor_on, on_off_bool)}
def execute(display, {:blink, on_off_bool}), do: {:ok, set_display_control_flag(display, @blink_on, on_off_bool)}
def execute(display, {:display, on_off_bool}), do: {:ok, set_display_control_flag(display, @display_on, on_off_bool)}
def execute(display, {:autoscroll, on_off_bool}), do: {:ok, set_entry_mode_flag(display, @autoscroll, on_off_bool)}
def execute(display, {:text_direction, :right_to_left}), do: {:ok, set_entry_mode_flag(display, @entry_left, false)}
def execute(display, {:text_direction, :left_to_right}), do: {:ok, set_entry_mode_flag(display, @entry_left, true)}
def execute(display, {:scroll, cols}), do: {:ok, scroll(display, cols)}
def execute(display, {:right, cols}), do: {:ok, right(display, cols)}
def execute(display, {:left, cols}), do: {:ok, left(display, cols)}
def execute(display, {:backlight, on_off_bool}), do: {:ok, set_backlight(display, on_off_bool)}
def execute(_display, command), do: {:error, {:unsupported, command}}
defp clear(display), do: display |> write_instruction(@cmd_clear_display) |> delay(2)
defp home(display), do: display |> write_instruction(@cmd_return_home) |> delay(2)
defp print(display, char) when is_integer(char), do: write_data(display, char)
defp print(display, text) when is_binary(text) do
# Translates a text to a charlist (list of bytes).
text |> to_charlist() |> Enum.each(&write_data(display, &1))
display
end
# Set the DDRAM address corresponding to the specified cursor position.
@spec set_cursor(display_driver, pos_integer, pos_integer) :: display_driver
defp set_cursor(display, row, col) when row >= 0 and col >= 0 do
ddram_address = determine_ddram_address({row, col}, Map.take(display, [:rows, :cols]))
write_instruction(display, @cmd_set_ddram_address ||| ddram_address)
end
@spec set_entry_mode_flag(display_driver, byte, boolean) :: display_driver
defp set_entry_mode_flag(display, flag, on_off_bool) do
entry_mode =
if on_off_bool,
do: display.entry_mode ||| flag,
else: display.entry_mode &&& ~~~flag
write_feature(%{display | entry_mode: entry_mode}, :entry_mode)
end
@spec set_display_control_flag(display_driver, byte, boolean) :: display_driver
defp set_display_control_flag(display, flag, on_off_bool) do
display_control =
if on_off_bool,
do: display.display_control ||| flag,
else: display.display_control &&& ~~~flag
write_feature(%{display | display_control: display_control}, :display_control)
end
# Write a feature based on the display state.
@spec write_feature(display_driver, LcdDisplay.HD44780.Driver.feature()) ::
display_driver
defp write_feature(display, feature_key) when is_atom(feature_key) do
display |> write_instruction(Map.fetch!(display, feature_key))
end
defp scroll(display, 0), do: display
# Scroll the entire display left
defp scroll(display, cols) when cols < 0 do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_display)
scroll(display, cols + 1)
end
# Scroll the entire display right
defp scroll(display, cols) when cols > 0 do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_display ||| @shift_right)
scroll(display, cols - 1)
end
# Move cursor right
defp right(display, 0), do: display
defp right(display, cols) do
write_instruction(display, @cmd_cursor_shift_control ||| @shift_right)
right(display, cols - 1)
end
# Move cursor left
defp left(display, 0), do: display
defp left(display, cols) do
write_instruction(display, @cmd_cursor_shift_control)
left(display, cols - 1)
end
@spec set_backlight(display_driver, boolean) :: display_driver
defp set_backlight(display, flag) when is_boolean(flag) do
# Set backlight and write 0 (nothing) to trigger it.
%{display | backlight: flag} |> expander_write(0)
end
@impl LcdDisplay.HD44780.Driver
def write_instruction(display, byte), do: write_byte(display, byte, 0)
@impl LcdDisplay.HD44780.Driver
def write_data(display, byte), do: write_byte(display, byte, 1)
@spec write_byte(display_driver, byte, 0..1) :: display_driver
defp write_byte(display, byte, rs_bit) when byte in 0..255 and rs_bit in 0..1 do
<<high_four_bits::4, low_four_bits::4>> = <<byte>>
display
|> write_four_bits(high_four_bits, rs_bit)
|> write_four_bits(low_four_bits, rs_bit)
end
@spec write_four_bits(display_driver, 0..15, 0..1) :: display_driver
defp write_four_bits(display, four_bits, rs_bit \\ 0)
when is_integer(four_bits) and four_bits in 0..15 and rs_bit in 0..1 do
# Map the four bits to the data pins.
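# Per the pin assignment above: bits 6..3 of the expander byte carry
# DB7..DB4, bit 1 is RS, and bit 2 (E) is pulsed separately by pulse_enable/2.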
<<d7::1, d6::1, d5::1, d4::1>> = <<four_bits::4>>
<<data_byte>> = <<0::1, d7::1, d6::1, d5::1, d4::1, 0::1, rs_bit::1, 0::1>>
display
|> expander_write(data_byte)
|> pulse_enable(data_byte)
end
@spec pulse_enable(display_driver, byte) :: display_driver
defp pulse_enable(display, byte) do
display
|> expander_write(byte ||| @enable_bit)
|> expander_write(byte &&& ~~~@enable_bit)
end
@spec expander_write(display_driver, byte) :: display_driver
defp expander_write(%{i2c_ref: i2c_ref, i2c_address: i2c_address, backlight: backlight} = display, byte)
when is_reference(i2c_ref) and is_integer(i2c_address) and is_boolean(backlight) and is_integer(byte) do
data =
if backlight,
do: byte ||| @backlight_on,
else: byte
:ok = LcdDisplay.I2C.write(i2c_ref, i2c_address, [@mcp23008_gpio, data])
display
end
end
|
lib/lcd_display/driver/hd44780_mcp23008.ex
| 0.786746
| 0.797872
|
hd44780_mcp23008.ex
|
starcoder
|
defmodule Grizzly.ZWave.Commands.NodeAddDSKSet do
@moduledoc """
Command to set the DSK for an including node
Params:
* `:seq_number` - the sequence number for the command (required)
* `:accept` - the including controller accepts the inclusion process
and should proceed with adding the including node (required)
* `:input_dsk_length` - the length of the DSK provided (required)
* `:input_dsk` - the DSK pin for the including node (required)
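Example (hypothetical values; `dsk` is a `Grizzly.ZWave.DSK.t()` built
elsewhere):
    {:ok, command} =
      Grizzly.ZWave.Commands.NodeAddDSKSet.new(
        seq_number: 0x01,
        accept: true,
        input_dsk_length: 2,
        input_dsk: dsk
      )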
"""
@behaviour Grizzly.ZWave.Command
alias Grizzly.ZWave
alias Grizzly.ZWave.{Command, DSK}
alias Grizzly.ZWave.CommandClasses.NetworkManagementInclusion
@type param ::
{:seq_number, ZWave.seq_number()}
| {:accept, boolean()}
| {:input_dsk_length, 0..0xF}
| {:input_dsk, DSK.t()}
@impl true
@spec new([param()]) :: {:ok, Command.t()}
def new(params) do
# TODO validate params
command = %Command{
name: :node_add_dsk_set,
command_byte: 0x14,
command_class: NetworkManagementInclusion,
params: params,
impl: __MODULE__
}
{:ok, command}
end
@impl true
@spec encode_params(Command.t()) :: binary()
def encode_params(command) do
seq_number = Command.param!(command, :seq_number)
accept = Command.param!(command, :accept)
input_dsk_length = Command.param!(command, :input_dsk_length)
input_dsk = Command.param(command, :input_dsk)
dsk = dsk_to_binary(input_dsk, input_dsk_length)
<<seq_number, bool_to_bit(accept)::size(1), 0::size(3), input_dsk_length::size(4)>> <> dsk
end
@impl true
@spec decode_params(binary()) :: {:ok, [param()]}
def decode_params(
<<seq_number, accept::size(1), _::size(3), input_dsk_length::size(4),
input_dsk::binary-size(input_dsk_length)-unit(8)>>
) do
{:ok,
[
seq_number: seq_number,
accept: bit_to_bool(accept),
input_dsk_length: input_dsk_length,
input_dsk: DSK.new(input_dsk)
]}
end
defp bool_to_bit(true), do: 1
defp bool_to_bit(false), do: 0
defp bit_to_bool(1), do: true
defp bit_to_bool(0), do: false
defp dsk_to_binary(nil, dsk_len) when dsk_len == 0 do
<<>>
end
defp dsk_to_binary(0, dsk_len) when dsk_len == 0 do
<<>>
end
defp dsk_to_binary(%DSK{} = dsk, dsk_len) do
:binary.part(dsk.raw, 0, dsk_len)
end
end
|
lib/grizzly/zwave/commands/node_add_dsk_set.ex
| 0.678753
| 0.438845
|
node_add_dsk_set.ex
|
starcoder
|
defmodule AgeGuard do
@moduledoc """
Verifies if a person born at a given date meets provided age requirements.
It checks given DOB (date of birth) against given age.
Useful when registering users.
Acceptable formats of DOB (mix of integers and strings):
```
1, 12, 2020
01, 03, 2010
"01", "12", "2020"
"1", "3", "2010"
"03", "March", "2011"
"17", "Mar", "2018"
17, "Mar", 2019
"13", 02, "2019"
```
Also does some dates validations (dates from the future are rejected).
## Installation
The package can be installed by adding `age_guard` to your list of dependencies in `mix.exs`:
```elixir
def deps do
[
{:age_guard, "~> 0.1.0"}
]
end
```
and running `mix deps.get` in your console to fetch from Hex.
"""
@doc """
Checks if enough years (min_age) have passed since given date.
```
AgeGuard.is_old_enough?(day_of_birth, month_of_birth, year_of_birth, required_age)
```
### Examples
```
iex> AgeGuard.is_old_enough?("1","5","2019", 21)
false
iex> AgeGuard.is_old_enough?(3, "March", 2000, 21)
false
iex> AgeGuard.is_old_enough?(3, 3, 2000, 18)
true
iex> AgeGuard.is_old_enough?(3, "Dec", 1995, 18)
true
```
"""
def is_old_enough?(day, month, year, min_age \\ 18) do
if is_integer(min_age) && min_age > 0 do
with {:ok, dob} <- verify_date(day, month, year) do
meets_req_age?(dob, min_age)
end
else
{:error, "min_age must be a positive integer"}
end
end
# makes the actual calculations
defp meets_req_age?(dob, min_req_age) do
today = Date.utc_today()
iso_day = format_to_iso(today.day)
iso_month = format_to_iso(today.month)
case Date.from_iso8601("#{to_string(today.year - min_req_age)}-#{iso_month}-#{iso_day}") do
{:ok, today_minus_min_req_age} ->
days_between_today_and_dob = Enum.count(Date.range(today, dob))
days_between_today_and_req_yrs = Enum.count(Date.range(today, today_minus_min_req_age))
days_between_today_and_dob >= days_between_today_and_req_yrs
_ ->
false
end
end
# verifies if day, date, month make sense
defp verify_date(day, month, year) do
# ensure date meets acceptable ISO format
iso_day = format_to_iso(day)
iso_month = format_to_iso(month)
iso_year = to_string(year)
today = Date.utc_today()
case Date.from_iso8601("#{iso_year}-#{iso_month}-#{iso_day}") do
{:ok, iso_dob} ->
if Date.diff(today, iso_dob) < 0 do
{:error, "DOB cannot be in the future"}
else
{:ok, iso_dob}
end
_ ->
{:error, "invalid format"}
end
end
# ISO format requires day and month to be two digits
defp format_to_iso(val) do
cond do
is_integer(val) and val < 10 ->
"0" <> to_string(val)
is_binary(val) and String.length(val) > 2 ->
month_to_iso(String.downcase(val))
is_binary(val) and String.length(val) == 1 ->
"0" <> val
true ->
to_string(val)
end
end
defp month_to_iso(month) when month in ["january", "jan"], do: "01"
defp month_to_iso(month) when month in ["february", "feb"], do: "02"
defp month_to_iso(month) when month in ["march", "mar"], do: "03"
defp month_to_iso(month) when month in ["april", "apr"], do: "04"
defp month_to_iso(month) when month in ["may", "may"], do: "05"
defp month_to_iso(month) when month in ["june", "jun"], do: "06"
defp month_to_iso(month) when month in ["july", "jul"], do: "07"
defp month_to_iso(month) when month in ["august", "aug"], do: "08"
defp month_to_iso(month) when month in ["september", "sept", "sep"], do: "09"
defp month_to_iso(month) when month in ["october", "oct"], do: "10"
defp month_to_iso(month) when month in ["november", "nov"], do: "11"
defp month_to_iso(month) when month in ["december", "dec"], do: "12"
defp month_to_iso(_), do: "incorrect"
end
|
lib/age_guard.ex
| 0.824956
| 0.919317
|
age_guard.ex
|
starcoder
|
defmodule Expline.Matrix do
@moduledoc false
@enforce_keys [:n_rows, :m_cols, :internal]
defstruct [:n_rows, :m_cols, :internal]
@type t() :: %__MODULE__{n_rows: pos_integer(), m_cols: pos_integer(), internal: internal()}
@type vector() :: Expline.Vector.t()
@typep internal() :: tuple()
@typep binary_op() :: (float(), float() -> float())
@typep unary_op() :: (float() -> float())
@spec zeros(pos_integer(), pos_integer()) :: __MODULE__.t()
def zeros(n_rows, m_cols) do
construct(n_rows, m_cols, fn _, _ -> 0.0 end)
end
@spec identity(pos_integer()) :: __MODULE__.t()
def identity(n) do
construct(n, n, fn
i, i -> 1.0
_i, _j -> 0.0
end)
end
@spec sub(__MODULE__.t(), __MODULE__.t()) ::
__MODULE__.t() | {:error, :dimension_mismatch}
def sub(%__MODULE__{} = a, %__MODULE__{} = b), do: do_binary_op(a, b, &Kernel.-/2)
@spec add(__MODULE__.t(), __MODULE__.t()) ::
__MODULE__.t() | {:error, :dimension_mismatch}
def add(%__MODULE__{} = a, %__MODULE__{} = b), do: do_binary_op(a, b, &Kernel.+/2)
@spec do_binary_op(__MODULE__.t(), __MODULE__.t(), binary_op()) ::
__MODULE__.t() | {:error, :dimension_mismatch}
defp do_binary_op(
%__MODULE__{n_rows: n_rows, m_cols: m_cols} = a,
%__MODULE__{n_rows: n_rows, m_cols: m_cols} = b,
op
)
when is_function(op, 2) do
construct(n_rows, m_cols, fn
i, j -> op.(at(a, i, j), at(b, i, j))
end)
end
defp do_binary_op(%__MODULE__{}, %__MODULE__{}, _op),
do: {:error, :dimension_mismatch}
@spec scale(__MODULE__.t(), float()) :: __MODULE__.t()
def scale(%__MODULE__{} = matrix, scalar)
when is_float(scalar) do
transform(matrix, &(scalar * &1))
end
@spec transform(__MODULE__.t(), unary_op()) :: __MODULE__.t()
def transform(%__MODULE__{n_rows: n_rows, m_cols: m_cols} = matrix, op)
when is_function(op, 1) do
construct(n_rows, m_cols, fn
i, j ->
matrix |> at(i, j) |> op.()
end)
end
@spec construct(pos_integer(), pos_integer(), (non_neg_integer(), non_neg_integer() -> float())) ::
__MODULE__.t()
def construct(n_rows, m_cols, elem_fn)
when n_rows > 0 and
m_cols > 0 and
is_function(elem_fn, 2) do
internal =
0..(n_rows - 1)
|> Enum.reduce({}, fn i, matrix ->
row =
0..(m_cols - 1)
|> Enum.reduce({}, fn j, row ->
Tuple.append(row, elem_fn.(i, j))
end)
Tuple.append(matrix, row)
end)
%__MODULE__{n_rows: n_rows, m_cols: m_cols, internal: internal}
end
@spec at(__MODULE__.t(), non_neg_integer(), non_neg_integer()) :: float()
def at(%__MODULE__{n_rows: n_rows, m_cols: m_cols, internal: internal}, i, j)
when is_integer(i) and
i < n_rows and
is_integer(j) and
j < m_cols do
internal
|> elem(i)
|> elem(j)
end
@spec transpose(__MODULE__.t()) :: __MODULE__.t()
def transpose(%__MODULE__{} = matrix) do
construct(matrix.m_cols, matrix.n_rows, fn
i, j -> at(matrix, j, i)
end)
end
@spec symmetric?(__MODULE__.t()) :: boolean()
def symmetric?(%__MODULE__{} = matrix) do
matrix == transpose(matrix)
end
@spec lower_triangular?(__MODULE__.t()) :: boolean()
def lower_triangular?(%__MODULE__{n_rows: n_rows, m_cols: m_cols} = matrix) do
for i <- 0..(n_rows - 1), j <- 0..(m_cols - 1), i < j do
at(matrix, i, j)
end
|> Enum.all?(fn
0.0 -> true
_ -> false
end)
end
@spec upper_triangular?(__MODULE__.t()) :: boolean()
def upper_triangular?(%__MODULE__{n_rows: n_rows, m_cols: m_cols} = matrix) do
for i <- 0..(n_rows - 1), j <- 0..(m_cols - 1), i > j do
at(matrix, i, j)
end
|> Enum.all?(fn
0.0 -> true
_ -> false
end)
end
@spec positive_definite?(__MODULE__.t()) :: boolean()
def positive_definite?(%__MODULE__{n_rows: n, m_cols: n} = matrix) do
case cholesky_decomposition(matrix) do
{:ok, _} -> true
{:error, _} -> false
end
end
@spec cholesky_decomposition(__MODULE__.t()) ::
{:ok, __MODULE__.t()}
| {:error, :not_square}
| {:error, :not_symmetric}
| {:error, :not_positive_definite}
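# Standard Cholesky recurrences for A = L * Lᵀ (lower-triangular L):
#   L[j][j] = sqrt(A[j][j] - Σ_{k<j} L[j][k]^2)
#   L[i][j] = (A[i][j] - Σ_{k<j} L[i][k] * L[j][k]) / L[j][j], for i > j
# A negative value under the square root means A is not positive definite.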
def cholesky_decomposition(%__MODULE__{n_rows: n, m_cols: n} = matrix) do
if symmetric?(matrix) do
l_internal =
0..(n - 1)
|> Enum.reduce_while(matrix.internal, fn i, mat_l ->
row_a_i = elem(matrix.internal, i)
row_l_i = elem(mat_l, i)
new_row =
0..(n - 1)
|> Enum.reduce_while(row_l_i, fn j, row_l_i ->
cond do
i == j ->
summation =
for k <- 0..(j - 1), k >= 0, k <= j - 1 do
l_jk = elem(row_l_i, k)
:math.pow(l_jk, 2)
end
|> Enum.sum()
a_jj = elem(row_a_i, j)
case a_jj - summation do
value when value < 0.0 ->
{:halt, {:error, :not_positive_definite}}
value ->
new_row = put_elem(row_l_i, j, :math.sqrt(value))
{:cont, new_row}
end
i > j ->
summation =
for k <- 0..(j - 1), k >= 0, k <= j - 1 do
row_l_j = elem(mat_l, j)
l_ik = elem(row_l_i, k)
l_jk = elem(row_l_j, k)
l_ik * l_jk
end
|> Enum.sum()
a_ij = elem(row_a_i, j)
l_jj = mat_l |> elem(j) |> elem(j)
new_row = put_elem(row_l_i, j, (a_ij - summation) / l_jj)
{:cont, new_row}
# i < j
true ->
{:cont, put_elem(row_l_i, j, 0.0)}
end
end)
case new_row do
{:error, :not_positive_definite} ->
{:halt, new_row}
_row ->
{:cont, put_elem(mat_l, i, new_row)}
end
end)
case l_internal do
{:error, :not_positive_definite} ->
{:error, :not_positive_definite}
_ ->
{:ok, %{matrix | internal: l_internal}}
end
else
{:error, :not_symmetric}
end
end
def cholesky_decomposition(%__MODULE__{}), do: {:error, :not_square}
@spec product(__MODULE__.t(), __MODULE__.t()) ::
{:ok, __MODULE__.t()}
| {:error, :dimension_mismatch}
def product(
%__MODULE__{n_rows: a_rows, m_cols: a_cols, internal: a_internal},
%__MODULE__{n_rows: b_rows, m_cols: b_cols} = b
)
when a_cols == b_rows do
b_internal = transpose(b).internal
c =
construct(a_rows, b_cols, fn
i, j ->
as = elem(a_internal, i) |> Tuple.to_list()
bs = elem(b_internal, j) |> Tuple.to_list()
Enum.zip(as, bs)
|> Enum.map(fn {a_ik, b_kj} -> a_ik * b_kj end)
|> Enum.sum()
end)
{:ok, c}
end
def product(%__MODULE__{}, %__MODULE__{}), do: {:error, :dimension_mismatch}
@spec forward_substitution(__MODULE__.t(), vector()) ::
{:ok, vector()}
| {:error, :dimension_mismatch}
| {:error, :not_lower_triangular}
def forward_substitution(
%__MODULE__{n_rows: n_rows} = matrix,
%Expline.Vector{n_slots: n_slots} = vector
)
when n_rows == n_slots do
if lower_triangular?(matrix) do
solution = do_forward_substitution(matrix, vector, 0, {})
{:ok, solution}
else
{:error, :not_lower_triangular}
end
end
def forward_substitution(%__MODULE__{}, %Expline.Vector{}), do: {:error, :dimension_mismatch}
@spec do_forward_substitution(__MODULE__.t(), vector(), integer(), tuple()) :: vector()
defp do_forward_substitution(_matrix, %Expline.Vector{n_slots: n_slots}, _row, solution)
when n_slots == tuple_size(solution) do
Expline.Vector.construct(tuple_size(solution), fn
i -> elem(solution, i)
end)
end
defp do_forward_substitution(matrix, vector, nth_row, solution) do
summation =
for i <- 0..(nth_row - 1), i >= 0, i <= nth_row - 1 do
at(matrix, nth_row, i) * elem(solution, i)
end
|> Enum.sum()
new_solution = (Expline.Vector.at(vector, nth_row) - summation) / at(matrix, nth_row, nth_row)
do_forward_substitution(matrix, vector, nth_row + 1, Tuple.append(solution, new_solution))
end
@spec backward_substitution(__MODULE__.t(), vector()) ::
{:ok, vector()}
| {:error, :dimension_mismatch}
| {:error, :not_upper_triangular}
def backward_substitution(
%__MODULE__{n_rows: n_rows} = matrix,
%Expline.Vector{n_slots: n_slots} = vector
)
when n_rows == n_slots do
if upper_triangular?(matrix) do
sln_buffer = 1..n_rows |> Enum.reduce({}, fn _, t -> Tuple.append(t, 0.0) end)
solution = do_backward_substitution(matrix, vector, n_rows - 1, sln_buffer)
{:ok, solution}
else
{:error, :not_upper_triangular}
end
end
def backward_substitution(%__MODULE__{}, %Expline.Vector{}), do: {:error, :dimension_mismatch}
@spec do_backward_substitution(__MODULE__.t(), vector(), integer(), tuple()) :: vector()
defp do_backward_substitution(_matrix, _vector, -1, solution) do
Expline.Vector.construct(tuple_size(solution), fn
i -> elem(solution, i)
end)
end
defp do_backward_substitution(matrix, vector, nth_row, solution) do
summation =
for i <- nth_row..(matrix.n_rows - 1),
i >= 0,
i <= matrix.n_rows do
at(matrix, nth_row, i) * elem(solution, i)
end
|> Enum.sum()
new_solution = (Expline.Vector.at(vector, nth_row) - summation) / at(matrix, nth_row, nth_row)
do_backward_substitution(
matrix,
vector,
nth_row - 1,
put_elem(solution, nth_row, new_solution)
)
end
@spec disaugment(__MODULE__.t()) ::
{:ok, {__MODULE__.t(), vector()}}
| {:error, :dimension_mismatch}
def disaugment(%__MODULE__{n_rows: n_rows, m_cols: m_cols} = matrix)
when m_cols > 1 do
augment =
Expline.Vector.construct(n_rows, fn
i ->
at(matrix, i, m_cols - 1)
end)
disaugmented_matrix =
construct(n_rows, m_cols - 1, fn
i, j ->
at(matrix, i, j)
end)
{:ok, {disaugmented_matrix, augment}}
end
def disaugment(%__MODULE__{}), do: {:error, :dimension_mismatch}
end
defimpl Inspect, for: Expline.Matrix do
import Inspect.Algebra
def inspect(%Expline.Matrix{internal: internal}, opts) do
internal
|> Tuple.to_list()
|> Enum.map(&to_doc(&1, opts))
|> Enum.intersperse(break("\n"))
|> concat
end
end
|
lib/expline/matrix.ex
| 0.839898
| 0.561275
|
matrix.ex
|
starcoder
|
defmodule AWS.ECR do
@moduledoc """
Amazon Elastic Container Registry
Amazon Elastic Container Registry (Amazon ECR) is a managed container image
registry service. Customers can use the familiar Docker CLI, or their
preferred client, to push, pull, and manage images. Amazon ECR provides a
secure, scalable, and reliable registry for your Docker or Open Container
Initiative (OCI) images. Amazon ECR supports private repositories with
resource-based permissions using IAM so that specific users or Amazon EC2
instances can access repositories and images.
"""
@doc """
Checks the availability of one or more image layers in a repository.
When an image is pushed to a repository, each image layer is checked to
verify if it has been uploaded before. If it has been uploaded, then the
image layer is skipped.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def batch_check_layer_availability(client, input, options \\ []) do
request(client, "BatchCheckLayerAvailability", input, options)
end
@doc """
Deletes a list of specified images within a repository. Images are
specified with either an `imageTag` or `imageDigest`.
You can remove a tag from an image by specifying the image's tag in your
request. When you remove the last tag from an image, the image is deleted
from your repository.
You can completely delete an image (and all of its tags) by specifying the
image's digest in your request.
"""
def batch_delete_image(client, input, options \\ []) do
request(client, "BatchDeleteImage", input, options)
end
@doc """
Gets detailed information for an image. Images are specified with either an
`imageTag` or `imageDigest`.
When an image is pulled, the BatchGetImage API is called once to retrieve
the image manifest.
"""
def batch_get_image(client, input, options \\ []) do
request(client, "BatchGetImage", input, options)
end
@doc """
Informs Amazon ECR that the image layer upload has completed for a
specified registry, repository name, and upload ID. You can optionally
provide a `sha256` digest of the image layer for data validation purposes.
When an image is pushed, the CompleteLayerUpload API is called once per
each new image layer to verify that the upload has completed.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def complete_layer_upload(client, input, options \\ []) do
request(client, "CompleteLayerUpload", input, options)
end
@doc """
Creates a repository. For more information, see [Amazon ECR
Repositories](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def create_repository(client, input, options \\ []) do
request(client, "CreateRepository", input, options)
end
@doc """
Deletes the lifecycle policy associated with the specified repository.
"""
def delete_lifecycle_policy(client, input, options \\ []) do
request(client, "DeleteLifecyclePolicy", input, options)
end
@doc """
Deletes a repository. If the repository contains images, you must either
delete all images in the repository or use the `force` option to delete the
repository.
"""
def delete_repository(client, input, options \\ []) do
request(client, "DeleteRepository", input, options)
end
@doc """
Deletes the repository policy associated with the specified repository.
"""
def delete_repository_policy(client, input, options \\ []) do
request(client, "DeleteRepositoryPolicy", input, options)
end
@doc """
Returns the scan findings for the specified image.
"""
def describe_image_scan_findings(client, input, options \\ []) do
request(client, "DescribeImageScanFindings", input, options)
end
@doc """
Returns metadata about the images in a repository.
<note> Beginning with Docker version 1.9, the Docker client compresses
image layers before pushing them to a V2 Docker registry. The output of the
`docker images` command shows the uncompressed image size, so it may return
a larger image size than the image sizes returned by `DescribeImages`.
</note>
"""
def describe_images(client, input, options \\ []) do
request(client, "DescribeImages", input, options)
end
@doc """
Describes image repositories in a registry.
"""
def describe_repositories(client, input, options \\ []) do
request(client, "DescribeRepositories", input, options)
end
@doc """
Retrieves an authorization token. An authorization token represents your
IAM authentication credentials and can be used to access any Amazon ECR
registry that your IAM principal has access to. The authorization token is
valid for 12 hours.
The `authorizationToken` returned is a base64 encoded string that can be
decoded and used in a `docker login` command to authenticate to a registry.
  The AWS CLI offers a `get-login-password` command that simplifies the
login process. For more information, see [Registry
Authentication](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth)
in the *Amazon Elastic Container Registry User Guide*.
"""
def get_authorization_token(client, input, options \\ []) do
request(client, "GetAuthorizationToken", input, options)
end
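  # A minimal sketch of consuming the token (assumes an already-configured
  # `client`; response body shape per the ECR GetAuthorizationToken API):
  #
  #     {:ok, %{"authorizationData" => [%{"authorizationToken" => token} | _]}, _resp} =
  #       AWS.ECR.get_authorization_token(client, %{})
  #
  #     # The token decodes to "AWS:<password>"; split it for `docker login`.
  #     [user, password] = token |> Base.decode64!() |> String.split(":", parts: 2)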
@doc """
Retrieves the pre-signed Amazon S3 download URL corresponding to an image
layer. You can only get URLs for image layers that are referenced in an
image.
When an image is pulled, the GetDownloadUrlForLayer API is called once per
image layer that is not already cached.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def get_download_url_for_layer(client, input, options \\ []) do
request(client, "GetDownloadUrlForLayer", input, options)
end
@doc """
Retrieves the lifecycle policy for the specified repository.
"""
def get_lifecycle_policy(client, input, options \\ []) do
request(client, "GetLifecyclePolicy", input, options)
end
@doc """
Retrieves the results of the lifecycle policy preview request for the
specified repository.
"""
def get_lifecycle_policy_preview(client, input, options \\ []) do
request(client, "GetLifecyclePolicyPreview", input, options)
end
@doc """
Retrieves the repository policy for the specified repository.
"""
def get_repository_policy(client, input, options \\ []) do
request(client, "GetRepositoryPolicy", input, options)
end
@doc """
Notifies Amazon ECR that you intend to upload an image layer.
When an image is pushed, the InitiateLayerUpload API is called once per
image layer that has not already been uploaded. Whether or not an image
layer has been uploaded is determined by the BatchCheckLayerAvailability
API action.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def initiate_layer_upload(client, input, options \\ []) do
request(client, "InitiateLayerUpload", input, options)
end
@doc """
Lists all the image IDs for the specified repository.
You can filter images based on whether or not they are tagged by using the
`tagStatus` filter and specifying either `TAGGED`, `UNTAGGED` or `ANY`. For
example, you can filter your results to return only `UNTAGGED` images and
then pipe that result to a `BatchDeleteImage` operation to delete them. Or,
you can filter your results to return only `TAGGED` images to list all of
the tags in your repository.
"""
def list_images(client, input, options \\ []) do
request(client, "ListImages", input, options)
end
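  # A sketch of the cleanup flow described above (hypothetical repository
  # name; response shape per the ECR ListImages API):
  #
  #     {:ok, %{"imageIds" => image_ids}, _resp} =
  #       AWS.ECR.list_images(client, %{
  #         "repositoryName" => "my-repo",
  #         "filter" => %{"tagStatus" => "UNTAGGED"}
  #       })
  #
  #     AWS.ECR.batch_delete_image(client, %{
  #       "repositoryName" => "my-repo",
  #       "imageIds" => image_ids
  #     })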
@doc """
List the tags for an Amazon ECR resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Creates or updates the image manifest and tags associated with an image.
When an image is pushed and all new image layers have been uploaded, the
PutImage API is called once to create or update the image manifest and the
tags associated with the image.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def put_image(client, input, options \\ []) do
request(client, "PutImage", input, options)
end
@doc """
Updates the image scanning configuration for the specified repository.
"""
def put_image_scanning_configuration(client, input, options \\ []) do
request(client, "PutImageScanningConfiguration", input, options)
end
@doc """
Updates the image tag mutability settings for the specified repository. For
more information, see [Image Tag
Mutability](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def put_image_tag_mutability(client, input, options \\ []) do
request(client, "PutImageTagMutability", input, options)
end
@doc """
Creates or updates the lifecycle policy for the specified repository. For
more information, see [Lifecycle Policy
Template](https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html).
"""
def put_lifecycle_policy(client, input, options \\ []) do
request(client, "PutLifecyclePolicy", input, options)
end
@doc """
Applies a repository policy to the specified repository to control access
permissions. For more information, see [Amazon ECR Repository
Policies](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def set_repository_policy(client, input, options \\ []) do
request(client, "SetRepositoryPolicy", input, options)
end
@doc """
  Starts an image vulnerability scan. An image scan can only be started once
  per day on an individual image. This limit includes scans performed on the
  initial push. For more information, see [Image
Scanning](https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html)
in the *Amazon Elastic Container Registry User Guide*.
"""
def start_image_scan(client, input, options \\ []) do
request(client, "StartImageScan", input, options)
end
@doc """
Starts a preview of a lifecycle policy for the specified repository. This
allows you to see the results before associating the lifecycle policy with
the repository.
"""
def start_lifecycle_policy_preview(client, input, options \\ []) do
request(client, "StartLifecyclePolicyPreview", input, options)
end
@doc """
Adds specified tags to a resource with the specified ARN. Existing tags on
a resource are not changed if they are not specified in the request
parameters.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Deletes specified tags from a resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Uploads an image layer part to Amazon ECR.
When an image is pushed, each new image layer is uploaded in parts. The
maximum size of each image layer part can be 20971520 bytes (or about
20MB). The UploadLayerPart API is called once per each new image layer
part.
<note> This operation is used by the Amazon ECR proxy and is not generally
used by customers for pulling and pushing images. In most cases, you should
use the `docker` CLI to pull, tag, and push images.
</note>
"""
def upload_layer_part(client, input, options \\ []) do
request(client, "UploadLayerPart", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "ecr"}
host = build_host("api.ecr", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AmazonEC2ContainerRegistry_V20150921.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/ecr.ex
| 0.915479
| 0.525734
|
ecr.ex
|
starcoder
|
defmodule K8s.Operation do
@moduledoc "Encapsulates Kubernetes REST API operations."
alias K8s.Operation
@derive {Jason.Encoder, except: [:path_params]}
@allow_http_body [:put, :patch, :post]
@verb_map %{
list_all_namespaces: :get,
list: :get,
deletecollection: :delete,
create: :post,
update: :put,
patch: :patch
}
defstruct method: nil,
verb: nil,
api_version: nil,
name: nil,
data: nil,
path_params: [],
query_params: %{},
label_selector: nil
@typedoc "`K8s.Operation` name. May be an atom, string, or tuple of `{resource, subresource}`."
@type name_t :: binary() | atom() | {binary(), binary()}
@typedoc """
* `api_version` - API `groupVersion`, AKA `apiVersion`
  * `name` - The name of the REST operation (Kubernetes kind/resource/subresource). This is *not* _always_ the same as the `kind` key in the `data` field, e.g. `deployments` when POSTing or GETting a deployment.
* `data` - HTTP request body to submit when applicable. (POST, PUT, PATCH, etc)
* `method` - HTTP Method
* `verb` - Kubernetes [REST API verb](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb) (`deletecollection`, `update`, `create`, `watch`, etc)
* `path_params` - Parameters to interpolate into the Kubernetes REST URL
* `query_params` - Query parameter (`map`). Merged w/ params provided to any `K8s.Client.Runner`. `K8s.Client.Runner` options win.
`name` would be `deployments` in the case of a deployment, but may be `deployments/status` or `deployments/scale` for Status and Scale subresources.
## `name` and `data` field examples
The following example would `update` the *nginx* deployment's `Scale`. Note the `deployments/scale` operation will have a `Scale` *data* payload:
```elixir
%K8s.Operation{
method: :put,
verb: :update,
api_version: "v1", # api version of the "Scale" kind
name: "deployments/scale",
data: %{"apiVersion" => "v1", "kind" => "Scale"}, # `data` is of kind "Scale"
path_params: [name: "nginx", namespace: "default"]
}
```
The following example would `update` the *nginx* deployment's `Status`. Note the `deployments/status` operation will have a `Deployment` *data* payload:
```elixir
%K8s.Operation{
method: :put,
verb: :update,
api_version: "apps/v1", # api version of the "Deployment" kind
name: "deployments/status",
data: %{"apiVersion" => "apps/v1", "kind" => "Deployment"}, # `data` is of kind "Deployment"
path_params: [name: "nginx", namespace: "default"]
}
```
"""
@type t :: %__MODULE__{
method: atom(),
verb: atom(),
api_version: binary(),
name: name_t(),
data: map() | nil,
path_params: keyword(atom()),
label_selector: K8s.Selector.t() | nil,
query_params: map()
}
@doc """
Builds an `Operation` given a verb and a k8s resource.
## Examples
iex> deploy = %{"apiVersion" => "apps/v1", "kind" => "Deployment", "metadata" => %{"namespace" => "default", "name" => "nginx"}}
...> K8s.Operation.build(:update, deploy)
%K8s.Operation{
method: :put,
verb: :update,
data: %{"apiVersion" => "apps/v1", "kind" => "Deployment", "metadata" => %{"namespace" => "default", "name" => "nginx"}},
path_params: [namespace: "default", name: "nginx"],
api_version: "apps/v1",
name: "Deployment"
}
"""
@spec build(atom, map) :: __MODULE__.t()
def build(
verb,
%{
"apiVersion" => v,
"kind" => k,
"metadata" => %{"name" => name, "namespace" => ns}
} = resource
) do
build(verb, v, k, [namespace: ns, name: name], resource)
end
def build(
verb,
%{"apiVersion" => v, "kind" => k, "metadata" => %{"name" => name}} = resource
) do
build(verb, v, k, [name: name], resource)
end
def build(
verb,
%{"apiVersion" => v, "kind" => k, "metadata" => %{"namespace" => ns}} = resource
) do
build(verb, v, k, [namespace: ns], resource)
end
def build(verb, %{"apiVersion" => v, "kind" => k} = resource) do
build(verb, v, k, [], resource)
end
@doc """
  Builds an `Operation` given a verb and k8s resource info.
*Note:* The `name` here may be a `Kind` and not a REST resource name in the event that the operation was built using a map.
Use `K8s.Discovery.ResourceFinder.resource_name_for_kind/3` to get the correct REST resource name, given a `kind`.
## Examples
Building a GET deployment operation:
iex> K8s.Operation.build(:get, "apps/v1", :deployment, [namespace: "default", name: "nginx"])
%K8s.Operation{
method: :get,
verb: :get,
data: nil,
path_params: [namespace: "default", name: "nginx"],
api_version: "apps/v1",
name: :deployment
}
Building a GET deployments/status operation:
iex> K8s.Operation.build(:get, "apps/v1", "deployments/status", [namespace: "default", name: "nginx"])
%K8s.Operation{
method: :get,
verb: :get,
data: nil,
path_params: [namespace: "default", name: "nginx"],
api_version: "apps/v1",
name: "deployments/status"
}
"""
@spec build(atom, binary, name_t(), keyword(), map() | nil) :: __MODULE__.t()
def build(verb, api_version, name_or_kind, path_params, data \\ nil) do
http_method = @verb_map[verb] || verb
http_body =
case http_method do
method when method in @allow_http_body -> data
_ -> nil
end
%__MODULE__{
method: http_method,
verb: verb,
data: http_body,
api_version: api_version,
name: name_or_kind,
path_params: path_params
}
end
@doc "Converts a `K8s.Operation` into a URL path."
@spec to_path(Operation.t()) ::
{:ok, String.t()} | {:error, :missing_required_param, list(atom)}
def to_path(%Operation{} = operation), do: Operation.Path.build(operation)
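  # For instance (a sketch; the exact path shape is determined by
  # `K8s.Operation.Path.build/1`):
  #
  #     op = K8s.Operation.build(:get, "apps/v1", "deployments", namespace: "default", name: "nginx")
  #     {:ok, path} = K8s.Operation.to_path(op)
  #     # path resembles "/apis/apps/v1/namespaces/default/deployments/nginx"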
@doc """
Add a query param to an operation
## Examples
iex> operation = %K8s.Operation{}
...> K8s.Operation.put_query_param(operation, "foo", "bar")
%K8s.Operation{query_params: %{"foo" => "bar"}}
"""
  @spec put_query_param(Operation.t(), atom() | binary(), String.t() | K8s.Selector.t()) :: Operation.t()
def put_query_param(%Operation{query_params: params} = op, key, value) do
new_params = Map.put(params, key, value)
%Operation{op | query_params: new_params}
end
@doc """
Get a query param of an operation
## Examples
iex> operation = %K8s.Operation{query_params: %{foo: "bar"}}
...> K8s.Operation.get_query_param(operation, :foo)
"bar"
"""
@spec get_query_param(Operation.t(), atom()) :: any()
def get_query_param(%Operation{query_params: params}, key), do: Map.get(params, key)
end
|
lib/k8s/operation.ex
| 0.918462
| 0.690403
|
operation.ex
|
starcoder
|
defmodule ExVmstats do
use GenServer
defstruct [:backend, :use_histogram, :interval, :sched_time, :prev_sched, :timer_ref, :namespace, :prev_io, :prev_gc]
@timer_msg :interval_elapsed
def start_link do
GenServer.start_link(__MODULE__, [])
end
def init(_args) do
interval = Application.get_env(:ex_vmstats, :interval, 3000)
namespace = Application.get_env(:ex_vmstats, :namespace, "vm_stats")
use_histogram = Application.get_env(:ex_vmstats, :use_histogram, false)
sched_time =
case {sched_time_available?(), Application.get_env(:ex_vmstats, :sched_time, false)} do
{true, true} -> :enabled
{true, _} -> :disabled
{false, _} -> :unavailable
end
prev_sched =
:erlang.statistics(:scheduler_wall_time)
|> Enum.sort
backend =
Application.get_env(:ex_vmstats, :backend, :ex_statsd)
|> get_backend
{{:input, input}, {:output, output}} = :erlang.statistics(:io)
state = %__MODULE__{
backend: backend,
use_histogram: use_histogram,
interval: interval,
sched_time: sched_time,
prev_sched: prev_sched,
timer_ref: :erlang.start_timer(interval, self(), @timer_msg),
namespace: namespace,
prev_io: {input, output},
prev_gc: :erlang.statistics(:garbage_collection)
}
{:ok, state}
end
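  # Example configuration (a sketch; the keys mirror the
  # `Application.get_env/3` calls above):
  #
  #     config :ex_vmstats,
  #       backend: :ex_statsd,
  #       interval: 3_000,
  #       namespace: "vm_stats",
  #       use_histogram: false,
  #       sched_time: true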
def handle_info({:timeout, _timer_ref, @timer_msg}, state) do
%__MODULE__{interval: interval, namespace: namespace, backend: backend} = state
metric_name = fn (name) -> metric(namespace, name) end
memory_metric_name = fn (name) -> memory_metric(namespace, name) end
# Processes
gauge_or_hist(state, :erlang.system_info(:process_count), metric_name.("proc_count"))
gauge_or_hist(state, :erlang.system_info(:process_limit), metric_name.("proc_limit"))
# Messages in queues
total_messages =
Enum.reduce Process.list, 0, fn pid, acc ->
case Process.info(pid, :message_queue_len) do
{:message_queue_len, count} -> count + acc
_ -> acc
end
end
gauge_or_hist(state, total_messages, metric_name.("messages_in_queues"))
# Modules loaded
gauge_or_hist(state, length(:code.all_loaded), metric_name.("modules"))
# Queued up processes (lower is better)
gauge_or_hist(state, :erlang.statistics(:run_queue), metric_name.("run_queue"))
# Error logger backlog (lower is better)
error_logger_backlog =
Process.whereis(:error_logger)
|> Process.info(:message_queue_len)
|> elem(1)
gauge_or_hist(state, error_logger_backlog, metric_name.("error_logger_queue_len"))
# Memory usage. There are more options available, but not all were kept.
# Memory usage is in bytes.
mem = :erlang.memory
for metric <- [:total, :processes_used, :atom_used, :binary, :ets] do
gauge_or_hist(state, Keyword.get(mem, metric), memory_metric_name.(metric))
end
# Incremental values
%__MODULE__{prev_io: {old_input, old_output}, prev_gc: {old_gcs, old_words, _}} = state
{{:input, input}, {:output, output}} = :erlang.statistics(:io)
gc = {gcs, words, _} = :erlang.statistics(:garbage_collection)
backend.counter(input - old_input, metric_name.("io.bytes_in"))
backend.counter(output - old_output, metric_name.("io.bytes_out"))
backend.counter(gcs - old_gcs, metric_name.("gc.count"))
backend.counter(words - old_words, metric_name.("gc.words_reclaimed"))
# Reductions across the VM, excluding current time slice, already incremental
{_, reds} = :erlang.statistics(:reductions)
backend.counter(reds, metric_name.("reductions"))
    # Scheduler wall time
sched =
case state.sched_time do
:enabled ->
new_sched = Enum.sort(:erlang.statistics(:scheduler_wall_time))
for {sid, active, total} <- wall_time_diff(state.prev_sched, new_sched) do
scheduler_metric_base = "#{namespace}.scheduler_wall_time.#{sid}"
backend.timer(active, scheduler_metric_base <> ".active")
backend.timer(total, scheduler_metric_base <> ".total")
end
new_sched
_ ->
nil
end
timer_ref = :erlang.start_timer(interval, self(), @timer_msg)
{:noreply, %{state | timer_ref: timer_ref, prev_sched: sched, prev_io: {input, output}, prev_gc: gc}}
end
defp metric(namespace, metric) do
"#{namespace}.#{metric}"
end
defp memory_metric(namespace, metric) do
"#{namespace}.memory.#{metric}"
end
defp gauge_or_hist(%__MODULE__{use_histogram: true, backend: backend}, value, metric) do
backend.histogram(value, metric)
end
defp gauge_or_hist(%__MODULE__{backend: backend}, value, metric), do: backend.gauge(value, metric)
defp get_backend(:ex_statsd), do: ExVmstats.Backends.ExStatsD
defp get_backend(backend), do: backend
defp sched_time_available? do
    try do
      :erlang.system_flag(:scheduler_wall_time, true)
    rescue
      ArgumentError -> false
    catch
      _ -> true
    else
      _ -> true
    end
end
defp wall_time_diff(prev_sched, new_sched) do
for {{i, prev_active, prev_total}, {i, new_active, new_total}} <- Enum.zip(prev_sched, new_sched) do
{i, new_active - prev_active, new_total - prev_total}
end
end
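  # e.g. wall_time_diff([{1, 10, 100}], [{1, 15, 120}]) #=> [{1, 5, 20}]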
end
|
lib/ex_vmstats.ex
| 0.708112
| 0.412323
|
ex_vmstats.ex
|
starcoder
|
defmodule Nebulex.Hook do
@moduledoc """
This module specifies the behaviour for pre/post hooks callbacks.
These functions are defined in order to intercept any cache operation
and be able to execute a set of actions before and/or after the operation
takes place.
## Execution modes
  It is possible to set up how the hooks are evaluated. The
  `pre_hooks/0` and `post_hooks/0` callbacks must return a tuple
  `{mode, hook_funs}`, where the first element `mode` defines the
  execution mode. The available modes are:
* `:async` - (the default) all hooks are evaluated asynchronously
(in parallel) and their results are ignored.
* `:sync` - hooks are evaluated synchronously (sequentially) and their
results are ignored.
* `:pipe` - similar to `:sync` but each hook result is passed to the
next one and so on, until the last hook evaluation is returned.
## Example
defmodule MyApp.MyCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
def pre_hooks do
{:async, [... your pre hook functions ...]}
end
def post_hooks do
{:pipe, [... your post hook functions ...]}
end
end
"""
@typedoc "Defines a cache command"
@type command :: {Nebulex.Cache.t(), action :: atom, args :: [any]}
@typedoc "Defines the hook callback function"
@type hook_fun :: (result :: any, command -> any)
@typedoc "Hook execution mode"
@type mode :: :async | :sync | :pipe
@doc """
  Returns a list of hook functions that will be executed before the cache
  action is invoked.
## Examples
defmodule MyCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
def pre_hooks do
pre_hook =
fn
(result, {_, :get, _} = call) ->
# do your stuff ...
(result, _) ->
result
end
{:async, [pre_hook]}
end
end
"""
@callback pre_hooks() :: {mode, [hook_fun]}
@doc """
  Returns a list of hook functions that will be executed after the cache
  action is invoked.
## Examples
defmodule MyCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
def post_hooks do
{:pipe, [&post_hook/2]}
end
def post_hook(result, {_, :set, _} = call) do
send(:hooked_cache, call)
end
def post_hook(_, _) do
:noop
end
end
"""
@callback post_hooks() :: {mode, [hook_fun]}
@doc """
Evaluates the `hooks` according to the given execution `mode`.
"""
@spec eval({mode, hooks :: [hook_fun]}, command, result :: any) :: any
def eval({_mode, []}, _command, result), do: result
def eval({mode, hooks}, {_cache, _action, _args} = command, result) do
Enum.reduce(hooks, result, fn
hook, acc when is_function(hook, 2) and mode == :pipe ->
hook.(acc, command)
hook, ^result when is_function(hook, 2) and mode == :sync ->
_ = hook.(result, command)
result
hook, ^result when is_function(hook, 2) ->
_ = Task.start_link(:erlang, :apply, [hook, [result, command]])
result
_, acc when mode == :pipe ->
acc
_, _ ->
result
end)
end
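  # For example (a sketch; `MyCache` is a placeholder module name):
  #
  #     hooks = {:pipe, [fn result, _command -> result + 1 end]}
  #     Nebulex.Hook.eval(hooks, {MyCache, :get, ["key"]}, 1)
  #     #=> 2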
end
|
lib/nebulex/hook.ex
| 0.9064
| 0.625424
|
hook.ex
|
starcoder
|
defmodule Raygun.Format do
@moduledoc """
This module builds payloads of error messages that Raygun will understand.
These functions return maps of data which will be encoding as JSON prior
to submission to Raygun.
"""
@raygun_version Mix.Project.config()[:version]
@doc """
Builds an error payload that Raygun will understand for a string.
"""
def message_payload(msg, opts) when is_list(msg) do
msg |> List.to_string() |> message_payload(opts)
end
def message_payload(msg, opts) do
%{
"occurredOn" => now(),
"details" =>
details()
|> Map.merge(environment())
|> Map.merge(user(opts))
|> Map.merge(custom(opts))
|> Map.merge(request(Keyword.get(opts, :conn, nil)))
|> Map.merge(response(Keyword.get(opts, :conn, nil)))
|> Map.merge(%{"error" => %{"message" => msg}})
}
end
@doc """
Builds an error payload that Raygun will understand for an exception and its
corresponding stacktrace.
"""
def stacktrace_payload(stacktrace, exception, opts) do
%{
"occurredOn" => now(),
"details" =>
details()
|> Map.merge(err(stacktrace, exception))
|> Map.merge(environment())
|> Map.merge(user(opts))
|> Map.merge(custom(opts))
}
end
@doc """
Builds an error payload that Raygun will understand for an exception that was
caught in our Plug.
"""
def conn_payload(conn, stacktrace, exception, opts) do
%{
"occurredOn" => now(),
"details" =>
details(opts)
|> Map.merge(err(stacktrace, exception))
|> Map.merge(environment())
|> Map.merge(request(conn))
|> Map.merge(response(conn))
|> Map.merge(user(opts))
|> Map.merge(custom(opts))
}
end
@doc """
Return custom information. Tags are configured per application via config and
user custom data can be provided per error.
"""
def custom(opts) do
%{
"tags" => Raygun.Util.get_env(:raygun, :tags),
"userCustomData" => Enum.into(opts |> Keyword.delete(:user), %{})
}
end
@doc """
Get the logged in user from the opts if one is provided.
If not, it gets the system user if one is specified.
"""
def user(opts) do
cond do
Keyword.has_key?(opts, :user) and Keyword.get(opts, :user) ->
%{"user" => Keyword.get(opts, :user)}
Raygun.Util.get_env(:raygun, :system_user) ->
%{"user" => Raygun.Util.get_env(:raygun, :system_user)}
true ->
%{}
end
end
@doc """
Return a map of information about the environment in which the bug was encountered.
"""
def environment do
%{
"environment" => %{
"osVersion" => os_version(),
"architecture" => List.to_string(:erlang.system_info(:system_architecture)),
"packageVersion" => List.to_string(:erlang.system_info(:system_version)),
"processorCount" => :erlang.system_info(:logical_processors_online),
"totalPhysicalMemory" => :erlang.memory(:total),
"deviceName" => device_name(),
"diskSpaceFree" => []
}
}
end
@doc """
  Returns details about the client and server machine.
"""
def details(opts \\ []) do
%{
"machineName" => device_name(),
"version" => app_version(opts),
"client" => %{
"name" => Raygun.Util.get_env(:raygun, :client_name),
"version" => @raygun_version,
"clientUrl" => Raygun.Util.get_env(:raygun, :url)
}
}
end
defp now, do: DateTime.to_iso8601(DateTime.utc_now())
@doc """
Given a Plug Conn return a map containing information about the request.
"""
def request(nil), do: %{}
def request(conn) do
%{
"request" => %{
"hostName" => conn.host,
"url" => conn.request_path,
"httpMethod" => conn.method,
"iPAddress" => find_ip_address(conn),
"queryString" => Plug.Conn.fetch_query_params(conn).query_params,
"form" => find_params(conn),
"headers" => Raygun.Util.format_headers(conn.req_headers),
"rawData" => %{}
}
}
end
@doc """
Given a Plug Conn return a map containing information about the response.
"""
def response(nil), do: %{}
def response(conn) do
%{"response" => %{"statusCode" => conn.status}}
end
@doc """
Given a stacktrace and an exception, return a map with the error data.
"""
def err(stacktrace, error) do
err(
stacktrace,
error,
stacktrace_entry(Enum.at(stacktrace, 0))
)
end
def err(stacktrace, error, line) do
%{
"error" => %{
"innerError" => nil,
"data" => %{
"fileName" => line["fileName"],
"lineNumber" => line["lineNumber"],
"function" => line["methodName"]
},
"className" => line["className"],
"message" => Exception.message(error),
"stackTrace" => stacktrace(stacktrace)
}
}
end
@doc """
Given a stacktrace return a list of maps for the frames.
"""
def stacktrace(s) do
Enum.map(s, &stacktrace_entry/1)
end
@doc """
Given a stacktrace frame, return a map with the information in a structure
that Raygun will understand.
"""
def stacktrace_entry({function, arity_or_args, location}) do
stacktrace_entry({__MODULE__, function, arity_or_args, location})
end
def stacktrace_entry({module, function, arity_or_args, location}) do
%{
"lineNumber" => Raygun.Util.line_from(location),
"className" => Raygun.Util.mod_for(module),
"fileName" => Raygun.Util.file_from(location),
"methodName" => Exception.format_mfa(module, function, arity_or_args)
}
end
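  # For instance, a frame such as
  # `{MyApp.Worker, :run, 2, [file: 'lib/worker.ex', line: 10]}` (a sketch;
  # the exact output depends on the `Raygun.Util` helpers) would map to a map
  # with "lineNumber" 10, "fileName" "lib/worker.ex" and
  # "methodName" "MyApp.Worker.run/2".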
defp app_version(opts) do
if opts[:version] do
opts[:version]
else
Raygun.Util.get_env(:raygun, :client_version)
end
end
defp device_name do
case :inet.gethostname() do
{:ok, hostname} -> List.to_string(hostname)
_other -> ""
end
end
defp os_version do
{os_type, os_flavor} = :os.type()
"#{os_type} - #{os_flavor}"
end
defp find_params(%Plug.Conn{params: %Plug.Conn.Unfetched{}} = conn) do
Plug.Parsers.call(conn, []).params
end
defp find_params(conn), do: conn.params
defp find_ip_address(conn) do
conn |> proxy_or_remote() |> :inet_parse.ntoa() |> to_string()
end
defp proxy_or_remote(conn) do
from_proxy(conn.adapter) || conn.remote_ip
end
defp from_proxy({_module, request}) do
request
|> Map.get(:proxy_header, %{})
|> Map.get(:src_address, nil)
end
end
|
lib/raygun/format.ex
| 0.773687
| 0.413832
|
format.ex
|
starcoder
|
defmodule SieveOfAtkin do
@moduledoc """
Documentation for `SieveOfAtkin`.
"""
@doc """
Generates prime numbers up to a given maximum.
## Examples
iex> SieveOfAtkin.generate_primes(30) |> Enum.sort()
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
"""
def generate_primes(n)
def generate_primes(n) when n < 0 do
raise "Can't compute negative primes"
end
def generate_primes(n) when n <= 1, do: []
def generate_primes(n) when n <= 2, do: [2]
def generate_primes(n) when n <= 3, do: [2, 3]
def generate_primes(n) when n <= 5, do: [2, 3, 5]
def generate_primes(max_value) do
max_power = floor(:math.sqrt(max_value))
xy_pairs =
1..max_power
|> Stream.flat_map(fn x ->
Stream.map(1..max_power, fn y ->
{x, y}
end)
end)
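    # Sieve of Atkin candidate tests (with n <= max_value in every case):
    #   4x² + y²  is a candidate when rem(n, 12) is 1 or 5
    #   3x² + y²  is a candidate when rem(n, 12) is 7
    #   3x² - y²  is a candidate when rem(n, 12) is 11 and x > y
    # A number stays prime if it passes its test an odd number of times and
    # is not a multiple of a perfect square (filtered below via `non_primes`).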
first_possible_primes =
Task.async(fn ->
xy_pairs
|> Flow.from_enumerable()
|> Flow.map(fn {x, y} ->
(4 * x * x) + (y * y)
end)
|> Flow.filter(fn n ->
(n <= max_value) and (Integer.mod(n, 12) == 1 or Integer.mod(n, 12) == 5)
end)
|> Flow.partition()
|> Flow.reduce(fn -> %{} end, fn key, acc ->
case acc do
%{^key => value} -> %{acc | key => value + 1}
%{} -> Map.put(acc, key, 1)
end
end)
|> Flow.filter(fn {_n, freq} ->
Integer.mod(freq, 2) == 1
end)
|> Flow.map(fn {n, _freq} ->
n
end)
|> Enum.to_list()
end)
second_possible_primes =
Task.async(fn ->
xy_pairs
|> Flow.from_enumerable()
|> Flow.map(fn {x, y} ->
(3 * x * x) + (y * y)
end)
|> Flow.filter(fn n ->
(n <= max_value) and (Integer.mod(n, 12) == 7)
end)
|> Flow.partition()
|> Flow.reduce(fn -> %{} end, fn key, acc ->
case acc do
%{^key => value} -> %{acc | key => value + 1}
%{} -> Map.put(acc, key, 1)
end
end)
|> Flow.filter(fn {_n, freq} ->
Integer.mod(freq, 2) == 1
end)
|> Flow.map(fn {n, _freq} ->
n
end)
|> Enum.to_list()
end)
third_possible_primes =
Task.async(fn ->
xy_pairs
|> Flow.from_enumerable()
|> Flow.filter(fn {x, y} -> x > y end)
|> Flow.map(fn {x, y} ->
(3 * x * x) - (y * y)
end)
|> Flow.filter(fn n ->
(n <= max_value) and (Integer.mod(n, 12) == 11)
end)
|> Flow.partition()
|> Flow.reduce(fn -> %{} end, fn key, acc ->
case acc do
%{^key => value} -> %{acc | key => value + 1}
%{} -> Map.put(acc, key, 1)
end
end)
|> Flow.filter(fn {_n, freq} ->
Integer.mod(freq, 2) == 1
end)
|> Flow.map(fn {n, _freq} ->
n
end)
|> Enum.to_list()
end)
possible_primes =
Task.await(first_possible_primes, :infinity)
|> Stream.concat(Task.await(second_possible_primes, :infinity))
|> Stream.concat(Task.await(third_possible_primes, :infinity))
non_primes =
possible_primes
|> Flow.from_enumerable()
|> Flow.flat_map(fn n ->
sq = n * n
Stream.iterate(1, &(&1 + 1))
|> Stream.map(&(&1 * sq))
|> Stream.take_while(&(&1 <= max_value))
end)
|> Flow.uniq()
|> MapSet.new()
possible_primes
|> Stream.reject(fn n ->
MapSet.member?(non_primes, n)
end)
|> Stream.concat([2, 3])
|> Enum.to_list()
end
end
|
lib/sieve_of_atkin.ex
| 0.847905
| 0.560253
|
sieve_of_atkin.ex
|
starcoder
|
defmodule Bandit.HTTP2.Frame.Data do
@moduledoc false
import Bandit.HTTP2.Frame.Flags
alias Bandit.HTTP2.{Connection, Errors, Frame, Stream}
defstruct stream_id: nil,
end_stream: false,
data: nil
@typedoc "An HTTP/2 DATA frame"
@type t :: %__MODULE__{
stream_id: Stream.stream_id(),
end_stream: boolean(),
data: iodata()
}
@end_stream_bit 0
@padding_bit 3
@spec deserialize(Frame.flags(), Stream.stream_id(), iodata()) ::
{:ok, t()} | {:error, Connection.error()}
def deserialize(_flags, 0, _payload) do
{:error,
{:connection, Errors.protocol_error(), "DATA frame with zero stream_id (RFC7540§6.1)"}}
end
def deserialize(flags, stream_id, <<padding_length::8, rest::binary>>)
when set?(flags, @padding_bit) and byte_size(rest) >= padding_length do
{:ok,
%__MODULE__{
stream_id: stream_id,
end_stream: set?(flags, @end_stream_bit),
data: binary_part(rest, 0, byte_size(rest) - padding_length)
}}
end
def deserialize(flags, stream_id, <<data::binary>>) when clear?(flags, @padding_bit) do
{:ok,
%__MODULE__{
stream_id: stream_id,
end_stream: set?(flags, @end_stream_bit),
data: data
}}
end
def deserialize(flags, _stream_id, <<_padding_length::8, _rest::binary>>)
when set?(flags, @padding_bit) do
{:error,
{:connection, Errors.protocol_error(),
"DATA frame with invalid padding length (RFC7540§6.1)"}}
end
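  # For example (a sketch; flag 0x1 sets the END_STREAM bit and leaves the
  # padding bit clear):
  #
  #     deserialize(0x1, 123, "hello")
  #     #=> {:ok, %__MODULE__{stream_id: 123, end_stream: true, data: "hello"}}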
defimpl Frame.Serializable do
alias Bandit.HTTP2.Frame.Data
@end_stream_bit 0
def serialize(%Data{} = frame, max_frame_size) do
data_length = IO.iodata_length(frame.data)
if data_length <= max_frame_size do
flags = if frame.end_stream, do: [@end_stream_bit], else: []
[{0x0, set(flags), frame.stream_id, frame.data}]
else
<<this_frame::binary-size(max_frame_size), rest::binary>> =
IO.iodata_to_binary(frame.data)
[
{0x0, 0x00, frame.stream_id, this_frame}
| Frame.Serializable.serialize(
%Data{
stream_id: frame.stream_id,
end_stream: frame.end_stream,
data: rest
},
max_frame_size
)
]
end
end
end
end
|
lib/bandit/http2/frame/data.ex
| 0.752104
| 0.405831
|
data.ex
|
starcoder
|
defmodule Contex.OrdinalScale do
@moduledoc """
An ordinal scale to map discrete values (text or numeric) to a plotting coordinate system.
An ordinal scale is commonly used for the category axis in barcharts. It has to be able
to generate the centre-point of the bar (e.g. for tick annotations) as well as the
available width the bar or bar-group has to fill.
In order to do that the ordinal scale requires a 'padding' option to be set (defaults to 0.5 in the scale)
that defines the gaps between the bars / categories. The ordinal scale has two mapping functions for
the data domain to the plotting range. One returns the centre point (`range_to_domain_fn`) and one
returns the "band" the category can occupy (`domain_to_range_band_fn`).
An `OrdinalScale` is initialised with a list of values which represent the categories. The scale generates
a tick for each value in that list.
Typical usage of this scale would be as follows:
iex> category_scale
...> = Contex.OrdinalScale.new(["Hippo", "Turtle", "Rabbit"])
...> |> Contex.Scale.set_range(0.0, 9.0)
...> |> Contex.OrdinalScale.padding(2)
...> category_scale.domain_to_range_fn.("Turtle")
4.5
iex> category_scale.domain_to_range_band_fn.("Hippo")
{1.0, 2.0}
iex> category_scale.domain_to_range_band_fn.("Turtle")
{4.0, 5.0}
"""
alias __MODULE__
defstruct [
:domain,
:range,
:padding,
:domain_to_range_fn,
:range_to_domain_fn,
:domain_to_range_band_fn
]
@type t() :: %__MODULE__{}
@doc """
Creates a new ordinal scale.
"""
@spec new(list()) :: Contex.OrdinalScale.t()
def new(domain) when is_list(domain) do
%OrdinalScale{domain: domain, padding: 0.5}
end
@doc """
Updates the domain data for the scale.
"""
@spec domain(Contex.OrdinalScale.t(), list()) :: Contex.OrdinalScale.t()
def domain(%OrdinalScale{} = ordinal_scale, data) when is_list(data) do
%{ordinal_scale | domain: data}
|> update_transform_funcs()
end
@doc """
Sets the padding between the categories for the scale.
Defaults to 0.5.
Defined in terms of plotting coordinates.
*Note* that if the padding is greater than the calculated width of each category
you might get strange effects (e.g. the end of a band being before the beginning)
"""
def padding(%OrdinalScale{} = scale, padding) when is_number(padding) do
# We need to update the transform functions if we change the padding as the band calculations need it
%{scale | padding: padding}
|> update_transform_funcs()
end
@doc false
def update_transform_funcs(
%OrdinalScale{domain: domain, range: {start_r, end_r}, padding: padding} = scale
)
when is_list(domain) and is_number(start_r) and is_number(end_r) and is_number(padding) do
domain_count = Kernel.length(domain)
range_width = end_r - start_r
item_width =
case domain_count do
0 -> 0.0
_ -> range_width / domain_count
end
flip_padding =
case start_r < end_r do
true -> 1.0
_ -> -1.0
end
# Returns centre point of bucket
domain_to_range_fn = fn domain_val ->
case Enum.find_index(domain, fn x -> x == domain_val end) do
nil ->
start_r
index ->
start_r + item_width / 2.0 + index * item_width
end
end
domain_to_range_band_fn = fn domain_val ->
case Enum.find_index(domain, fn x -> x == domain_val end) do
nil ->
{start_r, start_r}
index ->
band_start = start_r + flip_padding * padding / 2.0 + index * item_width
band_end = start_r + (index + 1) * item_width - flip_padding * padding / 2.0
{band_start, band_end}
end
end
range_to_domain_fn =
case range_width do
        0 ->
          fn _range_val -> "" end
_ ->
fn range_val ->
case domain_count do
0 ->
""
_ ->
bucket_index = Kernel.trunc((range_val - start_r) / item_width)
Enum.at(domain, bucket_index)
end
end
end
%{
scale
| domain_to_range_fn: domain_to_range_fn,
range_to_domain_fn: range_to_domain_fn,
domain_to_range_band_fn: domain_to_range_band_fn
}
end
def update_transform_funcs(%OrdinalScale{} = scale), do: scale
@doc """
Returns the band for the nominated category in terms of plotting coordinate system.
If the category isn't found, the start of the plotting range is returned.
"""
@spec get_band(Contex.OrdinalScale.t(), any) :: {number(), number()}
def get_band(%OrdinalScale{domain_to_range_band_fn: domain_to_range_band_fn}, domain_value)
when is_function(domain_to_range_band_fn) do
domain_to_range_band_fn.(domain_value)
end
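  # With the scale from the moduledoc (domain ["Hippo", "Turtle", "Rabbit"],
  # range 0.0..9.0, padding 2), for example:
  #
  #     get_band(category_scale, "Rabbit")
  #     #=> {7.0, 8.0}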
defimpl Contex.Scale do
def ticks_domain(%OrdinalScale{domain: domain}), do: domain
def ticks_range(%OrdinalScale{domain_to_range_fn: transform_func} = scale)
when is_function(transform_func) do
ticks_domain(scale)
|> Enum.map(transform_func)
end
def domain_to_range_fn(%OrdinalScale{domain_to_range_fn: domain_to_range_fn}),
do: domain_to_range_fn
def domain_to_range(%OrdinalScale{domain_to_range_fn: transform_func}, range_val)
when is_function(transform_func) do
transform_func.(range_val)
end
def get_range(%OrdinalScale{range: {min_r, max_r}}), do: {min_r, max_r}
def set_range(%OrdinalScale{} = scale, start, finish)
when is_number(start) and is_number(finish) do
%{scale | range: {start, finish}}
|> OrdinalScale.update_transform_funcs()
end
def set_range(%OrdinalScale{} = scale, {start, finish})
when is_number(start) and is_number(finish),
do: set_range(scale, start, finish)
def get_formatted_tick(_, tick_val), do: tick_val
end
end
|
lib/chart/scale/ordinal_scale.ex
| 0.899481
| 0.774626
|
ordinal_scale.ex
|
starcoder
|
defmodule NcsaHmac.EndpointPlug do
import Plug.Conn
@moduledoc """
The EndpointPlug module provides functions to authenticate a web request at the Endpoint level.
This allows the user to removal all auth configuration from the Controller level.
"""
@doc """
Set default opts values.
"""
def init(opts) do
%{id_name: "auth_id", id_field: "auth_id", key_field: "signing_key"}
|> Map.merge(opts)
end
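  # Typical endpoint usage (a sketch; `MyApp.ApiKey` and `MyApp.Repo` are
  # placeholder names; opts are a map because `init/1` uses `Map.merge/2`):
  #
  #     plug NcsaHmac.EndpointPlug, %{
  #       mount: ["api", "v1"],
  #       model: MyApp.ApiKey,
  #       repo: MyApp.Repo
  #     }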
@doc """
Comply with the module plug standard. Take a conn and optional map (opts) as arguments.
The :mount list is configured in the endpoint. Will match the beginning of the
request path up to the mount list. This will greedily match all methods and routes
with additional path parameters. If the conn.path_info list does not match the opts:mount
list, the conn will be returned to the endpoint for further processing.
TODO: upgrade to elixir 1.5 and use the @impl annotation
"""
def call(conn, opts) do
case match_mount?(conn, opts) do
true -> authorize_resource(conn, opts)
false -> conn
end
end
@doc """
:preprocess_conn will set the raw request body into :private.raw_body on the conn.
:assign_resource_id will set the auth_id into :private at the opts[:field_name] key.
  :load_resource will query the appropriate model and repo based on opts values.
  :authorize_request will validate the request authorization key against the request
details. See NcsaHmac.Canonical for specifics.
:purge_resource will set the :assigns.authorized value and remove the model from
the conn request struct.
"""
def authorize_resource(conn, opts) do
try do
conn
|> preprocess_conn(opts)
|> assign_resource_id(opts)
|> load_resource(opts)
|> authorize_request(opts)
|> purge_resource(opts)
rescue
e in NcsaHmac.AuthorizationError -> handle_unauthorized(conn, e.message, opts)
end
end
defp load_resource(conn, opts) do
loaded_resource = fetch_resource(conn, opts)
%{conn | assigns: Map.put(conn.assigns, resource_name(opts), loaded_resource)}
end
defp authorize_request(conn, opts) do
authentication = NcsaHmac.Authentication.authenticate!(conn, opts)
case authentication do
{:ok, true} ->
conn
|> Plug.Conn.assign(:authorized, true)
|> purge_resource(opts)
{:error, message} ->
handle_unauthorized(conn, message, opts)
end
end
defp preprocess_conn(conn, _opts) do
{status, body, _} = Plug.Conn.read_body(conn)
case status do
:ok -> conn |> Plug.Conn.put_private(:raw_body, body)
_ -> conn
end
end
defp purge_resource(conn, opts) do
%{conn | assigns: Map.put(conn.assigns, resource_name(opts), nil)}
end
defp fetch_resource(conn, opts) do
repo = opts[:repo] || Application.get_env(:ncsa_hmac, :repo)
map_args = get_resource_args(conn, opts)
conn.assigns
|> Map.fetch(resource_name(opts)) # check if a resource is already loaded at the key
|> case do
:error ->
repo.get_by(opts[:model], map_args)
{:ok, nil} ->
repo.get_by(opts[:model], map_args)
{:ok, resource} ->
case (resource.__struct__ == opts[:model]) do
true -> # A resource of the type passed as opts[:model] is already loaded; do not clobber it
resource
false ->
repo.get_by(opts[:model], map_args)
end
end
end
defp assign_resource_id(conn, opts) do
field_name = field_name(opts)
Plug.Conn.put_private(conn, field_name, get_resource_args(conn, opts)[field_name])
end
defp field_name(opts) do
String.to_atom(opts[:id_field] || "id")
end
defp get_resource_args(conn, opts) do
resource_id = NcsaHmac.Authentication.auth_id(conn)
resource = case resource_id do
nil -> get_resource_id(conn, opts)
_ -> resource_id
end
%{field_name(opts) => resource}
end
defp get_resource_id(conn, opts) do
case opts[:id_name] do
nil ->
conn.params["id"]
id_name ->
conn.params[id_name]
end
end
defp resource_name(opts) do
case opts[:as] do
nil ->
opts[:model]
|> Module.split
|> List.last
|> Macro.underscore
|> String.to_atom
as -> as
end
end
defp handle_unauthorized(conn, message, opts) do
conn
|> purge_resource(opts)
|> assign(:authorized, false)
|> assign(:error_message, message)
|> put_resp_content_type("application/json")
|> send_resp(401, error_json(message))
|> halt()
end
defp error_json(error_message) do
JSON.encode! %{errors: [%{message: "Unauthorized", detail: error_message}]}
end
defp match_mount?(conn, opts) do
path = conn.path_info
mount = Map.get(opts, :mount)
mount == Enum.slice(path, 0, Enum.count(mount))
end
end
|
lib/ncsa_hmac/endpoint_plug.ex
| 0.513668
| 0.441914
|
endpoint_plug.ex
|
starcoder
|
use Bitwise
defmodule Zeam do
@moduledoc """
Zeam is a module of ZEAM. ZEAM is ZACKY's Elixir Abstract Machine, which is aimed at being BEAM compatible.
Zeam now provides bytecode analyzing functions.
"""
@tags [
SMALL: 15,
BIG: 11,
FLOAT: 9,
ATOM: 7,
REFER: 6,
PORT: 5,
PID: 3,
TUPLE: 2,
NIL: (11 + 16), # BIG + 16
LIST: 1,
ARITYVAL: 10,
MOVED: 12,
CATCH: 13, # THING
THING: 13,
BINARY: 14,
BLANK: 10, # ARITYVAL
IC: 15, # SMALL
CP0: 0,
CP4: 4,
CP8: 8,
CP12: 12
]
@doc """
Hello world.
## Examples
iex> Zeam.hello
"ZEAM is ZACKY's Elixir Abstract Machine, which is aimed at being BEAM compatible."
"""
def hello do
"ZEAM is ZACKY's Elixir Abstract Machine, which is aimed at being BEAM compatible."
end
@doc """
This converts a binary into a list.
## Parameter
- binary: is a binary to convert into a list.
## Examples
iex> Zeam.bin2list(<<0, 1, 2, 3>>)
[0, 1, 2, 3]
"""
@spec bin2list(binary) :: list
def bin2list(binary) do
case binary do
<<>> -> []
<<x :: integer>> -> [x]
<<x :: integer, y :: binary>> -> [x] ++ bin2list(y)
end
end
@doc """
  This bundles each value of a list with the following two values into overlapping triples.
## Parameter
- list: is a list to bundle.
## Examples
iex> Zeam.bundle3Values([0, 1, 2, 3])
[[0, 1, 2], [1, 2, 3]]
"""
@spec bundle3Values(list) :: list
def bundle3Values(list) do
case list do
[] -> []
[_] -> []
[_, _] -> []
[a, b, c] -> [[a, b, c]]
[a, b, c | r] -> [[a, b, c]] ++ bundle3Values([b, c] ++ r)
end
end
@doc """
  This concatenates a list of integers in the manner of little endian.
  ## Parameter
  - list: is a list of integers to concatenate
## Examples
iex> Integer.to_string(Zeam.concatLittleEndian([0, 1, 2]), 16)
"20100"
"""
@spec concatLittleEndian(list) :: integer
def concatLittleEndian(list) do
case list do
[] -> 0
[a] -> a
[a | r] -> a + concatLittleEndian(r) * 256
end
end
@doc """
  This concatenates a list of integers in the manner of big endian.
  ## Parameter
  - list: is a list of integers to concatenate
## Examples
iex> Integer.to_string(Zeam.concatBigEndian([0, 1, 2]), 16)
"102"
"""
@spec concatBigEndian(list) :: integer
def concatBigEndian(list) do
list |> reverseList |> concatLittleEndian
end
@doc """
This reverses a list.
## Parameter
- list: is a list to reverse
## Examples
iex> Zeam.reverseList([0, 1, 2])
[2, 1, 0]
"""
@spec reverseList(list) :: list
def reverseList(list) do
case list do
[] -> []
[a | r] -> reverseList(r) ++ [a]
end
end
@doc """
  This reads a binary (a sequence of bytes) and generates a list of integers, where each sliding window of 3 bytes (24 bits) is interpreted as a signed integer in little endian.
## Parameter
- binary: is a binary to read
## Examples
iex> Zeam.toAddressInLittleEndian(<<0, 1, 2, 3>>)
[131328, 197121]
iex> Zeam.toAddressInLittleEndian(<<255, 255, 255>>)
[-1]
iex> Zeam.toAddressInLittleEndian(<<254, 255, 255>>)
[-2]
"""
@spec toAddressInLittleEndian(binary) :: list
def toAddressInLittleEndian(binary) do
toAddress(&Zeam.concatLittleEndian/1, binary)
end
@doc """
  This reads a binary (a sequence of bytes) and generates a list of integers, where each sliding window of 3 bytes (24 bits) is interpreted as a signed integer in big endian.
## Parameter
- binary: is a binary to read
## Examples
iex> Zeam.toAddressInBigEndian(<<0, 1, 2, 3>>)
[258, 66051]
iex> Zeam.toAddressInBigEndian(<<255, 255, 255>>)
[-1]
iex> Zeam.toAddressInBigEndian(<<255, 255, 254>>)
[-2]
"""
@spec toAddressInBigEndian(binary) :: list
def toAddressInBigEndian(binary) do
toAddress(&Zeam.concatBigEndian/1, binary)
end
@doc """
  This provides the template method for toAddress{Little/Big}Endian/1.
## Parameter
- function: is one of concat{Little/Big}Endian/1.
- binary: is a binary to read.
## Examples
iex> Zeam.toAddress(&Zeam.concatLittleEndian/1, <<0, 1, 2, 3>>)
[131328, 197121]
iex> Zeam.toAddress(&Zeam.concatBigEndian/1, <<0, 1, 2, 3>>)
[258, 66051]
"""
@spec toAddress(function, binary) :: list
def toAddress(function, binary) when is_function(function, 1) do
    binary
    |> bin2list()
    |> bundle3Values()
    |> Enum.map(fn x ->
      y = function.(x)
      # Interpret as a signed 24-bit integer (two's complement). Note the
      # positive range runs up to (1 <<< 23) - 1, so the comparison must be
      # strictly less than 1 <<< 23.
      if y < (1 <<< 23), do: y, else: y - (1 <<< 24)
    end)
end
@doc """
  This returns a list of absolute addresses.
## Parameter
- function: is one of concat{Little/Big}Endian/1.
- binary: is a binary to read.
## Examples
iex> Zeam.toAbsoluteAddress(&Zeam.concatLittleEndian/1, <<0, 1, 2, 3>>)
[131328, 197122]
iex> Zeam.toAbsoluteAddress(&Zeam.concatBigEndian/1, <<0, 1, 2, 3>>)
[258, 66052]
"""
@spec toAbsoluteAddress(function, binary) :: list
def toAbsoluteAddress(function, binary) when is_function(function, 1) do
    for {address, index} <- Enum.with_index(toAddress(function, binary)), do: address + index
end
@doc """
  This returns a list of absolute addresses in little endian.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toAbsoluteAddressInLittleEndian(<<0, 1, 2, 3>>)
[131328, 197122]
"""
@spec toAbsoluteAddressInLittleEndian(binary) :: list
def toAbsoluteAddressInLittleEndian(binary) do
toAbsoluteAddress(&Zeam.concatLittleEndian/1, binary)
end
@doc """
  This returns a list of absolute addresses in big endian.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toAbsoluteAddressInBigEndian(<<0, 1, 2, 3>>)
[258, 66052]
"""
@spec toAbsoluteAddressInBigEndian(binary) :: list
def toAbsoluteAddressInBigEndian(binary) do
toAbsoluteAddress(&Zeam.concatBigEndian/1, binary)
end
@doc """
  This returns a list of tuples of absolute addresses of the origin and the target.
## Parameter
- function: is one of concat{Little/Big}Endian/1.
- binary: is a binary to read.
## Examples
iex> Zeam.toAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, <<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, <<1, 0, 0, 0>>)
[{0, 1}, {1, 1}]
iex> Zeam.toAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, <<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, <<0, 0, 1, 0>>)
[{0, 1}, {1, 257}]
"""
@spec toAddressOfOriginAndTarget(function, binary) :: list
def toAddressOfOriginAndTarget(function, binary) when is_function(function, 1) do
    for {target, origin} <- Enum.with_index(toAbsoluteAddress(function, binary)), do: {origin, target}
end
@doc """
  This returns a list of tuples of absolute addresses in little endian of the origin and the target.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toAddressInLittleEndianOfOriginAndTarget(<<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toAddressInLittleEndianOfOriginAndTarget(<<1, 0, 0, 0>>)
[{0, 1}, {1, 1}]
"""
@spec toAddressInLittleEndianOfOriginAndTarget(binary) :: list
def toAddressInLittleEndianOfOriginAndTarget(binary) do
toAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, binary)
end
@doc """
  This returns a list of tuples of absolute addresses in big endian of the origin and the target.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toAddressInBigEndianOfOriginAndTarget(<<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toAddressInBigEndianOfOriginAndTarget(<<0, 0, 1, 0>>)
[{0, 1}, {1, 257}]
"""
@spec toAddressInBigEndianOfOriginAndTarget(binary) :: list
def toAddressInBigEndianOfOriginAndTarget(binary) do
toAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, binary)
end
@doc """
  This returns a sorted list of tuples of absolute addresses of the origin and the target in order of the target address.
## Parameter
- function: is one of concat{Little/Big}Endian/1.
- binary: is a binary to read.
## Examples
iex> Zeam.toSortedListOfAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, <<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toSortedListOfAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, <<1, 0, 0, 0>>)
[{1, 1}, {0, 1}]
iex> Zeam.toSortedListOfAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, <<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toSortedListOfAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, <<0, 0, 1, 0>>)
[{0, 1}, {1, 257}]
"""
@spec toSortedListOfAddressOfOriginAndTarget(function, binary) :: list
def toSortedListOfAddressOfOriginAndTarget(function, binary) when is_function(function, 1) do
Enum.sort(toAddressOfOriginAndTarget(function, binary), fn(a, b) -> elem(a, 1) < elem(b, 1) end)
end
@doc """
  This returns a sorted list of tuples of absolute addresses in little endian of the origin and the target in order of the target address.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toSortedListOfAddressInLittleEndianOfOriginAndTarget(<<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toSortedListOfAddressInLittleEndianOfOriginAndTarget(<<1, 0, 0, 0>>)
[{1, 1}, {0, 1}]
"""
@spec toSortedListOfAddressInLittleEndianOfOriginAndTarget(binary) :: list
def toSortedListOfAddressInLittleEndianOfOriginAndTarget(binary) do
toSortedListOfAddressOfOriginAndTarget(&Zeam.concatLittleEndian/1, binary)
end
@doc """
  This returns a sorted list of tuples of absolute addresses in big endian of the origin and the target in order of the target address.
## Parameter
- binary: is a binary to read.
## Examples
iex> Zeam.toSortedListOfAddressInBigEndianOfOriginAndTarget(<<0, 0, 0>>)
[{0, 0}]
iex> Zeam.toSortedListOfAddressInBigEndianOfOriginAndTarget(<<0, 0, 1, 0>>)
[{0, 1}, {1, 257}]
"""
@spec toSortedListOfAddressInBigEndianOfOriginAndTarget(binary) :: list
def toSortedListOfAddressInBigEndianOfOriginAndTarget(binary) do
toSortedListOfAddressOfOriginAndTarget(&Zeam.concatBigEndian/1, binary)
end
@doc """
  This opens the file at a path, applies a function to its contents, and puts the result to IO.
  ## Parameter
  - function: is a function that receives the file contents as a binary
  - path: is a binary file path
"""
  @spec put(function, Path.t()) :: :ok
def put(function, path) when is_function(function, 1) do
IO.puts openAndCall(function, path)
end
@doc """
This opens the file of a path and calls a function.
## Parameter
  - function: is a function that receives the file contents as a binary
- path: is data or a binary file path to dump.
"""
@spec openAndCall(function, Path.t()) :: String.t()
def openAndCall(function, path) when is_function(function, 1) do
{:ok, file} = File.open path, [:read]
readFile(function, file)
end
@doc """
  This reads an open file and applies a function to its contents, returning a String.
  ## Parameter
  - function: is a function that receives the file contents as a binary
  - file: is an open file to read.
"""
@spec readFile(function, File.t()) :: String.t()
def readFile(function, file) when is_function(function, 1) do
case IO.binread(file, :all) do
{:error, reason} -> {:error, reason}
:eof -> ""
"" -> ""
data -> "#{function.(data)}\n#{readFile(function, file)}"
end
end
@doc """
This prints a tuple of addresses of the origin and the target
## Parameter
- x: is a tuple including addresses of the origin and the target.
## Examples
iex> Zeam.printElem({0, 0})
"{0, 0}"
iex> Zeam.printElem({0, -1})
""
"""
@spec printElem(tuple) :: String.t()
def printElem(x) do
if elem(x, 1) < 0 do
""
else
"{#{Integer.to_string(elem(x, 0), 16)}, #{Integer.to_string(elem(x, 1), 16)}}"
end
end
@doc """
This calls a function with a binary, obtains a list of tuples, and prints it.
## Parameter
- function: is a function to call with a binary.
- binary: is a binary to be converted by the function.
"""
@spec printTupleList(function, binary) :: String.t()
def printTupleList(function, binary) when is_function(function, 1) do
for x <- function.(binary), do: printElem(x)
end
@doc """
This prints a sorted list of addresses in little endian of the origin and target from a binary.
## Parameter
- binary: is a binary to print the list.
## Examples
iex> Zeam.printSortedListInLittleEndian(<<0, 0, 0, 0>>)
["{0, 0}", "{1, 1}"]
iex> Zeam.printSortedListInLittleEndian(<<1, 0, 0, 0>>)
["{1, 1}", "{0, 1}"]
"""
@spec printSortedListInLittleEndian(binary) :: String.t()
def printSortedListInLittleEndian(binary) do
printTupleList(&Zeam.toSortedListOfAddressInLittleEndianOfOriginAndTarget/1, binary)
end
@doc """
This prints a sorted list of addresses in big endian of the origin and target from a binary.
## Parameter
- binary: is a binary to print the list.
## Examples
iex> Zeam.printSortedListInBigEndian(<<0, 0, 0, 0>>)
["{0, 0}", "{1, 1}"]
iex> Zeam.printSortedListInBigEndian(<<0, 0, 1, 0>>)
["{0, 1}", "{1, 101}"]
"""
@spec printSortedListInBigEndian(binary) :: String.t()
def printSortedListInBigEndian(binary) do
printTupleList(&Zeam.toSortedListOfAddressInBigEndianOfOriginAndTarget/1, binary)
end
@doc """
This puts a sorted list of addresses in little endian of the origin and the target from the binary read from the file of a path.
## Parameter
- path: is data or a binary file path to put.
## Examples
iex> Zeam.putAddressInLittleEndian("./test/sample")
"{0, 434241}{1, 444343}{2, 454445}{3, 464547}{4, 474649}{5, 48474B}{6, 49484D}{7, 4A494F}{8, 4B4A51}{9, 4C4B53}{A, 4D4C55}{B, 4E4D57}\n"
"""
@spec putAddressInLittleEndian(Path.t()) :: String.t()
def putAddressInLittleEndian(path) do
openAndCall(&Zeam.printSortedListInLittleEndian/1, path)
end
@doc """
This puts a sorted list of addresses in big endian of the origin and the target from the binary read from the file of a path.
## Parameter
  - path: is a binary file path to put.
## Examples
iex> Zeam.putAddressInBigEndian("./test/sample")
"{0, 414243}{1, 424345}{2, 434447}{3, 444549}{4, 45464B}{5, 46474D}{6, 47484F}{7, 484951}{8, 494A53}{9, 4A4B55}{A, 4B4C57}{B, 4C4D59}\n"
"""
@spec putAddressInBigEndian(Path.t()) :: String.t()
def putAddressInBigEndian(path) do
openAndCall(&Zeam.printSortedListInBigEndian/1, path)
end
@doc """
  This dumps binary files to standard output.
  ## Parameter
  - path: is a binary file path to dump.
"""
@spec dump(Path.t()) :: String.t()
def dump(path) do
put(&Zeam.dump_d/1, path)
end
@doc """
This dumps binary files to String.
## Parameter
  - path: is a binary file path to dump.
## Examples
iex> Zeam.dump_p("./test/sample")
"41 42 43 44 45 46 47 48\\n49 4A 4B 4C 4D 4E\\n\\n"
"""
@spec dump_p(Path.t()) :: String.t()
def dump_p(path) do
{:ok, file} = File.open path, [:read]
dump_f(file)
end
  @doc """
  This dumps an open binary file to String, eight bytes per line.
  ## Parameter
  - file: is an open file to dump.
  """
@spec dump_f(File.t()) :: String.t()
def dump_f(file) do
case IO.binread(file, 8) do
{:error, reason} -> {:error, reason}
:eof -> "\n"
data -> "#{dump_d(data)}\n#{dump_f(file)}"
end
end
@doc """
This dumps binary data to String.
## Parameters
- data: is binary data to dump.
## Examples
iex> Zeam.dump_d(<<0, 1, 2, 3>>)
"00 01 02 03"
"""
@spec dump_d(binary) :: String.t()
def dump_d(data) do
case data do
      <<>> -> ""
<<x :: integer>> -> "0#{Integer.to_string(x, 16)}" |> last2
<<x :: integer, y :: binary>> -> "#{dump_d(<<x>>)} #{dump_d(y)}"
end
end
@doc """
This slices the last 2 chars.
## Parameters
- string: is string to slice.
## Examples
iex> Zeam.last2("0123")
"23"
"""
@spec last2(String.t()) :: String.t()
def last2(string) do
    String.slice(string, String.length(string) - 2, 2)
end
end
|
lib/zeam.ex
| 0.824815
| 0.502747
|
zeam.ex
|
starcoder
|
defmodule OddJob.Utils do
@moduledoc """
Internal utilities for working with OddJob processes.
To avoid unexpected behavior, it's recommended that developers do not interact
directly with OddJob processes. See `OddJob` for the public API.
"""
@moduledoc since: "0.4.0"
import OddJob.Registry, only: [via: 2]
@type name :: OddJob.Registry.name()
@doc """
Links and monitors the `pid` and returns the tuple `{pid, monitor_ref}`.
## Example
iex> pid = spawn(fn -> [] end)
iex> {^pid, ref} = OddJob.Utils.link_and_monitor(pid)
iex> receive do
...> {:DOWN, ^ref, :process, ^pid, :normal} -> :received
...> end
:received
"""
@spec link_and_monitor(pid) :: {pid, reference}
def link_and_monitor(pid) do
Process.link(pid)
{pid, Process.monitor(pid)}
end
@doc """
Extracts the `pid` from an `{:ok, pid}` tuple.
iex> pid = spawn_link(fn -> :hello end)
iex> pid == OddJob.Utils.extract_pid({:ok, pid})
true
"""
@spec extract_pid({:ok, pid}) :: pid
def extract_pid({:ok, pid}), do: pid
@doc """
Returns the `OddJob.Scheduler.Supervisor` name in :via for the given `name`.
iex> OddJob.Utils.scheduler_sup_name(:job)
{:via, Registry, {OddJob.Registry, {:job, :scheduler_sup}}}
"""
@spec scheduler_sup_name(term) :: name
def scheduler_sup_name(name), do: via(name, :scheduler_sup)
@doc """
Returns the `OddJob.Queue` name in :via for the given `name`.
iex> OddJob.Utils.queue_name(:job)
{:via, Registry, {OddJob.Registry, {:job, :queue}}}
"""
@spec queue_name(term) :: name
def queue_name(name), do: via(name, :queue)
@doc """
Returns the `OddJob.Pool.Supervisor` name in :via for the given `name`.
iex> OddJob.Utils.pool_supervisor_name(:job)
{:via, Registry, {OddJob.Registry, {:job, :pool_sup}}}
"""
@spec pool_supervisor_name(term) :: name
def pool_supervisor_name(name), do: via(name, :pool_sup)
@doc """
Returns the `OddJob.Async.ProxySupervisor` name in :via for the given `name`.
iex> OddJob.Utils.proxy_sup_name(:job)
{:via, Registry, {OddJob.Registry, {:job, :proxy_sup}}}
"""
@spec proxy_sup_name(term) :: name
def proxy_sup_name(name), do: via(name, :proxy_sup)
end
|
lib/odd_job/utils.ex
| 0.892176
| 0.416678
|
utils.ex
|
starcoder
|
defmodule AWS.Sdb do
@moduledoc """
Amazon SimpleDB is a web service providing the core database functions of
data indexing and querying in the cloud. By offloading the time and effort
associated with building and operating a web-scale database, SimpleDB
provides developers the freedom to focus on application development. A
traditional, clustered relational database requires a sizable upfront
capital outlay, is complex to design, and often requires extensive and
repetitive database administration. Amazon SimpleDB is dramatically
simpler, requiring no schema, automatically indexing your data and
providing a simple API for storage and access. This approach eliminates the
administrative burden of data modeling, index maintenance, and performance
tuning. Developers gain access to this functionality within Amazon's proven
computing environment, are able to scale instantly, and pay only for what
they use.
Visit [http://aws.amazon.com/simpledb/](http://aws.amazon.com/simpledb/)
for more information.
"""
@doc """
Performs multiple DeleteAttributes operations in a single call, which
reduces round trips and latencies. This enables Amazon SimpleDB to optimize
requests, which generally yields better throughput.
  Note: If you specify BatchDeleteAttributes without attributes or values,
  all the attributes for the item are deleted.
  BatchDeleteAttributes is an idempotent operation; running it multiple times
  on the same item or attribute doesn't result in an error.
  The BatchDeleteAttributes operation succeeds or fails in its entirety.
  There are no partial deletes. You can execute multiple
  BatchDeleteAttributes operations and other operations in parallel. However,
  large numbers of concurrent BatchDeleteAttributes calls can result in
  Service Unavailable (503) responses.
  This operation is vulnerable to exceeding the maximum URL size when making
  a REST request using the HTTP GET method.
  This operation does not support conditions using Expected.X.Name,
  Expected.X.Value, or Expected.X.Exists.
  The following limitations are enforced for this operation:
  - 1 MB request size
  - 25 item limit per BatchDeleteAttributes operation
"""
def batch_delete_attributes(client, input, options \\ []) do
request(client, "BatchDeleteAttributes", input, options)
end
@doc """
The `BatchPutAttributes` operation creates or replaces attributes within
one or more items. By using this operation, the client can perform multiple
`PutAttribute` operation with a single call. This helps yield savings in
round trips and latencies, enabling Amazon SimpleDB to optimize requests
and generally produce better throughput.
The client may specify the item name with the `Item.X.ItemName` parameter.
The client may specify new attributes using a combination of the
`Item.X.Attribute.Y.Name` and `Item.X.Attribute.Y.Value` parameters. The
client may specify the first attribute for the first item using the
parameters `Item.0.Attribute.0.Name` and `Item.0.Attribute.0.Value`, and
for the second attribute for the first item by the parameters
`Item.0.Attribute.1.Name` and `Item.0.Attribute.1.Value`, and so on.
Attributes are uniquely identified within an item by their name/value
combination. For example, a single item can have the attributes `{
"first_name", "first_value" }` and `{ "first_name", "second_value" }`.
However, it cannot have two attribute instances where both the
`Item.X.Attribute.Y.Name` and `Item.X.Attribute.Y.Value` are the same.
Optionally, the requester can supply the `Replace` parameter for each
individual value. Setting this value to `true` will cause the new attribute
values to replace the existing attribute values. For example, if an item
`I` has the attributes `{ 'a', '1' }, { 'b', '2'}` and `{ 'b', '3' }` and
the requester does a BatchPutAttributes of `{'I', 'b', '4' }` with the
Replace parameter set to true, the final attributes of the item will be `{
'a', '1' }` and `{ 'b', '4' }`, replacing the previous values of the 'b'
attribute with the new value.
  Note: You cannot specify an empty string as an item or as an attribute
  name. The `BatchPutAttributes` operation succeeds or fails in its entirety.
  There are no partial puts.
  Important: This operation is vulnerable to exceeding the maximum URL size
  when making a REST request using the HTTP GET method. This operation does
  not support conditions using `Expected.X.Name`, `Expected.X.Value`, or
  `Expected.X.Exists`.
  You can execute multiple `BatchPutAttributes` operations and other
  operations in parallel. However, large numbers of concurrent
  `BatchPutAttributes` calls can result in Service Unavailable (503)
  responses.
  The following limitations are enforced for this operation:
  - 256 attribute name-value pairs per item
  - 1 MB request size
  - 1 billion attributes per domain
  - 10 GB of total user data storage per domain
  - 25 item limit per `BatchPutAttributes` operation
"""
def batch_put_attributes(client, input, options \\ []) do
request(client, "BatchPutAttributes", input, options)
end
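  # A hedged sketch of the flattened parameter naming described above; the
  # domain, item, and attribute values here are hypothetical, not from the
  # source:
  #
  #   input = %{
  #     "DomainName" => "MyDomain",
  #     "Item.0.ItemName" => "item1",
  #     "Item.0.Attribute.0.Name" => "first_name",
  #     "Item.0.Attribute.0.Value" => "first_value",
  #     "Item.0.Attribute.0.Replace" => "true"
  #   }
  #   AWS.Sdb.batch_put_attributes(client, input)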
@doc """
The `CreateDomain` operation creates a new domain. The domain name should
be unique among the domains associated with the Access Key ID provided in
the request. The `CreateDomain` operation may take 10 or more seconds to
complete.
  Note: CreateDomain is an idempotent operation; running it multiple times
  using the same domain name will not result in an error response.
The client can create up to 100 domains per account.
  If the client requires additional domains, go to
  [http://aws.amazon.com/contact-us/simpledb-limit-request/](http://aws.amazon.com/contact-us/simpledb-limit-request/).
"""
def create_domain(client, input, options \\ []) do
request(client, "CreateDomain", input, options)
end
@doc """
Deletes one or more attributes associated with an item. If all attributes
of the item are deleted, the item is deleted.
  Note: If `DeleteAttributes` is called without any attributes or values
  specified, all the attributes for the item are deleted.
`DeleteAttributes` is an idempotent operation; running it multiple times on
the same item or attribute does not result in an error response.
Because Amazon SimpleDB makes multiple copies of item data and uses an
eventual consistency update model, performing a `GetAttributes` or `Select`
operation (read) immediately after a `DeleteAttributes` or `PutAttributes`
operation (write) might not return updated item data.
"""
def delete_attributes(client, input, options \\ []) do
request(client, "DeleteAttributes", input, options)
end
@doc """
The `DeleteDomain` operation deletes a domain. Any items (and their
attributes) in the domain are deleted as well. The `DeleteDomain` operation
might take 10 or more seconds to complete.
  Note: Running `DeleteDomain` on a domain that does not exist or running
  the function multiple times using the same domain name will not result in
  an error response.
"""
def delete_domain(client, input, options \\ []) do
request(client, "DeleteDomain", input, options)
end
@doc """
Returns information about the domain, including when the domain was
created, the number of items and attributes in the domain, and the size of
the attribute names and values.
"""
def domain_metadata(client, input, options \\ []) do
request(client, "DomainMetadata", input, options)
end
@doc """
Returns all of the attributes associated with the specified item.
Optionally, the attributes returned can be limited to one or more
attributes by specifying an attribute name parameter.
If the item does not exist on the replica that was accessed for this
operation, an empty set is returned. The system does not return an error as
it cannot guarantee the item does not exist on other replicas.
  Note: If GetAttributes is called without being passed any attribute names,
  all the attributes for the item are returned.
"""
def get_attributes(client, input, options \\ []) do
request(client, "GetAttributes", input, options)
end
@doc """
The `ListDomains` operation lists all domains associated with the Access
Key ID. It returns domain names up to the limit set by
[MaxNumberOfDomains](#MaxNumberOfDomains). A [NextToken](#NextToken) is
returned if there are more than `MaxNumberOfDomains` domains. Calling
`ListDomains` successive times with the `NextToken` provided by the
operation returns up to `MaxNumberOfDomains` more domain names with each
successive operation call.
"""
def list_domains(client, input, options \\ []) do
request(client, "ListDomains", input, options)
end
@doc """
The PutAttributes operation creates or replaces attributes in an item. The
client may specify new attributes using a combination of the
`Attribute.X.Name` and `Attribute.X.Value` parameters. The client specifies
the first attribute by the parameters `Attribute.0.Name` and
`Attribute.0.Value`, the second attribute by the parameters
`Attribute.1.Name` and `Attribute.1.Value`, and so on.
Attributes are uniquely identified in an item by their name/value
combination. For example, a single item can have the attributes `{
"first_name", "first_value" }` and `{ "first_name", second_value" }`.
However, it cannot have two attribute instances where both the
`Attribute.X.Name` and `Attribute.X.Value` are the same.
  Optionally, the requester can supply the `Replace` parameter for each
  individual attribute. Setting this value to `true` causes the new attribute
  value to replace the existing attribute value(s). For example, if an item
  has the attributes `{ 'a', '1' }`, `{ 'b', '2'}` and `{ 'b', '3' }` and the
  requester calls `PutAttributes` using the attributes `{ 'b', '4' }` with
the `Replace` parameter set to true, the final attributes of the item are
changed to `{ 'a', '1' }` and `{ 'b', '4' }`, which replaces the previous
values of the 'b' attribute with the new value.
  Note: Using `PutAttributes` to replace attribute values that do not exist
  will not result in an error response.
  You cannot specify an empty string as an attribute name.
Because Amazon SimpleDB makes multiple copies of client data and uses an
eventual consistency update model, an immediate `GetAttributes` or `Select`
operation (read) immediately after a `PutAttributes` or `DeleteAttributes`
operation (write) might not return the updated data.
  The following limitations are enforced for this operation:
  - 256 total attribute name-value pairs per item
  - One billion attributes per domain
  - 10 GB of total user data storage per domain
"""
def put_attributes(client, input, options \\ []) do
request(client, "PutAttributes", input, options)
end
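  # An illustrative input for the Replace behavior described above, reusing
  # the doc's `{ 'b', '4' }` example (domain and item names are assumed):
  #
  #   input = %{
  #     "DomainName" => "MyDomain",
  #     "ItemName" => "I",
  #     "Attribute.0.Name" => "b",
  #     "Attribute.0.Value" => "4",
  #     "Attribute.0.Replace" => "true"
  #   }
  #   AWS.Sdb.put_attributes(client, input)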
@doc """
The `Select` operation returns a set of attributes for `ItemNames` that
match the select expression. `Select` is similar to the standard SQL SELECT
statement.
The total size of the response cannot exceed 1 MB in total size. Amazon
SimpleDB automatically adjusts the number of items returned per page to
enforce this limit. For example, if the client asks to retrieve 2500 items,
but each individual item is 10 kB in size, the system returns 100 items and
an appropriate `NextToken` so the client can access the next page of
results.
For information on how to construct select expressions, see Using Select to
Create Amazon SimpleDB Queries in the Developer Guide.
"""
def select(client, input, options \\ []) do
request(client, "Select", input, options)
end
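  # A minimal sketch (domain and expression are assumed values):
  #
  #   AWS.Sdb.select(client, %{
  #     "SelectExpression" => "select * from `MyDomain` where first_name = 'first_value'"
  #   })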
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "sdb"}
host = build_host("sdb", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-www-form-urlencoded"}
]
input = Map.merge(input, %{"Action" => action, "Version" => "2009-04-15"})
payload = AWS.Util.encode_query(input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, AWS.Util.decode_xml(body), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = AWS.Util.decode_xml(body)
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/sdb.ex
| 0.913508
| 0.709447
|
sdb.ex
|
starcoder
|
defmodule Sneex.Ops.ProcessorStatus do
@moduledoc """
This represents the op codes for interacting with the processor status bits.
This includes the following commands:
CLC, SEC, CLD, SED, REP, SEP, SEI, CLI, CLV, NOP, XBA, and XCE
One thing to note about this opcode is that since it doesn't do a lot of memory addressing,
  it has not been updated (and may never be?) to make use of the new addressing mode functionality.
"""
defstruct [:opcode]
use Bitwise
alias Sneex.{BasicTypes, Cpu}
@opaque t :: %__MODULE__{
opcode:
0x18 | 0x38 | 0xD8 | 0xF8 | 0xC2 | 0xE2 | 0x78 | 0x58 | 0xB8 | 0xEA | 0xEB | 0xFB
}
@spec new(byte()) :: nil | __MODULE__.t()
def new(oc)
when oc == 0x18 or oc == 0x38 or oc == 0xD8 or oc == 0xF8 or oc == 0xC2 or oc == 0xEB do
%__MODULE__{opcode: oc}
end
def new(oc)
when oc == 0xE2 or oc == 0x78 or oc == 0x58 or oc == 0xB8 or oc == 0xEA or oc == 0xFB do
%__MODULE__{opcode: oc}
end
def new(_opcode), do: nil
defimpl Sneex.Ops.Opcode do
@clc 0x18
@sec 0x38
@cld 0xD8
@sed 0xF8
@rep 0xC2
@sep 0xE2
@sei 0x78
@cli 0x58
@clv 0xB8
@nop 0xEA
@xba 0xEB
@xce 0xFB
def byte_size(%{opcode: oc}, _cpu)
when oc == @clc or oc == @sec or oc == @cld or oc == @sed or oc == @sei,
do: 1
def byte_size(%{opcode: oc}, _cpu)
when oc == @cli or oc == @clv or oc == @nop or oc == @xba or oc == @xce,
do: 1
def byte_size(%{opcode: oc}, _cpu) when oc == @rep or oc == @sep, do: 2
def total_cycles(%{opcode: oc}, _cpu)
when oc == @clc or oc == @sec or oc == @cld or oc == @sed or oc == @sei,
do: 2
def total_cycles(%{opcode: oc}, _cpu)
when oc == @cli or oc == @clv or oc == @nop or oc == @xce,
do: 2
def total_cycles(%{opcode: oc}, _cpu) when oc == @rep or oc == @sep or oc == @xba, do: 3
def execute(%{opcode: @clc}, cpu), do: cpu |> Cpu.carry_flag(false)
def execute(%{opcode: @sec}, cpu), do: cpu |> Cpu.carry_flag(true)
def execute(%{opcode: @cld}, cpu), do: cpu |> Cpu.decimal_mode(false)
def execute(%{opcode: @sed}, cpu), do: cpu |> Cpu.decimal_mode(true)
def execute(%{opcode: @sei}, cpu), do: cpu |> Cpu.irq_disable(true)
def execute(%{opcode: @cli}, cpu), do: cpu |> Cpu.irq_disable(false)
def execute(%{opcode: @clv}, cpu), do: cpu |> Cpu.overflow_flag(false)
def execute(%{opcode: @nop}, cpu), do: cpu
def execute(%{opcode: @xba}, cpu) do
b = cpu |> Cpu.b()
a = cpu |> Cpu.a() |> bsl(8)
c = b + a
cpu |> Cpu.acc(c) |> Cpu.negative_flag(c > 0x7FFF) |> Cpu.zero_flag(c == 0x0000)
end
def execute(%{opcode: @xce}, cpu) do
carry_flag = Cpu.carry_flag(cpu)
emu_mode = Cpu.emu_mode(cpu)
cpu |> exchange_carry_and_emu(carry_flag, emu_mode)
end
def execute(%{opcode: @rep}, cpu) do
operand = Cpu.read_operand(cpu, 1)
emu_mode = Cpu.emu_mode(cpu)
{cpu, _} = {cpu, operand} |> modify_flags(emu_mode, false)
cpu
end
def execute(%{opcode: @sep}, cpu) do
operand = Cpu.read_operand(cpu, 1)
emu_mode = Cpu.emu_mode(cpu)
{cpu, _} = {cpu, operand} |> modify_flags(emu_mode, true)
cpu
end
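    # A sketch of the mask semantics handled by modify_flags/3 below (the CPU
    # state here is assumed): in native mode, an operand of 0x30 selects bits
    # 0x20 and 0x10, switching both the accumulator and the index registers to
    # 8-bit mode for SEP (or 16-bit for REP):
    #
    #   op = Sneex.Ops.ProcessorStatus.new(0xE2)
    #   cpu = Sneex.Ops.Opcode.execute(op, cpu)  # operand byte 0x30 follows the opcode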
defp modify_flags(cpu_mask, _emulation_mode = :emulation, value) do
cpu_mask
|> modify_neg_flag(value)
|> modify_overflow_flag(value)
|> modify_decimal_mode(value)
|> modify_irq_disable(value)
|> modify_zero_flag(value)
|> modify_carry_flag(value)
end
defp modify_flags(cpu_mask, _emulation_mode, false) do
cpu_mask
|> modify_neg_flag(false)
|> modify_overflow_flag(false)
|> modify_acc_size(:bit16)
|> modify_index_size(:bit16)
|> modify_decimal_mode(false)
|> modify_irq_disable(false)
|> modify_zero_flag(false)
|> modify_carry_flag(false)
end
defp modify_flags(cpu_mask, _emulation_mode, _value) do
cpu_mask
|> modify_neg_flag(true)
|> modify_overflow_flag(true)
|> modify_acc_size(:bit8)
|> modify_index_size(:bit8)
|> modify_decimal_mode(true)
|> modify_irq_disable(true)
|> modify_zero_flag(true)
|> modify_carry_flag(true)
end
defp modify_neg_flag({cpu, mask}, value) when (mask &&& 0x80) == 0x80 do
{Cpu.negative_flag(cpu, value), mask}
end
defp modify_neg_flag(cpu_mask, _), do: cpu_mask
defp modify_overflow_flag({cpu, mask}, value) when (mask &&& 0x40) == 0x40 do
{Cpu.overflow_flag(cpu, value), mask}
end
defp modify_overflow_flag(cpu_mask, _), do: cpu_mask
defp modify_acc_size({cpu, mask}, value) when (mask &&& 0x20) == 0x20 do
{Cpu.acc_size(cpu, value), mask}
end
defp modify_acc_size(cpu_mask, _), do: cpu_mask
defp modify_index_size({cpu, mask}, value) when (mask &&& 0x10) == 0x10 do
{Cpu.index_size(cpu, value), mask}
end
defp modify_index_size(cpu_mask, _), do: cpu_mask
defp modify_decimal_mode({cpu, mask}, value) when (mask &&& 0x08) == 0x08 do
{Cpu.decimal_mode(cpu, value), mask}
end
defp modify_decimal_mode(cpu_mask, _), do: cpu_mask
defp modify_irq_disable({cpu, mask}, value) when (mask &&& 0x04) == 0x04 do
{Cpu.irq_disable(cpu, value), mask}
end
defp modify_irq_disable(cpu_mask, _), do: cpu_mask
defp modify_zero_flag({cpu, mask}, value) when (mask &&& 0x02) == 0x02 do
{Cpu.zero_flag(cpu, value), mask}
end
defp modify_zero_flag(cpu_mask, _), do: cpu_mask
defp modify_carry_flag({cpu, mask}, value) when (mask &&& 0x01) == 0x01 do
{Cpu.carry_flag(cpu, value), mask}
end
defp modify_carry_flag(cpu_mask, _), do: cpu_mask
# Not switching modes, so do nothing:
defp exchange_carry_and_emu(cpu, _carry = true, _emu_mode = :emulation), do: cpu
defp exchange_carry_and_emu(cpu, _carry = false, _emu_mode = :native), do: cpu
defp exchange_carry_and_emu(cpu, _carry = true, _emu_mode) do
cpu |> Cpu.carry_flag(false) |> Cpu.emu_mode(:emulation)
end
defp exchange_carry_and_emu(cpu, _carry = false, _emu_mode) do
cpu
|> Cpu.carry_flag(true)
|> Cpu.emu_mode(:native)
|> Cpu.acc_size(:bit8)
|> Cpu.index_size(:bit8)
end
def disasm(%{opcode: @clc}, _cpu), do: "CLC"
def disasm(%{opcode: @sec}, _cpu), do: "SEC"
def disasm(%{opcode: @cld}, _cpu), do: "CLD"
def disasm(%{opcode: @sed}, _cpu), do: "SED"
def disasm(%{opcode: @sei}, _cpu), do: "SEI"
def disasm(%{opcode: @cli}, _cpu), do: "CLI"
def disasm(%{opcode: @clv}, _cpu), do: "CLV"
def disasm(%{opcode: @nop}, _cpu), do: "NOP"
def disasm(%{opcode: @xba}, _cpu), do: "XBA"
def disasm(%{opcode: @xce}, _cpu), do: "XCE"
def disasm(%{opcode: @rep}, cpu) do
status_bits = cpu |> Cpu.read_operand(1) |> BasicTypes.format_byte()
"REP ##{status_bits}"
end
def disasm(%{opcode: @sep}, cpu) do
status_bits = cpu |> Cpu.read_operand(1) |> BasicTypes.format_byte()
"SEP ##{status_bits}"
end
end
end
|
lib/sneex/ops/processor_status.ex
| 0.738292
| 0.644603
|
processor_status.ex
|
starcoder
|
defmodule Robot do
defstruct direction: :north, position: {0,0}
end
defmodule RobotSimulator do
@doc """
Create a Robot Simulator given an initial direction and position.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@valid_directions [:north, :east, :south, :west]
@valid_instructions ["R", "L", "A"]
defguard is_valid_position(x,y) when is_integer(x) and is_integer(y)
  @spec create(any, any) :: {:error, String.t()} | %Robot{direction: any, position: {integer, integer}}
def create(direction, {x, y}) when is_valid_position(x,y) do
cond do
direction not in @valid_directions -> {:error, "invalid direction"}
true -> %Robot{direction: direction, position: {x, y}}
end
end
def create(_, _) do
{:error, "invalid position"}
end
@spec create :: %Robot{direction: :north, position: {0, 0}}
def create() do
%Robot{}
end
defp turn(:west, "L") do
:south
end
defp turn(:west, "R") do
:north
end
defp turn(:east, "R") do
:south
end
defp turn(:east, "L") do
:north
end
  defp turn(:north, "R") do
    :east
  end
  defp turn(:north, "L") do
    :west
  end
defp turn(:south, "R") do
:west
end
defp turn(:south, "L") do
:east
end
defp advance(robot) do
{x, y} = robot.position
cond do
robot.direction == :north -> %{robot | position: {x, y + 1}}
robot.direction == :south -> %{robot | position: {x, y - 1}}
robot.direction == :east -> %{robot | position: {x + 1, y}}
robot.direction == :west -> %{robot | position: {x - 1, y}}
end
end
defp move(instruction, robot) do
if instruction == "L" or instruction == "R" do
%{robot | direction: turn(robot.direction, instruction)}
else
advance(robot)
end
end
@doc """
Simulate the robot's movement given a string of instructions.
  Valid instructions are: "R" (turn right), "L" (turn left), and "A" (advance)
"""
@spec simulate(robot :: any, instructions :: String.t()) :: any
  def simulate(robot, instructions) do
    instructions
    |> String.graphemes()
    |> Enum.reduce_while(robot, fn instruction, rbt ->
      if instruction in @valid_instructions,
        do: {:cont, move(instruction, rbt)},
        else: {:halt, {:error, "invalid instruction"}}
    end)
  end
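  # Illustrative usage (values assumed, not from the source): starting at
  # {0, 0} facing north, "RAALAL" turns right, advances twice, turns left,
  # advances, then turns left again:
  #
  #   RobotSimulator.create(:north, {0, 0})
  #   |> RobotSimulator.simulate("RAALAL")
  #   # => %Robot{direction: :west, position: {2, 1}}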
@doc """
Return the robot's direction.
Valid directions are: `:north`, `:east`, `:south`, `:west`
"""
@spec direction(%{:direction => any, optional(any) => any}) :: any
  def direction(%{direction: dir}), do: dir
@doc """
Return the robot's position.
"""
@spec position(%{:position => any, optional(any) => any}) :: any
  def position(%{position: pos}), do: pos
end
|
robot-simulator/lib/robot_simulator.ex
| 0.851398
| 0.790207
|
robot_simulator.ex
|
starcoder
|
defmodule Filtrex.Condition.DateTime do
use Filtrex.Condition
use Timex
@format "{ISO:Extended}"
@comparators ["equals", "does not equal", "after", "on or after", "before", "on or before"]
@moduledoc """
`Filtrex.Condition.DateTime` is a specific condition type for handling datetime filters with various comparisons.
Configuration Options:
  | Key    | Type   | Description                                                                                                               |
  |--------|--------|---------------------------------------------------------------------------------------------------------------------------|
  | format | string | the format\* to use for parsing the incoming date string (defaults to {ISO:Extended} and can use any valid Timex format) |
\\\* See https://hexdocs.pm/timex/Timex.Format.DateTime.Formatters.Default.html
There are three different value formats allowed based on the type of comparator:
| Key | Type | Format / Allowed Values |
|------------|---------|-------------------------------------------|
| inverse | boolean | See `Filtrex.Condition.Text` |
| column | string | any allowed keys from passed `config` |
  | comparator | string  | after, on or after, before, on or before, equals, does not equal |
| value | string | "YYYY-MM-DD'T'HH:MM:ss.SSS'Z'" ({ISOz}) |
| type | string | "datetime" |
"""
def type, do: :datetime
def comparators, do: @comparators
def parse(config, %{column: column, comparator: comparator, value: value, inverse: inverse}) do
with {:ok, parsed_comparator} <- validate_comparator(type(), comparator, @comparators),
{:ok, parsed_value} <- validate_value(config, value) do
{:ok, %__MODULE__{type: type(), inverse: inverse,
column: column, comparator: parsed_comparator,
value: parsed_value}}
end
end
defp validate_value(config, value) do
Timex.parse(value, config.options[:format] || @format)
end
defimpl Filtrex.Encoder do
encoder "after", "before", "column > ?", &default/1
encoder "before", "after", "column < ?", &default/1
encoder "on or after", "on or before", "column >= ?", &default/1
encoder "on or before", "on or after", "column <= ?", &default/1
encoder "equals", "does not equal", "column = ?", &default/1
encoder "does not equal", "equals", "column != ?", &default/1
defp default(timex_date) do
{:ok, format} = Timex.format(timex_date, "{ISOdate} {ISOtime}")
[format]
end
end
end
|
lib/filtrex/conditions/datetime.ex
| 0.86923
| 0.497986
|
datetime.ex
|
starcoder
|