defmodule MircParser do
@moduledoc """
Parse mIRC colour codes and render to HTML.
Full documentation about these codes can be found [online](https://modern.ircdocs.horse/formatting.html).
The characters used for each kind of formatting are:
* "x02": Bold. Represented with `<b>`.
* "x1D": Italic. Represented with `<i>`.
* "x1F": Underline. Represented with `<u>`.
* "x16": Reverse text. Represented with a span of class `reverse`.
* "x1E": Strikethrough. Represented with `<s>`.
* "x11": Monospaced. Represented with `<tt>`.
* "x0F": Strips all formatting.
* "x03<ASCII int>": Sets the foreground colour. This is represented with a
san of class `fg<int>`.
* "x03<ASCII int>,<ASCII int>": Sets the foreground and background colour.
This is represented with a span of classes `fg<int> bg<int>`.
* "x03": Terminates colouring.
The colour codes are:
* 0: White (#FFFFFF)
* 1: Black (#000000)
* 2: Navy (#00007F)
* 3: Green (#009300)
* 4: Red (#FF0000)
* 5: Maroon (#7F0000)
* 6: Purple (#9C009C)
* 7: Orange (#FC7F00)
* 8: Yellow (#FFFF00)
* 9: Light Green (#00FC00)
* 10: Teal (#009393)
* 11: Cyan (#00FFFF)
* 12: Blue (#0000FC)
* 13: Pink (#FF00FF)
* 14: Grey (#7F7F7F)
* 15: Light Grey (#D2D2D2)
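## Examples
For instance, combining bold with a paired foreground/background colour
(output traced from the implementation below):
    iex> MircParser.render("\x02bold\x02 and \x034,8colour\x03")
    "<b>bold</b> and <span class=\"fg4 bg8\">colour</span>"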
"""
require Integer
@doc ~S"""
Converts a string of mIRC formatted text into tokens.
## Examples
iex> MircParser.parse("plain\x1Ditalic")
["plain", :italic, "italic"]
"""
def parse(string) do
string
|> List.wrap
|> Enum.map(&tokenize_colour_fg_bg/1)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x02", :bold) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x1D", :italic) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x1F", :underline) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x16", :reverse) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x1E", :strike) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x11", :mono) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x0F", :plain) end)
|> List.flatten
|> Enum.map(fn str -> tokenize(str, "\x03", :color) end)
|> List.flatten
end
defp tokenize_colour_match({val, idx}) do
if Integer.is_even(idx) do
val
else
tval = String.trim_leading(val, "\x03")
case String.split(tval, ",") do
[fg, bg] -> {:color, fg, bg}
[fg] -> {:color, fg}
end
end
end
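# With include_captures: true, Regex.split interleaves the colour escapes
# at odd indices, e.g. "\x034,8hi" yields ["", "\x034,8", "hi"].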
defp tokenize_colour_fg_bg(string) when is_binary(string) do
matches = Regex.split(~r{\x03[0-9]+(?:,[0-9]+)?}, string, include_captures: true)
matches # Because map_every doesn't take an offset
|> Enum.with_index
|> Enum.map(&tokenize_colour_match/1)
end
defp tokenize_colour_fg_bg(obj) do
obj
end
defp tokenize(string, symbol, token) when is_binary(string) do
string
|> String.split(symbol)
|> Enum.intersperse(token)
end
defp tokenize(obj, _, _) do
obj
end
defp open_tag(token) do
case token do
:bold -> "<b>"
:italic -> "<i>"
:underline -> "<u>"
:strike -> "<s>"
:mono -> "<tt>"
:reverse -> "<span class=\"reverse\">"
:color -> "<span class=\"color-invalid\">"
{:color, foreground} ->
"<span class=\"fg#{foreground}\">"
{:color, foreground, background} ->
"<span class=\"fg#{foreground} bg#{background}\">"
end
end
defp close_tag(token) do
case token do
:bold -> "</b>"
:italic -> "</i>"
:underline -> "</u>"
:strike -> "</s>"
:mono -> "</tt>"
:reverse -> "</span>"
:color -> "</span>"
{:color, _} -> "</span>"
{:color, _, _} -> "</span>"
end
end
defp close_tag_stack(tag_stack) do
tag_stack
|> Enum.map(&close_tag/1)
|> Enum.join
end
defp open_tag_stack(tag_stack) do
tag_stack
|> Enum.map(&open_tag/1)
|> Enum.join
end
defp just_tag(tuple) when is_tuple(tuple) do
elem(tuple, 0)
end
defp just_tag(tag) do
tag
end
defp not_token(popped, token) do
# If we find a :color on the left (regardless of what it is),
# make sure just :color without a tuple will match it.
just_tag(popped) != token
end
defp find_colour_token(popped) do
just_tag(popped) != :color
end
defp handle_token(token, tag_stack) do
case Enum.split_while(tag_stack, &not_token(&1, token)) do
{_, []} -> # Not found
# Special case: if any colour is on stack, and our token is a colour
# tuple, pop until we hit it. (If we didn't care, we could just elide
# this entire if part and just use the else. Alas, it's ugly.)
if just_tag(token) == :color do
case Enum.split_while(tag_stack, &find_colour_token/1) do
{to_close, [head | tail]} ->
# Like the regular have-to-close case...
{[token] ++ to_close ++ tail,
close_tag_stack(to_close ++ [head])
<> open_tag_stack(to_close)
<> open_tag(token)}
_ -> {[token | tag_stack], open_tag(token)}
end
else
{[token | tag_stack], open_tag(token)}
end
{to_close, [head | tail]} ->
# Reopen anything caught if we had to close something in the middle.
{to_close ++ tail,
close_tag_stack(to_close ++ [head]) <> open_tag_stack(to_close)}
end
end
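# e.g. handle_token(:bold, [:italic, :bold]) emits "</i></b><i>" (closing
# the intervening italic and reopening it) and returns the stack [:italic].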
# mIRC reuses the background if it's set.
defp backgroundize({:color, fg}, tag_stack) do
Enum.find_value(tag_stack, {:color, fg}, fn tag ->
case tag do
{:color, _, bg} -> {:color, fg, bg}
_ -> false
end
end)
end
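# e.g. backgroundize({:color, "4"}, [{:color, "2", "8"}]) #=> {:color, "4", "8"}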
defp render(input, tag_stack, output) do
case input do
[:plain | tail] ->
render(tail, [], output <> close_tag_stack(tag_stack))
[token | tail] when token in [:bold, :italic, :underline, :reverse, :strike, :mono, :color] ->
{new_tag_stack, new_output} = handle_token(token, tag_stack)
render(tail, new_tag_stack, output <> new_output)
[{:color, fg, bg} | tail] ->
{new_tag_stack, new_output} = handle_token({:color, fg, bg}, tag_stack)
render(tail, new_tag_stack, output <> new_output)
[{:color, fg} | tail] ->
maybe_bg = backgroundize({:color, fg}, tag_stack)
{new_tag_stack, new_output} = handle_token(maybe_bg, tag_stack)
render(tail, new_tag_stack, output <> new_output)
[head | tail] when is_binary(head) ->
render(tail, tag_stack, output <> head)
[] ->
output <> close_tag_stack(tag_stack)
end
end
@doc ~S"""
Turns a string with mIRC formatting into an HTML string.
## Examples
iex> MircParser.render("foo\x02bar")
"foo<b>bar</b>"
"""
def render(string) when is_binary(string) do
render_tokens(parse(string))
end
@doc ~S"""
Turns a list of tokens into an HTML string.
## Examples
iex> MircParser.render_tokens(["foo", :bold, "bar"])
"foo<b>bar</b>"
"""
def render_tokens(tokens) when is_list(tokens) do
render(tokens, [], "")
end
end
# source file: lib/mirc_parser.ex
defprotocol Enum.Iterator do
@moduledoc """
This is the protocol used by the `Enum` module.
Usually, when you invoke a function in the module `Enum`,
the first argument passed to `Enum` is a collection which
is forwarded to this protocol in order to retrieve information
on how to iterate the collection. That said, when:
Enum.map [1,2,3], &1 * 2
Is invoked, it invokes `Enum.Iterator.iterator([1,2,3])`
which returns all the information required by Enum.
Read each function documentation below for more information.
"""
@only [List, Record, Function]
@doc """
Iteration in Elixir happens with the help of an iterator
function. Every time this function is called, it must
return a tuple with two elements. The first element
is the next item and the second is any Elixir term
that the function will receive as its argument the
next time it is invoked.
When there are no more items to be iterated, the function
must return the atom `:stop`.
In order to retrieve this iterator function, Elixir invokes
`Enum.Iterator.iterator(collection)` which should return a
tuple with two elements: the first element is the iterator
function and the second is the first step of iteration.
As an example, here is the implementation of iterator for lists:
def iterator(list), do: { iterate(&1), iterate(list) }
defp iterate([h|t]), do: { h, t }
defp iterate([]), do: :stop
## Iterating lists
If a data structure needs to be converted to a list in order
to be iterated, the iterator function can simply return the
list and the Enum module will be able to take over the list
and retrieve the proper iterator function.
"""
def iterator(collection)
@doc """
The function used to retrieve the collection size.
"""
def count(collection)
end
defprotocol Enum.OrdIterator do
@moduledoc """
This protocol is invoked by some functions in Enum which
require an ordered collection to function correctly. For
instance, `Enum.split_with/2` and `Enum.take_while/2` rely
on this protocol.
An ordered collection does not mean the items are ordered
according to the Elixir ordering but simply that any two
distinct instances of the same collection with exactly
the same items always yield the same order when iterated.
"""
@only [List, Record]
@doc """
Must return a tuple under the same conditions as
`Enum.Iterator.iterator`.
"""
def iterator(collection)
@doc """
On each step, the iterator function returned by `iterator/1`
returns a tuple with two elements. This function receives
those two elements as a tuple and must return a list back.
This is used in order to quickly return a list from any point
during iteration. For example, consider the function `Enum.drop`.
`Enum.drop collection, 3` should drop 3 items and return a list
back. While we could loop over the remaining items to get a list
back, this function is invoked allowing us to get a result
back without a need to loop the remaining items.
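As an example, here is the implementation for lists, where each
iteration step already carries the rest of the list:
    def to_list({ h, next }, _iterator), do: [h|next]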
"""
def to_list({ current, next }, iterator)
end
defmodule Enum do
require Enum.Iterator, as: I
require Enum.OrdIterator, as: O
@moduledoc """
Provides a set of algorithms that enumerate over collections according to the
`Enum.Iterator` protocol. Most of the functions in this module have two
flavours. If a given collection implements the mentioned protocol (like
list, for instance), you can do:
Enum.map [1,2,3], fn(x) -> x * 2 end
Depending on the type of the collection, the user-provided function will
accept a certain type of argument. For dicts, the argument is always a
`{ key, value }` tuple.
"""
@doc """
Invokes the given `fun` for each item in the `collection` and returns true if
each invocation returns true as well; otherwise it short-circuits and returns
false.
## Examples
Enum.all? [2,4,6], fn(x) -> rem(x, 2) == 0 end
#=> true
Enum.all? [2,3,4], fn(x) -> rem(x, 2) == 0 end
#=> false
If no function is given, it defaults to checking if
all items in the collection evaluate to true.
Enum.all? [1,2,3] #=> true
Enum.all? [1,nil,3] #=> false
"""
def all?(collection, fun // fn(x) -> x end)
def all?(collection, fun) when is_list(collection) do
do_all?(collection, fun)
end
def all?(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_all?(pointer, iterator, fun)
list when is_list(list) ->
do_all?(list, fun)
end
end
@doc """
Invokes the given `fun` for each item in the `collection` and returns true if
at least one invocation returns true. Returns false otherwise.
## Examples
Enum.any? [2,4,6], fn(x) -> rem(x, 2) == 1 end
#=> false
Enum.any? [2,3,4], fn(x) -> rem(x, 2) == 1 end
#=> true
If no function is given, it defaults to checking if
at least one item in the collection evaluates to true.
Enum.any? [false,false,false] #=> false
Enum.any? [false,true,false] #=> true
"""
def any?(collection, fun // fn(x) -> x end)
def any?(collection, fun) when is_list(collection) do
do_any?(collection, fun)
end
def any?(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_any?(pointer, iterator, fun)
list when is_list(list) ->
do_any?(list, fun)
end
end
@doc """
Returns the collection size.
## Examples
Enum.count [1,2,3] #=> 3
"""
def count(collection) do
I.count(collection)
end
@doc """
Drops the first `count` items from the collection. Expects an ordered
collection.
## Examples
Enum.drop [1,2,3], 2 #=> [3]
Enum.drop [1,2,3], 10 #=> []
Enum.drop [1,2,3], 0 #=> [1,2,3]
"""
def drop(collection, count) do
elem split(collection, count), 2
end
@doc """
Drops items at the beginning of `collection` while `fun` returns true.
Expects an ordered collection.
## Examples
Enum.drop_while [1,2,3,4,5], fn(x) -> x < 3 end
#=> [3,4,5]
"""
def drop_while(collection, fun) when is_list(collection) do
do_drop_while(collection, fun)
end
def drop_while(collection, fun) do
case O.iterator(collection) do
{ iterator, pointer } ->
module = O.__impl_for__!(collection)
do_drop_while(pointer, iterator, fun, module)
list when is_list(list) ->
do_drop_while(list, fun)
end
end
@doc """
Invokes the given `fun` for each item in the `collection`.
Returns the `collection` itself.
## Examples
Enum.each ['some', 'example'], fn(x) -> IO.puts x end
"""
def each(collection, fun) when is_list(collection) do
:lists.foreach(fun, collection)
:ok
end
def each(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_each(pointer, iterator, fun)
:ok
list when is_list(list) ->
each(list, fun)
end
end
@doc """
Returns true if the collection is empty, otherwise false.
## Examples
Enum.empty? [] #=> true
Enum.empty? [1,2,3] #=> false
"""
def empty?(collection) when is_list(collection) do
collection == []
end
def empty?(collection) do
case I.iterator(collection) do
{ _iterator, pointer } -> pointer === :stop
list when is_list(list) -> list == []
end
end
@doc """
Filters the collection, i.e. returns only those elements
for which `fun` returns true.
## Examples
Enum.filter [1, 2, 3], fn(x) -> rem(x, 2) == 0 end
#=> [2]
"""
def filter(collection, fun) when is_list(collection) do
lc item inlist collection, fun.(item), do: item
end
def filter(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_filter(pointer, iterator, fun)
list when is_list(list) ->
filter(list, fun)
end
end
@doc """
Filters the collection and maps its values in one pass.
## Examples
Enum.filter_map [1, 2, 3], fn(x) -> rem(x, 2) == 0 end, &1 * 2
#=> [4]
"""
def filter_map(collection, filter, mapper) when is_list(collection) do
lc item inlist collection, filter.(item), do: mapper.(item)
end
def filter_map(collection, filter, mapper) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_filter_map(pointer, iterator, filter, mapper)
list when is_list(list) ->
filter_map(list, filter, mapper)
end
end
@doc """
Returns the first item for which `fun` returns a truthy value. If no such
item is found, returns `ifnone`.
## Examples
Enum.find [2,4,6], fn(x) -> rem(x, 2) == 1 end
#=> nil
Enum.find [2,4,6], 0, fn(x) -> rem(x, 2) == 1 end
#=> 0
Enum.find [2,3,4], fn(x) -> rem(x, 2) == 1 end
#=> 3
"""
def find(collection, ifnone // nil, fun)
def find(collection, ifnone, fun) when is_list(collection) do
do_find(collection, ifnone, fun)
end
def find(collection, ifnone, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_find(pointer, iterator, ifnone, fun)
list when is_list(list) ->
do_find(list, ifnone, fun)
end
end
@doc """
Similar to find, but returns the value of the function
invocation instead of the element itself.
## Examples
Enum.find_value [2,4,6], fn(x) -> rem(x, 2) == 1 end
#=> nil
Enum.find_value [2,3,4], fn(x) -> rem(x, 2) == 1 end
#=> true
"""
def find_value(collection, ifnone // nil, fun)
def find_value(collection, ifnone, fun) when is_list(collection) do
do_find_value(collection, ifnone, fun)
end
def find_value(collection, ifnone, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_find_value(pointer, iterator, ifnone, fun)
list when is_list(list) ->
do_find_value(list, ifnone, fun)
end
end
@doc """
Similar to find, but returns the index (count starts with 1)
of the item instead of the element itself.
Expects an ordered collection.
## Examples
Enum.find_index [2,4,6], fn(x) -> rem(x, 2) == 1 end
#=> nil
Enum.find_index [2,3,4], fn(x) -> rem(x, 2) == 1 end
#=> 2
"""
def find_index(collection, fun) when is_list(collection) do
do_find_index(collection, 1, fun)
end
def find_index(collection, fun) do
case O.iterator(collection) do
{ iterator, pointer } ->
do_find_index(pointer, iterator, 1, fun)
list when is_list(list) ->
do_find_index(list, 1, fun)
end
end
@doc """
Returns the first item in the collection or nil otherwise.
## Examples
Enum.first [] #=> nil
Enum.first [1,2,3] #=> 1
"""
def first([]), do: nil
def first([h|_]), do: h
def first(collection) do
case O.iterator(collection) do
{ _iterator, { h, _ } } -> h
{ _iterator, :stop } -> nil
list when is_list(list) -> first(list)
end
end
@doc """
Joins the given `collection` according to `joiner`.
Joiner can be either a binary or a list and the
result will be of the same type as joiner. If
joiner is not passed at all, it defaults to an
empty binary.
All items in the collection must be convertible
to binary, otherwise an error is raised.
## Examples
Enum.join([1,2,3]) #=> "123"
Enum.join([1,2,3], " = ") #=> "1 = 2 = 3"
Enum.join([1,2,3], ' = ') #=> '1 = 2 = 3'
"""
def join(collection, joiner // "")
def join(collection, joiner) when is_list(joiner) do
binary_to_list join(collection, list_to_binary(joiner))
end
def join(collection, joiner) when is_list(collection) and is_binary(joiner) do
do_join(collection, joiner, nil)
end
def join(collection, joiner) when is_binary(joiner) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_join(pointer, iterator, joiner, nil)
list when is_list(list) ->
do_join(list, joiner, nil)
end
end
@doc """
Returns a new collection, where each item is the result
of invoking `fun` on each corresponding item of `collection`.
For dicts, the function accepts a key-value tuple.
## Examples
Enum.map [1, 2, 3], fn(x) -> x * 2 end
#=> [2, 4, 6]
Enum.map [a: 1, b: 2], fn({k, v}) -> { k, -v } end
#=> [a: -1, b: -2]
"""
def map(collection, fun) when is_list(collection) do
lc item inlist collection, do: fun.(item)
end
def map(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_map(pointer, iterator, fun)
list when is_list(list) ->
map(list, fun)
end
end
@doc """
Maps and joins the given `collection` in one pass.
Joiner can be either a binary or a list and the
result will be of the same type as joiner. If
joiner is not passed at all, it defaults to an
empty binary.
All items in the collection must be convertible
to binary, otherwise an error is raised.
## Examples
Enum.map_join([1,2,3], &1 * 2) #=> "246"
Enum.map_join([1,2,3], &1 * 2, " = ") #=> "2 = 4 = 6"
Enum.map_join([1,2,3], &1 * 2, ' = ') #=> '2 = 4 = 6'
"""
def map_join(collection, joiner // "", mapper)
def map_join(collection, joiner, mapper) when is_list(joiner) do
binary_to_list map_join(collection, list_to_binary(joiner), mapper)
end
def map_join(collection, joiner, mapper) when is_list(collection) and is_binary(joiner) do
do_map_join(collection, mapper, joiner, nil)
end
def map_join(collection, joiner, mapper) when is_binary(joiner) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_map_join(pointer, iterator, mapper, joiner, nil)
list when is_list(list) ->
do_map_join(list, mapper, joiner, nil)
end
end
@doc """
Invokes the given `fun` for each item in the `collection`
while also keeping an accumulator. Returns a tuple where
the first element is the mapped collection and the second
one is the final accumulator.
For dicts, the first tuple element has to be a { key, value }
tuple itself.
## Examples
Enum.map_reduce [1, 2, 3], 0, fn(x, acc) -> { x * 2, x + acc } end
#=> { [2, 4, 6], 6 }
"""
def map_reduce(collection, acc, f) when is_list(collection) do
:lists.mapfoldl(f, acc, collection)
end
def map_reduce(collection, acc, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_map_reduce(pointer, iterator, [], acc, fun)
list when is_list(list) ->
map_reduce(list, acc, fun)
end
end
@doc """
Finds the element at the nth index. Returns nil in case
the given index is outside the range of the collection.
Expects an ordered collection.
## Examples
Enum.nth! [2,4,6], 1 #=> 2
Enum.nth! [2,4,6], 3 #=> 6
Enum.nth! [2,4,6], 5 #=> raises Enum.OutOfBoundsError
"""
def nth!(collection, n) when is_list(collection) and n > 0 do
do_nth!(collection, n)
end
def nth!(collection, n) when n > 0 do
case O.iterator(collection) do
{ iterator, pointer } ->
do_nth!(pointer, iterator, n)
list when is_list(list) ->
do_nth!(list, n)
end
end
@doc """
Partitions `collection` into two lists: the first contains the elements
for which `fun` returns a truthy value, and the second contains those
for which `fun` returns false or nil.
## Examples
Enum.partition [1, 2, 3], fn(x) -> rem(x, 2) == 0 end
#=> { [2], [1,3] }
"""
def partition(collection, fun) when is_list(collection) do
do_partition(collection, fun, [], [])
end
def partition(collection, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_partition(pointer, iterator, fun, [], [])
list when is_list(list) ->
do_partition(list, fun, [], [])
end
end
@doc """
Invokes `fun` for each element in the collection passing the accumulator
`acc` and the element as arguments. The return value is stored in `acc`.
Returns the accumulator.
## Examples
Enum.reduce [1, 2, 3], 0, fn(x, acc) -> x + acc end
#=> 6
"""
def reduce(collection, acc, fun) when is_list(collection) do
:lists.foldl(fun, acc, collection)
end
def reduce(collection, acc, fun) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_reduce(pointer, iterator, acc, fun)
list when is_list(list) ->
reduce(list, acc, fun)
end
end
@doc """
Sorts the collection according to the quick sort algorithm.
## Examples
Enum.qsort [3,2,1] #=> [1,2,3]
"""
def qsort(collection) when is_list(collection) do
do_list_qsort(collection, [])
end
def qsort(collection) do
case I.iterator(collection) do
{ iterator, pointer } ->
do_qsort(pointer, iterator, [])
list when is_list(list) ->
qsort(list)
end
end
@doc """
Splits the enumerable into two collections, leaving `count` elements in the
first one. Expects an ordered collection.
## Examples
Enum.split [1,2,3], 2 #=> { [1,2], [3] }
Enum.split [1,2,3], 10 #=> { [1,2,3], [] }
Enum.split [1,2,3], 0 #=> { [], [1,2,3] }
"""
def split(collection, count) when is_list(collection) and count >= 0 do
do_split(collection, count, [])
end
def split(collection, count) when count >= 0 do
case O.iterator(collection) do
{ iterator, pointer } ->
module = O.__impl_for__!(collection)
do_split(pointer, iterator, count, [], module)
list when is_list(list) ->
do_split(list, count, [])
end
end
@doc """
Splits `collection` at the first element, for which `fun` returns true.
Expects an ordered collection.
## Examples
Enum.split_with [1,2,3,4], fn x -> x == 2 end
#=> { [1], [2, 3, 4] }
"""
def split_with(collection, fun) when is_list(collection) do
do_split_with(collection, fun, [])
end
def split_with(collection, fun) do
case O.iterator(collection) do
{ iterator, pointer } ->
module = O.__impl_for__!(collection)
do_split_with(pointer, iterator, fun, [], module)
list when is_list(list) ->
do_split_with(list, fun, [])
end
end
@doc """
Takes the first `count` items from the collection. Expects an ordered
collection.
## Examples
Enum.take [1,2,3], 2 #=> [1,2]
Enum.take [1,2,3], 10 #=> [1,2,3]
Enum.take [1,2,3], 0 #=> []
"""
def take(collection, count) do
elem split(collection, count), 1
end
@doc """
Takes the items at the beginning of `collection` while `fun` returns true.
Expects an ordered collection.
## Examples
Enum.take_while [1,2,3], fn(x) -> x < 3 end
#=> [1, 2]
"""
def take_while(collection, fun) when is_list(collection) do
do_take_while(collection, fun)
end
def take_while(collection, fun) do
case O.iterator(collection) do
{ iterator, pointer } ->
do_take_while(pointer, iterator, fun)
list when is_list(list) ->
do_take_while(list, fun)
end
end
@doc """
Iterates the given function n times, passing values from 1
to n.
## Examples
Enum.times 3, fn(x) -> IO.inspect x end
1
2
3
"""
def times(times, function) when times >= 0 do
case is_function(function, 0) do
true ->
do_times_0(times, 1, function)
_ ->
do_times_1(times, 1, function)
end
times
end
@doc """
Iterates the given function n times, passing values from 1
to n. Also has an accumulator similar to reduce to store the
value between computations.
## Examples
Enum.times 5, 0, fn(x, acc) -> acc + x end
#=> 15
"""
def times(times, acc, function) when times >= 0 do
do_times_2(times, 1, function, acc)
end
## Implementations
## all?
defp do_all?([h|t], fun) do
if fun.(h) do
do_all?(t, fun)
else
false
end
end
defp do_all?([], _) do
true
end
defp do_all?({ h, next }, iterator, fun) do
if fun.(h) do
do_all?(iterator.(next), iterator, fun)
else
false
end
end
defp do_all?(:stop, _, _) do
true
end
## any?
defp do_any?([h|t], fun) do
if fun.(h) do
true
else
do_any?(t, fun)
end
end
defp do_any?([], _) do
false
end
defp do_any?({ h, next }, iterator, fun) do
if fun.(h) do
true
else
do_any?(iterator.(next), iterator, fun)
end
end
defp do_any?(:stop, _, _) do
false
end
## drop_while
defp do_drop_while([h|t], fun) do
if fun.(h) do
do_drop_while(t, fun)
else
[h|t]
end
end
defp do_drop_while([], _) do
[]
end
defp do_drop_while({ h, next } = extra, iterator, fun, module) do
if fun.(h) do
do_drop_while(iterator.(next), iterator, fun, module)
else
module.to_list(extra, iterator)
end
end
defp do_drop_while(:stop, _, _, _) do
[]
end
## find
defp do_find([h|t], ifnone, fun) do
if fun.(h) do
h
else
do_find(t, ifnone, fun)
end
end
defp do_find([], ifnone, _) do
ifnone
end
defp do_find({ h, next }, iterator, ifnone, fun) do
if fun.(h) do
h
else
do_find(iterator.(next), iterator, ifnone, fun)
end
end
defp do_find(:stop, _, ifnone, _) do
ifnone
end
## find_value
defp do_find_value([h|t], ifnone, fun) do
fun.(h) || do_find_value(t, ifnone, fun)
end
defp do_find_value([], ifnone, _) do
ifnone
end
defp do_find_value({ h, next }, iterator, ifnone, fun) do
fun.(h) || do_find_value(iterator.(next), iterator, ifnone, fun)
end
defp do_find_value(:stop, _, ifnone, _) do
ifnone
end
## find_index
defp do_find_index([h|t], counter, fun) do
if fun.(h) do
counter
else
do_find_index(t, counter + 1, fun)
end
end
defp do_find_index([], _, _) do
nil
end
defp do_find_index({ h, next }, iterator, counter, fun) do
if fun.(h) do
counter
else
do_find_index(iterator.(next), iterator, counter + 1, fun)
end
end
defp do_find_index(:stop, _, _, _) do
nil
end
## each
defp do_each({ h, next }, iterator, fun) do
fun.(h)
do_each(iterator.(next), iterator, fun)
end
defp do_each(:stop, _, _) do
[]
end
## filter
defp do_filter({ h, next }, iterator, fun) do
if fun.(h) do
[h|do_filter(iterator.(next), iterator, fun)]
else
do_filter(iterator.(next), iterator, fun)
end
end
defp do_filter(:stop, _, _) do
[]
end
## filter_map
defp do_filter_map({ h, next }, iterator, filter, mapper) do
if filter.(h) do
[mapper.(h)|do_filter_map(iterator.(next), iterator, filter, mapper)]
else
do_filter_map(iterator.(next), iterator, filter, mapper)
end
end
defp do_filter_map(:stop, _, _, _) do
[]
end
## nth
defp do_nth!([h|_], 1), do: h
defp do_nth!([_|t], n), do: do_nth!(t, n - 1)
defp do_nth!([], _), do: raise Enum.OutOfBoundsError
defp do_nth!({ h, _next }, _iterator, 1), do: h
defp do_nth!({ _, next }, iterator, n), do: do_nth!(iterator.(next), iterator, n - 1)
defp do_nth!(:stop, _iterator, _), do: raise Enum.OutOfBoundsError
## reduce
defp do_reduce({ h, next }, iterator, acc, fun) do
do_reduce(iterator.(next), iterator, fun.(h, acc), fun)
end
defp do_reduce(:stop, _, acc, _) do
acc
end
## split_with
defp do_split_with([h|t], fun, acc) do
if fun.(h) do
do_split_with(t, fun, [h|acc])
else
{ List.reverse(acc), [h|t] }
end
end
defp do_split_with([], _, acc) do
{ List.reverse(acc), [] }
end
defp do_split_with({ h, next } = extra, iterator, fun, acc, module) do
if fun.(h) do
do_split_with(iterator.(next), iterator, fun, [h|acc], module)
else
{ List.reverse(acc), module.to_list(extra, iterator) }
end
end
defp do_split_with(:stop, _, _, acc, _module) do
{ List.reverse(acc), [] }
end
## join
defp do_join([h|t], joiner, nil) do
do_join(t, joiner, to_binary(h))
end
defp do_join([h|t], joiner, acc) do
acc = << acc | :binary, joiner | :binary, to_binary(h) | :binary >>
do_join(t, joiner, acc)
end
defp do_join([], _joiner, acc) do
acc || ""
end
defp do_join({ h, next }, iterator, joiner, nil) do
do_join(iterator.(next), iterator, joiner, to_binary(h))
end
defp do_join({ h, next }, iterator, joiner, acc) do
acc = << acc | :binary, joiner | :binary, to_binary(h) | :binary >>
do_join(iterator.(next), iterator, joiner, acc)
end
defp do_join(:stop, _, _joiner, acc) do
acc || ""
end
## map join
defp do_map_join([h|t], mapper, joiner, nil) do
do_map_join(t, mapper, joiner, to_binary(mapper.(h)))
end
defp do_map_join([h|t], mapper, joiner, acc) do
acc = << acc | :binary, joiner | :binary, to_binary(mapper.(h)) | :binary >>
do_map_join(t, mapper, joiner, acc)
end
defp do_map_join([], _mapper, _joiner, acc) do
acc || ""
end
defp do_map_join({ h, next }, iterator, mapper, joiner, nil) do
do_map_join(iterator.(next), iterator, mapper, joiner, to_binary(mapper.(h)))
end
defp do_map_join({ h, next }, iterator, mapper, joiner, acc) do
acc = << acc | :binary, joiner | :binary, to_binary(mapper.(h)) | :binary >>
do_map_join(iterator.(next), iterator, mapper, joiner, acc)
end
defp do_map_join(:stop, _, _mapper, _joiner, acc) do
acc || ""
end
## map
defp do_map({ h, next }, iterator, fun) do
[fun.(h)|do_map(iterator.(next), iterator, fun)]
end
defp do_map(:stop, _, _) do
[]
end
## map_reduce
defp do_map_reduce({ h, next }, iterator, list_acc, acc, f) do
{ result, acc } = f.(h, acc)
do_map_reduce(iterator.(next), iterator, [result|list_acc], acc, f)
end
defp do_map_reduce(:stop, _, list_acc, acc, _f) do
{ List.reverse(list_acc), acc }
end
## partition
defp do_partition([h|t], fun, acc1, acc2) do
if fun.(h) do
do_partition(t, fun, [h|acc1], acc2)
else
do_partition(t, fun, acc1, [h|acc2])
end
end
defp do_partition([], _, acc1, acc2) do
{ List.reverse(acc1), List.reverse(acc2) }
end
defp do_partition({ h, next }, iterator, fun, acc1, acc2) do
if fun.(h) do
do_partition(iterator.(next), iterator, fun, [h|acc1], acc2)
else
do_partition(iterator.(next), iterator, fun, acc1, [h|acc2])
end
end
defp do_partition(:stop, _, _, acc1, acc2) do
{ List.reverse(acc1), List.reverse(acc2) }
end
## qsort (lists)
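# Three-way partition around the pivot into { less, equal, greater },
# then sort the partitions recursively into the accumulator.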
defp do_list_qsort([], acc) do
acc
end
defp do_list_qsort([h|t], acc) do
do_list_qsort_part(h, t, {[], [h], []}, acc)
end
defp do_list_qsort_part(_, [], { l, e, g }, acc) do
do_list_qsort(l, e ++ do_list_qsort(g, acc))
end
defp do_list_qsort_part(x, [h|t], { l, e, g }, acc) do
cond do
h < x ->
do_list_qsort_part(x, t, { [h|l], e, g }, acc)
h > x ->
do_list_qsort_part(x, t, { l, e, [h|g] }, acc)
true ->
do_list_qsort_part(x, t, { l, [h|e], g }, acc)
end
end
## qsort (iterator)
defp do_qsort({ h, next }, iterator, acc) do
do_qsort_part(h, iterator.(next), iterator, {[], [h], []}, acc)
end
defp do_qsort(:stop, _iterator, acc) do
acc
end
defp do_qsort_part(_, :stop, _iterator, { l, e, g }, acc) do
do_list_qsort(l, e ++ do_list_qsort(g, acc))
end
defp do_qsort_part(x, { h, next }, iterator, { l, e, g }, acc) do
cond do
h < x ->
do_qsort_part(x, iterator.(next), iterator, { [h|l], e, g }, acc)
h > x ->
do_qsort_part(x, iterator.(next), iterator, { l, e, [h|g] }, acc)
true ->
do_qsort_part(x, iterator.(next), iterator, { l, [h|e], g }, acc)
end
end
## split
defp do_split([h|t], counter, acc) when counter > 0 do
do_split(t, counter - 1, [h|acc])
end
defp do_split(list, 0, acc) do
{ List.reverse(acc), list }
end
defp do_split([], _, acc) do
{ List.reverse(acc), [] }
end
defp do_split({ h, next }, iterator, counter, acc, module) when counter > 0 do
do_split(iterator.(next), iterator, counter - 1, [h|acc], module)
end
defp do_split(extra, iterator, 0, acc, module) do
{ List.reverse(acc), module.to_list(extra, iterator) }
end
defp do_split(:stop, _, _, acc, _module) do
{ List.reverse(acc), [] }
end
## take_while
defp do_take_while([h|t], fun) do
if fun.(h) do
[h|do_take_while(t, fun)]
else
[]
end
end
defp do_take_while([], _) do
[]
end
defp do_take_while({ h, next }, iterator, fun) do
if fun.(h) do
[h|do_take_while(iterator.(next), iterator, fun)]
else
[]
end
end
defp do_take_while(:stop, _, _) do
[]
end
## times
defp do_times_0(limit, counter, _function) when counter > limit do
end
defp do_times_0(limit, counter, function) do
function.()
do_times_0(limit, 1 + counter, function)
end
defp do_times_1(limit, counter, _function) when counter > limit do
end
defp do_times_1(limit, counter, function) do
function.(counter)
do_times_1(limit, 1 + counter, function)
end
defp do_times_2(limit, counter, _function, acc) when counter > limit do
acc
end
defp do_times_2(limit, counter, function, acc) do
new_acc = function.(counter, acc)
do_times_2(limit, 1 + counter, function, new_acc)
end
end
defimpl Enum.Iterator, for: List do
def iterator(list), do: list
def count(list), do: length(list)
end
defimpl Enum.OrdIterator, for: List do
def iterator(list), do: list
def to_list({ h, next }, _), do: [h|next]
end
defimpl Enum.Iterator, for: Function do
def iterator(function) do
{ function, function.(:start) }
end
def count(function) do
do_count(function.(:start), function, 0)
end
defp do_count({ _, next }, function, acc) do
do_count(function.(next), function, acc + 1)
end
defp do_count(:stop, _, acc) do
acc
end
end
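# A sketch of a function-based collection (the names here are illustrative,
# not part of the original source). The function maps :start to the first
# step, each step to the next, and returns :stop when exhausted:
#
#     countdown = fn
#       :start -> { 3, 2 }
#       0 -> :stop
#       n -> { n, n - 1 }
#     end
#     Enum.map countdown, fn(x) -> x * 10 end  #=> [30, 20, 10]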
defimpl Enum.OrdIterator, for: Function do
def iterator(function) do
{ function, function.(:start) }
end
def to_list({ h, next }, function) do
[h|to_list(function.(next), function)]
end
def to_list(:stop, _function) do
[]
end
end
# source file: lib/elixir/lib/enum.ex
defmodule Matcha.Context do
@moduledoc """
Different types of match spec are intended to be used for different purposes,
and support different instructions in their bodies for different use-cases.
The modules implementing the `Matcha.Context` behaviour define the different types of `Matcha.Spec`,
provide documentation for what specialized instructions that type supports, and are used during
Elixir-to-match spec conversion as a concrete function definition to use when expanding instructions
(since most of these specialized instructions do not exist anywhere as actual functions,
this lets the Elixir compiler complain about invalid instructions as `UndefinedFunctionError`s).
### Predefined contexts
Currently there are three applications of match specs supported:
- `:memory`:
Matchspecs intended to be used to filter/map over an in-memory list in an optimized fashion.
These types of match spec reference the `Matcha.Context.Memory` module.
- `:table`:
Matchspecs intended to be used to efficiently select data from BEAM VM "table"
tools, such as `:ets`, `:dets`, and `:mnesia`, and massage the values returned.
These types of match spec reference the `Matcha.Context.Table` module.
- `:trace`:
Matchspecs intended to be used to instruct tracing utilities such as
`:dbg` and `:recon_trace` exactly what function calls with what arguments to trace,
and allows invoking special trace command instructions in response.
These types of match spec reference the `Matcha.Context.Trace` module.
### Custom contexts
The context mechanism is technically extensible: any module can implement the `Matcha.Context`
behaviour, define the callbacks, and list public no-op functions to allow their usage in
specs compiled with that context (via `Matcha.spec(CustomContext) do...`).
In practice there is little point in defining a custom context:
the supported use-cases for match specs are tightly coupled to the erlang language,
and `Matcha` covers all of them with its provided contexts, which should be sufficient for any application.
The module+behaviour+callback implementation used in `Matcha` is less about offering extensibility,
but instead used to simplify special-casing in `Matcha.Spec` function implementations,
raise Elixir-flavored errors when an invalid instruction is used in the different types of spec,
and provide a place to document what they do when invoked.
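For illustration only, the skeleton of a custom context (the module name
`MyApp.Matcha.Context` is hypothetical) would look roughly like:
    defmodule MyApp.Matcha.Context do
      @behaviour Matcha.Context
      # implement the callbacks below, plus a public no-op function for
      # each specialized instruction this context should permit
    end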
"""
alias Matcha.Error
alias Matcha.Source
@type t :: module()
@callback __context_name__() :: atom()
@callback __erl_test_type__() :: Source.erl_test_type()
@callback __default_test_target__() :: any
@callback __valid_test_target__(test_target :: any) :: boolean()
@callback __prepare_source__(source :: any) :: any
@callback __emit_test_result__(result :: any) :: any
@callback __invalid_test_target_error_message__(test_target :: any) :: String.t()
@callback __handle_erl_test_results__(return :: any) ::
{:ok, result :: any} | {:error, Error.problems()}
end
# source file: lib/matcha/context.ex
defmodule TableRex.Renderer.Text do
@moduledoc """
Renderer module which handles outputting ASCII-style tables for display.
"""
alias TableRex.Cell
alias TableRex.Table
alias TableRex.Renderer.Text.Meta
@behaviour TableRex.Renderer
@doc """
Provides a level of sane defaults for the Text rendering module.
Example with color function:
```elixir
%{
horizontal_symbol: "─",
right_intersection_symbol: "┤",
left_intersection_symbol: "├",
vertical_symbol: "│",
top_intersection_symbol: "┼",
bottom_intersection_symbol: "┴",
inner_intersection_symbol: "┼",
top_frame_symbol: "─",
header_separator_symbol: "─",
bottom_frame_symbol: "─",
top_left_corner_symbol: "├",
top_right_corner_symbol: "┤",
bottom_left_corner_symbol: "└",
bottom_right_corner_symbol: "┘",
row_seperator: true,
header_color_function: fn col_index ->
cond do
rem(col_index, 4) == 0 ->
[IO.ANSI.black(), IO.ANSI.magenta_background()]
rem(col_index, 4) == 1 ->
[IO.ANSI.black(), IO.ANSI.green_background()]
rem(col_index, 4) == 2 ->
[IO.ANSI.black(), IO.ANSI.color_background(9)]
rem(col_index, 4) == 3 ->
[IO.ANSI.black(), IO.ANSI.yellow_background()]
end
end,
table_color_function: fn row_index, col_index ->
cond do
rem(col_index, 4) == 0 ->
[IO.ANSI.magenta()]
rem(col_index, 4) == 1 ->
[IO.ANSI.green()]
rem(col_index, 4) == 2 ->
[IO.ANSI.color(9)]
rem(col_index, 4) == 3 ->
[IO.ANSI.yellow()]
end
end
}
```
"""
def default_options do
%{
horizontal_symbol: "─",
right_intersection_symbol: "┤",
left_intersection_symbol: "├",
vertical_symbol: "│",
top_intersection_symbol: "┼",
bottom_intersection_symbol: "┴",
inner_intersection_symbol: "┼",
top_frame_symbol: "─",
header_separator_symbol: "─",
bottom_frame_symbol: "─",
top_left_corner_symbol: "├",
top_right_corner_symbol: "┤",
bottom_left_corner_symbol: "└",
bottom_right_corner_symbol: "┘",
row_seperator: true,
header_color_function: fn _ -> [IO.ANSI.bright()] end,
table_color_function: fn _, _ -> nil end
}
end
@doc """
Implementation of the TableRex.Renderer behaviour.
"""
def render(table = %Table{}, opts) do
{col_widths, row_heights} = max_dimensions(table)
# Calculations that would otherwise be carried out multiple times are done once and their
# results are stored in the %Meta{} struct which is then passed through the pipeline.
table_width = table_width(col_widths)
intersections = intersections(col_widths)
meta = %Meta{
col_widths: col_widths,
row_heights: row_heights,
table_width: table_width,
inner_intersections: intersections
}
rendered =
{table, meta, opts, []}
|> render_header
|> render_rows
|> render_bottom_frame
|> render_to_string
{:ok, rendered}
end
defp render_line(table_width, intersections, separator_symbol, intersection_symbol) do
for n <- 1..(table_width - 2) do
if n in intersections, do: intersection_symbol, else: separator_symbol
end
|> Enum.join()
end
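# e.g. render_line(11, MapSet.new([0, 6, 10]), "─", "┼") #=> "─────┼───"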
defp render_header({%Table{header_row: []} = table, meta, opts, rendered}) do
{table, meta, opts, rendered}
end
defp render_header({%Table{header_row: header_row} = table, meta, opts, rendered}) do
row_index = -1
header =
header_row
|> Enum.map(fn cell ->
%{cell | rendered_value: String.replace(cell.rendered_value, "\n", "\\n")}
end)
|> Enum.with_index()
|> Enum.map(fn {cell, col_index} ->
color = opts[:header_color_function].(col_index) || cell.color
{%{cell | color: color}, col_index}
end)
|> Enum.map(&render_cell(table, meta, row_index, &1, opts[:header_separator_symbol]))
|> column_cells_to_rows()
|> Enum.map(&Enum.intersperse(&1, opts[:top_intersection_symbol]))
|> Enum.map(&Enum.join(&1))
|> Enum.map(&(opts[:top_left_corner_symbol] <> &1 <> opts[:top_right_corner_symbol]))
{table, meta, opts, header ++ rendered}
end
defp render_rows({%Table{rows: rows} = table, meta, opts, rendered}) do
row_separator =
render_line(
meta.table_width,
meta.inner_intersections,
opts[:horizontal_symbol],
opts[:inner_intersection_symbol]
)
lines =
Enum.with_index(rows)
|> Enum.map(fn {row, row_index} ->
row
|> Enum.with_index()
|> Enum.map(fn {cell, col_index} ->
color = opts[:table_color_function].(row_index, col_index) || cell.color
{%{cell | color: color}, col_index}
end)
|> Enum.map(&render_cell(table, meta, row_index, &1, " "))
|> column_cells_to_rows()
|> Enum.map(&Enum.intersperse(&1, opts[:vertical_symbol]))
end)
|> (&(if opts[:row_seperator] do
Enum.intersperse(&1, [[row_separator]])
else
&1
end)).()
|> Enum.flat_map(& &1)
|> Enum.map(&Enum.join(&1))
|> Enum.map(fn line -> opts[:vertical_symbol] <> line <> opts[:vertical_symbol] end)
{table, meta, opts, rendered ++ lines}
end
# Transforms
# ```elixir
# [
# [" Keaton & Hive! ", " ", " "],
# [" The Plague ", " hello ", " world "],
# [" 2003 ", " ", " "]
# ]
# ```
# into
# ```elixir
# [
# [" Keaton & Hive! ", " The Plague ", " 2003 "],
# [" ", " hello ", " "],
# [" ", " world ", " "]
# ]
# ```
defp column_cells_to_rows(cell_lines) do
cell_lines
|> Enum.zip()
|> Enum.map(&Tuple.to_list/1)
end
defp render_cell(
%Table{} = table,
%Meta{} = meta,
row_index,
{%Cell{} = cell, col_index},
space_filler
)
when is_binary(space_filler) do
col_width = Meta.col_width(meta, col_index)
row_height = Meta.row_height(meta, row_index)
col_padding = Table.get_column_meta(table, col_index, :padding)
cell_align = Map.get(cell, :align) || Table.get_column_meta(table, col_index, :align)
cell_color = Map.get(cell, :color) || Table.get_column_meta(table, col_index, :color)
cell.rendered_value
|> String.split("\n")
|> add_height_padding(col_width, row_height, col_padding, space_filler)
|> Enum.map(&do_render_cell(&1, col_width, col_padding, space_filler, align: cell_align))
|> Enum.map(&format_with_color(&1, cell_color))
end
defp add_height_padding(lines, inner_width, row_height, col_padding, space_filler)
when is_list(lines) and is_integer(row_height) and is_integer(inner_width) do
empty_line = String.duplicate(space_filler, inner_width - col_padding)
empty_height_padding = List.duplicate(empty_line, max(0, row_height - length(lines)))
lines ++ empty_height_padding
end
defp do_render_cell(value, inner_width, _padding, space_filler, align: :center)
when is_binary(space_filler) do
value_len = String.length(strip_ansi_color_codes(value))
post_value = ((inner_width - value_len) / 2) |> round
pre_value = inner_width - (post_value + value_len)
String.duplicate(space_filler, pre_value) <>
value <> String.duplicate(space_filler, post_value)
end
defp do_render_cell(value, inner_width, padding, space_filler, align: align)
when is_binary(space_filler) do
value_len = String.length(strip_ansi_color_codes(value))
alt_side_padding = inner_width - value_len - padding
{pre_value, post_value} =
case align do
:left ->
{padding, alt_side_padding}
:right ->
{alt_side_padding, padding}
end
String.duplicate(space_filler, pre_value) <>
value <> String.duplicate(space_filler, post_value)
end
defp render_bottom_frame({%Table{} = table, %Meta{} = meta, opts, rendered}) do
line =
opts[:bottom_left_corner_symbol] <>
render_line(
meta.table_width,
meta.inner_intersections,
opts[:bottom_frame_symbol],
opts[:bottom_intersection_symbol]
) <>
opts[:bottom_right_corner_symbol]
{table, meta, opts, rendered ++ [line]}
end
defp intersections(%{} = col_widths) do
ordered_col_widths(col_widths)
|> Enum.reduce([0], fn x, [acc_h | _] = acc ->
[acc_h + x + 1 | acc]
end)
|> Enum.into(MapSet.new())
end
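# e.g. col_widths of %{0 => 5, 1 => 3} yield the offsets 0, 6 and 10:
# the two frame corners plus each inner column boundary.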
defp max_dimensions(%Table{} = table) do
{col_widths, row_heights} =
[table.header_row | table.rows]
|> Enum.with_index(-1)
|> Enum.reduce({%{}, %{}}, &reduce_row_maximums(table, &1, &2))
num_columns = Map.size(col_widths)
# Infer padding on left and right of title
title_padding =
[0, num_columns - 1]
|> Enum.map(&Table.get_column_meta(table, &1, :padding))
|> Enum.sum()
# Compare table body width with title width
col_separators_widths = num_columns - 1
body_width = (col_widths |> Map.values() |> Enum.sum()) + col_separators_widths
title_width = if(is_nil(table.title), do: 0, else: String.length(table.title)) + title_padding
# Add extra padding equally to all columns if required to match body and title width.
revised_col_widths =
if body_width >= title_width do
col_widths
else
extra_padding = ((title_width - body_width) / num_columns) |> Float.ceil() |> round
Enum.into(col_widths, %{}, fn {k, v} -> {k, v + extra_padding} end)
end
{revised_col_widths, row_heights}
end
defp reduce_row_maximums(%Table{} = table, {row, row_index}, {col_widths, row_heights}) do
row
|> Enum.with_index()
|> Enum.reduce({col_widths, row_heights}, &reduce_cell_maximums(table, &1, &2, row_index))
end
defp reduce_cell_maximums(
%Table{} = table,
{cell, col_index},
{col_widths, row_heights},
row_index
) do
padding = Table.get_column_meta(table, col_index, :padding)
is_header = row_index == -1
{width, height} = content_dimensions(cell.rendered_value, padding, is_header)
col_widths = Map.update(col_widths, col_index, width, &Enum.max([&1, width]))
row_heights = Map.update(row_heights, row_index, height, &Enum.max([&1, height]))
{col_widths, row_heights}
end
defp content_dimensions(value, padding, is_header)
when is_binary(value) and is_number(padding) do
lines =
if is_header do
[strip_ansi_color_codes(value)]
else
value
|> strip_ansi_color_codes()
|> String.split("\n")
end
height = Enum.count(lines)
width = Enum.map(lines, &String.length(&1)) |> Enum.max()
{width + padding * 2, height}
end
defp table_width(%{} = col_widths) do
separator_width = 1
width =
col_widths
|> Map.values()
|> Enum.intersperse(separator_width)
|> Enum.sum()
width + 2 * separator_width
defp ordered_col_widths(%{} = col_widths) do
col_widths
|> Enum.into([])
|> Enum.sort()
|> Enum.map(&elem(&1, 1))
end
defp render_to_string({_, _, _, rendered_lines}) when is_list(rendered_lines) do
rendered_lines
|> Enum.map(&String.trim_trailing/1)
|> Enum.join("\n")
|> Kernel.<>("\n")
end
defp format_with_color(text, nil), do: text
defp format_with_color(text, color) do
[[color | text] | IO.ANSI.reset()]
|> IO.ANSI.format_fragment(true)
end
defp strip_ansi_color_codes(text) do
Regex.replace(~r|\e\[\d+m|u, text, "")
end
end
# source file: lib/table_rex/renderer/text.ex
defmodule Relate do
@moduledoc """
Relate implements relational operators on Elixir enumerables.
Join functions take two enumerables of objects, plus one or two
arguments that specify how the value used as the basis for joining
is determined.
If either `fki1` or `fki2` is a function, it will be called with
each object of `e1` or `e2`, respectively, as its argument. If the
value is an atom, the object will be treated as a map and the join
value will be the value stored under that key. If the value is a
non-negative integer, the object will be treated as a tuple and the
value will be used as an index into it.
If `fki2` is not specified or is `nil` or `false`, the same accessor
will be used on both `e1` and `e2`.
## Example
iex> iso_countries = [
...> {"us", "United States"},
...> {"uk", "United Kingdom"},
...> {"ca", "Canada"},
...> {"de", "Germany"},
...> {"nl", "Netherlands"},
...> {"sg", "Singapore"},
...> {"ru", "Russian Federation"},
...> {"fr", "France"},
...> {"ja", "Japan"},
...> {"it", "Italy"},
...> {"hk", "Hong Kong"},
...> {"au", "Australia"},
...> {"ch", "Switzerland"},
...> {"be", "Belgium"},
...> {"rk", "Korea, Republic of"},
...> {"es", "Spain"},
...> {"il", "Israel"}
...> ]
...>
...> country_clicks = [
...> {"United States", "13"},
...> {"United Kingdom", "11"},
...> {"Canada", "4"},
...> {"Germany", "4"},
...> {"Netherlands", "3"},
...> {"Singapore", "3"},
...> {"Russian Federation", "2"},
...> {"France", "2"},
...> {"Japan", "2"},
...> {"Italy", "2"},
...> {"Hong Kong", "2"},
...> {"Australia", "2"},
...> {"Switzerland", "1"},
...> {"Belgium", "1"},
...> {"Korea, Republic of", "1"},
...> {"Spain", "1"},
...> {"Israel", "1"}
...> ]
...>
...> Relate.left_join(country_clicks, iso_countries, 0, 1)
...> |> Relate.select([right: 0, left: 1])
...> |> Enum.sort_by(&elem(&1, 0))
[
{"au", "2"},
{"be", "1"},
{"ca", "4"},
{"ch", "1"},
{"de", "4"},
{"es", "1"},
{"fr", "2"},
{"hk", "2"},
{"il", "1"},
{"it", "2"},
{"ja", "2"},
{"nl", "3"},
{"rk", "1"},
{"ru", "2"},
{"sg", "3"},
{"uk", "11"},
{"us", "13"}
]
"""
@doc ~S"""
Return an enumerable of tuples containing all elements of `e1` and
`e2` which yield the same value when `fki1` and `fki2` are applied,
respectively. If `fki2` is `nil` or `false`, `fki1` will be used to
make comparisons on both enumerables.
## Examples
iex> Relate.inner_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k, :k)
[{%{k: 1, v: "one"}, %{k: 1, v: "i"}}]
iex> Relate.inner_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k) # NOTE: only one key function
[{%{k: 1, v: "one"}, %{k: 1, v: "i"}}]
"""
def inner_join(ds1, ds2, fki1, fki2 \\ nil) do
{i1, i2} = indices(ds1, ds2, fki1, fki2)
Enum.flat_map(intersection(Map.keys(i1), Map.keys(i2)), &rows(&1, i1, i2))
end
@doc """
Return an enumerable of tuples containing all elements of `e1` and
`e2` which yield the same value when `fki1` and `fki2` are applied,
respectively. If `fki2` is `nil` or `false`, `fki1` will be used to
make comparisons on both enumerables. Additionally, a tuple of the
form `{nil, i}` or `{i, nil}` will be returned for every element of
`e2` that did not match on any element in `e1` and vice-versa.
## Examples
iex> Relate.outer_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k, :k)
[{%{k: 0, v: "zero"}, nil}, {%{k: 1, v: "one"}, %{k: 1, v: "i"}}, {nil, %{k: 2, v: "ii"}}]
iex> Relate.outer_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k) # NOTE: only one key function
...> |> Enum.sort()
[{nil, %{k: 2, v: "ii"}}, {%{k: 0, v: "zero"}, nil}, {%{k: 1, v: "one"}, %{k: 1, v: "i"}}]
"""
def outer_join(ds1, ds2, fki1, fki2 \\ nil) do
{i1, i2} = indices(ds1, ds2, fki1, fki2)
Enum.flat_map(union(Map.keys(i1), Map.keys(i2)), &rows(&1, i1, i2))
end
@doc """
Return an enumerable of tuples containing all elements of `e1` and
`e2` which yield the same value when `fki1` and `fki2` are applied,
respectively. If `fki2` is `nil` or `false`, `fki1` will be used to
make comparisons on both enumerables. Additionally, a tuple of the
form `{i, nil}` will be returned for every element of `e1` that did
not match on any element in `e2`.
## Examples
iex> Relate.left_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k, :k)
[{%{k: 0, v: "zero"}, nil}, {%{k: 1, v: "one"}, %{k: 1, v: "i"}}]
iex> Relate.left_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k) # NOTE: only one key function
[{%{k: 0, v: "zero"}, nil}, {%{k: 1, v: "one"}, %{k: 1, v: "i"}}]
"""
def left_join(ds1, ds2, fki1, fki2 \\ nil) do
{i1, i2} = indices(ds1, ds2, fki1, fki2)
Enum.flat_map(Map.keys(i1), &rows(&1, i1, i2))
end
@doc """
Return an enumerable of tuples containing all elements of `e1` and
`e2` which yield the same value when `fki1` and `fki2` are applied,
respectively. If `fki2` is `nil` or `false`, `fki1` will be used to
make comparisons on both enumerables. Additionally, a tuple of the
form `{nil, i}` will be returned for every element of `e2` that did
not match on any element in `e1`.
## Examples
iex> Relate.right_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k, :k)
[{%{k: 1, v: "one"}, %{k: 1, v: "i"}}, {nil, %{k: 2, v: "ii"}}]
iex> Relate.right_join([%{k: 0, v: "zero"}, %{k: 1, v: "one"}],
...> [%{k: 1, v: "i"}, %{k: 2, v: "ii"}],
...> :k) # NOTE: only one key function
[{%{k: 1, v: "one"}, %{k: 1, v: "i"}}, {nil, %{k: 2, v: "ii"}}]
"""
def right_join(ds1, ds2, fki1, fki2 \\ nil) do
{i1, i2} = indices(ds1, ds2, fki1, fki2)
Enum.flat_map(Map.keys(i2), &rows(&1, i1, i2))
end
defp rows(k, i1, i2) do
for t1 <- Map.get(i1, k, MapSet.new([nil])),
t2 <- Map.get(i2, k, MapSet.new([nil])),
do: {t1, t2}
end
defp indices(ds1, ds2, fki1, fki2) do
{make_index(ds1, to_f(fki1)), make_index(ds2, to_f(fki2 || fki1))}
end
defp make_index(ds, f) do
Enum.map(ds, &{f.(&1), &1})
|> Enum.reduce(
%{},
fn {k, v}, acc -> update_in(acc, [k], &set_put(&1, v)) end
)
end
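# e.g. make_index([%{k: 1, v: "a"}, %{k: 1, v: "b"}], to_f(:k))
# #=> %{1 => MapSet containing both maps}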
defp set_put(s, el), do: MapSet.put(to_set(s), el)
defp to_set(nil), do: MapSet.new()
defp to_set(x), do: MapSet.new(x)
defp union(s1, s2), do: MapSet.union(MapSet.new(s1), MapSet.new(s2))
defp intersection(s1, s2) do
MapSet.intersection(MapSet.new(s1), MapSet.new(s2))
end
@doc ~S"""
For each two element tuple in enumerable `join`, select each row
specified by the `cols` keyword list. Each element `cols` should be
a tuple with an initial element of `:left` or `:right` and a second
element that acts as an accessor as in `inner_join/4` et al.
## Example
iex> Relate.select([{{0, 1, 2}, {:a, :b, :c}},
...> {{3, 4, 5}, {:d, :e, :f}}],
...> [left: 0, right: 1, left: 2])
[{0, :b, 2}, {3, :e, 5}]
"""
def select(join, cols) do
Enum.map(join, &(select1(&1, cols) |> List.to_tuple()))
end
defp select1(_t, []), do: []
defp select1(t = {nil, _right}, [{:left, _} | rest]) do
[nil | select1(t, rest)]
end
defp select1(t = {_left, nil}, [{:right, _} | rest]) do
[nil | select1(t, rest)]
end
defp select1(t = {left, _right}, [{:left, fki} | rest]) do
[to_f(fki).(left) | select1(t, rest)]
end
defp select1(t = {_left, right}, [{:right, fki} | rest]) do
[to_f(fki).(right) | select1(t, rest)]
end
defp to_f(f) when is_function(f), do: f
defp to_f(k) when is_atom(k), do: & &1[k]
defp to_f(i) when is_integer(i) and i >= 0, do: &elem(&1, i)
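# e.g. to_f(:k).(%{k: 1}) #=> 1, and to_f(1).({:a, :b}) #=> :b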
end
# source file: lib/relate.ex
defmodule Kday do
@moduledoc """
Functions to return the date of the first, last or nth day of the week
on, nearest, before or after a given date.
"""
@days_in_a_week 7
@doc """
Return the date of the `day_of_week` on or before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Kday.kday_on_or_before(~D[2016-02-29], 2)
~D[2016-02-23]
iex> Kday.kday_on_or_before(~D[2017-11-30], 1)
~D[2017-11-27]
iex> Kday.kday_on_or_before(~D[2017-06-30], 6)
~D[2017-06-24]
"""
@spec kday_on_or_before(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_on_or_before(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> kday_on_or_before(k)
|> Date.from_gregorian_days(calendar)
end
def kday_on_or_before(iso_days, k) when is_integer(iso_days) do
iso_days - iso_days_to_day_of_week(iso_days - k)
end
@doc """
Return the date of the `day_of_week` on or after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Kday.kday_on_or_after(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Kday.kday_on_or_after(~D[2017-11-30], 1)
~D[2017-12-04]
iex> Kday.kday_on_or_after(~D[2017-06-30], 6)
~D[2017-07-01]
"""
@spec kday_on_or_after(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_on_or_after(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> kday_on_or_after(k)
|> Date.from_gregorian_days(calendar)
end
def kday_on_or_after(iso_days, k) when is_integer(iso_days) do
kday_on_or_before(iso_days + @days_in_a_week, k)
end
@doc """
Return the date of the `day_of_week` nearest the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Kday.kday_nearest(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Kday.kday_nearest(~D[2017-11-30], 1)
~D[2017-11-27]
iex> Kday.kday_nearest(~D[2017-06-30], 6)
~D[2017-07-01]
"""
@spec kday_nearest(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_nearest(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> kday_nearest(k)
|> Date.from_gregorian_days(calendar)
end
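# The nearest k-day always falls within the 7-day window centred on
# `iso_days`, so it is the k-day on or before `iso_days + 3`.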
def kday_nearest(iso_days, k) when is_integer(iso_days) do
kday_on_or_before(iso_days + 3, k)
end
@doc """
Return the date of the `day_of_week` before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or a Rata Die
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Kday.kday_before(~D[2016-02-29], 2)
~D[2016-02-23]
iex> Kday.kday_before(~D[2017-11-30], 1)
~D[2017-11-27]
# 6 means Saturday (`k` is an integer day of the week, `1` = Monday)
iex> Kday.kday_before(~D[2017-06-30], 6)
~D[2017-06-24]
"""
@spec kday_before(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_before(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> kday_before(k)
|> Date.from_gregorian_days(calendar)
end
def kday_before(iso_days, k) do
kday_on_or_before(iso_days - 1, k)
end
@doc """
Return the date of the `day_of_week` after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
iex> Kday.kday_after(~D[2016-02-29], 2)
~D[2016-03-01]
iex> Kday.kday_after(~D[2017-11-30], 1)
~D[2017-12-04]
iex> Kday.kday_after(~D[2017-06-30], 6)
~D[2017-07-01]
iex> Kday.kday_after(~D[2021-03-28], 7)
~D[2021-04-04]
"""
@spec kday_after(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def kday_after(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days()
|> kday_after(k)
|> Date.from_gregorian_days(calendar)
end
def kday_after(iso_days, k) do
kday_on_or_after(iso_days + 1, k)
end
@doc """
Return the date of the `nth` `day_of_week` on or before/after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `n` is the cardinal number of `k` before (negative `n`) or after
(positive `n`) the specified date
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
# Thanksgiving in the US
iex> Kday.nth_kday(~D[2017-11-01], 4, 4)
~D[2017-11-23]
# Labor day in the US
iex> Kday.nth_kday(~D[2017-09-01], 1, 1)
~D[2017-09-04]
# Daylight savings time starts in the US
iex> Kday.nth_kday(~D[2017-03-01], 2, 7)
~D[2017-03-12]
"""
@spec nth_kday(Calendar.day() | Date.t(), integer(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def nth_kday(%{year: _, month: _, day: _, calendar: calendar} = date, n, k)
when k in 1..@days_in_a_week and is_integer(n) do
date
|> Date.to_gregorian_days
|> nth_kday(n, k)
|> Date.from_gregorian_days(calendar)
end
  def nth_kday(iso_days, n, k) when is_integer(iso_days) and n > 0 do
    # `n` weeks forward from the last `k`-day strictly before `iso_days`,
    # so `n == 1` is the first `k`-day on or after `iso_days`.
    weeks_to_days(n) + kday_before(iso_days, k)
  end
  def nth_kday(iso_days, n, k) when is_integer(iso_days) do
    # `n` (negative) weeks backward from the first `k`-day strictly after
    # `iso_days`, so `n == -1` is the last `k`-day on or before `iso_days`.
    weeks_to_days(n) + kday_after(iso_days, k)
  end
@doc """
Return the date of the first `day_of_week` on or after the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Examples
# US election day
iex> Kday.first_kday(~D[2017-11-02], 2)
~D[2017-11-07]
# US Daylight savings end
iex> Kday.first_kday(~D[2017-11-01], 7)
~D[2017-11-05]
"""
@spec first_kday(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def first_kday(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> first_kday(k)
|> Date.from_gregorian_days(calendar)
end
def first_kday(iso_days, k) do
nth_kday(iso_days, 1, k)
end
@doc """
Return the date of the last `day_of_week` on or before the
specified `date`.
## Arguments
* `date` is `%Date{}`, a `%DateTime{}`, `%NaiveDateTime{}` or
ISO days since epoch.
* `k` is an integer day of the week.
## Returns
* A `%Date{}` in the calendar of the date provided as an argument
## Example
# Memorial Day in the US
iex> Kday.last_kday(~D[2017-05-31], 1)
~D[2017-05-29]
"""
@spec last_kday(Calendar.day() | Date.t(), Calendar.day_of_week()) ::
Calendar.day() | Date.t()
def last_kday(%{year: _, month: _, day: _, calendar: calendar} = date, k)
when k in 1..@days_in_a_week do
date
|> Date.to_gregorian_days
|> last_kday(k)
|> Date.from_gregorian_days(calendar)
end
def last_kday(iso_days, k) do
nth_kday(iso_days, -1, k)
end
@doc """
Returns the day of the week for a given
`iso_day_number`
## Arguments
* `iso_day_number` is the number of days since the start
of the epoch.
## Returns
* An integer representing a day of the week where Monday
is represented by `1` and Sunday is represented by `7`
## Examples
iex> days = Date.to_gregorian_days ~D[2019-01-01]
iex> Kday.iso_days_to_day_of_week(days) == 2
true
"""
@spec iso_days_to_day_of_week(integer()) :: Calendar.day_of_week()
def iso_days_to_day_of_week(iso_day_number) when is_integer(iso_day_number) do
Integer.mod(iso_day_number + 5, @days_in_a_week) + 1
end
@doc """
Returns the number of days in `n` weeks
## Example
iex> Kday.weeks_to_days(2)
14
"""
@spec weeks_to_days(integer) :: integer
def weeks_to_days(n) do
n * @days_in_a_week
end
end
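# Usage sketch (hypothetical, not part of the library): US Thanksgiving is
# the fourth Thursday (day 4) of November, so it can be computed with
# `nth_kday/3` as in the doctest above.
defmodule KdayExample do
  def thanksgiving(year) do
    {:ok, november_first} = Date.new(year, 11, 1)
    Kday.nth_kday(november_first, 4, 4)
  end
end
# KdayExample.thanksgiving(2017)
# #=> ~D[2017-11-23]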
|
lib/kday.ex
| 0.953676
| 0.714155
|
kday.ex
|
starcoder
|
defmodule Harnais.Map do
@moduledoc ~S"""
Functions for Testing Maps
## Documentation Terms
In the documentation these terms, usually shown in *italics*, are used with the meanings given below.
### *opts*
An *opts* is a `Keyword` list.
## Bang and Query Functions
All functions have bang and query peers.
These functions are excluded from the function list to reduce clutter. (They are `@doc false`).
## Errors
Errors are managed by `Harnais.Error`.
The doctests include examples where the exception message
(`Exception.message/1`) is shown. Other doctests export the
exception (`Harnais.Error.export_exception/1`) and show the
exception details.
"""
use Plymio.Codi
use Harnais.Attribute
use Harnais.Attribute.Data
use Harnais.Error.Attribute
@codi_opts [
{@plymio_codi_key_vekil, Plymio.Vekil.Codi.__vekil__},
]
import Harnais.Error, only: [
new_error_result: 1,
new_errors_result: 1,
]
import Plymio.Fontais.Option, only: [
opts_create_aliases_dict: 1,
opts_canonical_keys: 2,
opts_get: 3,
]
import Plymio.Funcio.Enum.Map.Gather, only: [
map_gather0_enum: 2,
]
@type opts :: Harnais.opts
@type error :: Harnais.error
@harnais_map_error_message_map_invalid "map invalid"
@harnais_map_error_message_map_compare_failed "map compare failed"
@harnais_map_compare_worker_kvs_aliases [
{@harnais_key_filter_keys, nil},
{@harnais_key_compare_values, nil},
]
@harnais_map_compare_worker_dict_aliases @harnais_map_compare_worker_kvs_aliases
|> opts_create_aliases_dict
@doc false
def opts_canonical_compare_worker_opts(opts, dict \\ @harnais_map_compare_worker_dict_aliases) do
opts |> opts_canonical_keys(dict)
end
defp map_compare_worker(map1, map2, opts) when is_map(map1) and is_map(map2) do
with {:ok, opts} <- opts |> opts_canonical_compare_worker_opts,
{:ok, fun_filter_keys} <- opts |> opts_get(@harnais_key_filter_keys, fn _ -> true end),
{:ok, fun_filter_keys} <- fun_filter_keys |> Plymio.Funcio.Predicate.Utility.validate_predicate1_fun,
{:ok, fun_compare_values} <- opts |> opts_get(@harnais_key_compare_values, fn _loc, v1, v2 -> v1 == v2 end),
true <- true do
Map.keys(map1) ++ Map.keys(map2)
|> Enum.uniq
|> Enum.filter(fun_filter_keys)
# this will drop nil or unset results
|> map_gather0_enum(
fn k ->
map1
|> Map.fetch(k)
|> case do
{:ok, map1_v} ->
map2
|> Map.fetch(k)
|> case do
{:ok, map2_v} ->
fun_compare_values.(k, map1_v, map2_v)
|> case do
true -> {:ok, k}
x when x in [nil, false] ->
new_error_result(
t: :value,
m: @harnais_map_error_message_map_compare_failed,
r: @harnais_error_reason_mismatch,
i: k,
v1: map1_v,
v2: map2_v)
{:ok, _} -> @plymio_fontais_the_unset_value
{:error, %{__exception__: true}} = result -> result
end
# key not in map2
_ ->
new_error_result(
t: @harnais_error_value_field_type_key,
m: @harnais_map_error_message_map_compare_failed,
r: @harnais_error_reason_missing,
i: k,
v1: map1_v,
v2: @harnais_error_status_value_no_value)
end
# key not in map1
_ ->
new_error_result(
t: @harnais_error_value_field_type_key,
m: @harnais_map_error_message_map_compare_failed,
r: @harnais_error_reason_missing,
i: k,
v1: @harnais_error_status_value_no_value,
v2: map2 |> Map.get(k))
end
end)
|> case do
{:error, %{__struct__: _}} = result -> result
{:ok, gather_opts} ->
gather_opts
|> Plymio.Fontais.Funcio.gather_opts_has_error?
|> case do
true ->
with {:ok, errors} <- gather_opts |> Plymio.Fontais.Funcio.gather_opts_error_values_get,
{:ok, error} <- [add_errors: errors] |> Harnais.Error.Status.new do
{:error, error}
else
{:error, %{__exception__: true}} = result -> result
end
# no errors
_ ->
{:ok, map1}
end
end
else
{:error, %{__exception__: true}} = result -> result
end
end
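  # Note on the worker above: one result is gathered per unique key across
  # both maps, and any gathered errors are folded into a single
  # `Harnais.Error.Status` so that every mismatch is reported together.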
@doc ~S"""
`harnais_map_compare/3` takes two arguments, expected to be maps, together with (optional) *opts* and compares them key by key.
If the maps are equal, it returns `{:ok, map1}`.
If the maps are not equal in any way, it returns `{:error, error}`.
If either argument is a `Keyword`, it is converted into a `Map` first.
The default is to compare all keys but this can be overridden
using the `:filter_keys` option with a function of arity 1 which is
passed the `key` and should return `true` or `false`.
The default is to use `Kernel.==/2` to compare the values of each
key. A `falsy` result will cause a new error to be added.
The compare function can be overridden using the `:compare_values` option
together with a function of arity 3 which is passed the `key`, `value1` and
`value2` and should return `true`, `false`, `nil`, `{:ok, value}`
or `{:error, error}`.
## Examples
iex> harnais_map_compare(%{}, %{})
{:ok, %{}}
iex> harnais_map_compare(%{a: 1}, %{a: 1})
{:ok, %{a: 1}}
iex> {:error, error} = harnais_map_compare(%{a: 1}, :not_a_map)
...> error |> Exception.message
"map compare failed, got: :not_a_map"
iex> {:error, error} = harnais_map_compare(%{a: 1}, :not_a_map)
...> error|> Harnais.Error.export_exception
{:ok, [error: [[m: "map compare failed",
r: :not_map,
t: :arg,
l: 1,
v: :not_a_map]]]}
A `Keyword` can be provided as either (or both) of the maps, but the `value` in `{:ok, value}` will be the first argument:
iex> harnais_map_compare(%{a: 1}, [a: 1])
{:ok, %{a: 1}}
iex> harnais_map_compare([a: 1], %{a: 1})
{:ok, [a: 1]}
If the first argument is a *struct* and the second a `Keyword`, the
*struct*'s name is added to the "mapified" second argument.
iex> harnais_map_compare(%{__struct__: __MODULE__, a: 1}, [a: 1])
{:ok, %{__struct__: __MODULE__, a: 1}}
Here the key (`:a`) is missing in the second map:
iex> {:error, error} = harnais_map_compare(%{a: 1}, %{})
...> error |> Exception.message
"map compare failed, reason=:missing, type=:key, location=:a, value1=1, value2=:no_value"
iex> {:error, error} = harnais_map_compare(%{a: 1}, [])
...> error |> Harnais.Error.export_exception
{:ok, [error: [[m: "map compare failed",
r: :missing,
t: :key,
l: :a,
v1: 1,
v2: :no_value]]]}
Here the values of the `:a` key are different:
iex> {:error, error} = harnais_map_compare(%{a: 1}, %{a: 2})
...> error |> Exception.message
"map compare failed, reason=:mismatch, type=:value, location=:a, value1=1, value2=2"
Here the keys do not match and this generates multiple errors:
iex> {:error, error} = harnais_map_compare(%{a: 1}, %{b: 2})
...> error |> Exception.message
"map compare failed, reason=:missing, type=:key, location=:a, value1=1, value2=:no_value; map compare failed, reason=:missing, type=:key, location=:b, value1=:no_value, value2=2"
When there are multiple errors, it can be easier to understand the differences by gathering the exception export:
iex> {:error, error} = harnais_map_compare(%{a: 1}, %{b: 2})
...> {:ok, export} = error |> Harnais.Error.export_exception
...> export |> Harnais.Error.gather_export
{:ok, [error: [[m: "map compare failed",
r: :missing,
t: :key,
l: :a,
v1: 1,
v2: :no_value],
[m: "map compare failed",
r: :missing,
t: :key,
l: :b,
v1: :no_value,
v2: 2]]]}
In this example a `:compare_values` function is provided that always returns `true`:
iex> harnais_map_compare(%{a: 1}, %{a: 2}, compare_values: fn _,_,_ -> true end)
{:ok, %{a: 1}}
This `:compare_values` function always returns `false` failing two maps that are in fact equal:
iex> {:error, error} = harnais_map_compare(%{a: 1}, %{a: 1}, compare_values: fn _,_,_ -> false end)
...> error |> Exception.message
"map compare failed, reason=:mismatch, type=:value, location=:a, value1=1, value2=1"
The `:filter_keys` function can be used to select the keys for comparison:
iex> harnais_map_compare(%{a: 1, b: 21, c: 3}, %{a: 1, b: 22, c: 3},
...> filter_keys: fn
...> # don't compare b's values
...> :b -> false
...> _ -> true
...> end)
{:ok, %{a: 1, b: 21, c: 3}}
iex> harnais_map_compare(%{a: 1, b: 21, c: 3}, %{a: 1, c: 3},
...> filter_keys: fn
...> # don't compare b's values (or spot missing key)
...> :b -> false
...> _ -> true
...> end)
{:ok, %{a: 1, b: 21, c: 3}}
Query examples:
iex> harnais_map_compare?(%{a: 1}, %{a: 1})
true
iex> harnais_map_compare?(%{__struct__: __MODULE__, a: 1}, [a: 1])
true
iex> harnais_map_compare?(%{a: 1}, %{b: 2})
false
iex> harnais_map_compare?(%{a: 1, b: 21, c: 3}, %{a: 1, c: 3},
...> filter_keys: fn
...> # don't compare b's values (or spot missing key)
...> :b -> false
...> _ -> true
...> end)
true
Bang examples:
iex> harnais_map_compare!(%{a: 1}, %{a: 1})
%{a: 1}
iex> harnais_map_compare!(%{a: 1}, %{b: 2})
** (Harnais.Error.Status) map compare failed, reason=:missing, type=:key, location=:a, value1=1, value2=:no_value; map compare failed, reason=:missing, type=:key, location=:b, value1=:no_value, value2=2
"""
@since "0.1.0"
@spec harnais_map_compare(any, any, opts) :: {:ok, map} | {:error, error}
def harnais_map_compare(map1, map2, opts \\ [])
def harnais_map_compare(%{__struct__: map1_struct_name} = map1, arg2, opts) when is_list(arg2) do
arg2
|> map_normalise
|> case do
{:ok, map2} ->
harnais_map_compare(map1, map2 |> Map.put(:__struct__, map1_struct_name), opts)
_ ->
new_error_result(
message_config: [:message, :value],
t: @harnais_error_value_field_type_arg,
m: @harnais_map_error_message_map_compare_failed,
r: @harnais_error_reason_not_map,
i: 1,
v: arg2)
end
end
def harnais_map_compare(map1, map2, opts) do
# build errors incrementally
[map1, map2]
|> Stream.with_index
|> map_gather0_enum(fn {value, ndx} ->
value
|> map_normalise
|> case do
{:ok, _} = result -> result
_ ->
new_error_result(
message_config: [:message, :value],
t: @harnais_error_value_field_type_arg,
m: @harnais_map_error_message_map_compare_failed,
r: @harnais_error_reason_not_map,
i: ndx,
v: value)
end
end)
|> case do
{:error, %{__struct__: _}} = result -> result
{:ok, gather_opts} ->
gather_opts
|> Plymio.Fontais.Funcio.gather_opts_has_error?
|> case do
true ->
with {:ok, errors} <- gather_opts |> Plymio.Fontais.Funcio.gather_opts_error_values_get,
{:ok, error} <- errors |> new_errors_result do
{:error, error}
else
{:error, %{__exception__: true}} = result -> result
end
# no errors yet
_ ->
with {:ok, [map1_norm, map2_norm]} <- gather_opts
|> Plymio.Fontais.Funcio.gather_opts_ok_values_get do
map_compare_worker(map1_norm, map2_norm, opts)
|> case do
{:error, %{__struct__: _}} = result -> result
{:ok, _} -> {:ok, map1}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
end
end
@doc ~S"""
`harnais_map/1` tests whether the argument is a `Map` and,
if true, returns `{:ok, argument}` else `{:error, error}`.
## Examples
iex> harnais_map(%{a: 1})
{:ok, %{a: 1}}
iex> {:error, error} = harnais_map(42)
...> error |> Harnais.Error.export_exception
{:ok, [error: [[m: "map invalid", r: :not_map, t: :arg, v: 42]]]}
iex> {:error, error} = harnais_map(:atom)
...> error |> Exception.message
"map invalid, got: :atom"
Query examples:
iex> harnais_map?(%{a: 1})
true
iex> harnais_map?(42)
false
iex> harnais_map?([1, 2, 3])
false
Bang examples:
iex> harnais_map!(%{a: 1})
%{a: 1}
iex> harnais_map!(42)
** (Harnais.Error) map invalid, got: 42
iex> harnais_map!([1, 2, 3])
** (Harnais.Error) map invalid, got: [1, 2, 3]
"""
@since "0.1.0"
@spec harnais_map(any) :: {:ok, map} | {:error, error}
def harnais_map(value) when is_map(value) do
{:ok, value}
end
def harnais_map(value) do
new_error_result(
message_config: [:message, :value],
t: @harnais_error_value_field_type_arg,
m: @harnais_map_error_message_map_invalid,
r: @harnais_error_reason_not_map,
v: value)
end
defp map_normalise(map)
defp map_normalise(map) when is_map(map) do
{:ok, map}
end
defp map_normalise(value) when is_list(value) do
value
|> Keyword.keyword?
|> case do
true ->
{:ok, value |> Enum.into(%{})}
_ ->
value |> harnais_map
end
end
defp map_normalise(value) do
value |> harnais_map
end
@quote_result_map_no_return quote(do: map | no_return)
[
delegate: [doc: false, name: :harnais_map?, as: :is_map, to: Kernel, args: :map, since: "0.1.0", result: :boolean],
bang: [doc: false, as: :harnais_map, args: :map, since: "0.1.0", result: @quote_result_map_no_return],
bang: [doc: false, as: :harnais_map_compare, args: [:map1, :map2], since: "0.1.0", result: @quote_result_map_no_return],
query: [doc: false, as: :harnais_map_compare, args: [:map1, :map2], since: "0.1.0", result: true],
bang: [doc: false, as: :harnais_map_compare, args: [:map1, :map2, :opts], since: "0.1.0", result: @quote_result_map_no_return],
query: [doc: false, as: :harnais_map_compare, args: [:map1, :map2, :opts], since: "0.1.0", result: true],
]
|> Enum.flat_map(fn {pattern,opts} ->
[pattern: [pattern: pattern] ++ opts]
end)
|> CODI.reify_codi(@codi_opts)
end
|
lib/map.ex
| 0.878105
| 0.547706
|
map.ex
|
starcoder
|
defmodule Search.Scope do
@moduledoc """
Defines a set of behaviours the `Search.Request` depends on to build a proper search request.
"""
@typedoc """
Defined set of key / values that define an Elasticsearch Filter.
"""
@type filter_body :: %{
optional(:must) => list(),
optional(:must_not) => list(),
optional(:should) => list()
}
@doc """
List of filter names that are allowed to be applied from a request.
Names should be underscored to match the formatted request.
EX: `job-assignments.state` matches `job_assignments.state` after being formatted into params.
"""
@callback allowed_search_filters() :: list(String.t()) | list()
@doc """
Map of the relationship between requestable 'include' paths and the data returned.
The keys represent the relationship paths requested by the FE.
The values represent the corresponding types of records that could be returned.
"""
@callback include_types_mapping() :: %{required(String.t()) => list(String.t())}
@doc """
Mapping of the allowed fields that can be queried in a search request.
The key represents the abbreviated request field.
The value represents the full path to the attribute in the document.
EX: `created-at => data.attributes.created-at`
"""
@callback allowed_search_fields() :: %{required(String.t()) => String.t() | list(String.t())}
@doc """
Mapping of the allowed fields that can be sorted in a search request.
The key represents the abbreviated request field.
The value represents the full path to the attribute in the document.
EX: `external-reference => data.attributes.external-reference.raw`
"""
@callback allowed_sort_fields() :: %{required(String.t()) => String.t() | list(String.t())}
@doc """
"""
@callback filter(name :: String.t(), value :: String.t(), conn :: Plug.Conn.t()) :: filter_body()
@doc """
The document types the search applies to. There should only be one type in an index.
"""
@callback types() :: list(String.t())
end
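# Hypothetical implementation sketch (the module, index, and filter names are
# illustrative, not part of the library), reusing the field examples from the
# callback docs above:
defmodule Search.Scope.Jobs do
  @behaviour Search.Scope

  @impl true
  def allowed_search_filters, do: ["job_assignments.state"]

  @impl true
  def include_types_mapping, do: %{"job-assignments" => ["job-assignments"]}

  @impl true
  def allowed_search_fields, do: %{"created-at" => "data.attributes.created-at"}

  @impl true
  def allowed_sort_fields, do: %{"external-reference" => "data.attributes.external-reference.raw"}

  @impl true
  def filter("job_assignments.state", value, _conn) do
    %{must: [%{term: %{"job_assignments.state" => value}}]}
  end

  @impl true
  def types, do: ["jobs"]
end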
|
lib/elastic_jsonapi/scope.ex
| 0.857828
| 0.488405
|
scope.ex
|
starcoder
|
defmodule Chunky.Geometry.Triangle.Predicates do
@moduledoc """
Predicate functions take a single triangle as an argument, and return a boolean, an assessment of some
characteristic of the triangle. Predicate functions will, in almost every possible case, return a boolean,
even in situations that would normally throw an error (like invalid triangles).
## Analyzing Triangles
Most often using individual predicates is the right approach. Sometimes, though, it can be useful to inspect
a wider set of properties about a triangle. The `analyze/2` function applies all of the predicate functions
for triangles against the provided triangle.
- `analyze/2` - Assess properties of a triangle
## Shape
Properties of the edges of a triangle:
- `is_equilateral?/1` - Is a triangle an equilateral triangle?
- `is_isoceles?/1` - Is a triangle an isoceles triangle?
- `is_pythagorean_triangle?/1` - Is the given triangle a right triangle with integer sides?
- `is_scalene?/1` - Is a triangle a scalene triangle?
Properties of the angles of a triangle:
- `is_acute?/1` - Are all of the interior angles equal to or less than 90 degrees?
- `is_obtuse?/1` - Are any of the interior angles greater than 90 degrees?
## Heronian Properties
Basic heronian triangles:
- `is_almost_equilateral_heronian_triangle?/1` - Heronian triangle with sides `n - 1`, `n`, `n + 1`
- `is_heronian_triangle?/1` - Is a triangle a _heronian_ triangle, with integer sides and integer area?
- `is_super_heronian_triangle?/1` - Does a triangle have integer sides, integer area, and a perimeter equal to area?
An extension of _super_ heronian triangles, heronian triangles with areas that are a specific multiple of the
perimeter:
- `is_2_heronian_triangle?/1` - Is a triangle heronian, with area 2 times perimeter?
- `is_3_heronian_triangle?/1` - Is a triangle heronian, with area 3 times perimeter?
- `is_4_heronian_triangle?/1` - Is a triangle heronian, with area 4 times perimeter?
- `is_5_heronian_triangle?/1` - Is a triangle heronian, with area 5 times perimeter?
- `is_6_heronian_triangle?/1` - Is a triangle heronian, with area 6 times perimeter?
- `is_7_heronian_triangle?/1` - Is a triangle heronian, with area 7 times perimeter?
- `is_8_heronian_triangle?/1` - Is a triangle heronian, with area 8 times perimeter?
- `is_9_heronian_triangle?/1` - Is a triangle heronian, with area 9 times perimeter?
- `is_10_heronian_triangle?/1` - Is a triangle heronian, with area 10 times perimeter?
- `is_20_heronian_triangle?/1` - Is a triangle heronian, with area 20 times perimeter?
- `is_30_heronian_triangle?/1` - Is a triangle heronian, with area 30 times perimeter?
- `is_40_heronian_triangle?/1` - Is a triangle heronian, with area 40 times perimeter?
- `is_50_heronian_triangle?/1` - Is a triangle heronian, with area 50 times perimeter?
- `is_60_heronian_triangle?/1` - Is a triangle heronian, with area 60 times perimeter?
- `is_70_heronian_triangle?/1` - Is a triangle heronian, with area 70 times perimeter?
- `is_80_heronian_triangle?/1` - Is a triangle heronian, with area 80 times perimeter?
- `is_90_heronian_triangle?/1` - Is a triangle heronian, with area 90 times perimeter?
- `is_100_heronian_triangle?/1` - Is a triangle heronian, with area 100 times perimeter?
## Composability
- `is_decomposable?/1` - Can a triangle be decomposed into two, smaller, right triangles?
- `is_indecomposable?/1` - Can a triangle _not_ be decomposed into two, smaller, right triangles?
"""
import Chunky.Geometry
import Chunky.Geometry.Triangle,
only: [
angles: 1,
area: 1,
decompose: 1,
is_multiple_heronian_triangle?: 2,
normalize: 1
]
@doc """
Apply all of the Triangle predicate functions to the triangle `t`, and collect the
passing, or true, predicates.
This function uses the names of all of the predicate functions as sources for labels, and collects
the resulting labels from a triangle being analyzed.
Some predicate functions can take a long time to run depending on the shape or properties of the triangle `t`, so the `analyze/2` function
uses a timeout for each predicate. See the `predicate_wait_time` option for more details.
## Options
- `predicate_wait_time` - Integer, default `100`. Maximum number of milliseconds to wait for an answer from each predicate function
## Examples
iex> Triangle.Predicates.analyze({3, 4, 5})
[:acute, :almost_equilateral_heronian_triangle, :heronian_triangle, :indecomposable, :pythagorean_triangle, :scalene]
iex> Triangle.Predicates.analyze({4, 5, 10})
[:invalid_triangle]
iex> Triangle.Predicates.analyze({5, 5, 5})
[:acute, :equilateral, :indecomposable]
iex> Triangle.Predicates.analyze({53, 54, 55})
[:acute, :indecomposable, :scalene]
iex> Triangle.Predicates.analyze({10, 13, 13})
[:acute, :decomposable, :heronian_triangle, :isoceles]
iex> Triangle.Predicates.analyze({477, 477, 504})
[:"70_heronian_triangle", :acute, :decomposable, :heronian_triangle, :isoceles]
"""
def analyze(t, opts \\ []) when is_triangle?(t) do
# how long are we waiting for each predicate
wait_time = opts |> Keyword.get(:predicate_wait_time, 100)
# what functions are we skipping?
skip_list = []
skip_set = MapSet.new(skip_list)
if is_valid_triangle?(t) do
# find all predicates
Chunky.Geometry.Triangle.Predicates.__info__(:functions)
|> Enum.filter(fn {func, arity} ->
# take these apart and find is_*?/1 functions
f_name = func |> Atom.to_string()
String.starts_with?(f_name, "is_") && String.ends_with?(f_name, "?") && arity == 1
end)
# reject anything in our skip set
|> Enum.reject(fn {func, _arity} -> skip_set |> MapSet.member?(func) end)
# map to funct/name pairs
|> Enum.map(fn {func, _arity} ->
f_atom = func |> Atom.to_string() |> String.slice(3..-2) |> String.to_atom()
{func, f_atom}
end)
# apply and filter
|> Enum.filter(fn {func, _name} ->
pred_task =
Task.async(fn ->
apply(Chunky.Geometry.Triangle.Predicates, func, [t])
end)
case Task.yield(pred_task, wait_time) || Task.shutdown(pred_task) do
{:ok, result} ->
# ran to completion
result
nil ->
# timed out
false
end
end)
# map to names
|> Enum.map(fn {_func, name} -> name end)
|> Enum.sort()
else
[:invalid_triangle]
end
end
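  # Hypothetical iex session (not one of the doctests above): raising
  # `predicate_wait_time` gives slow predicates more time before they are
  # dropped from the result.
  #
  #     iex> Triangle.Predicates.analyze({3, 4, 5}, predicate_wait_time: 500)
  #     [:acute, :almost_equilateral_heronian_triangle, :heronian_triangle,
  #      :indecomposable, :pythagorean_triangle, :scalene]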
@doc """
Is a triangle `2-heronian`, a triangle with an area 2 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_2_heronian_triangle?({13, 14, 15})
true
iex> Triangle.Predicates.is_2_heronian_triangle?({11, 25, 30})
true
"""
def is_2_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 2)
end
@doc """
Is a triangle `3-heronian`, a triangle with an area 3 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_3_heronian_triangle?({25, 26, 17})
true
iex> Triangle.Predicates.is_3_heronian_triangle?({25, 28, 17})
true
"""
def is_3_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 3)
end
@doc """
Is a triangle `4-heronian`, a triangle with an area 4 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_4_heronian_triangle?({30, 30, 48})
true
iex> Triangle.Predicates.is_4_heronian_triangle?({18, 80, 82})
true
"""
def is_4_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 4)
end
@doc """
Is a triangle `5-heronian`, a triangle with an area 5 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_5_heronian_triangle?({30, 40, 50})
true
iex> Triangle.Predicates.is_5_heronian_triangle?({45, 50, 85})
true
"""
def is_5_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 5)
end
@doc """
Is a triangle `6-heronian`, a triangle with an area 6 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_6_heronian_triangle?({33, 56, 65})
true
iex> Triangle.Predicates.is_6_heronian_triangle?({34, 61, 75})
true
"""
def is_6_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 6)
end
@doc """
Is a triangle `7-heronian`, a triangle with an area 7 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_7_heronian_triangle?({42, 56, 70})
true
iex> Triangle.Predicates.is_7_heronian_triangle?({35, 84, 91})
true
"""
def is_7_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 7)
end
@doc """
Is a triangle `8-heronian`, a triangle with an area 8 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_8_heronian_triangle?({52, 56, 60})
true
iex> Triangle.Predicates.is_8_heronian_triangle?({41, 84, 85})
true
"""
def is_8_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 8)
end
@doc """
Is a triangle `9-heronian`, a triangle with an area 9 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_9_heronian_triangle?({52, 73, 75})
true
iex> Triangle.Predicates.is_9_heronian_triangle?({54, 72, 90})
true
"""
def is_9_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 9)
end
@doc """
Is a triangle `10-heronian`, a triangle with an area 10 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_10_heronian_triangle?({65, 70, 75})
true
iex> Triangle.Predicates.is_10_heronian_triangle?({65, 72, 97})
true
"""
def is_10_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 10)
end
@doc """
Is a triangle `20-heronian`, a triangle with an area 20 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_20_heronian_triangle?({130, 140, 150})
true
iex> Triangle.Predicates.is_20_heronian_triangle?({125, 145, 180})
true
"""
def is_20_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 20)
end
@doc """
Is a triangle `30-heronian`, a triangle with an area 30 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_30_heronian_triangle?({195, 210, 225})
true
iex> Triangle.Predicates.is_30_heronian_triangle?({200, 200, 240})
true
"""
def is_30_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 30)
end
@doc """
Is a triangle `40-heronian`, a triangle with an area 40 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_40_heronian_triangle?({267, 278, 289})
true
iex> Triangle.Predicates.is_40_heronian_triangle?({260, 280, 300})
true
"""
def is_40_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 40)
end
@doc """
Is a triangle `50-heronian`, a triangle with an area 50 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_50_heronian_triangle?({325, 350, 375})
true
iex> Triangle.Predicates.is_50_heronian_triangle?({300, 390, 390})
true
"""
def is_50_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 50)
end
@doc """
Is a triangle `60-heronian`, a triangle with an area 60 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_60_heronian_triangle?({408, 416, 424})
true
iex> Triangle.Predicates.is_60_heronian_triangle?({389, 425, 444})
true
"""
def is_60_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 60)
end
@doc """
Is a triangle `70-heronian`, a triangle with an area 70 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_70_heronian_triangle?({477, 477, 504})
true
iex> Triangle.Predicates.is_70_heronian_triangle?({456, 481, 545})
true
"""
def is_70_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 70)
end
@doc """
Is a triangle `80-heronian`, a triangle with an area 80 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_80_heronian_triangle?({534, 556, 578})
true
iex> Triangle.Predicates.is_80_heronian_triangle?({520, 560, 600})
true
"""
def is_80_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 80)
end
@doc """
Is a triangle `90-heronian`, a triangle with an area 90 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_90_heronian_triangle?({612, 624, 636})
true
iex> Triangle.Predicates.is_90_heronian_triangle?({546, 690, 696})
true
"""
def is_90_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 90)
end
@doc """
Is a triangle `100-heronian`, a triangle with an area 100 times its perimeter?
See also:
- `Chunky.Geometry.Triangle.is_multiple_heronian_triangle?/2`
## Examples
iex> Triangle.Predicates.is_100_heronian_triangle?({650, 700, 750})
true
iex> Triangle.Predicates.is_100_heronian_triangle?({663, 678, 765})
true
"""
def is_100_heronian_triangle?(t) when is_triangle?(t) do
is_multiple_heronian_triangle?(t, 100)
end
@doc """
Are all of the interior angles of a triangle equal to or smaller than 90 degrees?
## Examples
iex> Triangle.Predicates.is_acute?({5, 5, 5})
true
iex> Triangle.Predicates.is_acute?({35, 5, 35})
true
iex> Triangle.Predicates.is_acute?({35, 19, 19})
false
iex> Triangle.Predicates.is_acute?({15, 16, 17})
true
"""
def is_acute?(t) when is_triangle?(t) do
angles(t)
|> Tuple.to_list()
|> Enum.all?(fn a -> a <= 90.0 end)
end
@doc """
An _almost_ equilateral heronian triangle has sequentially sized integer sides and an integer area.
Sometimes called _super heronian_ (which is a different construction in this library), Parker triangles,
or almost equilaterals, these are integer triangles with sequentially increasing sides, like `{3, 4, 5}`.
## Examples
iex> Triangle.Predicates.is_almost_equilateral_heronian_triangle?({3, 4, 5})
true
iex> Triangle.Predicates.is_almost_equilateral_heronian_triangle?({51, 52, 53})
true
iex> Triangle.Predicates.is_almost_equilateral_heronian_triangle?({53, 54, 55})
false
iex> Triangle.Predicates.is_almost_equilateral_heronian_triangle?({3, 7, 9})
false
"""
def is_almost_equilateral_heronian_triangle?(t) when is_triangle?(t) do
{a, b, c} = normalize(t)
a == b - 1 && b == c - 1 && is_heronian_triangle?(t)
end
@doc """
Is a triangle decomposable into two smaller right triangles?
See also:
- `Chunky.Geometry.Triangle.decompose/1`
- `is_indecomposable?/1`
## Examples
iex> Triangle.Predicates.is_decomposable?({6, 6, 6})
false
iex> Triangle.Predicates.is_decomposable?({10, 13, 13})
true
iex> Triangle.Predicates.is_decomposable?({13, 14, 15})
true
"""
def is_decomposable?(t) when is_triangle?(t) do
case decompose(t) do
{:ok, _, _} -> true
_ -> false
end
end
@doc """
Is the provided triangle an equilateral triangle?
## Examples
iex> Triangle.Predicates.is_equilateral?({5, 5, 5})
true
iex> Triangle.Predicates.is_equilateral?({3, 4, 5})
false
iex> Triangle.Predicates.is_equilateral?({15, 5, 15})
false
iex> Triangle.Predicates.is_equilateral?({0, 0, 0})
** (FunctionClauseError) no function clause matching in Chunky.Geometry.Triangle.Predicates.is_equilateral?/1
iex> Triangle.Predicates.is_equilateral?({-5, 5, 5})
** (FunctionClauseError) no function clause matching in Chunky.Geometry.Triangle.Predicates.is_equilateral?/1
"""
def is_equilateral?(t = {a, b, c}) when is_triangle?(t) do
a == b && b == c
end
@doc """
Is a triangle a _heronian_ triangle, one with integer sides and integer area?
## Examples
iex> Triangle.Predicates.is_heronian_triangle?({3, 4, 5})
true
iex> Triangle.Predicates.is_heronian_triangle?({5, 6, 7})
false
iex> Triangle.Predicates.is_heronian_triangle?({17, 16, 17})
true
iex> Triangle.Predicates.is_heronian_triangle?({41, 41, 80})
true
iex> Triangle.Predicates.is_heronian_triangle?({30, 5, 29})
true
"""
def is_heronian_triangle?(t) when is_triangle?(t) do
is_valid_triangle?(t) &&
case area(t) do
{:integer, _} -> true
_ -> false
end
end
@doc """
Is a triangle decomposable into two smaller right triangles?
See also:
- `Chunky.Geometry.Triangle.decompose/1`
- `is_decomposable?/1`
## Examples
iex> Triangle.Predicates.is_indecomposable?({6, 6, 6})
true
iex> Triangle.Predicates.is_indecomposable?({10, 13, 13})
false
iex> Triangle.Predicates.is_indecomposable?({13, 14, 15})
false
"""
def is_indecomposable?(t) when is_triangle?(t) do
case decompose(t) do
{:ok, _, _} -> false
_ -> true
end
end
@doc """
Is the triangle an isoceles triangle, a triangle with two equal sides?
## Examples
iex> Triangle.Predicates.is_isoceles?({5, 5, 5})
false
iex> Triangle.Predicates.is_isoceles?({15, 5, 15})
true
iex> Triangle.Predicates.is_isoceles?({5, 15, 25})
false
iex> Triangle.Predicates.is_isoceles?({5, 25, 25})
true
"""
def is_isoceles?(t = {a, b, c}) when is_triangle?(t) do
is_equilateral?(t) == false && (a == b || b == c || a == c)
end
@doc """
Are any of the interior angles of a triangle greater than 90 degrees?
## Examples
iex> Triangle.Predicates.is_obtuse?({5, 5, 5})
false
iex> Triangle.Predicates.is_obtuse?({35, 5, 35})
false
iex> Triangle.Predicates.is_obtuse?({35, 19, 19})
true
iex> Triangle.Predicates.is_obtuse?({15, 16, 17})
false
"""
def is_obtuse?(t) when is_triangle?(t) do
angles(t)
|> Tuple.to_list()
|> Enum.any?(fn a -> a > 90.0 end)
end
@doc """
Is the given triangle a _right triangle_ with integer sides?
## Examples
iex> Triangle.Predicates.is_pythagorean_triangle?({5, 5, 5})
false
iex> Triangle.Predicates.is_pythagorean_triangle?({10, 24, 26})
true
iex> Triangle.Predicates.is_pythagorean_triangle?({24, 10, 26})
true
iex> Triangle.Predicates.is_pythagorean_triangle?({3, 4, 5})
true
iex> Triangle.Predicates.is_pythagorean_triangle?({4, 5, 3})
true
iex> Triangle.Predicates.is_pythagorean_triangle?({4, 5, 6})
false
"""
def is_pythagorean_triangle?(t = {a, b, c}) when is_triangle?(t) do
hyp = Enum.max([a, b, c])
edges =
([a, b, c] -- [hyp])
|> Enum.map(fn v -> v * v end)
|> Enum.sum()
edges == hyp * hyp
end
@doc """
Is the triangle a scalene triangle, a triangle with three different length sides?
## Examples
iex> Triangle.Predicates.is_scalene?({5, 5, 5})
false
iex> Triangle.Predicates.is_scalene?({15, 15, 5})
false
iex> Triangle.Predicates.is_scalene?({5, 15, 25})
true
iex> Triangle.Predicates.is_scalene?({15, 5, 35})
true
"""
def is_scalene?(t = {a, b, c}) when is_triangle?(t) do
a != b && b != c && a != c
end
@doc """
Is a triangle _super heronian_, a triangle with integer area and perimeter that are equal?
Not to be confused with _almost equilateral heronians_, which are also sometimes called super
heronian triangles. In our definition, the super heronian triangles have an integer area and
perimeter that are equal. This is a finite set:
- `{5, 12, 13}`
- `{6, 8, 10}`
- `{6, 25, 29}`
- `{7, 15, 20}`
- `{9, 10, 17}`
## Examples
iex> Triangle.Predicates.is_super_heronian_triangle?({5, 12, 13})
true
iex> Triangle.Predicates.is_super_heronian_triangle?({5, 12, 15})
false
iex> Triangle.Predicates.is_super_heronian_triangle?({3, 4, 5})
false
iex> Triangle.Predicates.is_super_heronian_triangle?({6, 29, 25})
true
"""
def is_super_heronian_triangle?(t = {a, b, c}) when is_triangle?(t) do
case area(t) do
{:integer, ta} -> ta == a + b + c
_ -> false
end
end
end
|
lib/geometry/triangle/predicates.ex
| 0.924925
| 0.946695
|
predicates.ex
|
starcoder
|
defmodule Unicode do
alias Unicode.CompiletimeHelper
@moduledoc """
Provides functionality to efficiently check properties of Unicode codepoints, graphemes and strings.
The current implementation is based on Unicode version _8.0.0_.
"""
@derived_core_properties %{
Math: :math,
Alphabetic: :alphabetic,
Lowercase: :lowercase,
Uppercase: :uppercase,
# Cased: :cased,
# Case_Ignorable: :case_ignorable,
# Changes_When_Lowercased: :changes_when_lowercased,
# Changes_When_Titlecased: :changes_when_titlecased,
# Changes_When_Casefolded: :changes_when_casefolded,
# Changes_When_Casemapped: :changes_when_casemapped,
# ID_Start: :id_start,
# ID_Continue: :id_continue,
# XID_Start: :xid_start,
# XID_Continue: :xid_continue,
# Default_Ignorable_Code_Point: :default_ignorable_code_point,
# Grapheme_Extend: :grapheme_extend,
# Grapheme_Base: :grapheme_base
}
# TODO: Add empty function heads and document them.
@doc """
Checks if a single Unicode codepoint (or all characters in the given binary string) adhere to the Derived Core Property `Math`.
These are all characters whose primary usage is in mathematical concepts (and not in alphabets).
Notice that the numerical digits are not part of this group. Use `Unicode.digit?/1` instead.
The function takes a unicode codepoint or a string as input.
For the string-version, the result will be true only if _all_ codepoints in the string adhere to the property.
## Examples
iex>Unicode.math?(?=)
true
iex>Unicode.math?("=")
true
iex>Unicode.math?("1+1=2") # Note that digits themselves are not part of `Math`.
false
iex>Unicode.math?("परिस")
false
iex>Unicode.math?("∑") # Summation, \u2211
true
iex>Unicode.math?("Σ") # Greek capital letter sigma, \u03a3
false
"""
@spec math?(String.codepoint | String.t) :: boolean
def math?(codepoint_or_string)
@doc """
Checks if a single Unicode codepoint (or all characters in the given binary string) adhere to the Derived Core Property `Alphabetic`.
These are all characters that are usually used as representations of letters/syllables in words/sentences.
The function takes a unicode codepoint or a string as input.
For the string-version, the result will be true only if _all_ codepoints in the string adhere to the property.
## Examples
iex>Unicode.alphabetic?(?a)
true
iex>Unicode.alphabetic?("A")
true
iex>Unicode.alphabetic?("Elixir")
true
iex>Unicode.alphabetic?("الإكسير")
true
iex>Unicode.alphabetic?("foo, bar") # comma and whitespace
false
iex>Unicode.alphabetic?("42")
false
iex>Unicode.alphabetic?("龍王")
true
iex>Unicode.alphabetic?("∑") # Summation, \u2211
false
iex>Unicode.alphabetic?("Σ") # Greek capital letter sigma, \u03a3
true
"""
@spec alphabetic?(String.codepoint | String.t) :: boolean
def alphabetic?(codepoint_or_string)
@doc """
Checks if a single Unicode codepoint (or all characters in the given binary string) adhere to the Derived Core Property `Lowercase`.
Notice that there are many languages that do not have a distinction between cases. Their characters are not included in this group.
The function takes a unicode codepoint or a string as input.
For the string-version, the result will be true only if _all_ codepoints in the string adhere to the property.
## Examples
iex>Unicode.lowercase?(?a)
true
iex>Unicode.lowercase?("A")
false
iex>Unicode.lowercase?("Elixir")
false
iex>Unicode.lowercase?("léon")
true
iex>Unicode.lowercase?("foo, bar")
false
iex>Unicode.lowercase?("42")
false
iex>Unicode.lowercase?("Σ")
false
iex>Unicode.lowercase?("σ")
true
"""
@spec lowercase?(String.codepoint | String.t) :: boolean
def lowercase?(codepoint_or_string)
@doc """
Checks if a single Unicode codepoint (or all characters in the given binary string) adhere to the Derived Core Property `Uppercase`.
Notice that there are many languages that do not have a distinction between cases. Their characters are not included in this group.
The function takes a unicode codepoint or a string as input.
For the string-version, the result will be true only if _all_ codepoints in the string adhere to the property.
## Examples
iex>Unicode.uppercase?(?a)
false
iex>Unicode.uppercase?("A")
true
iex>Unicode.uppercase?("Elixir")
false
iex>Unicode.uppercase?("CAMEMBERT")
true
iex>Unicode.uppercase?("foo, bar")
false
iex>Unicode.uppercase?("42")
false
iex>Unicode.uppercase?("Σ")
true
iex>Unicode.uppercase?("σ")
false
"""
@spec uppercase?(String.codepoint | String.t) :: boolean
def uppercase?(codepoint_or_string)
# Define methods from the DerivedCoreProperties.txt
File.stream!('unicode_source_files/DerivedCoreProperties.txt')
|> Stream.reject(fn raw_line ->
# skip comments and empty lines.
String.strip(raw_line) == "" || String.starts_with?(raw_line, "#")
end)
|> Stream.map(fn raw_line ->
[charinfo, property] = CompiletimeHelper.split_derived_core_properties_line(raw_line)
# Only define functions for the properties that are part of @derived_core_properties
snake_cased_property_name = Map.get(@derived_core_properties, String.to_atom(property))
if snake_cased_property_name do
method_name = String.to_atom("#{snake_cased_property_name}?")
case String.split(charinfo, "..") do
[low_code, high_code] ->
low_code = String.to_integer(low_code, 16)
high_code = String.to_integer(high_code, 16)
def unquote(method_name)(code) when code in (unquote(low_code))..(unquote(high_code)), do: true
[single_code] ->
single_code = String.to_integer(single_code, 16)
def unquote(method_name)(unquote(single_code)), do: true
end
end
end)
|> Stream.run
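  # The stream above unrolls into one function clause per data line of
  # DerivedCoreProperties.txt, e.g. (illustrative; the exact ranges come
  # from the Unicode data file):
  #
  #     def math?(code) when code in 0x2200..0x22FF, do: true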
for {_unicode_property, snake_cased_property_name} <- @derived_core_properties do
method_name = String.to_atom("#{snake_cased_property_name}?")
# Non-matching codepoints return `false`
def unquote(method_name)(codepoint) when is_integer(codepoint), do: false
# String handling: Match first codepoint and rest of codepoints.
def unquote(method_name)(string) when is_binary(string) do
case String.next_codepoint(string) do
nil -> false
{<<codepoint::utf8>>, ""} -> unquote(method_name)(codepoint)
{<<codepoint::utf8>>, rest} -> unquote(method_name)(codepoint) && unquote(method_name)(rest)
end
end
def unquote(method_name)(string, []), do: unquote(method_name)(string)
def unquote(method_name)(string, block) when is_list(block) do
blocks = Enum.map(block, &CompiletimeHelper.normalize_block_name/1)
in_block?(string, blocks) && unquote(method_name)(string)
end
    def unquote(method_name)(string, block) when is_binary(block), do: unquote(method_name)(string, [block])
  end

  # The block lookups below are property-independent, so they are defined
  # once at module level rather than once per property inside the loop above.
File.stream!('unicode_source_files/Blocks.txt')
|> Stream.reject(fn raw_line ->
String.strip(raw_line) == "" || String.starts_with?(raw_line, "#")
end)
|> Stream.map(fn raw_line ->
[charinfo, block_name] = CompiletimeHelper.split_block_line(raw_line)
    block_name = CompiletimeHelper.normalize_block_name(block_name) # as per comments in Blocks.txt
[low_code, high_code] = String.split(charinfo, "..")
low_code = String.to_integer(low_code, 16)
high_code = String.to_integer(high_code, 16)
defp in_block?(code, block) when block == unquote(block_name) and code in unquote(low_code)..unquote(high_code), do: true
end) # defp
|> Stream.run
defp in_block?(code, block) when is_integer(code) and is_list(block), do: Enum.any?(block, &in_block?(code, &1))
defp in_block?(string, block) when is_binary(string) do
case String.next_codepoint(string) do
nil -> false
{<<codepoint::utf8>>, ""} -> in_block?(codepoint, block)
{<<codepoint::utf8>>, rest} -> in_block?(codepoint, block) && in_block?(rest, block)
end
end
defp in_block?(_, _), do: false # what happens when a non-existent block is passed?
@doc """
True for the digits [0-9], but much more performant than a `\\d` regexp checking the same thing.
Derived from [http://www.unicode.org/reports/tr18/#digit](http://www.unicode.org/reports/tr18/#digit)
### Examples
iex> Unicode.numeric?("65535")
true
iex> Unicode.numeric?("42")
true
iex> Unicode.numeric?("lapis philosophorum")
false
"""
# The regexp 'digit' group. Matches only the ASCII ?0 through ?9 characters.
def numeric?(codepoint) when codepoint in ?0..?9, do: true
def numeric?(codepoint) when is_integer(codepoint), do: false
def numeric?(string) when is_binary(string) do
case String.next_codepoint(string) do
nil -> false
{<<codepoint::utf8>>, ""} -> numeric?(codepoint)
{<<codepoint::utf8>>, rest} -> numeric?(codepoint) && numeric?(rest)
end
end
@doc """
True for alphanumeric characters, but much more performant than an `:alnum:` regexp checking the same thing.
Returns true if `Unicode.alphabetic?(x) or Unicode.numeric?(x)`.
Derived from [http://www.unicode.org/reports/tr18/#alnum](http://www.unicode.org/reports/tr18/#alnum)
### Examples
iex> Unicode.alphanumeric? "1234"
true
iex> Unicode.alphanumeric? "KeyserSöze1995"
true
iex> Unicode.alphanumeric? "3段"
true
iex> Unicode.alphanumeric? "<EMAIL>"
false
"""
def alphanumeric?(codepoint, block \\ [])
def alphanumeric?(codepoint, block) when is_integer(codepoint) do
numeric?(codepoint) || alphabetic?(codepoint, block)
end
def alphanumeric?(string, block) when is_binary(string) do
case String.next_codepoint(string) do
nil -> false
{<<codepoint::utf8>>, ""} -> alphanumeric?(codepoint, block)
{<<codepoint::utf8>>, rest} -> alphanumeric?(codepoint, block) && alphanumeric?(rest, block)
end
end
end
|
lib/unicode.ex
| 0.705785
| 0.484563
|
unicode.ex
|
starcoder
|
defmodule Terminus do
@moduledoc """

Terminus allows you to crawl and subscribe to Bitcoin transaction events and
download binary data from transactions, using a combination of
[Bitbus](https://bitbus.network) and [Bitsocket](https://bitsocket.network),
and [BitFS](https://bitfs.network).
Terminus provides a single unified interface for querying Planaria corp APIs
in a highly performant manner. Each request is a `GenStage` process, enabling
you to create powerful concurrent data flows. Terminus may well be the most
powerful way of querying Bitcoin in the Universe!
## APIs
Terminus can be used to interface with the following Planaria Corp APIs.
* [`Bitbus`](`Terminus.Bitbus`) - crawl filtered subsets of **confirmed** Bitcoin transactions in blocks.
* [`Bitsocket`](`Terminus.Bitsocket`) - subscribe to a live, filterable stream of realtime transaction events.
* [`BitFS`](`Terminus.BitFS`) - fetch raw binary data chunks (over 512kb) indexed from all Bitcoin transactions.
### Authentication
Both Bitbus and Bitsocket require a token to authenticate requests. *(The Bitsocket
`listen` API currently doesn't require a token but that is likely to change).*
Currently tokens are free with no usage limits. *(Also likely to change)*
**[Get your Planaria Token](https://token.planaria.network).**
Where a token is given as a tuple pair in the format `{app, key}`, Terminus
will fetch the token at runtime using `Application.get_env(app, key)`.
iex> Terminus.Omni.find(txid, token: {:my_app, :planaria_token})
{:ok, %{...}}
### Query language
Both Bitbus and Bitsocket use the same MongoDB-like query language, known as
[Bitquery](https://bitquery.planaria.network). Terminus fully supports both
the TXO (Transaction Object) and BOB (Bitcoin OP_RETURN Bytecode) schemas, and
allows the optional use of shorthand queries (just the `q` value).
iex> Terminus.Bitbus.fetch!(%{
...> find: %{ "out.s2" => "1LtyME6b5AnMopQrBPLk4FGN8UBuhxKqrn" },
...> sort: %{ "blk.i": -1 },
...> project: %{ "tx.h": 1 },
...> limit: 5
...> }, token: token)
[
%{"tx" => %{"h" => "fca7bdd7658613418c54872212811cf4c5b4f8ee16864eaf70cb1393fb0df6ca"}},
%{"tx" => %{"h" => "79ae3ca23d1067b9ab45aba7e8ff4de1943e383e9a33e562d5ffd8489f388c93"}},
%{"tx" => %{"h" => "5526989417f28da5e0c99b58863db58c1faf8862ac9325dc415ad4b11605c1b1"}},
%{"tx" => %{"h" => "0bac587681360f961dbccba4c49a5c8f1b6f0bef61fe8501a28dcfe981a920b5"}},
%{"tx" => %{"h" => "fa13a8f0f5688f761b2f34949bb35fa5d6fd14cb3d49c2c1617363b6984df162"}}
]
## Using Terminus
Terminus can be used as a simple API client, or a turbo-charged, concurrent
multi-stream Bitcoin scraper on steroids. You decide.
The following modules are the primary ways of using Terminus.
* `Terminus.Bitbus` - functions for crawling and query confirmed Bitcoin transactions.
* `Terminus.Bitsocket` - query mempool transactions and listen to realtime transaction events.
* `Terminus.BitFS` - fetch binary data blobs embedded in Bitcoin transactions.
* `Terminus.Omni` - conveniently fetch confirmed and mempool transactions together.
* `Terminus.Planaria` - run Bitcoin scraper processes under your application's supervision tree.
### Streams
Most Terminus functions return a streaming `t:Enumerable.t/0` allowing you to
compose data processing pipelines and operations.
iex> Terminus.Bitbus.crawl!(query, token: token)
...> |> Stream.map(&Terminus.BitFS.scan_tx/1)
...> |> Stream.each(&save_to_db/1)
...> |> Stream.run
:ok
### Omni
Sometimes it's necessary to query both confirmed and unconfirmed transactions
simultaneously. This is where `Terminus.Omni` comes in, effectively replicating
the functionality of legacy Planaria APIs and returning results from
Bitbus and Bitsocket in one call.
iex> Terminus.Omni.fetch(query, token: token)
{:ok, %{
c: [...], # collection of confirmed tx
u: [...] # collection of mempool tx
}}
You can also easily find a single transaction by its [`txid`](`t:Terminus.txid`)
irrespective of whether it is confirmed or not.
iex> Terminus.Omni.find(txid, token: token)
{:ok, %{
"tx" => %{"h" => "fca7bdd7658613418c54872212811cf4c5b4f8ee16864eaf70cb1393fb0df6ca"},
...
}}
### Planaria
Using `Terminus.Planaria` inside a module allows you to simply recreate
[Planaria](https://neon.planaria.network)-like state machine functionality.
Planarias can be started under your app's supervision tree, allowing multiple
Planaria processes to run concurrently.
defmodule TwetchScraper do
@query %{
"find" => %{
"out.s2": "19HxigV4QyBv3tHpQVcUEQyq1pzZVdoAut",
"out.s25": "twetch"
}
}
use Terminus.Planaria, token: {:my_app, :planaria_token},
from: 600000,
query: @query
def handle_data(:block, txns) do
# Handle confirmed transactions
end
def handle_data(:mempool, txns) do
# Handle unconfirmed transactions
end
end
### Concurrency
Under the hood, each Terminus request is a `GenStage` producer process, and
the bare [`pid`](`t:pid/0`) can be returned. This allows you to take full
advantage of Elixir's concurrency, by either using with your own `GenStage`
consumers or using a tool like `Flow` to create powerful concurrent pipelines.
# One stream of transactions will be distributed across eight concurrent
# processes for mapping and saving the data.
iex> {:ok, pid} = Terminus.Bitbus.crawl(query, token: token, stage: true)
iex> Flow.from_stages([pid], stages: 8)
...> |> Flow.map(&Terminus.BitFS.scan_tx/1)
...> |> Flow.map(&save_to_db/1)
...> |> Flow.run
:ok
"""
@typedoc "Bitcoin data query language."
@type bitquery :: map | String.t
@typedoc "BitFS URI scheme."
@type bitfs_uri :: String.t
@typedoc "On-data callback function."
@type callback :: function | nil
@typedoc "Hex-encoded transaction ID."
@type txid :: String.t
end
|
lib/terminus.ex
| 0.888202
| 0.687892
|
terminus.ex
|
starcoder
|
defmodule BlueHeron.HCI.Event.CommandStatus do
use BlueHeron.HCI.Event, code: 0x0F
@moduledoc """
The HCI_Command_Status event is used to indicate that the command described by
the Command_Opcode parameter has been received, and that the Controller is
currently performing the task for this command.
This event is needed to provide mechanisms for asynchronous operation, which
makes it possible to prevent the Host from waiting for a command to finish. If
the command cannot begin to execute (a parameter error may have occurred, or the
command may currently not be allowed), the Status event parameter will contain
the corresponding error code, and no complete event will follow since the
command was not started. The Num_HCI_Command_Packets event parameter allows the
Controller to indicate the number of HCI command packets the Host can send to
the Controller. If the Controller requires the Host to stop sending commands,
the Num_HCI_Command_Packets event parameter will be set to zero. To indicate to
the Host that the Controller is ready to receive HCI command packets, the
Controller generates an HCI_Command_Status event with Status 0x00 and
Command_Opcode 0x0000 and the Num_HCI_Command_Packets event parameter set to 1
or more. Command_Opcode 0x0000 is a special value indicating that this event is
not associated with a command sent by the Host. The Controller can send an
HCI_Command_Status event with Command Opcode 0x0000 at any time to change the
number of outstanding HCI command packets that the Host can send before waiting.
Reference: Version 5.2, Vol 4, Part E, 7.7.15
"""
require Logger
defparameters [
:num_hci_command_packets,
:opcode,
:status
]
defimpl BlueHeron.HCI.Serializable do
def serialize(data) do
bin = <<data.status, data.num_hci_command_packets, data.opcode::binary>>
size = byte_size(bin)
<<data.code, size, bin::binary>>
end
end
@impl BlueHeron.HCI.Event
def deserialize(<<@code, _size, status, num_hci_command_packets, opcode::binary-2>>) do
%__MODULE__{
num_hci_command_packets: num_hci_command_packets,
opcode: opcode,
status: status
}
end
def deserialize(bin), do: {:error, bin}
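  # Illustrative round-trip (hypothetical iex session, assuming the `use`
  # macro stores `code: 0x0F` on the struct), using the special "Controller
  # is ready" packet described in the moduledoc (Status 0x00, Command_Opcode
  # 0x0000, Num_HCI_Command_Packets 1):
  #
  #     iex> event = %BlueHeron.HCI.Event.CommandStatus{
  #     ...>   status: 0x00, num_hci_command_packets: 1, opcode: <<0x00, 0x00>>}
  #     iex> BlueHeron.HCI.Serializable.serialize(event)
  #     <<0x0F, 4, 0x00, 1, 0x00, 0x00>>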
end
|
lib/blue_heron/hci/events/command_status.ex
| 0.672547
| 0.442155
|
command_status.ex
|
starcoder
|
defmodule Ecto.Query.Builder.Preload do
@moduledoc false
alias Ecto.Query.Builder
@doc """
Escapes a preload.
A preload may be an atom, a list of atoms or a keyword list
nested as a rose tree.
iex> escape(:foo, [])
{[:foo], []}
iex> escape([foo: :bar], [])
{[foo: [:bar]], []}
iex> escape([foo: [:bar, bar: :bat]], [])
{[foo: [:bar, bar: [:bat]]], []}
iex> escape([foo: {:^, [], ["external"]}], [])
{[foo: ["external"]], []}
iex> escape([foo: [:bar, {:^, [], ["external"]}], baz: :bat], [])
{[foo: [:bar, "external"], baz: [:bat]], []}
iex> escape([foo: {:c, [], nil}], [c: 1])
{[], [foo: {1, []}]}
iex> escape([foo: {{:c, [], nil}, bar: {:l, [], nil}}], [c: 1, l: 2])
{[], [foo: {1, [bar: {2, []}]}]}
iex> escape([foo: {:c, [], nil}, bar: {:l, [], nil}], [c: 1, l: 2])
{[], [foo: {1, []}, bar: {2, []}]}
iex> escape([foo: {{:c, [], nil}, :bar}], [c: 1])
** (Ecto.Query.CompileError) cannot preload `:bar` inside join association preload
iex> escape([foo: [bar: {:c, [], nil}]], [c: 1])
** (Ecto.Query.CompileError) cannot preload join association `:bar` with binding `c` because parent preload is not a join association
"""
@spec escape(Macro.t, Keyword.t) :: {[Macro.t], [Macro.t]} | no_return
def escape(preloads, vars) do
{preloads, assocs} = escape(preloads, :both, [], [], vars)
{Enum.reverse(preloads), Enum.reverse(assocs)}
end
defp escape(atom, mode, preloads, assocs, _vars) when is_atom(atom) do
assert_preload!(mode, atom)
{[atom|preloads], assocs}
end
defp escape(list, mode, preloads, assocs, vars) when is_list(list) do
Enum.reduce list, {preloads, assocs}, fn item, acc ->
escape_each(item, mode, acc, vars)
end
end
defp escape({:^, _, [inner]} = expr, mode, preloads, assocs, _vars) do
assert_preload!(mode, expr)
{[inner|preloads], assocs}
end
defp escape(other, _mode, _preloads, _assocs, _vars) do
Builder.error! "`#{Macro.to_string other}` is not a valid preload expression. " <>
"preload expects an atom, a (nested) list of atoms or a (nested) " <>
"keyword list with a binding, atoms or lists as values. " <>
"Use ^ if you want to interpolate a value"
end
defp escape_each({atom, {var, _, context}}, mode, {preloads, assocs}, vars)
when is_atom(atom) and is_atom(context) do
assert_assoc!(mode, atom, var)
idx = Builder.find_var!(var, vars)
{preloads, [{atom, {idx, []}}|assocs]}
end
defp escape_each({atom, {{var, _, context}, list}}, mode, {preloads, assocs}, vars)
when is_atom(atom) and is_atom(context) do
assert_assoc!(mode, atom, var)
idx = Builder.find_var!(var, vars)
{[], inner_assocs} = escape(list, :assoc, [], [], vars)
{preloads,
[{atom, {idx, Enum.reverse(inner_assocs)}}|assocs]}
end
defp escape_each({atom, list}, mode, {preloads, assocs}, vars) when is_atom(atom) do
assert_preload!(mode, {atom, list})
{inner_preloads, []} = escape(list, :preload, [], [], vars)
{[{atom, Enum.reverse(inner_preloads)}|preloads], assocs}
end
defp escape_each(other, mode, {preloads, assocs}, vars) do
escape(other, mode, preloads, assocs, vars)
end
defp assert_assoc!(mode, _atom, _var) when mode in [:both, :assoc], do: :ok
defp assert_assoc!(_mode, atom, var) do
Builder.error! "cannot preload join association `#{inspect atom}` with binding `#{var}` " <>
"because parent preload is not a join association"
end
defp assert_preload!(mode, _term) when mode in [:both, :preload], do: :ok
defp assert_preload!(_mode, term) do
Builder.error! "cannot preload `#{Macro.to_string(term)}` inside join association preload"
end
@doc """
Applies the preloaded value into the query.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(query, binding, expr, env) do
binding = Builder.escape_binding(binding)
{preloads, assocs} = escape(expr, binding)
Builder.apply_query(query, __MODULE__, [Enum.reverse(preloads), Enum.reverse(assocs)], env)
end
@doc """
The callback applied by `build/4` to build the query.
"""
@spec apply(Ecto.Queryable.t, term, term) :: Ecto.Query.t
def apply(query, preloads, assocs) do
query = Ecto.Queryable.to_query(query)
%{query | preloads: query.preloads ++ preloads, assocs: query.assocs ++ assocs}
end
end
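# For orientation, this builder backs `preload` expressions written with the
# `Ecto.Query` API (schema and association names below are hypothetical):
#
#     import Ecto.Query
#
#     from p in Post,
#       join: c in assoc(p, :comments),
#       preload: [:author, comments: c]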
|
lib/ecto/query/builder/preload.ex
| 0.847448
| 0.529385
|
preload.ex
|
starcoder
|
defmodule Jaxon.Path do
alias Jaxon.{ParseError, EncodeError}
@moduledoc ~S"""
Utility module for parsing and encoding JSON path expressions.
"""
@type t :: [String.t() | :all | :root | integer]
@doc ~S"""
Encoding path expressions:
```
iex> Jaxon.Path.encode([:root, "test", 0])
{:ok, "$.test[0]"}
```
```
iex> Jaxon.Path.encode([:root, "with space", "other", "more space", 0])
{:ok, "$[with space].other[more space][0]"}
```
How to handle encode errors:
```
iex> Jaxon.Path.encode([:root, :whoops, "test", 0])
{:error, %Jaxon.EncodeError{message: "`:whoops` is not a valid JSON path segment"}}
```
"""
@spec encode(t()) :: {:ok, String.t()} | {:error, Exception.t()}
def encode(path) do
case do_encode(path) do
{:error, err} ->
{:error, %EncodeError{message: err}}
result ->
{:ok, result}
end
end
@doc ~S"""
Parse path expressions:
```
iex> Jaxon.Path.parse("$[*].pets[0]")
{:ok, [:root, :all, "pets", 0]}
```
How to handle parse errors;
```
iex> Jaxon.Path.parse("$.\"test[x]")
{:error, %Jaxon.ParseError{message: "Ending quote not found for string at `\"test[x]`"}}
```
"""
@spec parse(String.t()) :: {:ok, t} | {:error, Exception.t()}
def parse(bin) do
case parse_json_path(bin, "", []) do
{:error, err} ->
{:error, %ParseError{message: err}}
result ->
{:ok, result}
end
end
@doc ~S"""
Parse path expressions:
```
iex> Jaxon.Path.parse!("$[*].pets[0]")
[:root, :all, "pets", 0]
```
"""
@spec parse!(String.t()) :: t() | no_return
def parse!(bin) do
case parse(bin) do
{:error, err} ->
raise err
{:ok, path} ->
path
end
end
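@doc ~S"""
Like `encode/1`, but raises on error instead of returning an error tuple.
"""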
@spec encode!(t()) :: String.t() | no_return
def encode!(path) do
case encode(path) do
{:error, err} ->
raise err
{:ok, result} ->
result
end
end
defp add_key(_, acc = {:error, _}) do
acc
end
defp add_key("*", acc) do
[:all | acc]
end
defp add_key("$", acc) do
[:root | acc]
end
defp add_key(k, acc) do
[k | acc]
end
defp parse_string(<<?\\, ?", rest::binary>>, str) do
parse_string(rest, <<str::binary, ?">>)
end
defp parse_string(<<?", rest::binary>>, str) do
{str, rest}
end
defp parse_string("", _) do
""
end
defp parse_string(<<c, rest::binary>>, str) do
parse_string(rest, <<str::binary, c>>)
end
defp parse_bracket_string(<<?\\, ?], rest::binary>>, str) do
parse_bracket_string(rest, <<str::binary, ?]>>)
end
defp parse_bracket_string(<<?], rest::binary>>, str) do
{str, rest}
end
defp parse_bracket_string("", _) do
""
end
defp parse_bracket_string(<<c, rest::binary>>, str) do
parse_bracket_string(rest, <<str::binary, c>>)
end
defp parse_json_path(<<?\\, ?., rest::binary>>, cur, acc) do
parse_json_path(rest, <<cur::binary, ?.>>, acc)
end
defp parse_json_path(<<?., rest::binary>>, "", acc) do
parse_json_path(rest, "", acc)
end
defp parse_json_path(<<"[*]", rest::binary>>, "", acc) do
[:all | parse_json_path(rest, "", acc)]
end
defp parse_json_path(bin = <<?[, rest::binary>>, "", acc) do
case Integer.parse(rest) do
{i, <<?], rest::binary>>} ->
[i | parse_json_path(rest, "", acc)]
_ ->
case parse_bracket_string(rest, "") do
{key, rest} ->
[key | parse_json_path(rest, "", acc)]
_ ->
{:error, "Ending bracket not found for string at `#{String.slice(bin, 0, 10)}`"}
end
end
end
defp parse_json_path(rest = <<?[, _::binary>>, cur, acc) do
add_key(cur, parse_json_path(rest, "", acc))
end
defp parse_json_path(<<?., rest::binary>>, cur, acc) do
add_key(cur, parse_json_path(rest, "", acc))
end
defp parse_json_path("", "", _) do
[]
end
defp parse_json_path("", cur, acc) do
add_key(cur, acc)
end
defp parse_json_path(bin = <<?", rest::binary>>, "", acc) do
case parse_string(rest, "") do
{key, rest} ->
[key | parse_json_path(rest, "", acc)]
_ ->
{:error, "Ending quote not found for string at `#{String.slice(bin, 0, 10)}`"}
end
end
defp parse_json_path(<<c, rest::binary>>, cur, acc) do
parse_json_path(rest, <<cur::binary, c>>, acc)
end
defp append_segment(err = {:error, _}, _) do
err
end
defp append_segment(_, err = {:error, _}) do
err
end
defp append_segment(s, rest = "[" <> _) do
s <> rest
end
defp append_segment(s, "") do
s
end
defp append_segment(s, rest) do
s <> "." <> rest
end
defp do_encode_segment(:root) do
"$"
end
defp do_encode_segment(:all) do
"[*]"
end
defp do_encode_segment(i) when is_integer(i) do
"[#{i}]"
end
defp do_encode_segment("") do
~s([])
end
defp do_encode_segment(s) when is_binary(s) do
if(String.contains?(s, ["*", "$", "]", "[", ".", "\"", " "])) do
safe_str =
s
|> String.replace("[", "\\[")
|> String.replace("]", "\\]")
"[#{safe_str}]"
else
s
end
end
defp do_encode_segment(s) do
{:error, "`#{inspect(s)}` is not a valid JSON path segment"}
end
defp do_encode([]) do
""
end
defp do_encode([h | t]) do
append_segment(do_encode_segment(h), do_encode(t))
end
end
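# Round-trip sketch, following the doctests above:
#
#     {:ok, path} = Jaxon.Path.parse("$.users[0].name")
#     #=> {:ok, [:root, "users", 0, "name"]}
#
#     Jaxon.Path.encode(path)
#     #=> {:ok, "$.users[0].name"}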
|
lib/jaxon/path.ex
| 0.793466
| 0.796767
|
path.ex
|
starcoder
|
defmodule NExJsonSchema.Validator.Properties do
alias NExJsonSchema.Schema
alias NExJsonSchema.Validator
@spec validate(Root.t(), Schema.resolved(), NExJsonSchema.json()) :: Validator.errors_with_list_paths()
def validate(root, schema, properties = %{}) do
validated_known_properties = validate_known_properties(root, schema, properties)
validation_errors(validated_known_properties) ++
validate_additional_properties(
root,
schema["additionalProperties"],
unvalidated_properties(properties, validated_known_properties)
)
end
@spec validate(Root.t(), Schema.resolved(), NExJsonSchema.data()) :: []
def validate(_, _, _), do: []
defp validate_known_properties(root, schema, properties) do
validate_named_properties(root, schema["properties"], properties) ++
validate_pattern_properties(root, schema["patternProperties"], properties)
end
defp validate_named_properties(root, schema, properties) do
schema
|> Enum.filter(&Map.has_key?(properties, elem(&1, 0)))
|> Enum.map(fn {name, property_schema} ->
{name, Validator.validate(root, property_schema, properties[name], [name])}
end)
end
defp validate_pattern_properties(_, nil, _), do: []
defp validate_pattern_properties(root, schema, properties) do
Enum.flat_map(schema, &validate_pattern_property(root, &1, properties))
end
defp validate_pattern_property(root, {pattern, schema}, properties) do
properties_matching(properties, pattern)
|> Enum.map(fn {name, property} ->
{name, Validator.validate(root, schema, property, [name])}
end)
end
defp validate_additional_properties(root, schema, properties) when is_map(schema) do
Enum.flat_map(properties, fn {name, property} -> Validator.validate(root, schema, property, [name]) end)
end
defp validate_additional_properties(_, false, properties) when map_size(properties) > 0 do
Enum.map(properties, fn {name, _} ->
{%{
description: "schema does not allow additional properties",
rule: :schema,
params: properties
}, [name]}
end)
end
defp validate_additional_properties(_, _, _), do: []
defp validation_errors(validated_properties) do
  validated_properties |> Enum.map(&elem(&1, 1)) |> List.flatten()
end
defp properties_matching(properties, pattern) do
regex = Regex.compile!(pattern)
Enum.filter(properties, &Regex.match?(regex, elem(&1, 0)))
end
defp unvalidated_properties(properties, validated_properties) do
  unvalidated = MapSet.difference(keys_as_set(properties), keys_as_set(validated_properties))
  Map.take(properties, MapSet.to_list(unvalidated))
end
defp keys_as_set(properties) do
  properties |> Enum.map(&elem(&1, 0)) |> MapSet.new()
end
end
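# Sketch of the error shape for disallowed additional properties (schema
# resolution elided; shape read off `validate_additional_properties/3`):
#
#     schema: %{"properties" => %{"name" => %{"type" => "string"}},
#               "additionalProperties" => false}
#     data:   %{"name" => "a", "extra" => 1}
#     errors: [{%{description: "schema does not allow additional properties",
#                 rule: :schema, params: %{"extra" => 1}}, ["extra"]}]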
|
lib/ex_json_schema/validator/properties.ex
| 0.816736
| 0.431045
|
properties.ex
|
starcoder
|
defmodule ExDiceRoller.Compiler do
@moduledoc """
Provides functionality for compiling expressions into ready-to-execute
functions.
Compiler's main job is to perform the following:
* takes a concrete parse tree, generally outputted by `ExDiceRoller.Parser`,
and recursively navigates the tree
* each expression is delegated to an appropriate module that implements the
`compile/1` callback, which then
* converts each expression that results in an invariable value into a number
* converts each expression containing variability, or randomness, into a
compiled anonymous function
* sends sub-expressions back to Compiler to be delegated appropriately
* wraps the nested set of compiled functions with an anonymous function that
also:
* checks for cache usage and acts accordingly
* applies any `ExDiceRoller.Filters` present in the arguments
* rounds and returns the final value
Note that all compiled functions outputted by Compiler accept both arguments
and options. Arguments are used exclusively for replacing variables with
values. Options affect the behavior of the anonymous functions and include
concepts such as exploding dice, choosing highest or lowest values, and more.
More information about the different types of expression compilers and their
function can be found in the individual `ExDiceRoller.Compiler.*` modules.
## Example
> parsed =
{{:operator, '+'},
{{:operator, '-'}, {:roll, 1, 4},
{{:operator, '/'}, {:roll, 3, 6}, 2}},
{:roll, {:roll, 1, 4},
{:roll, 1, 6}}}
> fun = ExDiceRoller.Compiler.compile(parsed)
#Function<1.51809653/1 in ExDiceRoller.Compiler.build_final_function/1>
> fun.([])
11
> ExDiceRoller.Compiler.fun_info(fun)
{#Function<0.102777967/1 in ExDiceRoller.Compilers.Math.compile_add/2>,
:"-compile_add/2-fun-1-",
[
{#Function<20.102777967/1 in ExDiceRoller.Compilers.Math.compile_sub/2>,
:"-compile_sub/2-fun-1-",
[
{#Function<3.31405244/1 in ExDiceRoller.Compilers.Roll.compile_roll/2>,
:"-compile_roll/2-fun-3-", [1, 4]},
{#Function<5.102777967/1 in ExDiceRoller.Compilers.Math.compile_div/2>,
:"-compile_div/2-fun-3-",
[
{#Function<3.31405244/1 in ExDiceRoller.Compilers.Roll.compile_roll/2>,
:"-compile_roll/2-fun-3-", [3, 6]},
2
]}
]},
{#Function<0.31405244/1 in ExDiceRoller.Compilers.Roll.compile_roll/2>,
:"-compile_roll/2-fun-0-",
[
{#Function<3.31405244/1 in ExDiceRoller.Compilers.Roll.compile_roll/2>,
:"-compile_roll/2-fun-3-", [1, 4]},
{#Function<3.31405244/1 in ExDiceRoller.Compilers.Roll.compile_roll/2>,
:"-compile_roll/2-fun-3-", [1, 6]}
]}
]}
"""
alias ExDiceRoller.{Args, Filters, Parser}
alias ExDiceRoller.Compilers.{Math, Roll, Separator, Variable}
@type compiled_val :: compiled_fun | calculated_val
@type compiled_fun :: (args -> calculated_val)
@type calculated_val :: number | list(number)
@type fun_info_tuple :: {function, atom, list(any)}
@type args :: Keyword.t()
@doc "Compiles the expression into a `t:compiled_val/0`."
@callback compile(Parser.expression()) :: compiled_val
@doc """
Compiles a provided `t:Parser.expression/0` into an anonymous function.
iex> expr = "1d2+x"
"1d2+x"
iex> {:ok, tokens} = ExDiceRoller.Tokenizer.tokenize(expr)
{:ok,
[
{:int, 1, '1'},
{:roll, 1, 'd'},
{:int, 1, '2'},
{:basic_operator, 1, '+'},
{:var, 1, 'x'}
]}
iex> {:ok, parsed} = ExDiceRoller.Parser.parse(tokens)
{:ok, {{:operator, '+'}, {:roll, 1, 2}, {:var, 'x'}}}
iex> fun = ExDiceRoller.Compiler.compile(parsed)
iex> fun.([x: 1, opts: [:explode]])
2
During calculation, float values are left as float for as long as possible.
If a compiled roll is invoked with a float as the number of dice or sides,
that value will be rounded to an integer. Finally, the return value is a
rounded integer. Rounding rules can be found at `Kernel.round/1`.
"""
@spec compile(Parser.expression()) :: compiled_val
def compile(expression) do
expression
|> delegate()
|> wrap_compiled_expression()
|> build_final_function()
end
@doc """
Delegates expression compilation to an appropriate module that implements
`ExDiceRoller.Compiler` behaviours.
"""
@spec delegate(Parser.expression()) :: compiled_val
def delegate({:roll, _, _} = expr), do: Roll.compile(expr)
def delegate({{:operator, _}, _, _} = expr), do: Math.compile(expr)
def delegate({:sep, _, _} = expr), do: Separator.compile(expr)
def delegate({:var, _} = var), do: Variable.compile(var)
def delegate(compiled_val) when is_number(compiled_val), do: compiled_val
@doc """
Shows the nested functions and relationships of a compiled function. The
structure of the fun_info result is `{<function>, <atom with name, arity, and
ordered function #>, [<recursive info about child functions>]}`.
> {:ok, fun} = ExDiceRoller.compile("1d8+(1-x)d(2*y)")
#=> {:ok, #Function<0.84780260/1 in ExDiceRoller.Compiler.compile_add/4>}
> ExDiceRoller.Compiler.fun_info fun
#=> {#Function<0.16543174/1 in ExDiceRoller.Compiler.compile_add/4>,
:"-compile_add/4-fun-0-",
[
{#Function<12.16543174/1 in ExDiceRoller.Compiler.compile_roll/4>,
:"-compile_roll/4-fun-3-", [1, 8]},
{#Function<9.16543174/1 in ExDiceRoller.Compiler.compile_roll/4>,
:"-compile_roll/4-fun-0-",
[
{#Function<15.16543174/1 in ExDiceRoller.Compiler.compile_sub/4>,
:"-compile_sub/4-fun-2-",
[
1,
{#Function<16.16543174/1 in ExDiceRoller.Compiler.compile_var/1>,
:"-compile_var/1-fun-0-", ['x']}
]},
{#Function<8.16543174/1 in ExDiceRoller.Compiler.compile_mul/4>,
:"-compile_mul/4-fun-2-",
[
2,
{#Function<16.16543174/1 in ExDiceRoller.Compiler.compile_var/1>,
:"-compile_var/1-fun-0-", ['y']}
]}
]}
]}
"""
@spec fun_info(compiled_fun) :: fun_info_tuple
def fun_info(fun) do
info = :erlang.fun_info(fun)
info[:env] |> hd() |> do_fun_info()
end
@doc "Performs rounding on both numbers and lists of numbers."
@spec round_val(calculated_val | float) :: calculated_val
def round_val([]), do: []
def round_val(val) when is_list(val), do: Enum.map(val, &round_val(&1))
def round_val(val) when is_number(val), do: Kernel.round(val)
@spec do_fun_info(function | number | charlist) :: function | number | charlist
defp do_fun_info(fun) when is_function(fun) do
info = :erlang.fun_info(fun)
{fun, info[:name],
info[:env]
|> Enum.reverse()
|> Enum.map(fn child ->
do_fun_info(child)
end)}
end
defp do_fun_info(num) when is_number(num), do: num
defp do_fun_info(str) when is_list(str), do: str
@spec wrap_compiled_expression(compiled_val) :: function
defp wrap_compiled_expression(compiled) do
case is_function(compiled) do
false -> fn _args -> compiled end
true -> compiled
end
end
@spec build_final_function(function) :: compiled_fun
defp build_final_function(compiled) do
fn args when is_list(args) ->
args = Args.sanitize(args)
{filters, args} = Args.get_filters(args)
args
|> compiled.()
|> round_val()
|> Filters.filter(filters)
end
end
end
|
lib/compiler.ex
| 0.876845
| 0.73035
|
compiler.ex
|
starcoder
|
defmodule Openpayex.Customers do
@moduledoc """
Functions for working with customers at Openpay. Through this API you can:
* create a customer
* get a customer
* delete a customer
* list customers
"""
alias Openpayex.OpenPay.OpenPayHelper
@doc """
Create a customer
## Example:
```
params = %{
"name": "<NAME>",
"email": "<EMAIL>",
"requires_account": false
}
iex> Openpayex.Customers.create(params)
{:ok,
%{
"address" => nil,
"clabe" => nil,
"creation_date" => "2019-06-18T18:34:32-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "a8ujqsvn9e642lhpa6uj",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => nil
}
}
```
"""
@spec create(map) :: {:ok, map}
def create(params) do
endpoint = "/#{_get_merchant_id()}/customers"
OpenPayHelper.http_request(:post, endpoint, params)
end
@doc """
Update a customer
## Example:
```
params = %{
"name" => "<NAME>",
"email" => "<EMAIL>",
"address" => %{
"city" =>"Queretaro",
"state" =>"Queretaro",
"line1" =>"Calle 10",
"postal_code" =>"76000",
"line2" =>"col. san pablo",
"line3" =>"entre la calle 1 y la 2",
"country_code" =>"MX"
},
"phone_number" => "44209087654"
}
customer_id = "a4wgoshubzmsjqujdsig"
iex> Openpayex.Customers.update(params, customer_id)
{:ok,
%{
"address" => %{
"city" => "Queretaro",
"country_code" => "MX",
"line1" => "Calle 10",
"line2" => "col. san pablo",
"line3" => "entre la calle 1 y la 2",
"postal_code" => "76000",
"state" => "Queretaro"
},
"clabe" => nil,
"creation_date" => "2019-06-18T18:18:14-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "a4wgoshubzmsjqujdsig",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => "44209087654"
}
}
```
"""
@spec update(map, String.t()) :: {:ok, map}
def update(params, customer_id) do
endpoint = "/#{_get_merchant_id()}/customers/#{customer_id}"
OpenPayHelper.http_request(:put, endpoint, params)
end
@doc """
Get a customer
## Example:
```
customer_id = "a4wgoshubzmsjqujdsig"
iex> Openpayex.Customers.get(customer_id)
{:ok,
%{
"address" => %{
"city" => "Queretaro",
"country_code" => "MX",
"line1" => "Calle 10",
"line2" => "col. san pablo",
"line3" => "entre la calle 1 y la 2",
"postal_code" => "76000",
"state" => "Queretaro"
},
"clabe" => nil,
"creation_date" => "2019-06-18T18:18:14-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "a4wgoshubzmsjqujdsig",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => "44209087654"
}
}
```
"""
@spec get(String.t()) :: {:ok, map}
def get(customer_id) do
endpoint = "/#{_get_merchant_id()}/customers/#{customer_id}"
OpenPayHelper.http_request(:get, endpoint)
end
@doc """
Delete a customer
## Example:
```
customer_id = "a4wgoshubzmsjqujdsig"
iex> Openpayex.Customers.delete(customer_id)
{:ok, ""}
```
"""
@spec delete(String.t()) :: {:ok, String.t()}
def delete(customer_id) do
endpoint = "/#{_get_merchant_id()}/customers/#{customer_id}"
OpenPayHelper.http_request(:delete, endpoint)
end
@doc """
List customers
## Example:
```
iex> Openpayex.Customers.list()
{:ok,
[
%{
"address" => nil,
"clabe" => nil,
"creation_date" => "2019-06-17T21:57:47-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "a6hcwapaceqdxgrmqvfo",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => nil
},
%{
"address" => nil,
"clabe" => nil,
"creation_date" => "2019-06-17T21:54:24-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "abzaacjwlfaig45bec3m",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => nil
}
]
}
iex> Openpayex.Customers.list(%{limit: 1})
{:ok,
[
%{
"address" => nil,
"clabe" => nil,
"creation_date" => "2019-06-17T21:57:47-05:00",
"email" => "<EMAIL>",
"external_id" => nil,
"id" => "a6hcwapaceqdxgrmqvfo",
"last_name" => nil,
"name" => "<NAME>",
"phone_number" => nil
}
]
}
```
"""
@spec list(map) :: {:ok, map}
def list(params \\ %{}) do
endpoint = "/#{_get_merchant_id()}/customers"
endpoint_with_filters = _add_query_params(endpoint, params)
OpenPayHelper.http_request(:get, endpoint_with_filters)
end
# Adds query params to endpoint
@spec _add_query_params(String.t(), map) :: String.t()
defp _add_query_params(url, params) do
query_params = URI.encode_query(params)
url
|> URI.parse()
|> Map.put(:query, query_params)
|> URI.to_string()
end
# Get a merchant id
@spec _get_merchant_id() :: String.t()
defp _get_merchant_id() do
Application.get_env(:openpayex, :merchant_id)
end
end
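# Configuration sketch (merchant id hypothetical):
#
#     # config/config.exs
#     config :openpayex, merchant_id: "mzdtln0bmtms6o3kck8f"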
|
lib/openpayex/customers.ex
| 0.802633
| 0.523238
|
customers.ex
|
starcoder
|
defmodule Search.Request do
@moduledoc """
Formats the HTTP search request params into an Elasticsearch query and executes the query on Elasticsearch.
"""
require Logger
import Destructure
alias Elastix.Search, as: ExSearch
alias Search.{Paginator, Response, Scope}
alias Plug.Conn
@default_sort %{"_score" => %{"order" => "desc"}}
@filter_template %{bool: %{must: [], must_not: [], should: []}}
@query_param "q"
defstruct filter: nil,
from: 0,
include: nil,
q: nil,
search_fields: nil,
size: 10,
sort: nil
@typedoc """
* `filter` - specifies how to filter the search request
* `from` - starting point for pagination
* `include` - comma separated list of related types to include in the response
* `q` - the query term
* `search_fields` - comma separated list of fields to search with the `q` term
* `size` - number of results for pagination
* `sort` - the direction for ordering a single attribute
"""
@type t :: %__MODULE__{
filter: bool_filter(),
from: non_neg_integer,
q: String.t() | nil,
include: list(String.t()) | nil,
search_fields: list(String.t()) | nil,
size: non_neg_integer,
sort: list(sort_t()) | nil
}
@typedoc """
Data structure defining how Elasticsearch sorts the results.
"""
@type sort_t :: %{required(String.t()) => %{required(String.t()) => String.t()}}
@typedoc """
Full bool filter format. The format is verbose but allows for flexibility in building filters.
"""
@type bool_filter :: %{
required(:bool) => %{
required(:must) => list(),
required(:must_not) => list(),
required(:should) => list()
}
}
@doc """
Handles the request parameters to build the query for the specific index and executes the query on Elasticsearch.
"""
@spec perform(index_name :: String.t(), params :: map, scope_module :: module, conn :: Conn.t()) ::
Response.resp()
def perform(index_name, params, scope_module, conn) do
search_request = params_to_struct(params, scope_module, conn)
payload = build_payload(search_request)
Logger.debug(fn ->
"Search Request payload => \n #{inspect(payload)}"
end)
Search.elasticsearch_url()
|> ExSearch.search(index_name, scope_module.types(), payload)
|> Response.perform(scope_module, conn, search_request)
end
@doc false
@spec filter_template() :: bool_filter()
def filter_template, do: @filter_template
@spec build_payload(%__MODULE__{}) :: map
defp build_payload(d(%__MODULE__{filter, from, size, sort, q: q})) when is_nil(q) or q == "" do
%{
from: from,
query: %{
bool: %{
filter: filter,
must: %{
match_all: %{}
}
}
},
size: size,
sort: sort
}
end
defp build_payload(d(%__MODULE__{filter, from, q, search_fields, size, sort})) do
%{
from: from,
query: %{
bool: %{
filter: filter,
must: %{
multi_match: %{fields: search_fields, query: q}
}
}
},
size: size,
sort: sort
}
end
@spec params_to_struct(params :: map, scope_module :: module, conn :: Conn.t()) :: __MODULE__.t()
defp params_to_struct(params, scope_module, conn) do
d(%{from, size}) = Paginator.page_to_offset(params)
search_fields =
params
|> Map.get("search_fields", nil)
|> verify_search_fields(scope_module)
search_filters =
params
|> Map.get("filter", %{})
|> verify_search_filters(scope_module, conn)
|> merge_filters()
include_types =
params
|> Map.get("include", "")
|> verify_include_types(scope_module)
sort =
params
|> Map.get("sort", nil)
|> verify_sort_fields(scope_module)
%__MODULE__{
filter: search_filters,
from: from,
q: Map.get(params, @query_param, nil),
size: size,
search_fields: search_fields,
include: include_types,
sort: sort
}
end
@doc false
@spec merge_filters([Scope.filter_body()]) :: bool_filter()
def merge_filters(filters) do
Enum.reduce(filters, @filter_template, fn filter, acc ->
updated_conds =
acc
|> Map.get(:bool)
|> Map.merge(filter, fn _k, acc_value, filter_value ->
filter_value ++ acc_value
end)
Map.put(acc, :bool, updated_conds)
end)
end
# whitelist or default the fields to apply a query search to
@spec verify_search_fields(search_fields :: String.t() | nil, scope_module :: module) :: list(String.t())
defp verify_search_fields(nil, scope_module) do
allowed_search_fields = scope_module.allowed_search_fields()
allowed_search_fields |> Map.values() |> List.flatten()
end
# parse the string of fields. get the full path of the attribute from the whitelist of attrs.
defp verify_search_fields(search_fields, scope_module) when is_binary(search_fields) do
allowed_search_fields = scope_module.allowed_search_fields()
search_fields
|> String.split(",")
|> Enum.flat_map(fn field ->
allowed_search_fields |> Map.get(field, []) |> List.wrap()
end)
end
# possible that the incorrect type is passed
defp verify_search_fields(_search_fields, scope_module) do
  verify_search_fields(nil, scope_module)
end
# possible that incorrect includes could be passed in
@spec verify_include_types(include_types :: String.t(), scope_module :: module()) :: list(String.t())
defp verify_include_types("", _scope_module), do: []
defp verify_include_types(include_types, scope_module) do
allowed_include_types =
scope_module.include_types_mapping()
|> Map.keys()
include_types
|> String.split(",")
|> Enum.filter(fn type_name -> Enum.member?(allowed_include_types, type_name) end)
end
@doc false
@spec verify_search_filters(
filters :: %{required(String.t()) => any},
scope_module :: module(),
conn :: Conn.t()
) :: list(Scope.filter_body())
def verify_search_filters(filters, scope_module, conn) do
allowed_search_filters = scope_module.allowed_search_filters()
filters
|> Enum.flat_map(fn {filter_name, filter_value} ->
if Enum.member?(allowed_search_filters, filter_name) do
parsed_value = filter_value |> to_string() |> String.downcase() |> String.split(",")
filter_name |> scope_module.filter(parsed_value, conn) |> List.wrap()
else
[]
end
end)
end
@spec verify_sort_fields(sort :: String.t() | nil, scope_module :: module) :: list(sort_t())
defp verify_sort_fields(nil, _scope_module), do: [@default_sort]
defp verify_sort_fields(sort, scope_module) when is_binary(sort) do
{parsed_sort, order} =
case String.split(sort, ~r{\A-}) do
[attr] -> {attr, "asc"}
["", attr] -> {attr, "desc"}
end
allowed_sort_fields = scope_module.allowed_sort_fields()
sort_field = Map.get(allowed_sort_fields, parsed_sort)
if sort_field do
[%{sort_field => %{"order" => order}}]
else
[@default_sort]
end
end
end
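# Usage sketch (index name, scope module, and params are hypothetical):
#
#     params = %{"q" => "beagle", "search_fields" => "name", "sort" => "-created_at"}
#     Search.Request.perform("pets", params, MyApp.PetSearchScope, conn)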
|
lib/elastic_jsonapi/request.ex
| 0.836371
| 0.418875
|
request.ex
|
starcoder
|
defmodule Oban.Telemetry do
@moduledoc """
Telemetry integration for event metrics, logging and error reporting.
### Initialization Events
Oban emits the following telemetry event when an Oban supervisor is started:
* `[:oban, :supervisor, :init]` - when the Oban supervisor is started this will execute
The initialization event contains the following measurements:
* `:system_time` - The system's time when Oban was started
The initialization event contains the following metadata:
* `:conf` - The configuration used for the Oban supervisor instance
* `:pid` - The PID of the supervisor instance
### Job Events
Oban emits the following telemetry events for each job:
* `[:oban, :job, :start]` — at the point a job is fetched from the database and will execute
* `[:oban, :job, :stop]` — after a job succeeds and the success is recorded in the database
* `[:oban, :job, :exception]` — after a job fails and the failure is recorded in the database
All job events share the same details about the job that was executed. In addition, failed jobs
provide the error type, the error itself, and the stacktrace. The following chart shows which
metadata you can expect for each event:
| event | measures | metadata |
| ------------ | -------------------------- | ------------------------------------------------- |
| `:start` | `:system_time` | `:job, :conf, :state` |
| `:stop` | `:duration`, `:queue_time` | `:job, :conf, :state, :result` |
| `:exception` | `:duration`, `:queue_time` | `:job, :conf, :state, :kind, :reason, :stacktrace` |
Metadata
* `:conf` — the config of the Oban supervised producer
* `:job` — the executing `Oban.Job`
* `:state` — one of `:success`, `:discard` or `:snoozed`
* `:result` — the `perform/1` return value, only included when the state is `:success`
For `:exception` events the metadata includes details about what caused the failure. The `:kind`
value is determined by how an error occurred. Here are the possible kinds:
* `:error` — from an `{:error, error}` return value. Some Erlang functions may also throw an
`:error` tuple, which will be reported as `:error`.
* `:exit` — from a caught process exit
* `:throw` — from a caught value, this doesn't necessarily mean that an error occurred and the
error value is unpredictable
### Producer Events
Oban emits the following telemetry span events for each queue's producer:
* `[:oban, :producer, :start | :stop | :exception]` — when a producer dispatches new jobs
| event | measures | metadata |
| ------------ | -------------- | --------------------------------------------- |
| `:start` | `:system_time` | `:queue, :conf` |
| `:stop` | `:duration` | `:queue, :conf, :dispatched_count` |
| `:exception` | `:duration`    | `:queue, :conf, :kind, :reason, :stacktrace` |
Metadata
* `:queue` — the name of the queue as a string, e.g. "default" or "mailers"
* `:conf` — the config of the Oban supervised producer
* `:dispatched_count` — the number of jobs fetched and started by the producer
### Engine Events
Oban emits telemetry span events for the following Engine operations:
* `[:oban, :engine, :init, :start | :stop | :exception]`
| event        | measures       | metadata           |
| ------------ | -------------- | ------------------ |
| `:start`     | `:system_time` | `:conf`, `:engine` |
| `:stop`      | `:duration`    | `:conf`, `:engine` |
| `:exception` | `:duration`    | `:conf`, `:engine` |
* `[:oban, :engine, :refresh, :start | :stop | :exception]`
| event        | measures       | metadata           |
| ------------ | -------------- | ------------------ |
| `:start`     | `:system_time` | `:conf`, `:engine` |
| `:stop`      | `:duration`    | `:conf`, `:engine` |
| `:exception` | `:duration`    | `:conf`, `:engine` |
* `[:oban, :engine, :put_meta, :start | :stop | :exception]`
| event        | measures       | metadata           |
| ------------ | -------------- | ------------------ |
| `:start`     | `:system_time` | `:conf`, `:engine` |
| `:stop`      | `:duration`    | `:conf`, `:engine` |
| `:exception` | `:duration`    | `:conf`, `:engine` |
* `[:oban, :engine, :fetch_jobs, :start | :stop | :exception]`
| event        | measures       | metadata           |
| ------------ | -------------- | ------------------ |
| `:start`     | `:system_time` | `:conf`, `:engine` |
| `:stop`      | `:duration`    | `:conf`, `:engine` |
| `:exception` | `:duration`    | `:conf`, `:engine` |
* `[:oban, :engine, :complete_job, :start | :stop | :exception]`
| event        | measures       | metadata                                                      |
| ------------ | -------------- | ------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`, `:job`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`, `:job`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:job`, `:kind`, `:reason`, `:stacktrace` |
* `[:oban, :engine, :discard_job, :start | :stop | :exception]`
| event        | measures       | metadata                                                      |
| ------------ | -------------- | ------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`, `:job`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`, `:job`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:job`, `:kind`, `:reason`, `:stacktrace` |
* `[:oban, :engine, :error_job, :start | :stop | :exception]`
| event        | measures       | metadata                                                      |
| ------------ | -------------- | ------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`, `:job`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`, `:job`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:job`, `:kind`, `:reason`, `:stacktrace` |
* `[:oban, :engine, :snooze_job, :start | :stop | :exception]`
| event        | measures       | metadata                                                      |
| ------------ | -------------- | ------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`, `:job`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`, `:job`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:job`, `:kind`, `:reason`, `:stacktrace` |
* `[:oban, :engine, :cancel_job, :start | :stop | :exception]`
| event        | measures       | metadata                                                      |
| ------------ | -------------- | ------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`, `:job`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`, `:job`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:job`, `:kind`, `:reason`, `:stacktrace` |
* `[:oban, :engine, :cancel_all_jobs, :start | :stop | :exception]`
| event        | measures       | metadata                                              |
| ------------ | -------------- | ----------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:engine`                                    |
| `:stop`      | `:duration`    | `:conf`, `:engine`                                    |
| `:exception` | `:duration`    | `:conf`, `:engine`, `:kind`, `:reason`, `:stacktrace` |
Metadata
* `:conf` — the config of the Oban supervised producer
* `:engine` — the module of the engine used
* `:job` - the `Oban.Job` in question
### Notifier Events
Oban emits a telemetry span event each time the Notifier is triggered:
* `[:oban, :notifier, :notify, :start | :stop | :exception]`
| event | measures | metadata |
| ------------ | -------------- | -------------------------------------------------------------- |
| `:start`     | `:system_time` | `:conf`, `:channel`, `:payload`                                     |
| `:stop`      | `:duration`    | `:conf`, `:channel`, `:payload`                                     |
| `:exception` | `:duration`    | `:conf`, `:channel`, `:payload`, `:kind`, `:reason`, `:stacktrace`  |
* `:conf` — the config of the Oban supervised producer
* `:channel` — the channel on which the notification was sent
* `:payload` - the payload that was sent
* `:kind`, `:reason`, `:stacktrace` (see the explanation above)
### Circuit Events
All processes that interact with the database have circuit breakers to prevent errors from
crashing the entire supervision tree. Processes emit a `[:oban, :circuit, :trip]` event when a
circuit is tripped and `[:oban, :circuit, :open]` when the breaker is subsequently opened again.
| event | measures | metadata |
| -------------------------- | -------- | ----------------------------------------------------- |
| `[:oban, :circuit, :trip]` | | `:kind, :reason, :message, :name, :stacktrace, :conf` |
| `[:oban, :circuit, :open]` | | `:name, :conf` |
Metadata
* `:kind` — the kind of error (see the explanation above)
* `:reason` — the error that tripped the circuit, see the error kinds breakdown above
* `:name` — the registered name of the process that tripped a circuit, i.e. `Oban.Notifier`
* `:message` — a formatted error message describing what went wrong
* `:stacktrace` — exception stacktrace, when available
* `:conf` — the config of the Oban supervisor that the producer is for
### Plugin Events
All the Oban plugins emit telemetry events under the `[:oban, :plugin, *]` pattern (where `*` is
either `:start`, `:stop`, or `:exception`). You can filter out for plugin events by looking into
the metadata of the event and checking the value of `:plugin`. The `:plugin` key will contain the
module name of the plugin module that emitted the event. For example, to get `Oban.Plugins.Cron`
specific events, you can filter for telemetry events with a metadata key/value of
`plugin: Oban.Plugins.Cron`.
Oban emits the following telemetry events whenever a plugin executes (check each
plugin's documentation for additional, plugin-specific metadata):
* `[:oban, :plugin, :start]` — when the plugin begins performing its work
* `[:oban, :plugin, :stop]` — after the plugin completes its work
* `[:oban, :plugin, :exception]` — when the plugin encounters an error
The following chart shows which metadata you can expect for each event:
| event | measures | metadata |
| ------------ | ---------------| --------------------------------------------- |
| `:start` | `:system_time` | `:conf, :plugin` |
| `:stop` | `:duration` | `:conf, :plugin` |
| `:exception` | `:duration` | `:kind, :reason, :stacktrace, :conf, :plugin` |
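A minimal handler sketch for plugin events (handler id hypothetical):

```elixir
:telemetry.attach(
  "plugin-inspector",
  [:oban, :plugin, :stop],
  fn _event, %{duration: duration}, %{plugin: plugin}, _config ->
    IO.inspect({plugin, duration}, label: "plugin run")
  end,
  nil
)
```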
## Default Logger
A default log handler that emits structured JSON is provided, see `attach_default_logger/0` for
usage. Otherwise, if you would prefer more control over logging or would like to instrument
events you can write your own handler.
Here is an example of the JSON output for the `job:stop` event:
```json
{
"args":{"action":"OK","ref":1},
"duration":4327295,
"event":"job:stop",
"queue":"alpha",
"queue_time":3127905,
"source":"oban",
"worker":"Oban.Integration.Worker"
}
```
All timing measurements are recorded as native time units but logged in microseconds.
## Examples
A handler that only logs a few details about failed jobs:
```elixir
defmodule MicroLogger do
require Logger
def handle_event([:oban, :job, :exception], %{duration: duration}, meta, nil) do
Logger.warn("[#\{meta.queue}] #\{meta.worker} failed in #\{duration}")
end
end
:telemetry.attach("oban-logger", [:oban, :job, :exception], &MicroLogger.handle_event/4, nil)
```
Another great use of execution data is error reporting. Here is an example of integrating with
[Honeybadger][honey], but only reporting jobs that have failed 3 times or more:
```elixir
defmodule ErrorReporter do
def handle_event([:oban, :job, :exception], _, %{attempt: attempt} = meta, _) do
if attempt >= 3 do
context = Map.take(meta, [:id, :args, :queue, :worker])
Honeybadger.notify(meta.reason, context, meta.stacktrace)
end
end
end
:telemetry.attach("oban-errors", [:oban, :job, :exception], &ErrorReporter.handle_event/4, [])
```
[honey]: https://honeybadger.io
"""
@moduledoc since: "0.4.0"
require Logger
@doc """
Attaches a default structured JSON Telemetry handler for logging.
This function attaches a handler that outputs logs with the following fields:
* `args` — a map of the job's raw arguments
* `duration` — the job's runtime duration, in microseconds
* `event` — the event name, such as `job:stop` or `job:exception`
* `queue` — the job's queue
* `source` — always "oban"
* `system_time` — when the job started, in microseconds
* `worker` — the job's worker module
## Examples
Attach a logger at the default `:info` level:
:ok = Oban.Telemetry.attach_default_logger()
Attach a logger at the `:debug` level:
:ok = Oban.Telemetry.attach_default_logger(:debug)
"""
@doc since: "0.4.0"
@spec attach_default_logger(Logger.level()) :: :ok | {:error, :already_exists}
def attach_default_logger(level \\ :info) do
events = [
[:oban, :job, :start],
[:oban, :job, :stop],
[:oban, :job, :exception],
[:oban, :circuit, :trip],
[:oban, :circuit, :open]
]
:telemetry.attach_many("oban-default-logger", events, &__MODULE__.handle_event/4, level)
end
@deprecated "Use the official :telemetry.span/3 instead"
@spec span(name :: atom(), fun :: (() -> term()), meta :: map()) :: term()
def span(name, fun, meta \\ %{}) when is_atom(name) and is_function(fun, 0) do
start_time = System.system_time()
start_mono = System.monotonic_time()
execute([:oban, name, :start], %{system_time: start_time}, meta)
try do
result = fun.()
execute([:oban, name, :stop], %{duration: duration(start_mono)}, meta)
result
catch
kind, reason ->
execute(
[:oban, name, :exception],
%{duration: duration(start_mono)},
Map.merge(meta, %{kind: kind, reason: reason, stacktrace: __STACKTRACE__})
)
:erlang.raise(kind, reason, __STACKTRACE__)
end
end
defp duration(start_mono), do: System.monotonic_time() - start_mono
@doc false
def execute(event_name, measurements, meta) do
:telemetry.execute(event_name, measurements, normalize_meta(meta))
end
defp normalize_meta(%{name: {:via, Registry, {Oban.Registry, {_pid, name}}}} = meta) do
name =
with {role, name} <- name do
Module.concat([
Oban.Queue,
Macro.camelize(to_string(name)),
Macro.camelize(to_string(role))
])
end
%{meta | name: name}
end
defp normalize_meta(meta), do: meta
@doc false
@spec handle_event([atom()], map(), map(), Logger.level()) :: :ok
def handle_event([:oban, :job, event], measure, meta, level) do
meta
|> Map.take([:args, :worker, :queue])
|> Map.merge(converted_measurements(measure))
|> log_message("job:#{event}", level)
end
def handle_event([:oban, :circuit, event], _measure, meta, level) do
meta
|> Map.take([:message, :name])
|> log_message("circuit:#{event}", level)
end
defp converted_measurements(measure) do
for {key, val} <- measure, key in [:duration, :queue_time], into: %{} do
{key, System.convert_time_unit(val, :native, :microsecond)}
end
end
defp log_message(message, event, level) do
Logger.log(level, fn ->
message
|> Map.put(:event, event)
|> Map.put(:source, "oban")
|> Jason.encode_to_iodata!()
end)
end
end
|
lib/oban/telemetry.ex
| 0.84412
| 0.821367
|
telemetry.ex
|
starcoder
|
defmodule Fountainedge do
@moduledoc """
A workflow engine that moves state tokens across a schema of nodes and edges,
with support for fork and join nodes that split and rejoin parallel branches.
"""
alias __MODULE__, as: Workflow
alias Fountainedge.{Schema, Edge, State, Node, Token, OutEdge}
@enforce_keys [:schema, :states]
defstruct schema: %Schema{nodes: [], edges: []}, states: []
def transition(%Workflow{} = workflow, %Edge{} = edge) do
  %Workflow{workflow | states: transition(workflow.states, workflow.schema, edge)}
end
defp transition(states, %Schema{} = schema, %Edge{} = edge) do
  edge = Edge.find(schema.edges, edge)
  node = Node.find(schema.nodes, edge.next)
  state = current_state(states, edge)
  next_state = %State{state | id: edge.next}
  states = [next_state | Enum.reject(states, fn s -> s == state end)]
  case node.type do
    :fork -> fork(states, schema, node, next_state)
    :join -> join(states, schema, node)
    _ -> states
  end
end
defp current_state(states, %Edge{} = edge) do
  Enum.find(states, fn state ->
    state.id == edge.id and Enum.find(state.tokens, fn token -> token.token == edge.next end)
  end) || Enum.find(states, fn state -> state.id == edge.id end)
end
defp fork(states, %Schema{} = schema, %Node{} = node, %State{} = next_state) do
  edges = Enum.filter(schema.edges, fn e -> e.id == node.id end)
  forked_states =
    Enum.reduce(edges, [], fn edge, acc ->
      token = %Token{id: edge.id, token: edge.next}
      tokens = [token | next_state.tokens]
      [%State{next_state | tokens: tokens} | acc]
    end)
  (Enum.reject(states, fn s -> s.id == next_state.id end) ++ forked_states)
  |> fork_transition(schema, edges)
end
defp fork_transition(states, %Schema{} = schema, [edge | edges]) do
states
|> transition(schema, edge)
|> fork_transition(schema, edges)
end
defp fork_transition(states, %Schema{} = _schema, []), do: states
defp join(states, %Schema{} = schema, %Node{} = node) do
  origin_node = Enum.find(schema.nodes, fn n -> n.join == node.id end)
  branches = Enum.count(schema.edges, fn e -> e.id == origin_node.id end)
  arrivals =
    Enum.filter(states, fn s ->
      s.id == node.id and Enum.any?(s.tokens, fn t -> t.id == origin_node.id end)
    end)
  if branches == Enum.count(arrivals) do
    states
    |> join_states(node, origin_node, arrivals)
    |> transition(schema, Edge.find(schema.edges, node.id))
  else
    states
  end
end
defp join_states(states, %Node{} = node, %Node{} = origin_node, arrivals) do
  tokens = Enum.uniq(join_tokens([], origin_node, arrivals))
  [%State{id: node.id, tokens: tokens} | states -- arrivals]
end
defp join_tokens(tokens, %Node{} = origin_node, [state | arrivals]) do
  (tokens ++ Enum.reject(state.tokens, fn t -> t.id == origin_node.id end))
  |> join_tokens(origin_node, arrivals)
end
defp join_tokens(tokens, %Node{} = _origin_node, []), do: tokens
def out_edges(%Workflow{} = workflow) do
gather_out_edges_state(workflow, [], workflow.states)
|> Enum.uniq()
end
defp gather_out_edges_state(%Workflow{} = workflow, out_edges, [state | states]) do
out_edges = out_edges(workflow, state) ++ out_edges
gather_out_edges_state(workflow, out_edges, states)
end
defp gather_out_edges_state(%Workflow{} = _workflow, out_edges, []), do: out_edges
def out_edges(%Workflow{} = workflow, %State{} = state) do
edges = Enum.filter(workflow.schema.edges, fn edge -> edge.id == state.id end)
gather_out_edges(workflow, [], edges)
end
defp gather_out_edges(%Workflow{} = workflow, out_edges, [edge | edges]) do
node = Node.find(workflow.schema.nodes, edge.id)
disabled = case node.type do
:join -> true
_ -> false
end
out_edge = %OutEdge{edge: edge, disabled: disabled}
gather_out_edges(workflow, [out_edge | out_edges], edges)
end
defp gather_out_edges(%Workflow{} = _workflow, out_edges, []), do: out_edges
end
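# A minimal linear sketch (ids hypothetical; nodes whose type is neither
# :fork nor :join pass straight through):
#
#     schema = %Schema{
#       nodes: [%Node{id: 1}, %Node{id: 2}],
#       edges: [%Edge{id: 1, next: 2}]
#     }
#     workflow = %Fountainedge{schema: schema, states: [%State{id: 1, tokens: []}]}
#     Fountainedge.transition(workflow, %Edge{id: 1, next: 2})
#     #=> %Fountainedge{schema: schema, states: [%State{id: 2, tokens: []}]}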
|
lib/fountainedge.ex
| 0.674479
| 0.710164
|
fountainedge.ex
|
starcoder
|
defmodule GenServer do
@moduledoc """
A behaviour module for implementing the server of a client-server relation.
A GenServer is a process as any other Elixir process and it can be used
to keep state, execute code asynchronously and so on. The advantage of using
a generic server process (GenServer) implemented using this module is that it
will have a standard set of interface functions and include functionality for
tracing and error reporting. It will also fit into a supervision tree.
## Example
The GenServer behaviour abstracts the common client-server interaction.
Developers are only required to implement the callbacks and functionality they are
interested in.
Let's start with a code example and then explore the available callbacks.
Imagine we want a GenServer that works like a stack, allowing us to push
and pop items:
defmodule Stack do
use GenServer
# Callbacks
def handle_call(:pop, _from, [h|t]) do
{:reply, h, t}
end
def handle_cast({:push, item}, state) do
{:noreply, [item|state]}
end
end
# Start the server
{:ok, pid} = GenServer.start_link(Stack, [:hello])
# This is the client
GenServer.call(pid, :pop)
#=> :hello
GenServer.cast(pid, {:push, :world})
#=> :ok
GenServer.call(pid, :pop)
#=> :world
We start our `Stack` by calling `start_link/3`, passing the module
with the server implementation and its initial argument (a list
representing the stack containing the item `:hello`). We can primarily
interact with the server by sending two types of messages. **call**
messages expect a reply from the server (and are therefore synchronous)
while **cast** messages do not.
Every time you do a `GenServer.call/3`, the client will send a message
that must be handled by the `handle_call/3` callback in the GenServer.
A `cast/2` message must be handled by `handle_cast/2`.
## Callbacks
There are 6 callbacks required to be implemented in a `GenServer`. By
adding `use GenServer` to your module, Elixir will automatically define
all 6 callbacks for you, leaving it up to you to implement the ones
you want to customize. The callbacks are:
* `init(args)` - invoked when the server is started.
It must return:
- `{:ok, state}`
- `{:ok, state, timeout}`
- `{:ok, state, :hibernate}`
- `:ignore`
- `{:stop, reason}`
* `handle_call(msg, {from, ref}, state)` - invoked to handle call (sync)
messages.
It must return:
- `{:reply, reply, new_state}`
- `{:reply, reply, new_state, timeout}`
- `{:reply, reply, new_state, :hibernate}`
- `{:noreply, new_state}`
- `{:noreply, new_state, timeout}`
- `{:noreply, new_state, :hibernate}`
- `{:stop, reason, new_state}`
- `{:stop, reason, reply, new_state}`
* `handle_cast(msg, state)` - invoked to handle cast (async) messages.
It must return:
- `{:noreply, new_state}`
- `{:noreply, new_state, timeout}`
- `{:noreply, new_state, :hibernate}`
- `{:stop, reason, new_state}`
* `handle_info(msg, state)` - invoked to handle all other messages which
are received by the process.
It must return:
- `{:noreply, state}`
- `{:noreply, state, timeout}`
- `{:stop, reason, state}`
* `terminate(reason, state)` - called when the server is about to
terminate, useful for cleaning up. It must return `:ok`.
* `code_change(old_vsn, state, extra)` - called when the application
code is being upgraded live (hot code swapping).
It must return:
- `{:ok, new_state}`
- `{:error, reason}`
## Name Registration
Both `start_link/3` and `start/3` support registering the `GenServer`
under a name on start via the `:name` option. Registered names are also
automatically cleaned up on termination. The supported values are:
* an atom - the GenServer is registered locally with the given name
using `Process.register/2`.
* `{:global, term}`- the GenServer is registered globally with the given
term using the functions in the `:global` module.
* `{:via, module, term}` - the GenServer is registered with the given
mechanism and name. The `:via` option expects a module name to control
the registration mechanism alongside a name which can be any term.
For example, we could start and register our Stack server locally as follows:
# Start the server and register it locally with name MyStack
{:ok, _} = GenServer.start_link(Stack, [:hello], name: MyStack)
# Now messages can be sent directly to MyStack
GenServer.call(MyStack, :pop) #=> :hello
Once the server is started, the remaining functions in this module (`call/3`,
`cast/2`, and friends) will also accept an atom, or any `:global` or `:via`
tuples. In general, the following formats are supported:
* a `pid`
* an `atom` if the server is locally registered
* `{atom, node}` if the server is locally registered at another node
* `{:global, term}` if the server is globally registered
* `{:via, module, name}` if the server is registered through an alternative
registry
## Client / Server APIs
Although in the example above we have used `GenServer.start_link/3` and
friends to directly start and communicate with the server, most of the
time we don't call the `GenServer` functions directly. Instead, we wrap
the calls in new functions representing the public API of the server.
Here is a better implementation of our Stack module:
defmodule Stack do
use GenServer
# Client
def start_link(default) do
GenServer.start_link(__MODULE__, default)
end
def push(pid, item) do
GenServer.cast(pid, {:push, item})
end
def pop(pid) do
GenServer.call(pid, :pop)
end
# Server (callbacks)
def handle_call(:pop, _from, [h|t]) do
{:reply, h, t}
end
def handle_call(request, from, state) do
# Call the default implementation from GenServer
super(request, from, state)
end
def handle_cast({:push, item}, state) do
{:noreply, [item|state]}
end
def handle_cast(request, state) do
super(request, state)
end
end
In practice, it is common to have both server and client functions in
the same module. If the server and/or client implementations are growing
complex, you may want to have them in different modules.
## Learn more
If you wish to find out more about gen servers, the Elixir Getting Started
guide provides a tutorial-like introduction. The documentation and links
in Erlang can also provide extra insight.
* http://elixir-lang.org/getting-started/mix-otp/genserver.html
* http://www.erlang.org/doc/man/gen_server.html
* http://www.erlang.org/doc/design_principles/gen_server_concepts.html
* http://learnyousomeerlang.com/clients-and-servers
"""
@typedoc "Return values of `start*` functions"
@type on_start :: {:ok, pid} | :ignore | {:error, {:already_started, pid} | term}
@typedoc "The GenServer name"
@type name :: atom | {:global, term} | {:via, module, term}
@typedoc "Options used by the `start*` functions"
@type options :: [debug: debug,
name: name,
timeout: timeout,
spawn_opt: Process.spawn_opt]
@typedoc "debug options supported by the `start*` functions"
@type debug :: [:trace | :log | :statistics | {:log_to_file, Path.t}]
@typedoc "The server reference"
@type server :: pid | name | {atom, node}
@doc false
defmacro __using__(_) do
quote location: :keep do
@behaviour :gen_server
@doc false
def init(args) do
{:ok, args}
end
@doc false
def handle_call(msg, _from, state) do
# We do this to trick Dialyzer into not complaining about non-local returns.
case :random.uniform(1) do
1 -> exit({:bad_call, msg})
2 -> {:noreply, state}
end
end
@doc false
def handle_info(_msg, state) do
{:noreply, state}
end
@doc false
def handle_cast(msg, state) do
# We do this to trick Dialyzer into not complaining about non-local returns.
case :random.uniform(1) do
1 -> exit({:bad_cast, msg})
2 -> {:noreply, state}
end
end
@doc false
def terminate(_reason, _state) do
:ok
end
@doc false
def code_change(_old, state, _extra) do
{:ok, state}
end
defoverridable [init: 1, handle_call: 3, handle_info: 2,
handle_cast: 2, terminate: 2, code_change: 3]
end
end
@doc """
Starts a `GenServer` process linked to the current process.
This is often used to start the `GenServer` as part of a supervision tree.
Once the server is started, it calls the `init/1` function in the given `module`
passing the given `args` to initialize it. To ensure a synchronized start-up
procedure, this function does not return until `init/1` has returned.
Note that a `GenServer` started with `start_link/3` is linked to the
parent process and will exit in case of crashes. The GenServer will also
exit due to `:normal` exit reasons if it is configured to trap exits
in the `init/1` callback.
## Options
The `:name` option is used for name registration as described in the module
documentation. If the `:timeout` option is present, the server is
allowed to spend the given number of milliseconds initializing, or it will be
terminated and the start function will return `{:error, :timeout}`.
If the `:debug` option is present, the corresponding function in the
[`:sys` module](http://www.erlang.org/doc/man/sys.html) will be invoked.
If the `:spawn_opt` option is present, its value will be passed as options
to the underlying process as in `Process.spawn/4`.
## Return values
If the server is successfully created and initialized, the function returns
`{:ok, pid}`, where pid is the pid of the server. If a process with the
specified server name already exists, the function returns
`{:error, {:already_started, pid}}` with the pid of that process.
If the `init/1` callback fails with `reason`, the function returns
`{:error, reason}`. Otherwise, if it returns `{:stop, reason}`
or `:ignore`, the process is terminated and the function returns
`{:error, reason}` or `:ignore`, respectively.
"""
@spec start_link(module, any, options) :: on_start
def start_link(module, args, options \\ []) when is_atom(module) and is_list(options) do
do_start(:link, module, args, options)
end
@doc """
Starts a `GenServer` process without links (outside of a supervision tree).
See `start_link/3` for more information.
"""
@spec start(module, any, options) :: on_start
def start(module, args, options \\ []) when is_atom(module) and is_list(options) do
do_start(:nolink, module, args, options)
end
defp do_start(link, module, args, options) do
case Keyword.pop(options, :name) do
{nil, opts} ->
:gen.start(:gen_server, link, module, args, opts)
{atom, opts} when is_atom(atom) ->
:gen.start(:gen_server, link, {:local, atom}, module, args, opts)
{other, opts} when is_tuple(other) ->
:gen.start(:gen_server, link, other, module, args, opts)
end
end
@doc """
Makes a synchronous call to the `server` and waits for its reply.
The client sends the given `request` to the server and waits until a reply
arrives or a timeout occurs. `handle_call/3` will be called on the server
to handle the request.
The server can be any of the values described in the `Name Registration`
section of the module documentation.
## Timeouts
The `timeout` is an integer greater than zero which specifies how many
milliseconds to wait for a reply, or the atom `:infinity` to wait
indefinitely. The default value is 5000. If no reply is received within
the specified time, the function call fails. If the caller catches the
failure and continues running, and the server is just late with the reply,
the reply may arrive at any later time in the caller's message queue. In
that case the caller must be prepared to discard such garbage messages,
which are two-element tuples with a reference as the first element.
"""
@spec call(server, term, timeout) :: term
def call(server, request, timeout \\ 5000) do
try do
:gen.call(server, :"$gen_call", request, timeout)
catch
:exit, reason ->
exit({reason, {__MODULE__, :call, [server, request, timeout]}})
else
{:ok, res} -> res
end
end
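# Usage sketch, assuming the illustrative `Stack` server above is running:
#
#     GenServer.call(pid, :pop)
#     #=> :hello
#
# An explicit timeout in milliseconds may be given as the third argument,
# e.g. `GenServer.call(pid, :pop, 10_000)`.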
@doc """
Sends an asynchronous request to the `server`.
This function returns `:ok` without waiting for the
destination `server` to handle the message. Therefore it
is unknown whether the destination `server` successfully
handled the message. If the `server` is an atom without
an associated process an `ArgumentError` is raised. In
all other cases the function returns `:ok` regardless of
whether the destination `server` (or node) exists. Note
that `{name, node()}` can be used when an exception is
not desired if no process is locally associated with the
atom `name`.
`handle_cast/2` will be called on the server to handle
the request. If the `server` is on a node which is
not yet connected to the caller's node, the call blocks
until a connection happens. This differs from
the behaviour of OTP's `:gen_server`, where the message
is sent by another process in this case, which could cause
messages to other nodes to arrive out of order.
"""
@spec cast(server, term) :: :ok
def cast(server, request)
def cast({:global, name}, request) do
try do
:global.send(name, cast_msg(request))
:ok
catch
_, _ -> :ok
end
end
def cast({:via, mod, name}, request) do
try do
mod.send(name, cast_msg(request))
:ok
catch
_, _ -> :ok
end
end
def cast({name, node}, request) when is_atom(name) and is_atom(node),
do: do_send({name, node}, cast_msg(request))
def cast(dest, request) when is_atom(dest) or is_pid(dest),
do: do_send(dest, cast_msg(request))
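# Usage sketch: `cast/2` returns `:ok` immediately, whether or not the
# message is ever handled (again assuming the illustrative `Stack` server):
#
#     GenServer.cast(pid, {:push, :world})
#     #=> :ok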
@doc """
Casts all servers locally registered as `name` at the specified nodes.
The function returns immediately and ignores nodes that do not exist, or where the
server name does not exist.
See `multi_call/4` for more information.
"""
@spec abcast([node], name :: atom, term) :: :abcast
def abcast(nodes \\ nodes(), name, request) when is_list(nodes) and is_atom(name) do
msg = cast_msg(request)
_ = for node <- nodes, do: do_send({name, node}, msg)
:abcast
end
defp cast_msg(req) do
{:"$gen_cast", req}
end
defp do_send(dest, msg) do
send(dest, msg)
:ok
end
@doc """
Calls all servers locally registered as `name` at the specified `nodes`.
The `request` is first sent to every node and then we wait for the
replies. This function returns a tuple whose first element is a list of
`{node, reply}` pairs and whose second element is the list of bad nodes.
A bad node is one that either did not exist, or where a server with the
given `name` did not exist or did not reply.
`nodes` is a list of node names to which the request is sent. The default
value is the list of all known nodes.
To prevent late answers (received after the timeout) from polluting the
caller's message queue, a middleman process is used to do the actual calls.
Late answers are then discarded when they arrive at a terminated process.
"""
@spec multi_call([node], name :: atom, term, timeout) ::
{replies :: [{node, term}], bad_nodes :: [node]}
def multi_call(nodes \\ nodes(), name, request, timeout \\ :infinity) do
:gen_server.multi_call(nodes, name, request, timeout)
end
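# Usage sketch (illustrative `name`): query the servers locally registered
# as `:stack` on all known nodes and collect the replies.
#
#     {replies, bad_nodes} = GenServer.multi_call(:stack, :pop)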
@doc """
Replies to a client.
This function can be used by a server to explicitly send a reply to a
client that called `call/3` or `multi_call/4` when the reply cannot be
defined in the return value of `handle_call/3`.
The `client` must be the `from` argument (the second argument) received
in `handle_call/3` callbacks. `reply` is an arbitrary term which will be
given back to the client as the return value of the call.
This function always returns `:ok`.
"""
@spec reply({pid, reference}, term) :: :ok
def reply(client, reply)
def reply({to, tag}, reply) do
try do
send(to, {tag, reply})
:ok
catch
_, _ -> :ok
end
end
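# A hedged sketch of a deferred reply: return `{:noreply, state}` from
# `handle_call/3`, keep the `from` tuple, and answer later with `reply/2`.
# `do_slow_work/0` is a placeholder for whatever computes the reply.
#
#     def handle_call(:slow_op, from, state) do
#       spawn(fn -> GenServer.reply(from, do_slow_work()) end)
#       {:noreply, state}
#     end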
@compile {:inline, [nodes: 0]}
defp nodes do
[node()|:erlang.nodes()]
end
end
# -- end of lib/elixir/lib/gen_server.ex --
defmodule AWS.Kinesis.Analytics do
@moduledoc """
"""
@doc """
Adds a streaming source to your Amazon Kinesis application. For conceptual
information, see [Configuring Application
Input](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
You can add a streaming source either when you create an application or you
can use this operation to add a streaming source after you create an
application. For more information, see `CreateApplication`.
Any configuration update, including adding a streaming source using this
operation, results in a new version of the application. You can use the
`DescribeApplication` operation to find the current application version.
This operation requires permissions to perform the
`kinesisanalytics:AddApplicationInput` action.
"""
def add_application_input(client, input, options \\ []) do
request(client, "AddApplicationInput", input, options)
end
@doc """
Adds an external destination to your Amazon Kinesis Analytics application.
If you want Amazon Kinesis Analytics to deliver data from an in-application
stream within your application to an external destination (such as an
Amazon Kinesis stream or a Firehose delivery stream), you add the relevant
configuration to your application using this operation. You can configure
one or more outputs for your application. Each output configuration maps an
in-application stream and an external destination.
You can use one of the output configurations to deliver data from your
in-application error stream to an external destination so that you can
analyze the errors. For conceptual information, see [Understanding
Application Output
(Destination)](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-output.html).
Note that any configuration update, including adding a streaming source
using this operation, results in a new version of the application. You can
use the `DescribeApplication` operation to find the current application
version.
For the limits on the number of application inputs and outputs you can
configure, see
[Limits](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html).
This operation requires permissions to perform the
`kinesisanalytics:AddApplicationOutput` action.
"""
def add_application_output(client, input, options \\ []) do
request(client, "AddApplicationOutput", input, options)
end
@doc """
Adds a reference data source to an existing application.
Amazon Kinesis Analytics reads reference data (that is, an Amazon S3
object) and creates an in-application table within your application. In the
request, you provide the source (S3 bucket name and object key name), name
of the in-application table to create, and the necessary mapping
information that describes how data in Amazon S3 object maps to columns in
the resulting in-application table.
For conceptual information, see [Configuring Application
Input](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
For the limits on data sources you can add to your application, see
[Limits](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/limits.html).
This operation requires permissions to perform the
`kinesisanalytics:AddApplicationReferenceDataSource` action.
"""
def add_application_reference_data_source(client, input, options \\ []) do
request(client, "AddApplicationReferenceDataSource", input, options)
end
@doc """
Creates an Amazon Kinesis Analytics application. You can configure each
application with one streaming source as input, application code to process
the input, and up to five streaming destinations where you want Amazon
Kinesis Analytics to write the output data from your application. For an
overview, see [How it
Works](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works.html).
In the input configuration, you map the streaming source to an
in-application stream, which you can think of as a constantly updating
table. In the mapping, you must provide a schema for the in-application
stream and map each data column in the in-application stream to a data
element in the streaming source, with the option of renaming, casting and
dropping columns as desired.
Your application code is one or more SQL statements that read input data,
transform it, and generate output. Your application code can create one or
more SQL artifacts like SQL streams or pumps.
In the output configuration, you can configure the application to write
data from in-application streams created in your applications to up to five
streaming destinations.
To read data from your source stream or write data to destination streams,
Amazon Kinesis Analytics needs your permissions. You grant these
permissions by creating IAM roles. This operation requires permissions to
perform the `kinesisanalytics:CreateApplication` action.
For introductory exercises to create an Amazon Kinesis Analytics
application, see [Getting
Started](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/getting-started.html).
"""
def create_application(client, input, options \\ []) do
request(client, "CreateApplication", input, options)
end
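# Hedged usage sketch: `request/4` below reads `:region`, `:endpoint`,
# `:proto`, and `:port` from the client map, and `AWS.Request.sign_v4/5`
# additionally needs credentials; building the client belongs to the
# surrounding AWS library. Input keys follow the AWS JSON API; all values
# here are placeholders.
#
#     input = %{"ApplicationName" => "my-app"}
#     case AWS.Kinesis.Analytics.create_application(client, input) do
#       {:ok, body, _response} -> body
#       {:error, {exception, message}} -> {exception, message}
#       {:error, %HTTPoison.Error{} = error} -> error
#     end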
@doc """
Deletes the specified application. Amazon Kinesis Analytics halts
application execution and deletes the application, including any
application artifacts (such as in-application streams, reference table, and
application code).
This operation requires permissions to perform the
`kinesisanalytics:DeleteApplication` action.
"""
def delete_application(client, input, options \\ []) do
request(client, "DeleteApplication", input, options)
end
@doc """
Deletes output destination configuration from your application
configuration. Amazon Kinesis Analytics will no longer write data from the
corresponding in-application stream to the external output destination.
This operation requires permissions to perform the
`kinesisanalytics:DeleteApplicationOutput` action.
"""
def delete_application_output(client, input, options \\ []) do
request(client, "DeleteApplicationOutput", input, options)
end
@doc """
Deletes a reference data source configuration from the specified
application configuration.
If the application is running, Amazon Kinesis Analytics immediately removes
the in-application table that you created using the
`AddApplicationReferenceDataSource` operation.
This operation requires permissions to perform the
`kinesisanalytics:DeleteApplicationReferenceDataSource` action.
"""
def delete_application_reference_data_source(client, input, options \\ []) do
request(client, "DeleteApplicationReferenceDataSource", input, options)
end
@doc """
Returns information about a specific Amazon Kinesis Analytics application.
If you want to retrieve a list of all applications in your account, use the
`ListApplications` operation.
This operation requires permissions to perform the
`kinesisanalytics:DescribeApplication` action. You can use
`DescribeApplication` to get the current application `versionId`, which you
need when calling other operations such as `UpdateApplication`.
"""
def describe_application(client, input, options \\ []) do
request(client, "DescribeApplication", input, options)
end
@doc """
Infers a schema by evaluating sample records on the specified streaming
source (Amazon Kinesis stream or Amazon Kinesis Firehose delivery stream).
In the response, the operation returns the inferred schema and also the
sample records that the operation used to infer the schema.
You can use the inferred schema when configuring a streaming source for
your application. For conceptual information, see [Configuring Application
Input](http://docs.aws.amazon.com/kinesisanalytics/latest/dev/how-it-works-input.html).
Note that when you create an application using the Amazon Kinesis Analytics
console, the console uses this operation to infer a schema and show it in
the console user interface.
This operation requires permissions to perform the
`kinesisanalytics:DiscoverInputSchema` action.
"""
def discover_input_schema(client, input, options \\ []) do
request(client, "DiscoverInputSchema", input, options)
end
@doc """
Returns a list of Amazon Kinesis Analytics applications in your account.
For each application, the response includes the application name, Amazon
Resource Name (ARN), and status. If the response returns the
`HasMoreApplications` value as true, you can send another request by adding
the `ExclusiveStartApplicationName` in the request body, and set the value
of this to the last application name from the previous response.
If you want detailed information about a specific application, use
`DescribeApplication`.
This operation requires permissions to perform the
`kinesisanalytics:ListApplications` action.
"""
def list_applications(client, input, options \\ []) do
request(client, "ListApplications", input, options)
end
@doc """
Starts the specified Amazon Kinesis Analytics application. After creating
an application, you must exclusively call this operation to start your
application.
After the application starts, it begins consuming the input data, processing
it, and writing the output to the configured destination.
The application status must be `READY` for you to start an application. You
can get the application status in the console or using the
`DescribeApplication` operation.
After you start the application, you can stop the application from
processing the input by calling the `StopApplication` operation.
This operation requires permissions to perform the
`kinesisanalytics:StartApplication` action.
"""
def start_application(client, input, options \\ []) do
request(client, "StartApplication", input, options)
end
@doc """
Stops the application from processing input data. You can stop an
application only if it is in the running state. You can use the
`DescribeApplication` operation to find the application state. After the
application is stopped, Amazon Kinesis Analytics stops reading data from
the input, the application stops processing data, and there is no output
written to the destination.
This operation requires permissions to perform the
`kinesisanalytics:StopApplication` action.
"""
def stop_application(client, input, options \\ []) do
request(client, "StopApplication", input, options)
end
@doc """
Updates an existing Kinesis Analytics application. Using this API, you can
update application code, input configuration, and output configuration.
Note that Kinesis Analytics updates the `CurrentApplicationVersionId` each
time you update your application.
This operation requires permissions to perform the
`kinesisanalytics:UpdateApplication` action.
"""
def update_application(client, input, options \\ []) do
request(client, "UpdateApplication", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "kinesisanalytics"}
host = get_host("kinesisanalytics", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "KinesisAnalytics_20150814.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# -- end of lib/aws/kinesis_analytics.ex --
defmodule Number.Phone do
@moduledoc """
Provides functions to convert numbers into formatted phone number strings.
"""
import Number.Macros, only: [is_blank: 1]
@doc """
Formats a number into a US phone number (e.g., (555) 123-9876). You can
customize the format in the options list.
## Parameters
* `number` - A float or integer to convert.
* `options` - A keyword list of options. See the documentation of all
available options below for more information.
## Options
* `:area_code` - Adds parentheses around the area code if `true`.
* `:delimiter` - Specifies the delimiter to use (defaults to “-”).
* `:extension` - Specifies an extension to add to the end of the generated number.
* `:country_code` - Sets the country code for the phone number.
Default config for these options can be specified in the `Number`
application configuration.
config :number, phone: [
area_code: false,
delimiter: "-",
extension: nil,
country_code: nil
]
## Examples
iex> Number.Phone.number_to_phone(nil)
nil
iex> Number.Phone.number_to_phone(5551234)
"555-1234"
iex> Number.Phone.number_to_phone("5551234")
"555-1234"
iex> Number.Phone.number_to_phone(1235551234)
"123-555-1234"
iex> Number.Phone.number_to_phone(1235551234, area_code: true)
"(123) 555-1234"
iex> Number.Phone.number_to_phone(1235551234, delimiter: " ")
"123 555 1234"
iex> Number.Phone.number_to_phone(1235551234, area_code: true, extension: 555)
"(123) 555-1234 x 555"
iex> Number.Phone.number_to_phone(1235551234, area_code: true, extension: 555, country_code: 1)
"+1 (123) 555-1234 x 555"
iex> Number.Phone.number_to_phone(1235551234, country_code: 1)
"+1-123-555-1234"
iex> Number.Phone.number_to_phone("123a456")
"123a456"
iex> Number.Phone.number_to_phone(1235551234, country_code: 1, extension: 1343, delimiter: ".")
"+1.123.555.1234 x 1343"
"""
@spec number_to_phone(number, list) :: String.t
def number_to_phone(number, options \\ [])
def number_to_phone(nil, _options), do: nil
def number_to_phone(number, options) do
options = Keyword.merge(config(), options)
number
|> to_string
|> delimit_number(options[:delimiter], options[:area_code])
|> prepend_country_code(options[:country_code], options[:delimiter], options[:area_code])
|> append_extension(options[:extension])
end
defp delimit_number(number, delimiter, area_code) when area_code == false do
{:ok, leading_delimiter} = "^#{Regex.escape(delimiter)}" |> Regex.compile
number
|> String.replace(~r/(\d{0,3})(\d{3})(\d{4})$/, "\\1#{delimiter}\\2#{delimiter}\\3")
|> String.replace(leading_delimiter, "")
end
defp delimit_number(number, delimiter, area_code) when area_code == true do
String.replace(number, ~r/(\d{1,3})(\d{3})(\d{4}$)/, "(\\1) \\2#{delimiter}\\3")
end
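# Worked examples of the two clauses above (illustrative):
#
#     delimit_number("5551234", "-", false)
#     #=> "555-1234"   (empty first capture; the leading "-" is stripped)
#
#     delimit_number("1235551234", "-", true)
#     #=> "(123) 555-1234"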
defp prepend_country_code(number, country_code, _, _) when is_blank(country_code), do: number
defp prepend_country_code(number, country_code, delimiter, area_code) do
if area_code, do: "+#{country_code} #{number}",
else: "+#{country_code}#{delimiter}#{number}"
end
defp append_extension(number, extension) when is_blank(extension), do: number
defp append_extension(number, extension) do
"#{number} x #{extension}"
end
defp config do
defaults = [
area_code: false,
delimiter: "-",
extension: nil,
country_code: nil
]
Keyword.merge(defaults, Application.get_env(:number, :phone, []))
end
end
# -- end of lib/number/phone.ex --
defmodule Saxon.Sax do
@moduledoc """
An overly simplified SAX parser, aimed at reducing the memory footprint when dealing with large text nodes.
Supports **only** the following events:
* `:start_document (state)`
* `:end_document (state, conn)`
* `:start_element (element_name, attributes, state)`
* `:end_element (element_name, state)`
* `:characters (chunk, state)`
Note that the `:ignorableWhitespace` event in xmerl or erlsom will be treated as `:characters`.
Even if there is only one piece of text in the element, the `:characters` event can be triggered multiple times,
each time feeding an HTML-entity-decoded chunk to the event handler.
Chunk sizes can vary hugely, so the handler should not make decisions based on the chunk size.
"""
def start(conn, reducer, opts \\ %{}) do
state = apply(reducer, :init, [])
state = apply(reducer, :start_document, [state])
continue(conn, reducer, "", state, opts)
end
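# A hedged reducer sketch (names are illustrative): the reducer module must
# implement the five events above plus `init/0`, which `start/3` uses to
# build the initial state. This one merely counts opened elements.
#
#     defmodule CountingReducer do
#       def init, do: 0
#       def start_document(state), do: state
#       def start_element(_name, _attributes, state), do: state + 1
#       def characters(_chunk, state), do: state
#       def end_element(_name, state), do: state
#       def end_document(state, conn), do: {:ok, state, conn}
#     end
#
#     Saxon.Sax.start(conn, CountingReducer, %{chunk_size: 64_000})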
defp continue(conn, reducer, tail, state, opts) do
case Plug.Conn.read_body(conn, length: opts[:chunk_size]) do
{status, chunk, conn} when status in [:ok, :more] ->
parse(tail <> chunk, reducer, conn, state, status == :more, opts)
other -> other
end
end
defp parse(<<"</", rest::binary>> = chunk, reducer, conn, state, has_more, opts) do
case split_before(rest, '>') do
{tag, <<">", rest::binary>>} ->
state = apply(reducer, :end_element, [tag, state])
parse(rest, reducer, conn, state, has_more, opts)
_broken_tag ->
continue(conn, reducer, chunk, state, opts)
end
end
defp parse(<<"<", rest::binary>> = chunk, reducer, conn, state, has_more, opts) do
case split_before(rest, '>') do
{tag, <<">", rest::binary>>} ->
{tag, attributes} = retrieve_attributes(tag)
state = apply(reducer, :start_element, [tag, attributes, state])
parse(rest, reducer, conn, state, has_more, opts)
_broken_tag ->
continue(conn, reducer, chunk, state, opts)
end
end
defp parse("" = chunk, reducer, conn, state, true = _has_more, opts) do
continue(conn, reducer, chunk, state, opts)
end
defp parse("", reducer, conn, state, false = _has_more, _opts) do
apply(reducer, :end_document, [state, conn])
end
defp parse(<<"&", _::binary>> = chunk, reducer, conn, state, has_more, opts) do
case split_after(chunk, ';') do
{html_entity, rest} ->
state = apply(reducer, :characters, [HtmlEntities.decode(html_entity), state])
parse(rest, reducer, conn, state, has_more, opts)
_broken_entity ->
continue(conn, reducer, chunk, state, opts)
end
end
defp parse(chunk, reducer, conn, state, has_more, opts) do
case split_before(chunk, '<&') do
{text, rest} ->
state = apply(reducer, :characters, [text, state])
parse(rest, reducer, conn, state, has_more, opts)
text ->
state = apply(reducer, :characters, [text, state])
continue(conn, reducer, "", state, opts)
end
end
defp retrieve_attributes(chunk) do
case split_before(chunk, ' ') do
{tag, ""} ->
{tag, %{}}
{tag, rest} ->
attributes = Regex.scan(~r/([a-z][\w-]+\w)="([^"]+)"/, IO.iodata_to_binary(rest))
|> Stream.map(fn[_, name, value] -> {name, HtmlEntities.decode(value)} end)
|> Enum.into(%{})
{tag, attributes}
tag ->
{tag, %{}}
end
end
@compile {:inline, split_after: 2}
defp split_after(str, chars) do
split_after(chars, str, [], str)
end
defp split_after(chars, <<c, rest::binary>>, acc, original) do
if c in chars do
{IO.iodata_to_binary([acc, c]), rest}
else
split_after(chars, rest, [acc, c], original)
end
end
defp split_after(_chars, "", _acc, original), do: original
@compile {:inline, split_before: 2}
defp split_before(str, chars) do
split_before(chars, str, [], str)
end
defp split_before(chars, <<c, rest::binary>> = tail, acc, original) do
if c in chars do
{IO.iodata_to_binary(acc), tail}
else
split_before(chars, rest, [acc, c], original)
end
end
defp split_before(_chars, "", _acc, original), do: original
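# Worked examples (illustrative): `split_before/2` keeps the matched
# character at the head of the tail, `split_after/2` consumes it, and both
# return the original string unchanged when no match is found.
#
#     split_before("abc<def", '<&')   #=> {"abc", "<def"}
#     split_after("&amp; rest", ';')  #=> {"&amp;", " rest"}
#     split_before("no match", '<&')  #=> "no match"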
end
# -- end of lib/sax/sax.ex --
defmodule AWS.ComprehendMedical do
@moduledoc """
Amazon Comprehend Medical extracts structured information from unstructured
clinical text. Use these actions to gain insight in your documents.
"""
@doc """
Gets the properties associated with a medical entities detection job. Use
this operation to get the status of a detection job.
"""
def describe_entities_detection_v2_job(client, input, options \\ []) do
request(client, "DescribeEntitiesDetectionV2Job", input, options)
end
@doc """
Gets the properties associated with an InferICD10CM job. Use this operation
to get the status of an inference job.
"""
def describe_i_c_d10_c_m_inference_job(client, input, options \\ []) do
request(client, "DescribeICD10CMInferenceJob", input, options)
end
@doc """
Gets the properties associated with a protected health information (PHI)
detection job. Use this operation to get the status of a detection job.
"""
def describe_p_h_i_detection_job(client, input, options \\ []) do
request(client, "DescribePHIDetectionJob", input, options)
end
@doc """
Gets the properties associated with an InferRxNorm job. Use this operation
to get the status of an inference job.
"""
def describe_rx_norm_inference_job(client, input, options \\ []) do
request(client, "DescribeRxNormInferenceJob", input, options)
end
@doc """
The `DetectEntities` operation is deprecated. You should use the
`DetectEntitiesV2` operation instead.
Inspects the clinical text for a variety of medical entities and returns
specific information about them such as entity category, location, and
confidence score on that information.
"""
def detect_entities(client, input, options \\ []) do
request(client, "DetectEntities", input, options)
end
@doc """
Inspects the clinical text for a variety of medical entities and returns
specific information about them such as entity category, location, and
confidence score on that information. Amazon Comprehend Medical only
detects medical entities in English language texts.
The `DetectEntitiesV2` operation replaces the `DetectEntities` operation.
This new action uses a different model for determining the entities in your
medical text and changes the way that some entities are returned in the
output. You should use the `DetectEntitiesV2` operation in all new
applications.
The `DetectEntitiesV2` operation returns the `Acuity` and `Direction`
entities as attributes instead of types.
"""
def detect_entities_v2(client, input, options \\ []) do
request(client, "DetectEntitiesV2", input, options)
end
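# Hedged usage sketch (placeholder client and text; `Text` is assumed to be
# the request field carrying the clinical text for this action):
#
#     input = %{"Text" => "Patient reports taking 20mg of lisinopril daily."}
#     {:ok, body, _response} =
#       AWS.ComprehendMedical.detect_entities_v2(client, input)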
@doc """
Inspects the clinical text for protected health information (PHI) entities
and returns the entity category, location, and confidence score for each
entity. Amazon Comprehend Medical only detects entities in English language
texts.
"""
def detect_p_h_i(client, input, options \\ []) do
request(client, "DetectPHI", input, options)
end
@doc """
InferICD10CM detects medical conditions as entities listed in a patient
record and links those entities to normalized concept identifiers in the
ICD-10-CM knowledge base from the Centers for Disease Control. Amazon
Comprehend Medical only detects medical entities in English language texts.
"""
def infer_i_c_d10_c_m(client, input, options \\ []) do
request(client, "InferICD10CM", input, options)
end
@doc """
InferRxNorm detects medications as entities listed in a patient record and
links to the normalized concept identifiers in the RxNorm database from the
National Library of Medicine. Amazon Comprehend Medical only detects
medical entities in English language texts.
"""
def infer_rx_norm(client, input, options \\ []) do
request(client, "InferRxNorm", input, options)
end
@doc """
Gets a list of medical entity detection jobs that you have submitted.
"""
def list_entities_detection_v2_jobs(client, input, options \\ []) do
request(client, "ListEntitiesDetectionV2Jobs", input, options)
end
@doc """
Gets a list of InferICD10CM jobs that you have submitted.
"""
def list_i_c_d10_c_m_inference_jobs(client, input, options \\ []) do
request(client, "ListICD10CMInferenceJobs", input, options)
end
@doc """
Gets a list of protected health information (PHI) detection jobs that you
have submitted.
"""
def list_p_h_i_detection_jobs(client, input, options \\ []) do
request(client, "ListPHIDetectionJobs", input, options)
end
@doc """
Gets a list of InferRxNorm jobs that you have submitted.
"""
def list_rx_norm_inference_jobs(client, input, options \\ []) do
request(client, "ListRxNormInferenceJobs", input, options)
end
@doc """
Starts an asynchronous medical entity detection job for a collection of
documents. Use the `DescribeEntitiesDetectionV2Job` operation to track the
status of a job.
"""
def start_entities_detection_v2_job(client, input, options \\ []) do
request(client, "StartEntitiesDetectionV2Job", input, options)
end
@doc """
Starts an asynchronous job to detect medical conditions and link them to
the ICD-10-CM ontology. Use the `DescribeICD10CMInferenceJob` operation to
track the status of a job.
"""
def start_i_c_d10_c_m_inference_job(client, input, options \\ []) do
request(client, "StartICD10CMInferenceJob", input, options)
end
@doc """
Starts an asynchronous job to detect protected health information (PHI).
Use the `DescribePHIDetectionJob` operation to track the status of a job.
"""
def start_p_h_i_detection_job(client, input, options \\ []) do
request(client, "StartPHIDetectionJob", input, options)
end
@doc """
Starts an asynchronous job to detect medication entities and link them to
the RxNorm ontology. Use the `DescribeRxNormInferenceJob` operation to
track the status of a job.
"""
def start_rx_norm_inference_job(client, input, options \\ []) do
request(client, "StartRxNormInferenceJob", input, options)
end
@doc """
Stops a medical entities detection job in progress.
"""
def stop_entities_detection_v2_job(client, input, options \\ []) do
request(client, "StopEntitiesDetectionV2Job", input, options)
end
@doc """
Stops an InferICD10CM inference job in progress.
"""
def stop_i_c_d10_c_m_inference_job(client, input, options \\ []) do
request(client, "StopICD10CMInferenceJob", input, options)
end
@doc """
Stops a protected health information (PHI) detection job in progress.
"""
def stop_p_h_i_detection_job(client, input, options \\ []) do
request(client, "StopPHIDetectionJob", input, options)
end
@doc """
Stops an InferRxNorm inference job in progress.
"""
def stop_rx_norm_inference_job(client, input, options \\ []) do
request(client, "StopRxNormInferenceJob", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "comprehendmedical"}
host = build_host("comprehendmedical", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ComprehendMedical_20181030.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# -- end of lib/aws/generated/comprehend_medical.ex --
defmodule Fares.FareInfo do
@moduledoc """
Retrieve saved fare data from the file system and map to structs.
"""
alias Fares.Fare
@doc "Load fare info from a CSV file."
@spec fare_info() :: [Fare.t()]
def fare_info() do
"priv/fares-july1.csv"
|> fare_data()
|> Enum.flat_map(&mapper/1)
|> Enum.concat(free_fare())
|> split_reduced_fares()
end
@spec mapper([String.t()]) :: [Fare.t()]
def mapper(["commuter", zone, single_trip, single_trip_reduced, monthly | _]) do
base = %Fare{
mode: :commuter_rail,
name: commuter_rail_fare_name(zone)
}
[
%{
base
| duration: :single_trip,
media: [:commuter_ticket, :cash, :mticket],
reduced: nil,
cents: dollars_to_cents(single_trip)
},
%{
base
| duration: :single_trip,
media: [:senior_card, :student_card],
reduced: :any,
cents: dollars_to_cents(single_trip_reduced)
},
%{
base
| duration: :round_trip,
media: [:commuter_ticket, :cash, :mticket],
reduced: nil,
cents: dollars_to_cents(single_trip) * 2
},
%{
base
| duration: :round_trip,
media: [:senior_card, :student_card],
reduced: :any,
cents: dollars_to_cents(single_trip_reduced) * 2
},
%{
base
| duration: :month,
media: [:commuter_ticket],
reduced: nil,
cents: dollars_to_cents(monthly),
additional_valid_modes: monthly_commuter_modes(zone)
},
%{
base
| duration: :month,
media: [:mticket],
reduced: nil,
cents: mticket_price(dollars_to_cents(monthly))
},
%{
base
| duration: :weekend,
media: [:commuter_ticket, :cash, :mticket],
reduced: nil,
cents: 1_000
}
]
end
def mapper([
"subway",
charlie_card_price,
day_reduced_price,
month_reduced_price,
day_pass_price,
week_pass_price,
month_pass_price | _
]) do
base = %Fare{
mode: :subway,
name: :subway
}
[
%{
base
| duration: :month,
media: [:charlie_card, :charlie_ticket],
reduced: nil,
cents: dollars_to_cents(month_pass_price),
additional_valid_modes: [:bus]
},
%{
base
| duration: :month,
media: [:senior_card, :student_card],
reduced: :any,
cents: dollars_to_cents(month_reduced_price),
additional_valid_modes: [:bus]
},
%{
base
| duration: :single_trip,
media: [:charlie_card, :charlie_ticket, :cash],
reduced: nil,
cents: dollars_to_cents(charlie_card_price),
additional_valid_modes: [:bus]
},
%{
base
| duration: :single_trip,
media: [:senior_card, :student_card],
reduced: :any,
cents: dollars_to_cents(day_reduced_price)
},
%{
base
| duration: :week,
media: [:charlie_card, :charlie_ticket],
reduced: nil,
cents: dollars_to_cents(week_pass_price),
additional_valid_modes: [:bus, :commuter_rail, :ferry]
},
%{
base
| duration: :day,
media: [:charlie_card, :charlie_ticket],
reduced: nil,
cents: dollars_to_cents(day_pass_price),
additional_valid_modes: [:bus, :commuter_rail, :ferry]
}
]
end
def mapper([
mode,
charlie_card_price,
day_reduced_price,
_month_reduced_price,
_day_pass_price,
_week_pass_price,
month_pass_price | _
])
when mode in ["local_bus", "express_bus"] do
base = %Fare{
mode: :bus,
name: :"#{mode}"
}
[
%{
base
| duration: :single_trip,
media: [:charlie_card, :charlie_ticket, :cash],
reduced: nil,
cents: dollars_to_cents(charlie_card_price)
},
%{
base
| duration: :single_trip,
media: [:senior_card, :student_card],
reduced: :any,
cents: dollars_to_cents(day_reduced_price)
},
%{
base
| duration: :month,
media: [:charlie_card, :charlie_ticket],
reduced: nil,
cents: dollars_to_cents(month_pass_price)
}
]
end
def mapper([
"ferry",
inner_harbor_price,
inner_harbor_month_price,
cross_harbor_price,
commuter_ferry_price,
commuter_ferry_month_price,
commuter_ferry_logan_price,
_day_pass_price,
_week_pass_price
]) do
fares = [
%Fare{
mode: :ferry,
name: :ferry_inner_harbor,
duration: :single_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(inner_harbor_price)
},
%Fare{
mode: :ferry,
name: :ferry_inner_harbor,
duration: :round_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(inner_harbor_price) * 2
},
%Fare{
mode: :ferry,
name: :ferry_inner_harbor,
duration: :month,
media: [:charlie_ticket],
reduced: nil,
cents: dollars_to_cents(inner_harbor_month_price),
additional_valid_modes: [:subway, :bus, :commuter_rail]
},
%Fare{
mode: :ferry,
name: :ferry_inner_harbor,
duration: :month,
media: [:mticket],
reduced: nil,
cents: dollars_to_cents(inner_harbor_month_price) - 1000
},
%Fare{
mode: :ferry,
name: :ferry_cross_harbor,
duration: :single_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(cross_harbor_price)
},
%Fare{
mode: :ferry,
name: :ferry_cross_harbor,
duration: :round_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(cross_harbor_price) * 2
},
%Fare{
mode: :ferry,
name: :commuter_ferry,
duration: :single_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_price)
},
%Fare{
mode: :ferry,
name: :commuter_ferry,
duration: :round_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_price) * 2
},
%Fare{
mode: :ferry,
name: :commuter_ferry_logan,
duration: :single_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_logan_price)
},
%Fare{
mode: :ferry,
name: :commuter_ferry_logan,
duration: :round_trip,
media: [:mticket, :paper_ferry, :cash],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_logan_price) * 2
},
%Fare{
mode: :ferry,
name: :commuter_ferry,
duration: :month,
media: [:charlie_ticket],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_month_price),
additional_valid_modes: [:subway, :bus, :commuter_rail]
},
%Fare{
mode: :ferry,
name: :commuter_ferry,
duration: :month,
media: [:mticket],
reduced: nil,
cents: dollars_to_cents(commuter_ferry_month_price) - 1000
}
]
reduced_fares =
fares
|> Enum.filter(&(&1.duration in [:single_trip, :round_trip]))
|> Enum.flat_map(fn fare ->
reduced_price = floor_to_ten_cents(fare.cents) / 2
[%{fare | cents: reduced_price, media: [:senior_card, :student_card], reduced: :any}]
end)
fares ++ reduced_fares
end
def mapper(["the_ride", ada_ride, premium_ride | _]) do
[
%Fare{
mode: :the_ride,
name: :ada_ride,
media: [:senior_card],
reduced: :senior_disabled,
duration: :single_trip,
cents: dollars_to_cents(ada_ride)
},
%Fare{
mode: :the_ride,
name: :premium_ride,
media: [:senior_card],
reduced: :senior_disabled,
duration: :single_trip,
cents: dollars_to_cents(premium_ride)
}
]
end
def mapper(["foxboro", round_trip | _]) do
[
%Fare{
mode: :commuter_rail,
name: :foxboro,
duration: :round_trip,
media: [:mticket, :special_event, :cash],
reduced: nil,
cents: dollars_to_cents(round_trip)
}
]
end
defp fare_data(filename) do
:fares
|> Application.app_dir()
|> Path.join(filename)
|> File.stream!()
|> CSV.decode!()
end
defp monthly_commuter_modes("interzone_" <> _) do
[:bus]
end
defp monthly_commuter_modes(_zone) do
[:subway, :bus, :ferry]
end
def mticket_price(monthly_price) when monthly_price > 1000 do
monthly_price - 1000
end
defp commuter_rail_fare_name(zone) do
case String.split(zone, "_") do
["zone", zone] -> {:zone, String.upcase(zone)}
["interzone", zone] -> {:interzone, String.upcase(zone)}
end
end
defp dollars_to_cents(dollars) do
dollars
|> String.to_float()
|> Kernel.*(100)
|> round
end
defp floor_to_ten_cents(fare), do: Float.floor(fare / 10) * 10
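# Worked examples (illustrative): prices arrive as dollar strings and are
# handled internally as integer cents; `floor_to_ten_cents/1` rounds a
# cent amount down to the nearest dime.
#
#     dollars_to_cents("2.40")  #=> 240
#     floor_to_ten_cents(245)   #=> 240.0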
# Student and Senior fare prices are always the same.
# For every generic reduced fare, add in two discrete
# fares by media type (senior_card and student_card).
@spec split_reduced_fares([Fare.t()]) :: [Fare.t()]
defp split_reduced_fares(fares) do
fares
|> Enum.filter(&match?(%{reduced: :any}, &1))
|> Enum.reduce(fares, &populate_reduced(&1, &2))
end
@spec populate_reduced(Fare.t(), [Fare.t()]) :: [Fare.t()]
defp populate_reduced(fare, fares) do
senior = %{fare | media: [:senior_card], reduced: :senior_disabled}
student = %{fare | media: [:student_card], reduced: :student}
[senior, student | fares]
end
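# Worked example (illustrative): a generic reduced fare such as
#
#     %{reduced: :any, media: [:senior_card, :student_card], ...}
#
# is expanded into a `reduced: :senior_disabled` fare on `[:senior_card]`
# and a `reduced: :student` fare on `[:student_card]`, both prepended to
# the accumulated list.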
# Special fare used only for inbound trips from the airport
@spec free_fare() :: [Fare.t()]
defp free_fare do
[
%Fare{
mode: :bus,
name: :free_fare,
duration: :single_trip,
media: [],
reduced: nil,
cents: dollars_to_cents("0.00")
}
]
end
@spec georges_island_ferry_fares() :: [Fare.t()]
def georges_island_ferry_fares do
base_fare = %Fare{
mode: :ferry,
name: :ferry_george,
duration: :round_trip,
media: [],
reduced: nil
}
[
%{base_fare | cents: dollars_to_cents("19.95"), price_label: "Adult"},
%{base_fare | cents: dollars_to_cents("12.95"), price_label: "Child"},
%{base_fare | cents: dollars_to_cents("0.0"), price_label: "Child under 3"},
%{
base_fare
| cents: dollars_to_cents("49.00"),
price_label: "Family 4-pack (2 adults, 2 children)"
},
%{base_fare | reduced: :student, cents: dollars_to_cents("14.95"), price_label: "Student"},
%{
base_fare
| reduced: :senior_disabled,
cents: dollars_to_cents("14.95"),
price_label: "Seniors"
},
%{
base_fare
| reduced: :senior_disabled,
cents: dollars_to_cents("14.95"),
price_label: "Military"
}
]
end
end
# -- end of apps/fares/lib/fare_info.ex --
defmodule AWS.ApplicationInsights do
@moduledoc """
Amazon CloudWatch Application Insights for .NET and SQL Server
Amazon CloudWatch Application Insights for .NET and SQL Server is a service that
helps you detect common problems with your .NET and SQL Server-based
applications.
It enables you to pinpoint the source of issues in your applications (built with
technologies such as Microsoft IIS, .NET, and Microsoft SQL Server), by
providing key insights into detected problems.
After you onboard your application, CloudWatch Application Insights for .NET and
SQL Server identifies, recommends, and sets up metrics and logs. It continuously
analyzes and correlates your metrics and logs for unusual behavior to surface
actionable problems with your application. For example, if your application is
slow and unresponsive, leading to HTTP 500 errors in your Application Load
Balancer (ALB), Application Insights informs you that a memory pressure problem
with your SQL Server database is occurring. It bases this analysis on impactful
metrics and log errors.
"""
@doc """
Adds an application that is created from a resource group.
"""
def create_application(client, input, options \\ []) do
request(client, "CreateApplication", input, options)
end
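# Hedged usage sketch (placeholder values; the client map comes from the
# surrounding AWS library and must carry region, endpoint, and credentials):
#
#     input = %{"ResourceGroupName" => "my-resource-group"}
#     {:ok, body, _response} =
#       AWS.ApplicationInsights.create_application(client, input)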
@doc """
Creates a custom component by grouping similar standalone instances to monitor.
"""
def create_component(client, input, options \\ []) do
request(client, "CreateComponent", input, options)
end
@doc """
Adds a log pattern to a `LogPatternSet`.
"""
def create_log_pattern(client, input, options \\ []) do
request(client, "CreateLogPattern", input, options)
end
@doc """
Removes the specified application from monitoring.
Does not delete the application.
"""
def delete_application(client, input, options \\ []) do
request(client, "DeleteApplication", input, options)
end
@doc """
Ungroups a custom component.
When you ungroup custom components, all applicable monitors that are set up for
the component are removed and the instances revert to their standalone status.
"""
def delete_component(client, input, options \\ []) do
request(client, "DeleteComponent", input, options)
end
@doc """
Removes the specified log pattern from a `LogPatternSet`.
"""
def delete_log_pattern(client, input, options \\ []) do
request(client, "DeleteLogPattern", input, options)
end
@doc """
Describes the application.
"""
def describe_application(client, input, options \\ []) do
request(client, "DescribeApplication", input, options)
end
@doc """
Describes a component and lists the resources that are grouped together in a
component.
"""
def describe_component(client, input, options \\ []) do
request(client, "DescribeComponent", input, options)
end
@doc """
Describes the monitoring configuration of the component.
"""
def describe_component_configuration(client, input, options \\ []) do
request(client, "DescribeComponentConfiguration", input, options)
end
@doc """
Describes the recommended monitoring configuration of the component.
"""
def describe_component_configuration_recommendation(client, input, options \\ []) do
request(client, "DescribeComponentConfigurationRecommendation", input, options)
end
@doc """
Describe a specific log pattern from a `LogPatternSet`.
"""
def describe_log_pattern(client, input, options \\ []) do
request(client, "DescribeLogPattern", input, options)
end
@doc """
Describes an anomaly or error with the application.
"""
def describe_observation(client, input, options \\ []) do
request(client, "DescribeObservation", input, options)
end
@doc """
Describes an application problem.
"""
def describe_problem(client, input, options \\ []) do
request(client, "DescribeProblem", input, options)
end
@doc """
Describes the anomalies or errors associated with the problem.
"""
def describe_problem_observations(client, input, options \\ []) do
request(client, "DescribeProblemObservations", input, options)
end
@doc """
Lists the IDs of the applications that you are monitoring.
"""
def list_applications(client, input, options \\ []) do
request(client, "ListApplications", input, options)
end
@doc """
Lists the auto-grouped, standalone, and custom components of the application.
"""
def list_components(client, input, options \\ []) do
request(client, "ListComponents", input, options)
end
@doc """
Lists the INFO, WARN, and ERROR events for periodic configuration updates
performed by Application Insights.
Examples of events represented are:
* INFO: creating a new alarm or updating an alarm threshold.
* WARN: alarm not created due to insufficient data points used to
predict thresholds.
* ERROR: alarm not created due to permission errors or exceeding
quotas.
"""
def list_configuration_history(client, input, options \\ []) do
request(client, "ListConfigurationHistory", input, options)
end
@doc """
Lists the log pattern sets in the specific application.
"""
def list_log_pattern_sets(client, input, options \\ []) do
request(client, "ListLogPatternSets", input, options)
end
@doc """
Lists the log patterns in the specific log `LogPatternSet`.
"""
def list_log_patterns(client, input, options \\ []) do
request(client, "ListLogPatterns", input, options)
end
@doc """
Lists the problems with your application.
"""
def list_problems(client, input, options \\ []) do
request(client, "ListProblems", input, options)
end
@doc """
Retrieve a list of the tags (keys and values) that are associated with a
specified application.
A *tag* is a label that you optionally define and associate with an application.
Each tag consists of a required *tag key* and an optional associated *tag
value*. A tag key is a general label that acts as a category for more specific
tag values. A tag value acts as a descriptor within a tag key.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Add one or more tags (keys and values) to a specified application.
A *tag* is a label that you optionally define and associate with an application.
Tags can help you categorize and manage applications in different ways, such as
by purpose, owner, environment, or other criteria.
Each tag consists of a required *tag key* and an associated *tag value*, both of
which you define. A tag key is a general label that acts as a category for more
specific tag values. A tag value acts as a descriptor within a tag key.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Remove one or more tags (keys and values) from a specified application.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates the application.
"""
def update_application(client, input, options \\ []) do
request(client, "UpdateApplication", input, options)
end
@doc """
Updates the custom component name and/or the list of resources that make up the
component.
"""
def update_component(client, input, options \\ []) do
request(client, "UpdateComponent", input, options)
end
@doc """
Updates the monitoring configurations for the component.
The configuration input parameter is an escaped JSON of the configuration and
should match the schema of what is returned by
`DescribeComponentConfigurationRecommendation`.
"""
def update_component_configuration(client, input, options \\ []) do
request(client, "UpdateComponentConfiguration", input, options)
end
@doc """
Adds a log pattern to a `LogPatternSet`.
"""
def update_log_pattern(client, input, options \\ []) do
request(client, "UpdateLogPattern", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "applicationinsights"}
host = build_host("applicationinsights", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "EC2WindowsBarleyService.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
# -- end of lib/aws/generated/application_insights.ex --
defmodule ExUnit.Formatter do
@moduledoc """
This module holds helper functions related to formatting and contains
documentation about the formatting protocol.
Formatters are registered at the `ExUnit.EventManager` event manager and
will be sent events by the runner.
The following events are possible:
* `{ :suite_started, opts }` - The suite has started with the specified
options to the runner.
* `{ :suite_finished, run_us, load_us }` - The suite has finished. `run_us` and
`load_us` are the run and load
times in microseconds respectively.
* `{ :case_started, test_case }` - A test case has started. See
`ExUnit.TestCase` for details.
* `{ :case_finished, test_case }` - A test case has finished. See
`ExUnit.TestCase` for details.
* `{ :test_started, test }` - A test has started. See
`ExUnit.Test` for details.
* `{ :test_finished, test }` - A test has finished. See
`ExUnit.Test` for details.
"""
@type id :: term
@type test_case :: ExUnit.TestCase.t
@type test :: ExUnit.Test.t
@type run_us :: pos_integer
@type load_us :: pos_integer | nil
import Exception, only: [format_stacktrace_entry: 1]
@doc """
Formats time taken running the test suite.
It receives the time spent running the tests and
optionally the time spent loading the test suite.
## Examples
iex> format_time(10000, nil)
"Finished in 0.01 seconds"
iex> format_time(10000, 20000)
"Finished in 0.03 seconds (0.02s on load, 0.01s on tests)"
iex> format_time(10000, 200000)
"Finished in 0.2 seconds (0.2s on load, 0.01s on tests)"
"""
@spec format_time(run_us, load_us) :: String.t
def format_time(run_us, nil) do
"Finished in #{run_us |> normalize_us |> format_us} seconds"
end
def format_time(run_us, load_us) do
run_us = run_us |> normalize_us
load_us = load_us |> normalize_us
ms = run_us + load_us
"Finished in #{format_us ms} seconds (#{format_us load_us}s on load, #{format_us run_us}s on tests)"
end
defp normalize_us(us) do
div(us, 10000)
end
defp format_us(us) do
if us < 10 do
"0.0#{us}"
else
us = div us, 10
"#{div(us, 10)}.#{rem(us, 10)}"
end
end
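# Worked example (illustrative): `normalize_us/1` converts microseconds to
# hundredths of a second, so 10_000 µs normalizes to 1 and formats as
# "0.01", while 2_000_000 µs normalizes to 200 and formats as "2.0".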
@doc """
Formats filters used to constrain cases to be run.
## Examples
iex> format_filters([run: true, slow: false], :include)
"Including tags: [run: true, slow: false]"
"""
@spec format_filters(Keyword.t, atom) :: String.t
def format_filters(filters, type) do
case type do
:include -> "Including tags: #{inspect filters}"
:exclude -> "Excluding tags: #{inspect filters}"
end
end
@doc ~S"""
Receives a test and formats its failure.
"""
def format_test_failure(test_case, test, { kind, reason, stacktrace }, counter, color) do
test_info("#{counter}) #{test} (#{inspect test_case})", color) <>
format_kind_reason(kind, reason, color) <>
format_stacktrace(stacktrace, test_case, test, color)
end
@doc """
Receives a test case and formats its failure.
"""
def format_test_case_failure(test_case, { kind, reason, stacktrace }, counter, color) do
test_case_info("#{counter}) #{inspect test_case}: ", color) <>
format_kind_reason(kind, reason, color) <>
format_stacktrace(stacktrace, test_case, nil, color)
end
defp format_kind_reason(:error, ExUnit.ExpectationError[] = record, color) do
prelude = String.downcase record.prelude
assertion = record.full_assertion
max = max(size(prelude), size(assertion))
error_info("** (ExUnit.ExpectationError)", color) <>
if desc = record.expr do
max = max(max, size("instead got"))
error_info(" #{pad(prelude, max)}: #{maybe_multiline(desc, max)}", color) <>
error_info(" #{pad(assertion, max)}: #{maybe_multiline(record.expected, max)}", color) <>
error_info(" #{pad("instead got", max)}: #{maybe_multiline(record.actual, max)}", color)
else
error_info(" #{pad(prelude, max)}: #{maybe_multiline(record.expected, max)}", color) <>
error_info(" #{pad(assertion, max)}: #{maybe_multiline(record.actual, max)}", color)
end
end
defp format_kind_reason(:error, exception, color) do
error_info "** (#{inspect exception.__record__(:name)}) #{exception.message}", color
end
defp format_kind_reason(kind, reason, color) do
error_info "** (#{kind}) #{inspect(reason)}", color
end
defp format_stacktrace([{ test_case, test, _, location }|_], test_case, test, color) do
location_info("at #{location[:file]}:#{location[:line]}", color)
end
defp format_stacktrace([], _case, _test, _color) do
""
end
defp format_stacktrace(stacktrace, _case, _test, color) do
location_info("stacktrace:", color) <>
Enum.map_join(stacktrace, fn(s) -> stacktrace_info format_stacktrace_entry(s), color end)
end
defp pad(binary, max) do
remaining = max - size(binary)
if remaining > 0 do
String.duplicate(" ", remaining) <> binary
else
binary
end
end
defp maybe_multiline(str, max) do
unless multiline?(str) do
String.strip(str)
else
"\n" <>
Enum.join((for line <- String.split(str, ~r/\n/), do: String.duplicate(" ", max) <> line ), "\n")
end
end
defp multiline?(<<>>), do: false
defp multiline?(<<?\n, _ :: binary>>) do
true
end
defp multiline?(<<_, rest :: binary>>) do
multiline?(rest)
end
defp test_case_info(msg, nil), do: " " <> msg <> "failure on setup_all/teardown_all callback, tests invalidated\n"
defp test_case_info(msg, color), do: test_case_info(color.(:test_case_info, msg), nil)
defp test_info(msg, nil), do: " " <> msg <> "\n"
defp test_info(msg, color), do: test_info(color.(:test_info, msg), nil)
defp error_info(msg, nil), do: " " <> msg <> "\n"
defp error_info(msg, color), do: error_info(color.(:error_info, msg), nil)
defp location_info(msg, nil), do: " " <> msg <> "\n"
defp location_info(msg, color), do: location_info(color.(:location_info, msg), nil)
defp stacktrace_info(msg, nil), do: " " <> msg <> "\n"
defp stacktrace_info(msg, color), do: stacktrace_info(color.(:stacktrace_info, msg), nil)
end
# -- end of lib/ex_unit/lib/ex_unit/formatter.ex --
defmodule Chopperbot.Split.InputTransformer do
alias Chopperbot.Split.Order
@doc """
Transform inputs to a list of orders.
## Examples
iex> transform(["turbo", "10", "kendo", "200"])
{:ok, [{"turbo", 10.0}, {"kendo", 200.0}]}
iex> transform(["ant", "200", "pipe", "100", "share", "-30"])
{:ok, [{"ant", 200.0}, {"pipe", 100.0}, {"share", -30.0}]}
iex> transform(["Satoshi", "10.9", "Takeshi", "390.13", "satoshi", "112.50"])
{:ok, [{"satoshi", 10.9}, {"takeshi", 390.13}, {"satoshi", 112.5}]}
iex> transform([])
{:ok, []}
iex> transform(["turbo", "ten", "kendo", "twenty"])
{:error, :invalid_input, ["ten", "twenty"]}
iex> transform(["turbo", "100", "kendo", "200", "chopper"])
{:error, :invalid_input, ["chopper"]}
iex> transform(["turbo", "ten", "kendo", "200", "chopper"])
{:error, :invalid_input, ["ten", "chopper"]}
"""
@spec transform([String.t()]) :: {:ok, [Order.t()]} | {:error, :invalid_input, [String.t()]}
def transform(inputs) do
input_pairs = Enum.chunk_every(inputs, 2)
case transform_to_orders(input_pairs) do
{orders, []} ->
{:ok, orders}
{_, invalid_inputs} ->
{:error, :invalid_input, invalid_inputs}
end
end
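  # Recursively walks the chunked input pairs, accumulating valid orders and
  # invalid inputs separately; both lists are built in reverse and flipped at
  # the end.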
defp transform_to_orders(input_pairs, orders \\ [], invalid_inputs \\ [])
defp transform_to_orders([input_pair | rest_input_pairs], orders, invalid_inputs) do
with {:ok, name, amount} <- validate_input_pair(input_pair),
{:ok, float_amount} <- validate_amount_string(amount) do
order = {String.downcase(name), float_amount}
transform_to_orders(
rest_input_pairs,
[order | orders],
invalid_inputs
)
else
{:error, invalid_input} ->
transform_to_orders(
rest_input_pairs,
orders,
[invalid_input | invalid_inputs]
)
end
end
defp transform_to_orders([], orders, invalid_inputs) do
{Enum.reverse(orders), Enum.reverse(invalid_inputs)}
end
defp validate_input_pair(input_pair) do
case input_pair do
[name, amount] -> {:ok, name, amount}
[invalid_input] -> {:error, invalid_input}
end
end
defp validate_amount_string(string) do
case Float.parse(string) do
{float_number, ""} -> {:ok, float_number}
_ -> {:error, string}
end
end
end
|
lib/chopperbot/split/parser/input_transformer.ex
| 0.84626
| 0.404743
|
input_transformer.ex
|
starcoder
|
defmodule AWS.Codeartifact do
@moduledoc """
AWS CodeArtifact is a fully managed artifact repository compatible with
language-native package managers and build tools such as npm, Apache Maven, and
pip.
You can use CodeArtifact to share packages with development teams and pull
packages. Packages can be pulled from both public and CodeArtifact repositories.
You can also create an upstream relationship between a CodeArtifact repository
and another repository, which effectively merges their contents from the point
of view of a package manager client.
## AWS CodeArtifact Components
Use the information in this guide to help you work with the following
CodeArtifact components:
* **Repository**: A CodeArtifact repository contains a set of
[package versions](https://docs.aws.amazon.com/codeartifact/latest/ug/welcome.html#welcome-concepts-package-version),
each of which maps to a set of assets, or files. Repositories are polyglot, so a
single repository can contain packages of any supported type. Each repository
exposes endpoints for fetching and publishing packages using tools like the
`npm` CLI, the Maven CLI (`mvn`), and `pip`.
* **Domain**: Repositories are aggregated into a higher-level entity
known as a *domain*. All package assets and metadata are stored in the domain,
but are consumed through repositories. A given package asset, such as a Maven
JAR file, is stored once per domain, no matter how many repositories it's
present in. All of the assets and metadata in a domain are encrypted with the
same customer master key (CMK) stored in AWS Key Management Service (AWS KMS).
Each repository is a member of a single domain and can't be moved to a different
domain.
The domain allows organizational policy to be applied across multiple
repositories, such as which accounts can access repositories in the domain, and
which public repositories can be used as sources of packages.
Although an organization can have multiple domains, we recommend a single
production domain that contains all published artifacts so that teams can find
and share packages across their organization.
* **Package**: A *package* is a bundle of software and the metadata
required to resolve dependencies and install the software. CodeArtifact supports
[npm](https://docs.aws.amazon.com/codeartifact/latest/ug/using-npm.html), [PyPI](https://docs.aws.amazon.com/codeartifact/latest/ug/using-python.html),
and [Maven](https://docs.aws.amazon.com/codeartifact/latest/ug/using-maven) package formats.
In CodeArtifact, a package consists of:
* A *name* (for example, `webpack` is the name of a
popular npm package)
* An optional namespace (for example, `@types` in
`@types/node`)
* A set of versions (for example, `1.0.0`, `1.0.1`,
`1.0.2`, etc.)
* Package-level metadata (for example, npm tags)
* **Package version**: A version of a package, such as `@types/node
12.6.9`. The version number format and semantics vary for different package
formats. For example, npm package versions must conform to the [Semantic
Versioning specification](https://semver.org/). In CodeArtifact, a package
version consists of the version identifier, metadata at the package version
level, and a set of assets.
* **Upstream repository**: One repository is *upstream* of another
when the package versions in it can be accessed from the repository endpoint of
the downstream repository, effectively merging the contents of the two
repositories from the point of view of a client. CodeArtifact allows creating an
upstream relationship between two repositories.
* **Asset**: An individual file stored in CodeArtifact associated
with a package version, such as an npm `.tgz` file or Maven POM and JAR files.
CodeArtifact supports these operations:
* `AssociateExternalConnection`: Adds an existing external
connection to a repository.
* `CopyPackageVersions`: Copies package versions from one repository
to another repository in the same domain.
* `CreateDomain`: Creates a domain.
* `CreateRepository`: Creates a CodeArtifact repository in a domain.
* `DeleteDomain`: Deletes a domain. You cannot delete a domain that
contains repositories.
* `DeleteDomainPermissionsPolicy`: Deletes the resource policy that
is set on a domain.
* `DeletePackageVersions`: Deletes versions of a package. After a
package has been deleted, it can be republished, but its assets and metadata
cannot be restored because they have been permanently removed from storage.
* `DeleteRepository`: Deletes a repository.
* `DeleteRepositoryPermissionsPolicy`: Deletes the resource policy
that is set on a repository.
* `DescribeDomain`: Returns a `DomainDescription` object that
contains information about the requested domain.
* `DescribePackageVersion`: Returns a
[PackageVersionDescription](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html)
object that contains details about a package version.
* `DescribeRepository`: Returns a `RepositoryDescription` object
that contains detailed information about the requested repository.
* `DisposePackageVersions`: Disposes versions of a package. A
package version with the status `Disposed` cannot be restored because its
assets have been permanently removed from storage.
* `DisassociateExternalConnection`: Removes an existing external
connection from a repository.
* `GetAuthorizationToken`: Generates a temporary authorization token
for accessing repositories in the domain. The token expires after the
authorization period has passed. The default authorization period is 12 hours
and can be customized to any length, up to a maximum of 12 hours.
* `GetDomainPermissionsPolicy`: Returns the policy of a resource
that is attached to the specified domain.
* `GetPackageVersionAsset`: Returns the contents of an asset that is
in a package version.
* `GetPackageVersionReadme`: Gets the readme file or descriptive
text for a package version.
* `GetRepositoryEndpoint`: Returns the endpoint of a repository for
a specific package format. A repository has one endpoint for each package
format:
* `npm`
* `pypi`
* `maven`
* `GetRepositoryPermissionsPolicy`: Returns the resource policy that
is set on a repository.
* `ListDomains`: Returns a list of `DomainSummary` objects. Each
returned `DomainSummary` object contains information about a domain.
* `ListPackages`: Lists the packages in a repository.
* `ListPackageVersionAssets`: Lists the assets for a given package
version.
* `ListPackageVersionDependencies`: Returns a list of the direct
dependencies for a package version.
* `ListPackageVersions`: Returns a list of package versions for a
specified package in a repository.
* `ListRepositories`: Returns a list of repositories owned by the
AWS account that called this method.
* `ListRepositoriesInDomain`: Returns a list of the repositories in
a domain.
* `PutDomainPermissionsPolicy`: Attaches a resource policy to a
domain.
* `PutRepositoryPermissionsPolicy`: Sets the resource policy on a
repository that specifies permissions to access it.
* `UpdatePackageVersionsStatus`: Updates the status of one or more
versions of a package.
* `UpdateRepository`: Updates the properties of a repository.
"""
alias AWS.Client
alias AWS.Request
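  # A minimal end-to-end sketch (hypothetical names; assumes a client built
  # with aws-elixir's `AWS.Client.create/3`):
  #
  #     client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
  #
  #     {:ok, _domain, _resp} =
  #       AWS.Codeartifact.create_domain(client, %{"domain" => "my-domain"})
  #
  #     {:ok, _repo, _resp} =
  #       AWS.Codeartifact.create_repository(client, %{
  #         "domain" => "my-domain",
  #         "repository" => "my-repo"
  #       })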
def metadata do
%AWS.ServiceMetadata{
abbreviation: nil,
api_version: "2018-09-22",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "codeartifact",
global?: false,
protocol: "rest-json",
service_id: "codeartifact",
signature_version: "v4",
signing_name: "codeartifact",
target_prefix: nil
}
end
@doc """
Adds an existing external connection to a repository.
One external connection is allowed per repository.
A repository can have one or more upstream repositories, or an external
connection.
"""
def associate_external_connection(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository/external-connection"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"externalConnection", "external-connection"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Copies package versions from one repository to another repository in the same
domain.
You must specify `versions` or `versionRevisions`. You cannot specify both.
"""
def copy_package_versions(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/versions/copy"
headers = []
{query_params, input} =
[
{"destinationRepository", "destination-repository"},
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"namespace", "namespace"},
{"package", "package"},
{"sourceRepository", "source-repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a domain.
CodeArtifact *domains* make it easier to manage multiple repositories across an
organization. You can use a domain to apply permissions across many repositories
owned by different AWS accounts. An asset is stored only once in a domain, even
if it's in multiple repositories.
Although you can have multiple domains, we recommend a single production domain
that contains all published artifacts so that your development teams can find
and share packages. You can use a second pre-production domain to test changes
to the production domain configuration.
"""
def create_domain(%Client{} = client, input, options \\ []) do
url_path = "/v1/domain"
headers = []
{query_params, input} =
[
{"domain", "domain"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Creates a repository.
"""
def create_repository(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a domain.
You cannot delete a domain that contains repositories. If you want to delete a
domain with repositories, first delete its repositories.
"""
def delete_domain(%Client{} = client, input, options \\ []) do
url_path = "/v1/domain"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the resource policy set on a domain.
"""
def delete_domain_permissions_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/domain/permissions/policy"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"policyRevision", "policy-revision"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes one or more versions of a package.
A deleted package version cannot be restored in your repository. If you want to
remove a package version from your repository and be able to restore it later,
set its status to `Archived`. Archived packages cannot be downloaded from a
repository and don't show up with list package APIs (for example,
[ListPackageVersions](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_ListPackageVersions.html)), but you can restore them using
[UpdatePackageVersionsStatus](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_UpdatePackageVersionsStatus.html).
"""
def delete_package_versions(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/versions/delete"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"namespace", "namespace"},
{"package", "package"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes a repository.
"""
def delete_repository(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the resource policy that is set on a repository.
After a resource policy is deleted, the permissions allowed and denied by the
deleted policy are removed. The effect of deleting a resource policy might not
be immediate.
Use `DeleteRepositoryPermissionsPolicy` with caution. After a policy is deleted,
AWS users, roles, and accounts lose permissions to perform the repository
actions granted by the deleted policy.
"""
def delete_repository_permissions_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository/permissions/policies"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"policyRevision", "policy-revision"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a [ `DomainDescription`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_DomainDescription.html)
object that contains information about the requested domain.
"""
def describe_domain(%Client{} = client, domain, domain_owner \\ nil, options \\ []) do
url_path = "/v1/domain"
headers = []
query_params = []
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
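  # Sketch: the read-only calls take positional arguments rather than an input
  # map. Reusing the hypothetical `client` from the sketch above:
  #
  #     {:ok, %{"domain" => description}, _resp} =
  #       AWS.Codeartifact.describe_domain(client, "my-domain")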
@doc """
Returns a [ `PackageVersionDescription`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html)
object that contains information about the requested package version.
"""
def describe_package_version(
%Client{} = client,
domain,
domain_owner \\ nil,
format,
namespace \\ nil,
package,
package_version,
repository,
options \\ []
) do
url_path = "/v1/package/version"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(package_version) do
[{"version", package_version} | query_params]
else
query_params
end
query_params =
if !is_nil(package) do
[{"package", package} | query_params]
else
query_params
end
query_params =
if !is_nil(namespace) do
[{"namespace", namespace} | query_params]
else
query_params
end
query_params =
if !is_nil(format) do
[{"format", format} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a `RepositoryDescription` object that contains detailed information
about the requested repository.
"""
def describe_repository(
%Client{} = client,
domain,
domain_owner \\ nil,
repository,
options \\ []
) do
url_path = "/v1/repository"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Removes an existing external connection from a repository.
"""
def disassociate_external_connection(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository/external-connection"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"externalConnection", "external-connection"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:delete,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Deletes the assets in package versions and sets the package versions' status to
`Disposed`.
A disposed package version cannot be restored in your repository because its
assets are deleted.
To view all disposed package versions in a repository, use [
`ListPackageVersions`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_ListPackageVersions.html)
and set the [ `status`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_ListPackageVersions.html#API_ListPackageVersions_RequestSyntax)
parameter to `Disposed`.
To view information about a disposed package version, use [
`DescribePackageVersion`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_DescribePackageVersion.html).
"""
def dispose_package_versions(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/versions/dispose"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"namespace", "namespace"},
{"package", "package"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
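  # Sketch of a dispose call (hypothetical names): the keys listed above become
  # query parameters, while the rest of the input, such as `versions`, travels
  # in the request body:
  #
  #     AWS.Codeartifact.dispose_package_versions(client, %{
  #       "domain" => "my-domain",
  #       "repository" => "my-repo",
  #       "format" => "npm",
  #       "package" => "my-package",
  #       "versions" => ["1.0.0"]
  #     })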
@doc """
Generates a temporary authorization token for accessing repositories in the
domain.
This API requires the `codeartifact:GetAuthorizationToken` and
`sts:GetServiceBearerToken` permissions. For more information about
authorization tokens, see [AWS CodeArtifact authentication and tokens](https://docs.aws.amazon.com/codeartifact/latest/ug/tokens-authentication.html).
CodeArtifact authorization tokens are valid for a period of 12 hours when
created with the `login` command. You can call `login` periodically to refresh
the token. When you create an authorization token with the
`GetAuthorizationToken` API, you can set a custom authorization period, up to a
maximum of 12 hours, with the `durationSeconds` parameter.
The authorization period begins after `login` or `GetAuthorizationToken` is
called. If `login` or `GetAuthorizationToken` is called while assuming a role,
the token lifetime is independent of the maximum session duration of the role.
For example, if you call `sts assume-role` and specify a session duration of 15
minutes, then generate a CodeArtifact authorization token, the token will be
valid for the full authorization period even though this is longer than the
15-minute session duration.
See [Using IAM Roles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) for
more information on controlling session duration.
"""
def get_authorization_token(%Client{} = client, input, options \\ []) do
url_path = "/v1/authorization-token"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"durationSeconds", "duration"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
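  # Sketch: requesting a one-hour token (hypothetical domain name; note that
  # `Request.build_params/2` moves `durationSeconds` into the `duration` query
  # parameter):
  #
  #     {:ok, %{"authorizationToken" => token}, _resp} =
  #       AWS.Codeartifact.get_authorization_token(client, %{
  #         "domain" => "my-domain",
  #         "durationSeconds" => 3600
  #       })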
@doc """
Returns the resource policy attached to the specified domain.
The policy is a resource-based policy, not an identity-based policy. For more
information, see [Identity-based policies and resource-based policies
](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html)
in the *AWS Identity and Access Management User Guide*.
"""
def get_domain_permissions_policy(
%Client{} = client,
domain,
domain_owner \\ nil,
options \\ []
) do
url_path = "/v1/domain/permissions/policy"
headers = []
query_params = []
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns an asset (or file) that is in a package.
For example, for a Maven package version, use `GetPackageVersionAsset` to
download a `JAR` file, a `POM` file, or any other assets in the package version.
"""
def get_package_version_asset(
%Client{} = client,
asset,
domain,
domain_owner \\ nil,
format,
namespace \\ nil,
package,
package_version,
package_version_revision \\ nil,
repository,
options \\ []
) do
url_path = "/v1/package/version/asset"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(package_version_revision) do
[{"revision", package_version_revision} | query_params]
else
query_params
end
query_params =
if !is_nil(package_version) do
[{"version", package_version} | query_params]
else
query_params
end
query_params =
if !is_nil(package) do
[{"package", package} | query_params]
else
query_params
end
query_params =
if !is_nil(namespace) do
[{"namespace", namespace} | query_params]
else
query_params
end
query_params =
if !is_nil(format) do
[{"format", format} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
query_params =
if !is_nil(asset) do
[{"asset", asset} | query_params]
else
query_params
end
options =
Keyword.put(
options,
:response_header_parameters,
[
{"X-AssetName", "assetName"},
{"X-PackageVersion", "packageVersion"},
{"X-PackageVersionRevision", "packageVersionRevision"}
]
)
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Gets the readme file or descriptive text for a package version.
For packages that do not contain a readme file, CodeArtifact extracts a
description from a metadata file. For example, from the `<description>` element
in the `pom.xml` file of a Maven package.
The returned text might contain formatting. For example, it might contain
formatting for Markdown or reStructuredText.
"""
def get_package_version_readme(
%Client{} = client,
domain,
domain_owner \\ nil,
format,
namespace \\ nil,
package,
package_version,
repository,
options \\ []
) do
url_path = "/v1/package/version/readme"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(package_version) do
[{"version", package_version} | query_params]
else
query_params
end
query_params =
if !is_nil(package) do
[{"package", package} | query_params]
else
query_params
end
query_params =
if !is_nil(namespace) do
[{"namespace", namespace} | query_params]
else
query_params
end
query_params =
if !is_nil(format) do
[{"format", format} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns the endpoint of a repository for a specific package format.
A repository has one endpoint for each package format:
* `npm`
* `pypi`
* `maven`
"""
def get_repository_endpoint(
%Client{} = client,
domain,
domain_owner \\ nil,
format,
repository,
options \\ []
) do
url_path = "/v1/repository/endpoint"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(format) do
[{"format", format} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns the resource policy that is set on a repository.
"""
def get_repository_permissions_policy(
%Client{} = client,
domain,
domain_owner \\ nil,
repository,
options \\ []
) do
url_path = "/v1/repository/permissions/policy"
headers = []
query_params = []
query_params =
if !is_nil(repository) do
[{"repository", repository} | query_params]
else
query_params
end
query_params =
if !is_nil(domain_owner) do
[{"domain-owner", domain_owner} | query_params]
else
query_params
end
query_params =
if !is_nil(domain) do
[{"domain", domain} | query_params]
else
query_params
end
Request.request_rest(
client,
metadata(),
:get,
url_path,
query_params,
headers,
nil,
options,
nil
)
end
@doc """
Returns a list of
[DomainSummary](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html)
objects for all domains owned by the AWS account that makes this call.
Each returned `DomainSummary` object contains information about a domain.
"""
def list_domains(%Client{} = client, input, options \\ []) do
url_path = "/v1/domains"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of [ `AssetSummary`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_AssetSummary.html)
objects for assets in a package version.
"""
def list_package_version_assets(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/version/assets"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"maxResults", "max-results"},
{"namespace", "namespace"},
{"nextToken", "next-token"},
{"package", "package"},
{"packageVersion", "version"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns the direct dependencies for a package version.
The dependencies are returned as [ `PackageDependency`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageDependency.html)
objects. CodeArtifact extracts the dependencies for a package version from the
metadata file for the package format (for example, the `package.json` file for
npm packages and the `pom.xml` file for Maven). Any package version dependencies
that are not listed in the configuration file are not returned.
"""
def list_package_version_dependencies(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/version/dependencies"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"namespace", "namespace"},
{"nextToken", "next-token"},
{"package", "package"},
{"packageVersion", "version"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of [ `PackageVersionSummary`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionSummary.html)
objects for package versions in a repository that match the request parameters.
"""
def list_package_versions(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/versions"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"maxResults", "max-results"},
{"namespace", "namespace"},
{"nextToken", "next-token"},
{"package", "package"},
{"repository", "repository"},
{"sortBy", "sortBy"},
{"status", "status"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of [ `PackageSummary`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageSummary.html)
objects for packages in a repository that match the request parameters.
"""
def list_packages(%Client{} = client, input, options \\ []) do
url_path = "/v1/packages"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"maxResults", "max-results"},
{"namespace", "namespace"},
{"nextToken", "next-token"},
{"packagePrefix", "package-prefix"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of [ `RepositorySummary`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_RepositorySummary.html)
objects.
Each `RepositorySummary` contains information about a repository in the
specified AWS account and that matches the input parameters.
"""
def list_repositories(%Client{} = client, input, options \\ []) do
url_path = "/v1/repositories"
headers = []
{query_params, input} =
[
{"maxResults", "max-results"},
{"nextToken", "next-token"},
{"repositoryPrefix", "repository-prefix"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Returns a list of [ `RepositorySummary`
](https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_RepositorySummary.html)
objects.
Each `RepositorySummary` contains information about a repository in the
specified domain and that matches the input parameters.
"""
def list_repositories_in_domain(%Client{} = client, input, options \\ []) do
url_path = "/v1/domain/repositories"
headers = []
{query_params, input} =
[
{"administratorAccount", "administrator-account"},
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"maxResults", "max-results"},
{"nextToken", "next-token"},
{"repositoryPrefix", "repository-prefix"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Gets information about AWS tags for a specified Amazon Resource Name (ARN) in
AWS CodeArtifact.
"""
def list_tags_for_resource(%Client{} = client, input, options \\ []) do
url_path = "/v1/tags"
headers = []
{query_params, input} =
[
{"resourceArn", "resourceArn"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Sets a resource policy on a domain that specifies permissions to access it.
When you call `PutDomainPermissionsPolicy`, the resource policy on the domain is
ignored when evaluating permissions. This ensures that the owner of a domain
cannot lock themselves out of the domain, which would prevent them from being
able to update the resource policy.
"""
def put_domain_permissions_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/domain/permissions/policy"
headers = []
query_params = []
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Sets the resource policy on a repository that specifies permissions to access
it.
When you call `PutRepositoryPermissionsPolicy`, the resource policy on the
repository is ignored when evaluating permissions. This ensures that the owner of
a repository cannot lock themselves out of the repository, which would prevent
them from being able to update the resource policy.
"""
def put_repository_permissions_policy(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository/permissions/policy"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Adds or updates tags for a resource in AWS CodeArtifact.
"""
def tag_resource(%Client{} = client, input, options \\ []) do
url_path = "/v1/tag"
headers = []
{query_params, input} =
[
{"resourceArn", "resourceArn"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Removes tags from a resource in AWS CodeArtifact.
"""
def untag_resource(%Client{} = client, input, options \\ []) do
url_path = "/v1/untag"
headers = []
{query_params, input} =
[
{"resourceArn", "resourceArn"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates the status of one or more versions of a package.
"""
def update_package_versions_status(%Client{} = client, input, options \\ []) do
url_path = "/v1/package/versions/update_status"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"format", "format"},
{"namespace", "namespace"},
{"package", "package"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:post,
url_path,
query_params,
headers,
input,
options,
nil
)
end
@doc """
Updates the properties of a repository.
"""
def update_repository(%Client{} = client, input, options \\ []) do
url_path = "/v1/repository"
headers = []
{query_params, input} =
[
{"domain", "domain"},
{"domainOwner", "domain-owner"},
{"repository", "repository"}
]
|> Request.build_params(input)
Request.request_rest(
client,
metadata(),
:put,
url_path,
query_params,
headers,
input,
options,
nil
)
end
end
|
lib/aws/generated/codeartifact.ex
| 0.924479
| 0.604662
|
codeartifact.ex
|
starcoder
|
defmodule FalconPlusApi.Api.Graph do
alias Maxwell.Conn
alias FalconPlusApi.{Util, Sig, Api}
@doc """
* [Session](#/authentication) Required
* Dashboard graphing data
* consol_fun:
* AVERAGE
* MAX
* MIN
### Request
```{
"step": 60,
"start_time": 1481854596,
"hostnames": [
"docker-a",
"docker-b",
"docker-c"
],
"end_time": 1481858193,
"counters": [
"cpu.idle",
"cpu.iowait"
],
"consol_fun": "AVERAGE"
}```
### Response
```Status: 200```
```[
{
"endpoint": "docker-a",
"counter": "cpu.idle",
"dstype": "GAUGE",
"step": 60,
"Values": [
{
"timestamp": 1481854620,
"value": 98.154506
},
{
"timestamp": 1481854680,
"value": 97.864161
},
{
"timestamp": 1481854740,
"value": 97.521368
},
{
"timestamp": 1481854800,
"value": 97.587247
},
{
"timestamp": 1481854860,
"value": 97.440273
},
{
"timestamp": 1481854920,
"value": 97.914006
},
{
"timestamp": 1481854980,
"value": 97.223409
},
{
"timestamp": 1481855040,
"value": 98.029135
},
{
"timestamp": 1481855100,
"value": 97.614991
},
{
"timestamp": 1481855160,
"value": 97.565143
},
{
"timestamp": 1481855220,
"value": 97.070064
},
{
"timestamp": 1481855280,
"value": 98.726115
},
{
"timestamp": 1481855340,
"value": 98.720137
},
{
"timestamp": 1481855400,
"value": 98.205128
},
{
"timestamp": 1481855460,
"value": 97.70017
},
{
"timestamp": 1481855520,
"value": 97.780623
},
{
"timestamp": 1481855580,
"value": 97.379725
},
{
"timestamp": 1481855640,
"value": 98.034188
},
{
"timestamp": 1481855700,
"value": 98.246364
},
{
"timestamp": 1481855760,
"value": 98.372591
},
{
"timestamp": 1481855820,
"value": 98.152921
},
{
"timestamp": 1481855880,
"value": 97.914006
},
{
"timestamp": 1481855940,
"value": 95.592048
},
{
"timestamp": 1481856000,
"value": 94.11512
},
{
"timestamp": 1481856060,
"value": 97.596567
},
{
"timestamp": 1481856120,
"value": 97.501077
},
{
"timestamp": 1481856180,
"value": 96.374622
},
{
"timestamp": 1481856240,
"value": 89.570815
},
{
"timestamp": 1481856300,
"value": 96.410035
},
{
"timestamp": 1481856360,
"value": 97.567222
},
{
"timestamp": 1481856420,
"value": 97.25204
},
{
"timestamp": 1481856480,
"value": 95.356836
},
{
"timestamp": 1481856540,
"value": 97.330508
},
{
"timestamp": 1481856600,
"value": 96.999571
},
{
"timestamp": 1481856660,
"value": 97.278912
},
{
"timestamp": 1481856720,
"value": 97.229327
},
{
"timestamp": 1481856780,
"value": 97.212693
},
{
"timestamp": 1481856840,
"value": 96.503198
},
{
"timestamp": 1481856900,
"value": 96.790757
},
{
"timestamp": 1481856960,
"value": 98.121264
},
{
"timestamp": 1481857020,
"value": 98.550725
},
{
"timestamp": 1481857080,
"value": 97.548387
},
{
"timestamp": 1481857140,
"value": 98.466127
},
{
"timestamp": 1481857200,
"value": 97.9006
},
{
"timestamp": 1481857260,
"value": 97.985426
},
{
"timestamp": 1481857320,
"value": 97.357204
},
{
"timestamp": 1481857380,
"value": 97.086547
},
{
"timestamp": 1481857440,
"value": 98.770144
},
{
"timestamp": 1481857500,
"value": 97.727273
},
{
"timestamp": 1481857560,
"value": 98.595147
},
{
"timestamp": 1481857620,
"value": 97.867804
},
{
"timestamp": 1481857680,
"value": 98.128456
},
{
"timestamp": 1481857740,
"value": 97.886729
},
{
"timestamp": 1481857800,
"value": 95.969453
},
{
"timestamp": 1481857860,
"value": 97.134303
},
{
"timestamp": 1481857920,
"value": 98.033348
},
{
"timestamp": 1481857980,
"value": 96.511628
},
{
"timestamp": 1481858040,
"value": 96.522112
},
{
"timestamp": 1481858100,
"value": 97.49043
},
{
"timestamp": 1481858160,
"value": 96.958012
}
]
},
{
"endpoint": "docker-a",
"counter": "cpu.iowait",
"dstype": "GAUGE",
"step": 60,
"Values": [
{
"timestamp": 1481854620,
"value": 0.815451
},
{
"timestamp": 1481854680,
"value": 0.598035
},
{
"timestamp": 1481854740,
"value": 1.239316
},
{
"timestamp": 1481854800,
"value": 0.775528
},
{
"timestamp": 1481854860,
"value": 0.895904
},
{
"timestamp": 1481854920,
"value": 0.893997
},
{
"timestamp": 1481854980,
"value": 0.811619
},
{
"timestamp": 1481855040,
"value": 0.728363
},
{
"timestamp": 1481855100,
"value": 0.425894
},
{
"timestamp": 1481855160,
"value": 0.768902
},
{
"timestamp": 1481855220,
"value": 1.443737
},
{
"timestamp": 1481855280,
"value": 0.29724
},
{
"timestamp": 1481855340,
"value": 0.213311
},
{
"timestamp": 1481855400,
"value": 0.512821
},
{
"timestamp": 1481855460,
"value": 1.022147
},
{
"timestamp": 1481855520,
"value": 0.810926
},
{
"timestamp": 1481855580,
"value": 0.515464
},
{
"timestamp": 1481855640,
"value": 0.555556
},
{
"timestamp": 1481855700,
"value": 0.470488
},
{
"timestamp": 1481855760,
"value": 0.428266
},
{
"timestamp": 1481855820,
"value": 0.386598
},
{
"timestamp": 1481855880,
"value": 0.63857
},
{
"timestamp": 1481855940,
"value": 0.432152
},
{
"timestamp": 1481856000,
"value": 0.730241
},
{
"timestamp": 1481856060,
"value": 0.643777
},
{
"timestamp": 1481856120,
"value": 0.603188
},
{
"timestamp": 1481856180,
"value": 1.035822
},
{
"timestamp": 1481856240,
"value": 8.927039
},
{
"timestamp": 1481856300,
"value": 0.605536
},
{
"timestamp": 1481856360,
"value": 0.341443
},
{
"timestamp": 1481856420,
"value": 0.343495
},
{
"timestamp": 1481856480,
"value": 0.601892
},
{
"timestamp": 1481856540,
"value": 0.466102
},
{
"timestamp": 1481856600,
"value": 0.557222
},
{
"timestamp": 1481856660,
"value": 0.382653
},
{
"timestamp": 1481856720,
"value": 0.554135
},
{
"timestamp": 1481856780,
"value": 0.428816
},
{
"timestamp": 1481856840,
"value": 1.151386
},
{
"timestamp": 1481856900,
"value": 0.556269
},
{
"timestamp": 1481856960,
"value": 0.469684
},
{
"timestamp": 1481857020,
"value": 0.29838
},
{
"timestamp": 1481857080,
"value": 0.903226
},
{
"timestamp": 1481857140,
"value": 0.426076
},
{
"timestamp": 1481857200,
"value": 0.771208
},
{
"timestamp": 1481857260,
"value": 1.071582
},
{
"timestamp": 1481857320,
"value": 1.278772
},
{
"timestamp": 1481857380,
"value": 0.642674
},
{
"timestamp": 1481857440,
"value": 0.212044
},
{
"timestamp": 1481857500,
"value": 0.686106
},
{
"timestamp": 1481857560,
"value": 0.425713
},
{
"timestamp": 1481857620,
"value": 0.810235
},
{
"timestamp": 1481857680,
"value": 0.765632
},
{
"timestamp": 1481857740,
"value": 0.380389
},
{
"timestamp": 1481857800,
"value": 0.296988
},
{
"timestamp": 1481857860,
"value": 0.855432
},
{
"timestamp": 1481857920,
"value": 0.470286
},
{
"timestamp": 1481857980,
"value": 1.248923
},
{
"timestamp": 1481858040,
"value": 1.631602
},
{
"timestamp": 1481858100,
"value": 1.531263
},
{
"timestamp": 1481858160,
"value": 0.599829
}
]
},
{
"endpoint": "docker-b",
"counter": "cpu.idle",
"dstype": "GAUGE",
"step": 60,
"Values": [
{
"timestamp": 1481854620,
"value": 93.811775
},
{
"timestamp": 1481854680,
"value": 94.150538
},
{
"timestamp": 1481854740,
"value": 94.991438
},
{
"timestamp": 1481854800,
"value": 93.287435
},
{
"timestamp": 1481854860,
"value": 96.642584
},
{
"timestamp": 1481854920,
"value": 98.207426
},
{
"timestamp": 1481854980,
"value": 94.801875
},
{
"timestamp": 1481855040,
"value": 97.827939
},
{
"timestamp": 1481855100,
"value": 94.439692
},
{
"timestamp": 1481855160,
"value": 98.292787
},
{
"timestamp": 1481855220,
"value": 95.027624
},
{
"timestamp": 1481855280,
"value": 98.218074
},
{
"timestamp": 1481855340,
"value": 97.402044
},
{
"timestamp": 1481855400,
"value": 94.368601
},
{
"timestamp": 1481855460,
"value": 94.772631
},
{
"timestamp": 1481855520,
"value": 93.992331
},
{
"timestamp": 1481855580,
"value": 94.939446
},
{
"timestamp": 1481855640,
"value": 97.780623
},
{
"timestamp": 1481855700,
"value": 97.860505
},
{
"timestamp": 1481855760,
"value": 92.765411
},
{
"timestamp": 1481855820,
"value": 98.029979
},
{
"timestamp": 1481855880,
"value": 94.523502
},
{
"timestamp": 1481855940,
"value": 94.102564
},
{
"timestamp": 1481856000,
"value": 94.96587
},
{
"timestamp": 1481856060,
"value": 94.382979
},
{
"timestamp": 1481856120,
"value": 93.336181
},
{
"timestamp": 1481856180,
"value": 97.988875
},
{
"timestamp": 1481856240,
"value": 94.401709
},
{
"timestamp": 1481856300,
"value": 94.619983
},
{
"timestamp": 1481856360,
"value": 94.916702
},
{
"timestamp": 1481856420,
"value": 94.089733
},
{
"timestamp": 1481856480,
"value": 94.475375
},
{
"timestamp": 1481856540,
"value": 93.576017
},
{
"timestamp": 1481856600,
"value": 94.010195
},
{
"timestamp": 1481856660,
"value": 94.137783
},
{
"timestamp": 1481856720,
"value": 95.264505
},
{
"timestamp": 1481856780,
"value": 96.879008
},
{
"timestamp": 1481856840,
"value": 96.38759
},
{
"timestamp": 1481856900,
"value": 93.830334
},
{
"timestamp": 1481856960,
"value": 96.282051
},
{
"timestamp": 1481857020,
"value": 94.173093
},
{
"timestamp": 1481857080,
"value": 95.382642
},
{
"timestamp": 1481857140,
"value": 94.107452
},
{
"timestamp": 1481857200,
"value": 93.611584
},
{
"timestamp": 1481857260,
"value": 97.0538
},
{
"timestamp": 1481857320,
"value": 94.404101
},
{
"timestamp": 1481857380,
"value": 94.449189
},
{
"timestamp": 1481857440,
"value": 98.286938
},
{
"timestamp": 1481857500,
"value": 93.720632
},
{
"timestamp": 1481857560,
"value": 93.669803
},
{
"timestamp": 1481857620,
"value": 93.865294
},
{
"timestamp": 1481857680,
"value": 94.498934
},
{
"timestamp": 1481857740,
"value": 94.610778
},
{
"timestamp": 1481857800,
"value": 93.929029
},
{
"timestamp": 1481857860,
"value": 97.827939
},
{
"timestamp": 1481857920,
"value": 97.76824
},
{
"timestamp": 1481857980,
"value": 91.079014
},
{
"timestamp": 1481858040,
"value": 97.854998
},
{
"timestamp": 1481858100,
"value": 93.399482
},
{
"timestamp": 1481858160,
"value": 94.014536
}
]
},
{
"endpoint": "docker-b",
"counter": "cpu.iowait",
"dstype": "GAUGE",
"step": 60,
"Values": [
{
"timestamp": 1481854620,
"value": 0.171895
},
{
"timestamp": 1481854680,
"value": 0.645161
},
{
"timestamp": 1481854740,
"value": 0.47089
},
{
"timestamp": 1481854800,
"value": 0.731497
},
{
"timestamp": 1481854860,
"value": 0.594985
},
{
"timestamp": 1481854920,
"value": 0.256082
},
{
"timestamp": 1481854980,
"value": 0.340861
},
{
"timestamp": 1481855040,
"value": 0.255537
},
{
"timestamp": 1481855100,
"value": 0.684346
},
{
"timestamp": 1481855160,
"value": 0.384123
},
{
"timestamp": 1481855220,
"value": 0.084998
},
{
"timestamp": 1481855280,
"value": 0.466695
},
{
"timestamp": 1481855340,
"value": 1.192504
},
{
"timestamp": 1481855400,
"value": 0.511945
},
{
"timestamp": 1481855460,
"value": 0.594985
},
{
"timestamp": 1481855520,
"value": 0.383468
},
{
"timestamp": 1481855580,
"value": 1.038062
},
{
"timestamp": 1481855640,
"value": 0.981647
},
{
"timestamp": 1481855700,
"value": 0.599059
},
{
"timestamp": 1481855760,
"value": 0.770548
},
{
"timestamp": 1481855820,
"value": 0.428266
},
{
"timestamp": 1481855880,
"value": 0.301854
},
{
"timestamp": 1481855940,
"value": 0.598291
},
{
"timestamp": 1481856000,
"value": 0.853242
},
{
"timestamp": 1481856060,
"value": 0.723404
},
{
"timestamp": 1481856120,
"value": 1.025203
},
{
"timestamp": 1481856180,
"value": 0.470689
},
{
"timestamp": 1481856240,
"value": 0.213675
},
{
"timestamp": 1481856300,
"value": 0.768574
},
{
"timestamp": 1481856360,
"value": 0.256301
},
{
"timestamp": 1481856420,
"value": 0.388266
},
{
"timestamp": 1481856480,
"value": 0.342612
},
{
"timestamp": 1481856540,
"value": 0.813704
},
{
"timestamp": 1481856600,
"value": 0.594732
},
{
"timestamp": 1481856660,
"value": 0.684638
},
{
"timestamp": 1481856720,
"value": 1.535836
},
{
"timestamp": 1481856780,
"value": 0.25652
},
{
"timestamp": 1481856840,
"value": 0.38249
},
{
"timestamp": 1481856900,
"value": 1.028278
},
{
"timestamp": 1481856960,
"value": 1.025641
},
{
"timestamp": 1481857020,
"value": 0.642674
},
{
"timestamp": 1481857080,
"value": 0.726806
},
{
"timestamp": 1481857140,
"value": 0.649913
},
{
"timestamp": 1481857200,
"value": 0.809199
},
{
"timestamp": 1481857260,
"value": 0.256191
},
{
"timestamp": 1481857320,
"value": 0.512601
},
{
"timestamp": 1481857380,
"value": 0.59778
},
{
"timestamp": 1481857440,
"value": 0.342612
},
{
"timestamp": 1481857500,
"value": 1.19607
},
{
"timestamp": 1481857560,
"value": 0.983747
},
{
"timestamp": 1481857620,
"value": 0.900901
},
{
"timestamp": 1481857680,
"value": 0.639659
},
{
"timestamp": 1481857740,
"value": 0.641574
},
{
"timestamp": 1481857800,
"value": 0.598546
},
{
"timestamp": 1481857860,
"value": 0.425894
},
{
"timestamp": 1481857920,
"value": 0.729614
},
{
"timestamp": 1481857980,
"value": 0.339847
},
{
"timestamp": 1481858040,
"value": 0.686401
},
{
"timestamp": 1481858100,
"value": 0.560828
},
{
"timestamp": 1481858160,
"value": 0.51304
}
]
},
{
"endpoint": "docker-c",
"counter": "cpu.idle",
"dstype": "",
"step": 0,
"Values": []
},
{
"endpoint": "docker-c",
"counter": "cpu.iowait",
"dstype": "",
"step": 0,
"Values": []
}
]```
"""
def histroy(sig, addr, opts \\ []) do
sig = Sig.get_sig(sig)
~s</api/v1/graph/history>
|> Util.url(addr)
|> Conn.new()
|> Api.set_opts(opts)
|> Conn.put_req_header("Apitoken", sig)
|> Api.post
|> Api.get_result
end
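  # Usage sketch (hypothetical values): `sig` is the API session signature and
  # `addr` the Open-Falcon API base URL; the JSON body shown under "### Request"
  # above is supplied through `opts` (see `FalconPlusApi.Api.set_opts/2` for
  # the accepted options).
  #
  #     FalconPlusApi.Api.Graph.histroy(sig, "http://falcon-api.example.com")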
end
|
lib/falcon_plus_api/api/graph.ex
| 0.557845
| 0.677301
|
graph.ex
|
starcoder
|
defmodule EventStore.MonitoredServer do
@moduledoc false
# Starts a `GenServer` process using a given module-fun-args tuple. Monitors
# the started process and attempts to restart it on terminate using an
# exponential backoff strategy. Allows interested processes to be informed
# when the process terminates.
use GenServer
require Logger
alias DBConnection.Backoff
defmodule State do
@moduledoc false
defstruct [:mfa, :name, :backoff, :pid, :shutdown, :queue, monitors: MapSet.new()]
end
def start_link(opts) do
{start_opts, monitor_opts} = Keyword.split(opts, [:name, :timeout, :debug, :spawn_opt])
{_module, _fun, _args} = mfa = Keyword.fetch!(monitor_opts, :mfa)
state = %State{
backoff: Backoff.new(backoff_type: :exp),
mfa: mfa,
name: Keyword.get(start_opts, :name),
queue: :queue.new(),
shutdown: Keyword.get(monitor_opts, :shutdown, 100)
}
GenServer.start_link(__MODULE__, state, start_opts)
end
def monitor(name) do
GenServer.call(name, {__MODULE__, :monitor, self()})
end
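  # Sketch of a subscriber (assumes the monitored server was registered under
  # the hypothetical name :monitored; handle_up/1 and handle_down/1 are
  # placeholders):
  #
  #     :ok = EventStore.MonitoredServer.monitor(:monitored)
  #     receive do
  #       {:UP, :monitored, pid} -> handle_up(pid)
  #       {:DOWN, :monitored, _pid, reason} -> handle_down(reason)
  #     end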
def init(%State{} = state) do
Process.flag(:trap_exit, true)
{:ok, start_process(state)}
end
def handle_call({__MODULE__, :monitor, monitor}, _from, %State{} = state) do
%State{monitors: monitors, name: name, pid: pid} = state
_ref = Process.monitor(monitor)
case pid do
pid when is_pid(pid) ->
Process.send(monitor, {:UP, name, pid}, [])
_ ->
:ok
end
state = %State{state | monitors: MapSet.put(monitors, monitor)}
{:reply, :ok, state}
end
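  # While the monitored process is down (pid is nil), incoming calls, casts,
  # and infos are buffered in `queue` and replayed once the process restarts.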
def handle_call(msg, from, %State{pid: nil} = state) do
{:noreply, enqueue({:call, msg, from}, state)}
end
def handle_call(msg, from, %State{pid: pid} = state) do
forward_call(pid, msg, from)
{:noreply, state}
end
def handle_cast(msg, %State{pid: nil} = state) do
{:noreply, enqueue({:cast, msg}, state)}
end
def handle_cast(msg, %State{pid: pid} = state) do
forward_cast(pid, msg)
{:noreply, state}
end
def handle_info(:start_process, %State{} = state) do
{:noreply, start_process(state)}
end
@doc """
Handle process exit by attempting to restart, after a delay.
"""
def handle_info({:EXIT, pid, reason}, %State{pid: pid} = state) do
%State{name: name} = state
Logger.debug(fn -> "Monitored process EXIT due to: #{inspect(reason)}" end)
notify_monitors({:DOWN, name, pid, reason}, state)
state = %State{state | pid: nil}
{:noreply, delayed_start(state)}
end
def handle_info({:EXIT, pid, _reason}, %State{} = state) do
%State{monitors: monitors} = state
state = %State{state | monitors: MapSet.delete(monitors, pid)}
{:noreply, state}
end
def handle_info(msg, %State{pid: nil} = state) do
{:noreply, enqueue({:info, msg}, state)}
end
def handle_info(msg, %State{pid: pid} = state) do
forward_info(pid, msg)
{:noreply, state}
end
def terminate(_reason, %State{pid: nil}), do: :ok
def terminate(reason, %State{} = state) do
%State{pid: pid, shutdown: shutdown, mfa: {module, _fun, _args}} = state
Logger.debug(fn ->
"Monitored server #{inspect(module)} terminate due to: #{inspect(reason)}"
end)
Process.exit(pid, reason)
receive do
{:EXIT, ^pid, _} -> :ok
after
shutdown ->
Logger.warn(
"Monitored server #{inspect(module)} failed to terminate within #{shutdown}, killing it brutally"
)
Process.exit(pid, :kill)
receive do
{:EXIT, ^pid, _} -> :ok
end
end
end
# Attempt to start the process, retry after a delay on failure
defp start_process(%State{} = state) do
%State{mfa: {module, fun, args}} = state
Logger.debug(fn -> "Attempting to start #{inspect(module)}" end)
case apply(module, fun, args) do
{:ok, pid} ->
Logger.debug(fn -> "Successfully started #{inspect(module)} (#{inspect(pid)})" end)
on_process_start(pid, state)
{:error, {:already_started, pid}} ->
Logger.debug(fn ->
"Monitored process already started #{inspect(module)} (#{inspect(pid)})"
end)
on_process_start(pid, state)
{:error, reason} ->
Logger.info(fn -> "Failed to start #{inspect(module)} due to: #{inspect(reason)}" end)
delayed_start(state)
end
end
defp on_process_start(pid, %State{} = state) do
%State{name: name, queue: queue} = state
:ok = forward_queued_msgs(pid, queue)
:ok = notify_monitors({:UP, name, pid}, state)
%State{state | pid: pid, queue: :queue.new()}
end
defp enqueue(item, %State{queue: queue} = state) do
%State{state | queue: :queue.in(item, queue)}
end
defp forward_call(pid, msg, from) do
:erlang.send(pid, {:"$gen_call", from, msg}, [:noconnect])
end
defp forward_cast(pid, msg) do
:erlang.send(pid, {:"$gen_cast", msg}, [:noconnect])
end
defp forward_info(pid, msg) do
:erlang.send(pid, msg, [:noconnect])
end
defp forward_queued_msgs(pid, queue) do
case :queue.out(queue) do
{{:value, item}, new_queue} ->
forward_queued_msg(pid, item)
forward_queued_msgs(pid, new_queue)
{:empty, _new_queue} ->
:ok
end
end
defp forward_queued_msg(pid, {:call, msg, from}), do: forward_call(pid, msg, from)
defp forward_queued_msg(pid, {:cast, msg}), do: forward_cast(pid, msg)
defp forward_queued_msg(pid, {:info, msg}), do: forward_info(pid, msg)
defp notify_monitors(message, %State{} = state) do
%State{monitors: monitors} = state
for monitor <- monitors do
:ok = Process.send(monitor, message, [])
end
:ok
end
defp delayed_start(%State{backoff: backoff} = state) do
{delay, backoff} = Backoff.backoff(backoff)
Process.send_after(self(), :start_process, delay)
%State{state | backoff: backoff}
end
end
|
lib/event_store/monitored_server.ex
| 0.721841
| 0.433981
|
monitored_server.ex
|
starcoder
|
defmodule Keyword do
@moduledoc """
A keyword is a list of tuples where the first element
of the tuple is an atom and the second element can be
any value.
A keyword may have duplicated keys, so it is not strictly
a dictionary. However, most of the functions in this module
allow it to behave exactly as a dictionary. For example,
`Keyword.get` will get the first entry matching the given
key, regardless of whether duplicated entries exist. Similarly,
`Keyword.put` and `Keyword.delete` ensure all duplicated
entries for a given key are removed when invoked.
"""
@type key :: atom
@type value :: any
@type t :: [{key, value}]
@doc """
Creates a Keyword from an enum. Unlike `Keyword.new`,
which behaves as a dict, `Keyword.from_enum` does not remove
duplicated entries.
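  ## Examples
      iex> Keyword.from_enum([{:a, 1}, {:a, 2}])
      [a: 1, a: 2]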
"""
@spec from_enum(Enum.t) :: t
def from_enum(enum) when is_list(enum) do
enum
end
def from_enum(enum) do
Enum.map(enum, fn(x) -> x end)
end
@doc """
Checks if the given argument is a keyword list or not.
"""
@spec keyword?(term) :: boolean
def keyword?([{ key, _value } | rest]) when is_atom(key) do
keyword?(rest)
end
def keyword?([]), do: true
def keyword?(_other), do: false
@doc """
Returns an empty keyword list, i.e. an empty list.
"""
@spec new :: t
def new do
[]
end
@doc """
Creates a Keyword from an enumerable. Similar to dicts,
duplicated entries are removed; the latest one prevails.
## Examples
iex> Keyword.new([{:b, 1}, {:a, 2}])
[a: 2, b: 1]
"""
@spec new(Enum.t) :: t
def new(pairs) do
Enum.reduce pairs, [], fn { k, v }, keywords ->
put(keywords, k, v)
end
end
@doc """
Creates a Keyword from an enumerable with the
help of the transformation function. Duplicated
entries are removed; the latest one prevails.
## Examples
iex> Keyword.new([:a, :b], fn (x) -> {x, x} end) |> Enum.sort
[a: :a, b: :b]
"""
@spec new(Enum.t, ({key, value} -> {key, value})) :: t
def new(pairs, transform) do
Enum.reduce pairs, [], fn i, keywords ->
{ k, v } = transform.(i)
put(keywords, k, v)
end
end
@doc """
Gets the value for a specific `key`.
If `key` does not exist, returns the default value (`nil` if no default value is given).
If duplicated entries exist, the first one is returned.
Use `get_values/2` to retrieve all entries.
## Examples
iex> Keyword.get([a: 1], :a)
1
iex> Keyword.get([a: 1], :b)
nil
iex> Keyword.get([a: 1], :b, 3)
3
"""
@spec get(t, key) :: value
@spec get(t, key, value) :: value
def get(keywords, key, default // nil) when is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{ ^key, value } -> value
false -> default
end
end
@doc """
Fetches the value for a specific `key` and returns it in a tuple.
If the `key` does not exist, returns `:error`.
## Examples
iex> Keyword.fetch([a: 1], :a)
{ :ok, 1 }
iex> Keyword.fetch([a: 1], :b)
:error
"""
@spec fetch(t, key) :: { :ok, value } | :error
def fetch(keywords, key) when is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{ ^key, value } -> { :ok, value }
false -> :error
end
end
@doc """
Fetches the value for specific `key`. If `key` does not exist,
a `KeyError` is raised.
## Examples
iex> Keyword.fetch!([a: 1], :a)
1
iex> Keyword.fetch!([a: 1], :b)
** (KeyError) key not found: :b
"""
@spec fetch!(t, key) :: value | no_return
def fetch!(keywords, key) when is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{ ^key, value } -> value
false -> raise(KeyError, key: key)
end
end
@doc """
Gets all values for a specific `key`.
## Examples
iex> Keyword.get_values([a: 1, a: 2], :a)
[1,2]
"""
@spec get_values(t, key) :: [value]
def get_values(keywords, key) when is_atom(key) do
lc { k, v } inlist keywords, key == k, do: v
end
@doc """
Returns all keys from the keyword list. Duplicated
keys appear duplicated in the final list of keys.
## Examples
iex> Keyword.keys([a: 1, b: 2])
[:a,:b]
iex> Keyword.keys([a: 1, b: 2, a: 3])
[:a,:b,:a]
"""
@spec keys(t) :: [key]
def keys(keywords) do
lc { key, _ } inlist keywords, do: key
end
@doc """
Returns all values from the keyword list.
## Examples
iex> Keyword.values([a: 1, b: 2])
[1,2]
"""
@spec values(t) :: [value]
def values(keywords) do
lc { _, value } inlist keywords, do: value
end
@doc """
Deletes all entries in the keyword list for a specific `key`.
If the `key` does not exist, returns the keyword list unchanged.
Use `delete_first` to delete just the first entry in case of
duplicated keys.
## Examples
iex> Keyword.delete([a: 1, b: 2], :a)
[b: 2]
iex> Keyword.delete([a: 1, b: 2, a: 3], :a)
[b: 2]
iex> Keyword.delete([b: 2], :a)
[b: 2]
"""
@spec delete(t, key) :: t
def delete(keywords, key) when is_atom(key) do
lc { k, _ } = tuple inlist keywords, key != k, do: tuple
end
@doc """
Deletes the first entry in the keyword list for a specific `key`.
If the `key` does not exist, returns the keyword list unchanged.
## Examples
iex> Keyword.delete_first([a: 1, b: 2, a: 3], :a)
[b: 2, a: 3]
iex> Keyword.delete_first([b: 2], :a)
[b: 2]
"""
@spec delete_first(t, key) :: t
def delete_first(keywords, key) when is_atom(key) do
:lists.keydelete(key, 1, keywords)
end
@doc """
Puts the given `value` under `key`.
If a previous value is already stored, all entries are
removed and the value is overridden.
## Examples
iex> Keyword.put([a: 1, b: 2], :a, 3)
[a: 3, b: 2]
iex> Keyword.put([a: 1, b: 2, a: 4], :a, 3)
[a: 3, b: 2]
"""
@spec put(t, key, value) :: t
def put(keywords, key, value) when is_atom(key) do
[{key, value}|delete(keywords, key)]
end
@doc """
Puts the given `value` under `key` unless the entry `key`
already exists.
## Examples
iex> Keyword.put_new([a: 1], :b, 2)
[b: 2, a: 1]
iex> Keyword.put_new([a: 1, b: 2], :a, 3)
[a: 1, b: 2]
"""
@spec put_new(t, key, value) :: t
def put_new(keywords, key, value) when is_atom(key) do
case :lists.keyfind(key, 1, keywords) do
{ ^key, _ } -> keywords
false -> [{key, value}|keywords]
end
end
@doc """
Checks if two keywords are equal. I.e. they contain
the same keys and those keys contain the same values.
## Examples
iex> Keyword.equal?([a: 1, b: 2], [b: 2, a: 1])
true
"""
@spec equal?(t, t) :: boolean
def equal?(left, right) do
:lists.sort(left) == :lists.sort(right)
end
@doc """
Merges two keyword lists into one. If they have duplicated
entries, the one given as second argument wins.
## Examples
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4]) |> Enum.sort
[a: 3, b: 2, d: 4]
"""
@spec merge(t, t) :: t
def merge(d1, d2) do
d2 ++ lc({ k, _ } = tuple inlist d1, not has_key?(d2, k), do: tuple)
end
@doc """
Merges two keyword lists into one. If they have duplicated
entries, the given function is invoked to solve conflicts.
## Examples
iex> Keyword.merge([a: 1, b: 2], [a: 3, d: 4], fn (_k, v1, v2) ->
...> v1 + v2
...> end)
[a: 4, b: 2, d: 4]
"""
@spec merge(t, t, (key, value, value -> value)) :: t
def merge(d1, d2, fun) do
do_merge(d2, d1, fun)
end
defp do_merge([{ k, v2 }|t], acc, fun) do
do_merge t, update(acc, k, v2, fn(v1) -> fun.(k, v1, v2) end), fun
end
defp do_merge([], acc, _fun) do
acc
end
@doc """
Returns whether a given `key` exists in the given `keywords`.
## Examples
iex> Keyword.has_key?([a: 1], :a)
true
iex> Keyword.has_key?([a: 1], :b)
false
"""
@spec has_key?(t, key) :: boolean
def has_key?(keywords, key) when is_atom(key) do
:lists.keymember(key, 1, keywords)
end
@doc """
Updates the `key` with the given function. If the `key` does
not exist, raises `KeyError`.
## Examples
iex> Keyword.update!([a: 1], :a, &1 * 2)
[a: 2]
iex> Keyword.update!([a: 1], :b, &1 * 2)
** (KeyError) key not found: :b
"""
@spec update!(t, key, (value -> value)) :: t | no_return
def update!([{key, value}|keywords], key, fun) do
[{key, fun.(value)}|delete(keywords, key)]
end
def update!([{_, _} = e|keywords], key, fun) do
[e|update!(keywords, key, fun)]
end
def update!([], key, _fun) when is_atom(key) do
raise(KeyError, key: key)
end
@doc """
Updates the `key` with the given function. If the `key` does
not exist, inserts the given `initial` value.
## Examples
iex> Keyword.update([a: 1], :a, 13, &1 * 2)
[a: 2]
iex> Keyword.update([a: 1], :b, 11, &1 * 2)
[a: 1, b: 11]
"""
@spec update(t, key, value, (value -> value)) :: t
def update([{key, value}|keywords], key, _initial, fun) do
[{key, fun.(value)}|delete(keywords, key)]
end
def update([{_, _} = e|keywords], key, initial, fun) do
[e|update(keywords, key, initial, fun)]
end
def update([], key, initial, _fun) when is_atom(key) do
[{key, initial}]
end
end
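# Usage sketch (added for illustration; not part of the original file).
# Duplicate keys show the dictionary-like behaviour described in the
# moduledoc: reads take the first entry, writes collapse duplicates.
#
#     iex> kw = [a: 1, a: 2, b: 3]
#     iex> Keyword.get(kw, :a)
#     1
#     iex> Keyword.get_values(kw, :a)
#     [1, 2]
#     iex> Keyword.put(kw, :a, 9)
#     [a: 9, b: 3]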
|
lib/elixir/lib/keyword.ex
| 0.921913
| 0.673702
|
keyword.ex
|
starcoder
|
defmodule Hui.Encode do
@moduledoc """
Utilities for encoding Solr query and update data structures.
"""
alias Hui.Query
@type solr_query :: Keyword.t | Query.Facet.t | Query.FacetRange.t | Query.FacetInterval.t
@doc """
Encodes a list of Solr query keywords to IO data.
"""
@spec encode(solr_query) :: iodata
def encode(query) when is_list(query) do
query
|> Enum.reject(fn {k,v} -> is_nil(v) or v == "" or v == [] or k == :__struct__ end)
|> _encode
end
# encode structs requiring facet and per field prefixes
def encode(%Query.FacetInterval{} = query), do: encode(query, {"facet.interval", query.interval, query.per_field})
def encode(%Query.FacetRange{} = query), do: encode(query, {"facet.range", query.range, query.per_field})
def encode(%Query.Facet{} = query), do: encode(query, {"facet", "", false})
def encode(query, info) when is_map(query) do
query
|> Map.to_list
|> Enum.reject(fn {k,v} -> is_nil(v) or v == "" or v == [] or k == :__struct__ end)
|> _transform(info)
|> _encode
end
defp _encode([head|[]]), do: [_encode(head, "")]
defp _encode([head|tail]), do: [_encode(head) | _encode(tail)]
defp _encode(keyword, separator \\ "&")
# do not render nil valued or empty keyword
defp _encode({_,nil}, _), do: ""
defp _encode([], _), do: ""
# when the value is also a struct, e.g. %Hui.Query.FacetRange/Interval{}
defp _encode({_,v}, sep) when is_map(v), do: [ encode(v), sep ]
# encodes fq: [x, y] type keyword to "fq=x&fq=y"
defp _encode({k,v}, sep) when is_list(v), do: [ v |> Enum.map_join("&", &_encode( {k,&1}, "" ) ), sep ]
defp _encode({k,v}, sep), do: [to_string(k), "=", URI.encode_www_form(to_string(v)), sep]
# render keywords according to Solr prefix / per field syntax
# e.g. transform `field: "year"` into `"facet.field": "year"`, `f.[field].facet.gap` etc.
defp _transform([head|[]], info), do: [_transform(head, info)]
defp _transform([head|tail], info), do: [_transform(head, info) | _transform(tail, info)]
defp _transform({k,v}, {k_prefix, field, per_field}) do
case {k, k_prefix, per_field} do
{:facet, _, _} -> {:facet, v}
{:range, "facet.range", _} -> {:"facet.range", v}
{:interval, "facet.interval", _} -> {:"facet.interval", v}
{:per_field, _, _} -> {k, nil} # set value to nil, do not render this field
{_, _, true} -> {:"f.#{field}.#{k_prefix}.#{k}", v}
{_, _, false} -> {:"#{k_prefix}.#{k}", v}
end
end
end
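# Usage sketch (added for illustration; the expected output follows from
# the clauses above). List values such as `fq:` expand into repeated
# parameters, and values are www-form encoded.
#
#     iex> Hui.Encode.encode(q: "loch", fq: ["type:image", "year:2001"])
#     ...> |> IO.iodata_to_binary()
#     "q=loch&fq=type%3Aimage&fq=year%3A2001"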
|
lib/hui/encode.ex
| 0.73782
| 0.477798
|
encode.ex
|
starcoder
|
defmodule Multipart do
@moduledoc """
`Multipart` constructs multipart messages.
It aims to produce multipart messages that are compatible with [RFC
2046](https://tools.ietf.org/html/rfc2046#section-5.1) for general use, and
[RFC 7578](https://tools.ietf.org/html/rfc7578) for constructing
`multipart/form-data` requests.
"""
defstruct boundary: nil, parts: []
@type t :: %__MODULE__{
boundary: String.t(),
parts: list(Multipart.Part.t())
}
@crlf "\r\n"
@separator "--"
alias Multipart.Part
@doc """
Create a new `Multipart` request.
Pass in the boundary as the first argument to set it explicitly, otherwise
it will default to 16 random bytes hex-encoded (a 32-character lowercase string) padded by `==`
on either side.
"""
@spec new(String.t()) :: t()
def new(boundary \\ generate_boundary()) do
%__MODULE__{boundary: boundary}
end
@doc """
Adds a part to the `Multipart` message.
"""
@spec add_part(t(), Multipart.Part.t()) :: t()
def add_part(%__MODULE__{parts: parts} = multipart, %Part{} = part) do
%__MODULE__{multipart | parts: parts ++ [part]}
end
@doc """
Returns a `Stream` of the `Multipart` message body.
"""
@spec body_stream(Multipart.t()) :: Enum.t()
def body_stream(%__MODULE__{boundary: boundary, parts: parts}) do
parts
|> Enum.map(&part_stream(&1, boundary))
|> Stream.concat()
|> Stream.concat([final_delimiter(boundary)])
end
@doc """
Returns a binary of the `Multipart` message body.
This uses `body_stream/1` under the hood.
"""
@spec body_binary(Multipart.t()) :: binary()
def body_binary(%__MODULE__{} = multipart) do
multipart
|> body_stream()
|> Enum.join("")
end
@doc """
Returns the Content-Type header for the `Multipart` message.
iex> multipart = Multipart.new("==abc123==")
iex> Multipart.content_type(multipart, "multipart/mixed")
"multipart/mixed; boundary=\\"==abc123==\\""
"""
@spec content_type(Multipart.t(), String.t()) :: String.t()
def content_type(%__MODULE__{boundary: boundary}, mime_type) do
[mime_type, "boundary=\"#{boundary}\""]
|> Enum.join("; ")
end
@doc """
Returns the length of the `Multipart` message in bytes.
It uses the `content_length` property in each of the message parts to
calculate the length of the multipart message without reading the entire
body into memory. `content_length` is set on the `Multipart.Part` by the
constructor functions when possible, such as when the in-memory binary
or the file on disk can be inspected.
This will throw an error if any of the parts does not have `content_length`
defined.
"""
@spec content_length(Multipart.t()) :: pos_integer()
def content_length(%__MODULE__{parts: parts, boundary: boundary}) do
final_delimiter_length =
final_delimiter(boundary)
|> Enum.join("")
|> octet_length()
parts
|> Enum.with_index()
|> Enum.reduce(0, fn {%Part{} = part, index}, total ->
case part_content_length(part, boundary) do
cl when is_integer(cl) ->
cl + total
nil ->
throw("Part at index #{index} has nil content_length")
end
end)
|> Kernel.+(final_delimiter_length)
end
@doc """
Helper function to compute the length of a string in octets.
"""
@spec octet_length(String.t()) :: pos_integer()
def octet_length(str) do
# Count bytes, not codepoints: multi-byte UTF-8 characters occupy several octets.
byte_size(str)
end
defp part_stream(%Part{} = part, boundary) do
Stream.concat([part_delimiter(boundary), part_headers(part), part_body_stream(part)])
end
defp part_content_length(%Part{content_length: content_length} = part, boundary) do
if is_integer(content_length) do
Enum.concat(part_delimiter(boundary), part_headers(part))
|> Enum.reduce(0, fn str, acc ->
octet_length(str) + acc
end)
|> Kernel.+(content_length)
else
nil
end
end
defp part_delimiter(boundary) do
[@crlf, @separator, boundary, @crlf]
end
defp final_delimiter(boundary) do
[@crlf, @separator, boundary, @separator, @crlf]
end
defp part_headers(%Part{headers: headers}) do
headers
|> Enum.flat_map(fn {k, v} ->
["#{k}: #{v}", @crlf]
end)
|> List.insert_at(-1, @crlf)
end
defp part_body_stream(%Part{body: body}) when is_binary(body) do
[body]
end
defp part_body_stream(%Part{body: body}) when is_list(body) do
body
end
defp part_body_stream(%Part{body: %type{} = body}) when type in [Stream, File.Stream] do
body
end
defp generate_boundary() do
token =
16
|> :crypto.strong_rand_bytes()
|> Base.encode16(case: :lower)
"==#{token}=="
end
end
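# Usage sketch (added for illustration). `Multipart.Part` is built directly
# here because its constructor helpers are not shown in this file; the
# struct fields used are the ones pattern-matched above.
#
#     iex> part = %Multipart.Part{
#     ...>   headers: [{"content-type", "text/plain"}],
#     ...>   body: "hello",
#     ...>   content_length: 5
#     ...> }
#     iex> mp = Multipart.new("==abc123==") |> Multipart.add_part(part)
#     iex> Multipart.content_type(mp, "multipart/form-data")
#     "multipart/form-data; boundary=\"==abc123==\""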
|
lib/multipart.ex
| 0.824568
| 0.484319
|
multipart.ex
|
starcoder
|
defmodule Ockam.Examples.SecureChannel.Local do
@moduledoc """
Local node secure channel example
run/0 - run the example. Creates a secure channel and sends a message
send_and_wait/3 - send more messages through the channel
"""
## Ignore no local return for secure channel
@dialyzer [:no_return, {:nowarn_function, wait: 1}]
alias Ockam.SecureChannel
alias Ockam.Vault
alias Ockam.Vault.Software, as: SoftwareVault
alias Ockam.Examples.Echoer
require Logger
def run() do
responder()
initiator()
end
def responder() do
{:ok, "echoer"} = Echoer.create(address: "echoer")
create_secure_channel_listener()
end
def initiator() do
{:ok, channel} = create_secure_channel(["SC_listener"])
## Register this process to receive messages
my_address = "example_run"
Ockam.Node.register_address(my_address)
send_and_wait(channel, "Hello secure channel!", my_address)
{:ok, channel}
end
def send_and_wait(channel, message, return_address \\ "example_run") do
Ockam.Router.route(%{
onward_route: [channel, "echoer"],
return_route: [return_address],
payload: message
})
receive do
%{
onward_route: [^return_address],
return_route: _return_route,
payload: ^message
} = reply ->
Logger.info("Received message: #{inspect(reply)}")
:ok
end
end
defp create_secure_channel_listener() do
{:ok, vault} = SoftwareVault.init()
{:ok, identity} = Vault.secret_generate(vault, type: :curve25519)
SecureChannel.create_listener(
vault: vault,
identity_keypair: identity,
address: "SC_listener"
)
end
defp create_secure_channel(route_to_listener) do
{:ok, vault} = SoftwareVault.init()
{:ok, identity} = Vault.secret_generate(vault, type: :curve25519)
{:ok, c} =
SecureChannel.create(route: route_to_listener, vault: vault, identity_keypair: identity)
wait(fn -> SecureChannel.established?(c) end)
{:ok, c}
end
defp wait(fun) do
case fun.() do
true ->
:ok
false ->
:timer.sleep(100)
wait(fun)
end
end
end
|
implementations/elixir/ockam/ockam/lib/ockam/examples/secure_channel/local.ex
| 0.805747
| 0.415343
|
local.ex
|
starcoder
|
defmodule Brex.Result.Base do
@moduledoc """
Tools for doing basic result tuple manipulations.
"""
@type s(x) :: {:ok, x} | {:error, any}
@type t(x) :: :ok | s(x)
@type p() :: :ok | {:error, any}
@type s() :: s(any)
@type t() :: t(any)
@doc """
Wraps value in an `ok` tuple.
Will be inlined at compile time.
## Typespec:
ok(a) :: s(a) when a: var
"""
@doc since: "0.1.0"
defmacro ok(val), do: {:ok, val}
@doc """
Wraps value in an `error` tuple
Will be inlined at compile time.
## Typespec:
error(any) :: t()
"""
@doc since: "0.1.0"
defmacro error(r), do: {:error, r}
@doc """
Takes in a tuple and function from plain value to `{:ok, any} | {:error, any}`.
Applies the function to the value within the `ok` tuple or propagates the `error`.
## Examples:
iex> bind({:ok, 1}, fn x -> if x == 1, do: {:ok, 2}, else: {:error, "not_one"} end)
{:ok, 2}
iex> bind({:ok, 4}, fn x -> if x == 1, do: {:ok, 2}, else: {:error, "not_one"} end)
{:error, "not_one"}
iex> bind({:error, 4}, fn x -> if x == 1, do: {:ok, 2}, else: {:error, "not_one"} end)
{:error, 4}
"""
@doc updated: "0.3.0"
@doc since: "0.1.0"
@spec bind(s(a), (a -> s(b))) :: s(b) when a: var, b: var
def bind({:error, r}, _), do: {:error, r}
def bind({:ok, v}, f) do
case f.(v) do
{:error, r} -> {:error, r}
{:ok, val} -> {:ok, val}
end
end
@doc """
This is the infix version of `bind/2`.
It has the same syntax restrictions as the pipe operator.
Defined as a macro for syntactic purposes.
## Examples:
def sgn(x) do
if x > 0 do
{:ok, "pos"}
else
{:error, "neg"}
end
end
def two_args(x, y), do: {:ok, x - y}
{:ok, 1}
~> sgn
= {:ok, "pos"}
{:ok, -3}
~> sgn
= {:error, "neg"}
{:error, 2}
~> sgn
= {:error, 2}
{:ok, 3}
~> two_args(2)
= {:ok, 1}
## Typespec:
t(a) ~> (a -> t(b)) :: t(b) when a: var, b: var
"""
@doc updated: "0.4.0"
@doc since: "0.1.0"
defmacro arg ~> fun do
quote do
unquote(__MODULE__).bind(unquote(arg), fn x ->
x
|> unquote(fun)
end)
end
end
@doc """
Takes in a tuple and a function from plain value to plain value.
Applies the function to the value within the `ok` tuple or propagates `error`.
## Examples:
iex> {:ok, 6}
...> |> fmap(fn x -> x+2 end)
{:ok, 8}
iex> {:error, 6}
...> |> fmap(fn x -> x+2 end)
{:error, 6}
"""
@doc since: "0.1.0"
@spec fmap(s(a), (a -> b)) :: s(b) when a: var, b: var
def fmap(m, f), do: bind(m, &{:ok, f.(&1)})
@doc """
Ignores the value in an `ok` tuple and just returns `:ok`.
Still short-circuits on `error`.
## Examples:
iex> {:ok, 2}
...> |> ignore
:ok
iex> :ok
...> |> ignore
:ok
iex> {:error, :not_found}
...> |> ignore
{:error, :not_found}
"""
@doc since: "0.2.0"
@spec ignore(t()) :: p()
def ignore({:error, r}), do: {:error, r}
def ignore({:ok, _val}), do: :ok
def ignore(:ok), do: :ok
@doc """
Extracts the value or reason from the tuple.
Caution: if given an `error` tuple, it raises an exception!
"""
@doc updated: "0.3.0"
@doc since: "0.1.0"
@spec extract!(s(a)) :: a when a: var
def extract!({:error, _} = ma) do
raise ArgumentError, "`extract` expects an ok tuple, \"#{inspect(ma)}\" given."
end
def extract!({:ok, value}), do: value
end
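# Usage sketch (added for illustration): chaining bind/2 and fmap/2 builds
# a small result pipeline that short-circuits on the first error.
#
#     iex> import Brex.Result.Base
#     iex> {:ok, 2} |> bind(fn x -> {:ok, x + 1} end) |> fmap(&(&1 * 10))
#     {:ok, 30}
#     iex> {:error, :nope} |> bind(fn x -> {:ok, x + 1} end) |> fmap(&(&1 * 10))
#     {:error, :nope}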
|
lib/result/base.ex
| 0.882472
| 0.472683
|
base.ex
|
starcoder
|
defmodule Cache do
@moduledoc """
Functions for manipulating and maintaining the marshal decode caches
"""
@doc """
Add a symbol to the cache.
# Examples
iex> Cache.add_to_symbol_cache(:test, {%{}, %{}})
{%{0 => :test}, %{}}
"""
def add_to_symbol_cache(symbol, {symbol_cache, object_cache}) do
{add_to_cache(symbol, symbol_cache), object_cache}
end
@doc """
Add an object to the cache.
# Examples
iex> Cache.add_to_object_cache(%{1 => 2}, {%{}, %{}})
{%{}, %{0 => %{1 => 2}}}
"""
def add_to_object_cache(object, {symbol_cache, object_cache}) do
{symbol_cache, add_to_cache(object, object_cache)}
end
@doc """
Replace a symbol stored in the cache. Used for updating a symbol with ivars.
# Examples
iex> Cache.replace_symbol_cache(:test, :test_update, {%{0 => :test}, %{}})
{%{0 => :test_update}, %{}}
"""
def replace_symbol_cache(old, new, {symbol_cache, object_cache}) do
symbol_cache = replace_cache(old, new, symbol_cache)
{symbol_cache, object_cache}
end
@doc """
Replace an object stored in the cache. Used for replacing a placeholder with a real value.
# Examples
iex> Cache.replace_object_cache(<<0xFF>>, "test", {%{}, %{0 => <<0>>, 1 => <<0xFF>>}})
{%{}, %{0 => <<0>>, 1 => "test"}}
"""
def replace_object_cache(old, new, {symbol_cache, object_cache}) do
object_cache = replace_cache(old, new, object_cache)
{symbol_cache, object_cache}
end
defp replace_cache(old, new, cache) do
index = Enum.find_index(cache, fn {_key, val} -> old == val end)
Map.put(cache, index, new)
end
# Add to cache if ref isn't already there
defp add_to_cache(element, cache) do
Map.put_new(cache, get_next_index(cache), element)
end
defp get_next_index(cache), do: do_get_next_index(Map.keys(cache))
defp do_get_next_index([]), do: 0
defp do_get_next_index(indices), do: indices |> Enum.max() |> increment()
defp increment(value), do: value + 1
@doc """
Retrieve a symbol from the cache for a symlink reference.
# Examples
iex> Cache.fetch_symbol(1, {%{0 => :apple, 1 => :banana}, %{0 => ["test"], 1 => "test"}})
:banana
"""
def fetch_symbol(index, {symbol_cache, _object_cache}) do
fetch_from_cache(index, symbol_cache)
end
@doc """
Retrieve an object from the cache for a type link.
# Examples
iex> Cache.fetch_object(1, {%{0 => :apple, 1 => :banana}, %{0 => ["test"], 1 => "test"}})
"test"
"""
def fetch_object(index, {_symbol_cache, object_cache}) do
fetch_from_cache(index, object_cache)
end
defp fetch_from_cache(index, cache) do
Map.get(cache, index)
end
end
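# Usage sketch (added for illustration): a cache is a
# `{symbol_cache, object_cache}` tuple of maps keyed by insertion index.
#
#     iex> cache = Cache.add_to_symbol_cache(:name, {%{}, %{}})
#     {%{0 => :name}, %{}}
#     iex> Cache.fetch_symbol(0, cache)
#     :name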
|
lib/cache.ex
| 0.777764
| 0.471527
|
cache.ex
|
starcoder
|
defmodule UAInspector.Util do
@moduledoc false
@doc """
Generate a regex to be used for engine version detection.
"""
@spec build_engine_regex(name :: String.t()) :: Regex.t()
def build_engine_regex("Gecko") do
# sigil_S used to ensure escaping is kept as-is
# Concatenated expression:
# - [ ](?:rv[: ]([0-9\.]+)).*Gecko\/[0-9]{8,10}
# - Regular expression of `build_engine_regex("Gecko")`
Regex.compile!(
~S"(?:[ ](?:rv[: ]([0-9\.]+)).*Gecko\/[0-9]{8,10}|Gecko\s*\/?\s*((?(?=\d+\.\d)\d+[.\d]*|\d{1,7}(?=(?:\D|$)))))",
[:caseless]
)
end
def build_engine_regex(name) do
Regex.compile!(name <> ~S"\s*\/?\s*((?(?=\d+\.\d)\d+[.\d]*|\d{1,7}(?=(?:\D|$))))", [:caseless])
end
@doc """
Upgrades a database regex into a detection regex.
This prevents matching a string with other characters
before the matching part.
"""
@spec build_regex(regex :: String.t()) :: Regex.t()
def build_regex(regex) do
Regex.compile!("(?:^|[^A-Z0-9\-_]|[^A-Z0-9\-]_|sprd-)(?:" <> regex <> ")", [:caseless])
end
@doc """
Replaces an empty string with `:unknown`.
"""
@spec maybe_unknown(data :: String.t()) :: :unknown | String.t()
def maybe_unknown(""), do: :unknown
def maybe_unknown("Unknown"), do: :unknown
def maybe_unknown(data), do: data
@doc """
Sanitizes a model string.
"""
@spec sanitize_model(model :: String.t()) :: String.t()
def sanitize_model(""), do: ""
def sanitize_model("Build"), do: ""
def sanitize_model(model) do
model
|> String.replace(~r/\$(\d)/, "")
|> String.replace("_", " ")
|> String.replace(~r/ TD$/, "")
|> String.trim()
end
@doc """
Sanitizes a name string.
"""
@spec sanitize_name(name :: String.t()) :: String.t()
def sanitize_name(""), do: ""
def sanitize_name(name), do: String.trim(name)
@doc """
Sanitizes a version string.
"""
@spec sanitize_version(version :: String.t()) :: String.t()
def sanitize_version(""), do: ""
def sanitize_version(version) do
version
|> String.replace(~r/\$(\d)/, "")
|> String.replace(~r/\.$/, "")
|> String.replace("_", ".")
|> String.trim()
end
@doc """
Converts an unknown version string to a semver-comparable format.
Everything except the `major` and `minor` version is dropped as
only these two parts are available/needed.
Missing values are filled with zeroes while empty strings are ignored.
If a non-integer value is found it is ignored and every part
including and after it will be a zero.
## Examples
iex> to_semver("15")
"15.0.0"
iex> to_semver("3.6")
"3.6.0"
iex> to_semver("8.8.8")
"8.8.0"
iex> to_semver("")
""
iex> to_semver("invalid")
"0.0.0"
iex> to_semver("3.help")
"3.0.0"
iex> to_semver("0.1.invalid")
"0.1.0"
"""
@spec to_semver(version :: String.t()) :: String.t()
def to_semver(""), do: ""
def to_semver(version) do
case String.split(version, ".", parts: 3) do
[maj] -> to_semver_string(maj, "0")
[maj, min] -> to_semver_string(maj, min)
[maj, min, _] -> to_semver_string(maj, min)
end
end
defp to_semver_string(major, minor) do
case {Integer.parse(major), Integer.parse(minor)} do
{:error, _} -> "0.0.0"
{{maj, _}, :error} -> "#{maj}.0.0"
{{maj, _}, {min, _}} -> "#{maj}.#{min}.0"
end
end
@doc """
Replaces PHP-Style regex captures with their values.
"""
@spec uncapture(data :: String.t(), captures :: list) :: String.t()
def uncapture(data, captures), do: uncapture(data, captures, 1)
defp uncapture(data, [], _), do: data
defp uncapture(data, [capture | captures], index) do
data
|> String.replace("\$#{index}", capture)
|> uncapture(captures, index + 1)
end
end
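# Usage sketch (added for illustration): PHP-style `$1`, `$2`, ... captures
# are substituted in order by `uncapture/2`.
#
#     iex> UAInspector.Util.uncapture("Browser $1.$2", ["11", "4"])
#     "Browser 11.4"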
|
lib/ua_inspector/util.ex
| 0.851567
| 0.477859
|
util.ex
|
starcoder
|
defmodule VintageNetQMI do
@moduledoc """
Use a QMI-enabled cellular modem with VintageNet
This module is not intended to be called directly but via calls to `VintageNet`. Here's a
typical example:
```elixir
VintageNet.configure(
"wwan0",
%{
type: VintageNetQMI,
vintage_net_qmi: %{
service_providers: [%{apn: "super"}]
}
}
)
```
The following keys are supported
* `:service_providers` - This is a list of service provider information
The `:service_providers` key should be set to information provided by each of
your service providers. Currently only the first service provider is used.
Information for each service provider is a map with some or all of the following
fields:
* `:apn` (required) - e.g., `"access_point_name"`
Your service provider should provide you with the information that you need to
connect. Often it is just an APN. The Gnome project provides a database of
[service provider
information](https://wiki.gnome.org/Projects/NetworkManager/MobileBroadband/ServiceProviders)
that may also be useful.
Here's an example with a service provider list:
```elixir
%{
type: VintageNetQMI,
vintage_net_qmi: %{
service_providers: [
%{apn: "wireless.twilio.com"}
],
}
}
```
"""
@behaviour VintageNet.Technology
alias VintageNet.Interface.RawConfig
alias VintageNet.IP.IPv4Config
alias VintageNetQMI.Cookbook
@doc """
Name of the QMI server that VintageNetQMI uses
"""
@spec qmi_name(VintageNet.ifname()) :: atom()
def qmi_name(ifname), do: Module.concat(__MODULE__.QMI, ifname)
@impl VintageNet.Technology
def normalize(%{type: __MODULE__, vintage_net_qmi: _qmi} = config) do
require_a_service_provider(config)
end
def normalize(_config) do
raise ArgumentError,
"specify vintage_net_qmi options (e.g., %{vintage_net_qmi: %{service_providers: [%{apn: \"super\"}]}})"
end
defp require_a_service_provider(
%{type: __MODULE__, vintage_net_qmi: qmi} = config,
required_fields \\ [:apn]
) do
case Map.get(qmi, :service_providers, []) do
[] ->
service_provider =
for field <- required_fields, into: %{} do
{field, to_string(field)}
end
new_config = %{
config
| vintage_net_qmi: Map.put(qmi, :service_providers, [service_provider])
}
raise ArgumentError,
"""
At least one service provider is required for #{__MODULE__}.
For example:
#{inspect(new_config)}
"""
[service_provider | _rest] ->
missing =
Enum.find(required_fields, fn field -> not Map.has_key?(service_provider, field) end)
if missing do
raise ArgumentError,
"""
The service provider '#{inspect(service_provider)}' is missing the '#{inspect(missing)}' field.
"""
end
config
end
end
@impl VintageNet.Technology
def to_raw_config(
ifname,
%{type: __MODULE__} = config,
_opts
) do
normalized_config = normalize(config)
up_cmds = [
{:fun, QMI, :configure_linux, [ifname]}
]
child_specs = [
{VintageNetQMI.Indications, ifname: ifname},
{QMI.Supervisor,
[
ifname: ifname,
name: qmi_name(ifname),
indication_callback: indication_callback(ifname)
]},
{VintageNetQMI.Connectivity, ifname: ifname},
{VintageNetQMI.Connection,
[ifname: ifname, service_providers: normalized_config.vintage_net_qmi.service_providers]},
{VintageNetQMI.CellMonitor, [ifname: ifname]},
{VintageNetQMI.SignalMonitor, [ifname: ifname]},
{VintageNetQMI.ModemInfo, ifname: ifname}
]
# QMI uses DHCP to report IP addresses, gateway, DNS, etc.
ipv4_config = %{ipv4: %{method: :dhcp}, hostname: Map.get(config, :hostname)}
config =
%RawConfig{
ifname: ifname,
type: __MODULE__,
source_config: config,
required_ifnames: [ifname],
up_cmds: up_cmds,
child_specs: child_specs
}
|> IPv4Config.add_config(ipv4_config, [])
|> remove_connectivity_detector()
config
end
defp remove_connectivity_detector(raw_config) do
new_child_specs =
Enum.reject(raw_config.child_specs, fn
# Old internet connectivity checker module
{VintageNet.Interface.InternetConnectivityChecker, _ifname} -> true
# New internet connectivity checker module
{VintageNet.Connectivity.InternetChecker, _ifname} -> true
_ -> false
end)
%{raw_config | child_specs: new_child_specs}
end
@impl VintageNet.Technology
def check_system(_), do: {:error, "unimplemented"}
@impl VintageNet.Technology
def ioctl(_ifname, _command, _args), do: {:error, :unsupported}
@doc """
Configure a cellular modem using an APN
```
iex> VintageNetQMI.quick_configure("an_apn")
:ok
```
"""
@spec quick_configure(String.t()) :: :ok | {:error, term()}
def quick_configure(apn) do
with {:ok, config} <- Cookbook.simple(apn) do
VintageNet.configure("wwan0", config)
end
end
# For unit test purposes
@doc false
@spec indication_callback(VintageNet.ifname()) :: function()
def indication_callback(ifname) do
&VintageNetQMI.Indications.handle(ifname, &1)
end
end
|
lib/vintage_net_qmi.ex
| 0.838101
| 0.759961
|
vintage_net_qmi.ex
|
starcoder
|
defmodule Ecto.Enum do
@moduledoc """
A custom type that maps atoms to strings.
`Ecto.Enum` must be used whenever you want to keep atom values in a field.
Since atoms cannot be persisted to the database, `Ecto.Enum` converts them
to string when writing to the database and converts them back to atoms when
loading data. It can be used in your schemas as follows:
field :status, Ecto.Enum, values: [:foo, :bar, :baz]
Composite types, such as `:array`, are also supported:
field :roles, {:array, Ecto.Enum}, values: [:Author, :Editor, :Admin]
`:values` must be a list of atoms. String values will be cast to atoms safely
and only if the atom exists in the list (otherwise an error will be raised).
Attempting to load any string not represented by an atom in the list will be
invalid.
The helper function `values/2` returns the values for a given schema and
field, which can be used in places like form drop-downs. For example,
given the following schema:
defmodule EnumSchema do
use Ecto.Schema
schema "my_schema" do
field :my_enum, Ecto.Enum, values: [:foo, :bar, :baz]
end
end
you can call `values/2` like this:
> Ecto.Enum.values(EnumSchema, :my_enum)
[:foo, :bar, :baz]
"""
use Ecto.ParameterizedType
@impl true
def type(_params), do: :string
@impl true
def init(opts) do
values = Keyword.get(opts, :values, nil)
unless is_list(values) and Enum.all?(values, &is_atom/1) do
raise ArgumentError, """
Ecto.Enum types must have a values option specified as a list of atoms. For example:
field :my_field, Ecto.Enum, values: [:foo, :bar]
"""
end
on_load = Map.new(values, &{Atom.to_string(&1), &1})
on_dump = Map.new(values, &{&1, Atom.to_string(&1)})
%{on_load: on_load, on_dump: on_dump, values: values}
end
@impl true
def cast(nil, _params), do: {:ok, nil}
def cast(data, params) do
case params do
%{on_load: %{^data => as_atom}} -> {:ok, as_atom}
%{on_dump: %{^data => _}} -> {:ok, data}
_ -> :error
end
end
@impl true
def load(nil, _, _), do: {:ok, nil}
def load(data, _loader, %{on_load: on_load}) do
case on_load do
%{^data => as_atom} -> {:ok, as_atom}
_ -> :error
end
end
@impl true
def dump(nil, _, _), do: {:ok, nil}
def dump(data, _dumper, %{on_dump: on_dump}) do
case on_dump do
%{^data => as_string} -> {:ok, as_string}
_ -> :error
end
end
@impl true
def equal?(a, b, _params), do: a == b
@impl true
def embed_as(_, _), do: :self
def values(schema, field) do
try do
schema.__changeset__()
rescue
_ in UndefinedFunctionError -> raise ArgumentError, "#{inspect schema} is not an Ecto schema"
else
%{^field => {:parameterized, Ecto.Enum, %{values: values}}} -> values
%{^field => {_, {:parameterized, Ecto.Enum, %{values: values}}}} -> values
%{} -> raise ArgumentError, "#{field} is not an Ecto.Enum field"
end
end
end
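# Usage sketch (added for illustration): exercising the parameterized type
# callbacks directly, outside of a schema.
#
#     iex> params = Ecto.Enum.init(values: [:foo, :bar])
#     iex> Ecto.Enum.cast("foo", params)
#     {:ok, :foo}
#     iex> Ecto.Enum.dump(:bar, nil, params)
#     {:ok, "bar"}
#     iex> Ecto.Enum.load("baz", nil, params)
#     :error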
|
lib/ecto/enum.ex
| 0.822581
| 0.616359
|
enum.ex
|
starcoder
|
defmodule APDS9960.Sensor do
@moduledoc "The APDS9960 sensor."
alias APDS9960.{Comm, Sensor, Transport}
@i2c_address 0x39
use TypedStruct
typedstruct do
field(:transport, Transport.t(), enforce: true)
end
@typedoc "The APDS9960 sensor option"
@type option() :: [
{:bus_name, binary}
| {:reset, boolean}
| {:set_defaults, boolean}
]
@type engine :: :color | :als | :proximity | :gesture
@type gesture_direction :: :up | :down | :left | :right
@doc "Initializes the I2C bus and sensor."
@spec init([option]) :: t()
def init(opts \\ []) do
bus_name = Access.get(opts, :bus_name, "i2c-1")
reset = Access.get(opts, :reset, true)
set_defaults = Access.get(opts, :set_defaults, true)
sensor = %Sensor{
transport: Transport.new(bus_name, @i2c_address)
}
:ok = ensure_connected!(sensor)
if reset, do: reset!(sensor)
if set_defaults, do: set_defaults!(sensor)
%Sensor{} = sensor
end
@spec ensure_connected!(Sensor.t()) :: :ok
defp ensure_connected!(%Sensor{transport: i2c}) do
true = Comm.connected?(i2c)
:ok
end
@spec reset!(Sensor.t()) :: :ok
def reset!(%Sensor{transport: i2c}) do
# Disable prox, gesture, and color engines
:ok = Comm.set_enable(i2c, gesture: 0, proximity: 0, als: 0)
# Reset basic config registers to power-on defaults
:ok = Comm.set_proximity_threshold(i2c, low: 0, high: 0)
:ok = Comm.set_interrupt_persistence(i2c, <<0>>)
:ok = Comm.set_gesture_proximity_threshold(i2c, enter: 0, exit: 0)
:ok = Comm.set_gesture_conf1(i2c, <<0>>)
:ok = Comm.set_gesture_conf2(i2c, <<0>>)
:ok = Comm.set_gesture_conf4(i2c, <<0>>)
:ok = Comm.set_gesture_pulse(i2c, <<0>>)
:ok = Comm.set_adc_integration_time(i2c, <<255>>)
:ok = Comm.set_control(i2c, als_and_color_gain: 1)
# Clear all non-gesture interrupts
:ok = Comm.clear_all_non_gesture_interrupts(i2c)
# Disable sensor and all functions/interrupts
:ok = Comm.set_enable(i2c, <<0>>)
:ok = Process.sleep(25)
# Re-enable sensor and wait 10ms for the power on delay to finish
:ok = Comm.set_enable(i2c, power: 1)
:ok = Process.sleep(10)
:ok
end
@spec set_defaults!(Sensor.t()) :: :ok
def set_defaults!(%Sensor{transport: i2c}) do
# Trigger proximity interrupt at >= 5, PPERS: 4 cycles
:ok = Comm.set_proximity_threshold(i2c, low: 0, high: 5)
:ok = Comm.set_interrupt_persistence(i2c, proximity: 4)
# Enter gesture engine at >= 5 proximity counts
# Exit gesture engine if all counts drop below 30
:ok = Comm.set_gesture_proximity_threshold(i2c, enter: 5, exit: 30)
# GEXPERS: 2 (4 cycles), GEXMSK: 0 (default), GFIFOTH: 2 (8 datasets)
:ok =
Comm.set_gesture_conf1(i2c,
fifo_threshold: 2,
exit_mask: 0,
exit_persistence: 2
)
# GGAIN: 2 (4x), GLDRIVE: 0 (100 mA), GWTIME: 1 (2.8 ms)
:ok =
Comm.set_gesture_conf2(i2c,
gain: 2,
led_drive_strength: 0,
wait_time: 1
)
# GPULSE: 5 (6 pulses), GPLEN: 2 (16 us)
:ok =
Comm.set_gesture_pulse(i2c,
pulse_count: 5,
pulse_length: 2
)
# ATIME: 0 (712ms color integration time, max count of 65535)
:ok = Comm.set_adc_integration_time(i2c, <<0>>)
# AGAIN: 1 (4x color gain)
:ok = Comm.set_control(i2c, als_and_color_gain: 1)
:ok
end
@doc "Enable an engine for a desired feature."
@spec enable(Sensor.t(), engine) :: :ok
def enable(%Sensor{transport: i2c}, :color), do: Comm.set_enable(i2c, als: 1)
def enable(%Sensor{transport: i2c}, engine), do: Comm.set_enable(i2c, [{engine, 1}])
end
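# Usage sketch (added for illustration; requires an APDS9960 wired to the
# assumed I2C bus, so this only runs on target hardware).
#
#     iex> sensor = APDS9960.Sensor.init(bus_name: "i2c-1")
#     iex> APDS9960.Sensor.enable(sensor, :proximity)
#     :ok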
|
lib/apds9960/sensor.ex
| 0.839339
| 0.416945
|
sensor.ex
|
starcoder
|
defmodule ForthVM.Words.Flow do
@moduledoc """
Flow words
"""
import ForthVM.Utils
alias ForthVM.Process
# ---------------------------------------------
# Flow control: start end DO do_stack LOOP
# ---------------------------------------------
@doc """
do: ( end_count count -- ) start loop declaration
"""
def _do(tokens, [count, end_count | data_stack], return_stack, dictionary, meta) do
Process.next(
tokens,
data_stack,
[count, end_count, %{do: tokens} | return_stack],
dictionary,
meta
)
end
@doc """
i: ( -- count ) copy the top of a LOOP's return stack to the data stack
"""
def i(
tokens,
data_stack,
[count, _end_count, %{do: _do_tokens} | _] = return_stack,
dictionary,
meta
) do
Process.next(tokens, [count | data_stack], return_stack, dictionary, meta)
end
@doc """
r@: ( -- data ) copy the top of the return stack to the data stack
"""
def copy_r_to_d(tokens, data_stack, [data | _] = return_stack, dictionary, meta) do
Process.next(tokens, [data | data_stack], return_stack, dictionary, meta)
end
@doc """
>r: ( data -- ) move the top of the data stack to the return stack
"""
def move_d_to_r(tokens, [data | data_stack], return_stack, dictionary, meta) do
Process.next(tokens, data_stack, [data | return_stack], dictionary, meta)
end
@doc """
r>: ( -- data ) move the top of the return stack to the data stack
"""
def move_r_to_d(tokens, data_stack, [data | return_stack], dictionary, meta) do
Process.next(tokens, [data | data_stack], return_stack, dictionary, meta)
end
@doc """
j: ( -- data ) copy data from return stack after LOOP's definition to the data stack
"""
def copy_r_loop_to_d(
tokens,
data_stack,
[_count, _end_count, %{do: _do_tokens}, data | _] = return_stack,
dictionary,
meta
) do
Process.next(tokens, [data | data_stack], return_stack, dictionary, meta)
end
@doc """
loop: ( -- ) keep processing do_tokens while count < end_count, incrementing count by 1 each step
"""
def loop(
tokens,
data_stack,
[count, end_count, %{do: do_tokens} | return_stack],
dictionary,
meta
) do
count = count + 1
case count < end_count do
true ->
Process.next(
do_tokens,
data_stack,
[count, end_count, %{do: do_tokens} | return_stack],
dictionary,
meta
)
false ->
Process.next(tokens, data_stack, return_stack, dictionary, meta)
end
end
@doc """
+loop: ( inc -- ) keep processing do_tokens while count < end_count, incrementing count by the top value on the data stack
"""
def plus_loop(
tokens,
[inc | data_stack],
[count, end_count, %{do: do_tokens} | return_stack],
dictionary,
meta
) do
count = count + inc
case count < end_count do
true ->
Process.next(
do_tokens,
data_stack,
[count, end_count, %{do: do_tokens} | return_stack],
dictionary,
meta
)
false ->
Process.next(tokens, data_stack, return_stack, dictionary, meta)
end
end
# ---------------------------------------------
# Flow control: BEGIN until_stack UNTIL
# ---------------------------------------------
@doc """
begin: ( -- ) start loop declaration
"""
def begin(tokens, data_stack, return_stack, dictionary, meta) do
Process.next(tokens, data_stack, [%{begin: tokens} | return_stack], dictionary, meta)
end
@doc """
until: ( bool -- ) keep processing until the condition is truthy
"""
def until(
tokens,
[condition | data_stack],
[%{begin: until_tokens} | return_stack],
dictionary,
meta
) do
case is_falsely(condition) do
true ->
Process.next(
until_tokens,
data_stack,
[%{begin: until_tokens} | return_stack],
dictionary,
meta
)
false ->
Process.next(tokens, data_stack, return_stack, dictionary, meta)
end
end
end
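# Illustration (added; not executable on its own, since `Process.next/5`
# drives the VM). For the Forth program `3 0 DO i . LOOP`, `_do/5` pops
# `count = 0` and `end_count = 3` from the data stack and pushes them onto
# the return stack with a `%{do: tokens}` marker:
#
#     return_stack = [0, 3, %{do: do_tokens} | rest]
#
# `i/5` copies the current counter to the data stack, and `loop/5`
# increments it and re-runs `do_tokens` while `count < end_count` holds.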
|
lib/forthvm/words/flow.ex
| 0.729712
| 0.636028
|
flow.ex
|
starcoder
|
defmodule ExSieve.Node.Condition do
@moduledoc false
alias ExSieve.Node.{Condition, Attribute}
defstruct values: nil, attributes: nil, predicat: nil, combinator: nil
@type t :: %__MODULE__{}
@basic_predicates ~w(eq
not_eq
cont
not_cont
lt
lteq
gt
gteq
in
not_in
matches
does_not_match
start
not_start
end
not_end
true
not_true
false
not_false
present
blank
null
not_null)
@all_any_predicates Enum.flat_map(@basic_predicates, &(["#{&1}_any", "#{&1}_all"]))
@predicates @basic_predicates ++ @all_any_predicates
@spec predicates :: list(String.t)
def predicates do
@predicates
end
@spec basic_predicates :: list(String.t)
def basic_predicates do
@basic_predicates
end
@typep values :: String.t | integer | list(String.t | integer)
@spec extract(String.t | atom, values, atom) :: t | {:error, :predicat_not_found | :value_is_empty}
def extract(key, values, module) do
with attributes <- extract_attributes(key, module),
predicat <- get_predicat(key),
combinator <- get_combinator(key),
values <- prepare_values(values),
do: build_condition(attributes, predicat, combinator, values)
end
defp prepare_values(values) when is_list(values) do
result = Enum.all?(values, fn
(value) when is_bitstring(value) -> String.length(value) >= 1
(_) -> true
end)
if result do
values
else
{:error, :value_is_empty}
end
end
defp prepare_values(""), do: {:error, :value_is_empty}
defp prepare_values(value) when is_bitstring(value), do: List.wrap(value)
defp prepare_values(value), do: List.wrap(value)
defp build_condition({:error, reason}, _predicat, _combinator, _values), do: {:error, reason}
defp build_condition(_attributes, _predicat, _combinator, {:error, reason}), do: {:error, reason}
defp build_condition(_attributes, {:error, reason}, _combinator, _values), do: {:error, reason}
defp build_condition(attributes, predicat, combinator, values) do
%Condition{
attributes: attributes,
predicat: predicat,
combinator: combinator,
values: values
}
end
defp extract_attributes(key, module) do
key
|> String.split(~r/_(and|or)_/)
|> Enum.map(&Attribute.extract(&1, module))
|> validate_attributes
end
defp validate_attributes(attributes, acc \\ [])
defp validate_attributes([{:error, reason}|_tail], _acc),
do: {:error, reason}
defp validate_attributes([attribute|tail], acc),
do: validate_attributes(tail, acc ++ [attribute])
defp validate_attributes([], acc),
do: acc
defp get_combinator(key) do
cond do
key |> String.contains?("_or_") -> :or
key |> String.contains?("_and_") -> :and
:otherwise -> :and
end
end
defp get_predicat(key) do
case @predicates |> Enum.sort_by(&byte_size/1, &>=/2) |> Enum.find(&String.ends_with?(key, &1)) do
nil -> {:error, :predicat_not_found}
predicat -> String.to_atom(predicat)
end
end
end
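# Usage sketch (added for illustration; `MySchema` is a hypothetical Ecto
# schema whose fields `Attribute.extract/2` can resolve).
#
#     Condition.extract("name_or_title_cont", "elixir", MySchema)
#     #=> %Condition{attributes: [...], predicat: :cont,
#     #=>            combinator: :or, values: ["elixir"]}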
|
lib/ex_sieve/node/condition.ex
| 0.789153
| 0.454896
|
condition.ex
|
starcoder
|
defmodule Plymio.Funcio.Enum.Index do
@moduledoc ~S"""
Functions for an Enumerable's Indices
## Documentation Terms
### *index range*
See `Plymio.Funcio.Index` for an explanation of *index range*
See `Plymio.Funcio` for an overview and explanation of other terms used in the documentation.
"""
import Plymio.Fontais.Error,
only: [
new_error_result: 1
]
import Plymio.Funcio.Index,
only: [
normalise_index_range: 1,
validate_index: 1,
validate_indices: 1,
new_index_error_result: 1,
create_predicate_indices: 1
]
import Plymio.Funcio.Enum.Utility,
only: [
enum_to_list: 1
]
@type error :: Plymio.Funcio.error()
@type index :: Plymio.Funcio.index()
@type indices :: Plymio.Funcio.indices()
@type fun1_predicate :: Plymio.Funcio.fun1_predicate()
@error_text_list_invalid "list invalid"
defp list_indices(state) when is_list(state) do
Range.new(0, length(state) - 1)
end
@doc ~S"""
`normalise_index_range_enum/2` takes an *enum* and *index range*,
normalises the *index range* and converts negative indices to their
absolute (zero offset) values.
Finally it confirms each index is valid for the *enum*, returning `{:ok, positive_indices}`.
## Examples
iex> [1,2,3] |> normalise_index_range_enum(0)
{:ok, [0]}
iex> [1,2,3] |> normalise_index_range_enum(-1)
{:ok, [2]}
iex> [1,2,3] |> normalise_index_range_enum([0,-1])
{:ok, [0,2]}
iex> [1,2,3] |> normalise_index_range_enum(0 .. 2)
{:ok, [0,1,2]}
iex> [1,2,3] |> normalise_index_range_enum([0,-1,0,2])
{:ok, [0,2,0,2]}
iex> {:error, error} = [1,2,3] |> normalise_index_range_enum(4)
...> error |> Exception.message
"index invalid, got: 4"
iex> {:error, error} = [1,2,3] |> normalise_index_range_enum([0,-1,4,5,0,2])
...> error |> Exception.message
"indices invalid, got: [4, 5]"
iex> {:error, error} = [1,2,3] |> normalise_index_range_enum(:not_valid)
...> error |> Exception.message
"index range invalid, got: :not_valid"
"""
@since "0.1.0"
@spec normalise_index_range_enum(any, any) :: {:ok, indices} | {:error, error}
def normalise_index_range_enum(enum, index_range)
def normalise_index_range_enum(state, nil) when is_list(state) do
{:ok, state |> list_indices |> Enum.to_list()}
end
def normalise_index_range_enum(state, index_range) do
with {:ok, indices} <- index_range |> normalise_index_range,
{:ok, state} <- state |> enum_to_list,
{:ok, _} = result <- state |> validate_indices_enum(indices) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`validate_index_enum/2` takes an *enum* and *index*, converts
the *index* into its absolute (zero offset) value and then
confirms the index is valid for the *enum*, returning
`{:ok, positive_index}`.
## Examples
iex> [1,2,3] |> validate_index_enum(0)
{:ok, 0}
iex> [1,2,3] |> validate_index_enum(-1)
{:ok, 2}
iex> {:error, error} = [1,2,3] |> validate_index_enum(4)
...> error |> Exception.message
"index too large, got: 4"
iex> {:error, error} = [1,2,3] |> validate_index_enum(-999)
...> error |> Exception.message
"index too small, got: -999"
iex> {:error, error} = [1,2,3] |> validate_index_enum(:not_valid)
...> error |> Exception.message
"index invalid, got: :not_valid"
"""
@spec validate_index_enum(any, any) :: {:ok, index} | {:error, error}
def validate_index_enum(enum, index)
def validate_index_enum(state, index) when is_list(state) do
with {:ok, index} <- index |> validate_index do
index_max = length(state) - 1
case index do
x when x >= 0 -> x
x -> index_max + x + 1
end
|> case do
ndx when ndx < 0 ->
new_error_result(m: "index too small", v: index)
ndx when ndx > index_max ->
new_error_result(m: "index too large", v: index)
ndx ->
{:ok, ndx}
end
else
{:error, %{__exception__: true}} = result -> result
end
end
def validate_index_enum(state, index) do
with {:ok, state} <- state |> enum_to_list,
{:ok, _} = result <- state |> validate_index_enum(index) do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
@doc ~S"""
`validate_indices_enum/2` takes an *enum* and *indices*, converts
negative indices into their absolute (zero offset) values and then
confirms each index is valid for the *enum*, returning
`{:ok, positive_indices}`.
## Examples
iex> [1,2,3] |> validate_indices_enum(0)
{:ok, [0]}
iex> [1,2,3] |> validate_indices_enum(-1)
{:ok, [2]}
iex> [1,2,3,4,5] |> validate_indices_enum([-1,0,2])
{:ok, [4,0,2]}
iex> {:error, error} = [1,2,3] |> validate_indices_enum(4)
...> error |> Exception.message
"index invalid, got: 4"
iex> {:error, error} = [1,2,3] |> validate_indices_enum([0,-1,4,5,0,2])
...> error |> Exception.message
"indices invalid, got: [4, 5]"
iex> {:error, error} = [1,2,3] |> validate_indices_enum(:not_valid)
...> error |> Exception.message
"index invalid, got: :not_valid"
"""
@since "0.1.0"
@spec validate_indices_enum(any, any) :: {:ok, indices} | {:error, error}
def validate_indices_enum(enum, indices)
def validate_indices_enum(state, indices) when is_list(state) do
with {:ok, state} <- state |> enum_to_list,
{:ok, indices} <- indices |> List.wrap() |> validate_indices do
indices
|> Enum.reduce(
{[], []},
fn index, {oks, errors} ->
state
|> validate_index_enum(index)
|> case do
{:ok, index} -> {[index | oks], errors}
{:error, %{__struct__: _}} -> {oks, [index | errors]}
end
end
)
|> case do
# no invalid indices
{indices, []} ->
{:ok, indices |> Enum.reverse()}
{_, invalid_indices} ->
invalid_indices |> Enum.reverse() |> new_index_error_result
end
else
{:error, %{__exception__: true}} = result -> result
end
end
def validate_indices_enum(state, _indices) do
new_error_result(m: @error_text_list_invalid, v: state)
end
@doc ~S"""
`create_predicate_index_range_enum/2` takes an *enum* and *index range*
and creates an arity 1 function that expects to be passed a `{value, index}`
2-tuple and returns `true` if `index` is in the *index range*, else `false`.
## Examples
iex> {:ok, fun} = [1,2,3] |> create_predicate_index_range_enum(0)
...> true = {:x, 0} |> fun.()
...> {"HelloWorld", 2} |> fun.()
false
iex> {:ok, fun} = 0 .. 9 |> Enum.to_list |> create_predicate_index_range_enum(0 .. 2)
...> true = {:x, 0} |> fun.()
...> true = {%{a: 1}, 2} |> fun.()
...> true = {42, 2} |> fun.()
...> {"HelloWorld", 4} |> fun.()
false
iex> {:error, error} = [] |> create_predicate_index_range_enum(:not_valid)
...> error |> Exception.message
"index range invalid, got: :not_valid"
"""
@since "0.1.0"
@spec create_predicate_index_range_enum(any, any) :: {:ok, fun1_predicate} | {:error, error}
def create_predicate_index_range_enum(enum, index_range \\ nil)
# range == nil => all state
def create_predicate_index_range_enum(_state, nil) do
{:ok, fn _ -> true end}
end
# range = arity 1 fun
def create_predicate_index_range_enum(_state, range)
when is_function(range, 1) do
{:ok, range}
end
def create_predicate_index_range_enum(state, index_range) do
with {:ok, range_indices} <- state |> normalise_index_range_enum(index_range),
{:ok, _fun} = result <- range_indices |> create_predicate_indices do
result
else
{:error, %{__exception__: true}} = result -> result
end
end
end
|
lib/funcio/enum/index/index.ex
| 0.876304
| 0.492798
|
index.ex
|
starcoder
|
defmodule Artemis.Repo.Select do
require Ecto.Query
@moduledoc """
Functions that execute Ecto `select` queries
"""
@doc """
Helper function to parse options and potentially call `select_fields/3` and
`exclude_fields/3`
"""
def select_query(ecto_query, schema, params) do
params = get_select_query_params(params)
ecto_query
|> maybe_exclude_fields(schema, params)
|> maybe_select_fields(schema, params)
end
defp get_select_query_params(params) do
params
|> Enum.into(%{})
|> Artemis.Helpers.keys_to_strings()
end
defp maybe_select_fields(ecto_query, schema, %{"select" => fields}) do
select_fields(ecto_query, schema, fields)
end
defp maybe_select_fields(ecto_query, _, _), do: ecto_query
defp maybe_exclude_fields(ecto_query, schema, %{"exclude" => fields}) do
exclude_fields(ecto_query, schema, fields)
end
defp maybe_exclude_fields(ecto_query, _, _), do: ecto_query
@doc """
Add a select statement to an ecto query to include requested fields. Takes
the schema as an argument and filters requested fields against existing ones.
If a `select` statement already exists in the query, it attempts to detect it
and use `select_merge` instead.
Does not support associations.
Example:
select_fields(existing_ecto_query, Artemis.User, [:name, :invalid_field, :__meta__])
Returns:
#Ecto.Query<from u0 in Artemis.User, select: [:name]>
"""
def select_fields(ecto_query, schema, fields) when is_list(fields) do
selected_fields = get_selected_fields(schema, fields)
case length(selected_fields) > 0 do
true -> add_select_fields(ecto_query, selected_fields)
false -> ecto_query
end
end
def select_fields(ecto_query, _schema, _params), do: ecto_query
@doc """
Add a select statement to an ecto query to exclude requested fields. Takes
the schema as an argument and filters requested fields against existing ones.
A `nil` value will be returned for excluded fields.
If a `select` statement already exists in the query, it attempts to detect it
and use `select_merge` instead.
Does not support associations.
Example:
exclude_fields(existing_ecto_query, Artemis.User, [:name, :invalid_field, :__meta__])
Returns:
#Ecto.Query<from u0 in Artemis.User, select: [<a list of all fields except for excluded ones>]>
"""
def exclude_fields(ecto_query, schema, exclude_fields: fields) do
exclude_fields(ecto_query, schema, fields)
end
def exclude_fields(ecto_query, schema, fields) when is_list(fields) do
selected_fields = get_selected_fields_without_excluded(schema, fields)
case length(selected_fields) > 0 do
true -> add_select_fields(ecto_query, selected_fields)
false -> ecto_query
end
end
def exclude_fields(ecto_query, _schema, _params), do: ecto_query
# Helpers
defp get_selected_fields(schema, fields) do
available_fields = get_available_fields(schema)
requested_fields = Enum.map(fields, &Artemis.Helpers.to_string(&1))
available_fields
|> get_intersection(requested_fields)
|> Enum.map(&String.to_existing_atom/1)
end
defp get_selected_fields_without_excluded(schema, fields) do
available_fields = get_available_fields(schema)
requested_fields = Enum.map(fields, &Artemis.Helpers.to_string(&1))
available_fields
|> get_difference(requested_fields)
|> Enum.map(&String.to_existing_atom/1)
end
defp get_available_fields(schema) do
schema.__schema__(:fields)
|> Enum.map(&Atom.to_string/1)
|> Enum.reject(&String.starts_with?(&1, "__"))
end
defp get_intersection(available, requested) do
MapSet.intersection(MapSet.new(available), MapSet.new(requested))
end
defp get_difference(available, requested) do
MapSet.difference(MapSet.new(available), MapSet.new(requested))
end
defp add_select_fields(ecto_query, selected_fields) do
case has_prior_select?(ecto_query) do
false -> add_select_query(ecto_query, selected_fields)
true -> add_select_merge_query(ecto_query, selected_fields)
end
end
defp has_prior_select?(ecto_query) do
ecto_query.select && true
rescue
_ -> false
end
defp add_select_query(ecto_query, selected_fields) do
Ecto.Query.select(ecto_query, ^selected_fields)
end
defp add_select_merge_query(ecto_query, selected_fields) do
fields_as_strings =
selected_fields
|> Enum.map(fn field -> "#{field}: i.#{field}" end)
|> Enum.join(", ")
field_map_as_string = "%{#{fields_as_strings}}"
select_merge_as_string = "Ecto.Query.select_merge(ecto_query, [i], #{field_map_as_string})"
# NOTE: Operation is safe since requested fields have been filtered against
# existing ones before passed into `eval_string`.
{result, _} = Code.eval_string(select_merge_as_string, [ecto_query: ecto_query], __ENV__)
result
end
end
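# Usage sketch (added for illustration; assumes an `Artemis.User` schema
# with a `name` field, as in the doc examples above).
#
#     require Ecto.Query
#     query = Ecto.Query.from(u in Artemis.User)
#     Artemis.Repo.Select.select_query(query, Artemis.User, %{"select" => ["name"]})
#     #=> #Ecto.Query<from u0 in Artemis.User, select: [:name]>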
|
apps/artemis/lib/artemis/repo/select.ex
| 0.720958
| 0.507873
|
select.ex
|
starcoder
|
defmodule Flop.Filter do
@moduledoc """
Defines a filter.
"""
use Ecto.Schema
import Ecto.Changeset
import Flop.Schema
alias Ecto.Changeset
alias Flop.CustomTypes.Any
alias Flop.CustomTypes.ExistingAtom
alias Flop.CustomTypes.Operator
@typedoc """
Represents filter query parameters.
### Fields
- `field`: The field the filter is applied to. The allowed fields can be
restricted by deriving `Flop.Schema` in your Ecto schema.
- `op`: The filter operator.
- `value`: The comparison value of the filter.
"""
@type t :: %__MODULE__{
field: atom | String.t(),
op: op,
value: any
}
@typedoc """
Represents valid filter operators.
| Operator | Value | WHERE clause |
| :----------- | :------------------ | ----------------------------------- |
| `:==` | `"Salicaceae"` | `WHERE column = 'Salicaceae'` |
| `:!=` | `"Salicaceae"` | `WHERE column != 'Salicaceae'` |
| `:=~` | `"cyth"` | `WHERE column ILIKE '%cyth%'` |
| `:<=` | `10` | `WHERE column <= 10` |
| `:<` | `10` | `WHERE column < 10` |
| `:>=` | `10` | `WHERE column >= 10` |
| `:>` | `10` | `WHERE column > 10` |
| `:in` | `["pear", "plum"]` | `WHERE column IN ('pear', 'plum')` |
| `:like` | `"cyth"` | `WHERE column LIKE '%cyth%'` |
| `:like_and` | `"Rubi Rosa"` | `WHERE column LIKE '%Rubi%' AND column LIKE '%Rosa%'` |
| `:like_or` | `"Rubi Rosa"` | `WHERE column LIKE '%Rubi%' OR column LIKE '%Rosa%'` |
| `:ilike` | `"cyth"` | `WHERE column ILIKE '%cyth%'` |
| `:ilike_and` | `"Rubi Rosa"` | `WHERE column ILIKE '%Rubi%' AND column ILIKE '%Rosa%'` |
| `:ilike_or` | `"Rubi Rosa"` | `WHERE column ILIKE '%Rubi%' OR column ILIKE '%Rosa%'` |
"""
@type op ::
:==
| :!=
| :=~
| :<=
| :<
| :>=
| :>
| :in
| :like
| :like_and
| :like_or
| :ilike
| :ilike_and
| :ilike_or
@primary_key false
embedded_schema do
field :field, ExistingAtom
field :op, Operator, default: :==
field :value, Any
end
@doc false
@spec changeset(__MODULE__.t(), map, keyword) :: Changeset.t()
def changeset(filter, %{} = params, opts \\ []) do
filter
|> cast(params, [:field, :op, :value])
|> validate_required([:field, :op, :value])
|> validate_filterable(opts[:for])
end
@spec validate_filterable(Changeset.t(), module | nil) :: Changeset.t()
defp validate_filterable(changeset, nil), do: changeset
defp validate_filterable(changeset, module) do
filterable_fields =
module
|> struct()
|> filterable()
validate_inclusion(changeset, :field, filterable_fields)
end
end
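# Usage sketch (added for illustration; assumes the `ExistingAtom` and
# `Operator` custom types cast these string representations, as their
# names suggest).
#
#     iex> cs = Flop.Filter.changeset(%Flop.Filter{}, %{field: "name", op: "ilike", value: "rose"})
#     iex> cs.valid?
#     true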
|
lib/flop/filter.ex
| 0.83498
| 0.702862
|
filter.ex
|
starcoder
|
defmodule Shapt.Adapters.Env do
@behaviour Shapt.Adapter
@moduledoc """
An adapter to load toggle state from environment variables or an env file.
"""
@typedoc """
Additional option to configure the toggle.
#{__MODULE__} only defines one additional option, `:key`.
The key is the name of the environment variable used to read the toggle state.
If there is no `:key` set for a toggle, the adapter will derive an environment variable name from the `Shapt.toggle_name()`.
In that case the environment variable will be the `t:Shapt.toggle_name/0` upcased and stripped of any question mark.
"""
@type toggle_opts :: %{key: String.t()}
@typedoc """
Configuration for this adapter.
- `from`: source of the state of the toggles.
The only options are `:file` and `:env`.
If the `from` is set to `:file`, the option `file` is required.
- `file`: Required when `from` is set to `:file`.
It gonna be envinroment variable file used to load the toggles state.
"""
@type adapter_opts :: [from: :file | :env, file: filename()]
@typedoc """
Path to a file that must exist when starting the Shapt worker.
The content of the file must be one `ENVVAR=true` pair per line.
This will be loaded and used as the state of the toggles.
"""
@type filename :: Path.t()
@impl Shapt.Adapter
def load(opts, toggles) do
case opts[:from] do
:file -> from_file(opts[:file], toggles)
_from -> from_env(toggles)
end
end
@impl Shapt.Adapter
def create_template(_opts, toggles) do
toggles
|> Enum.map(&get_key/1)
|> Enum.map(&"#{&1}=false")
|> Enum.join("\n")
end
@impl Shapt.Adapter
def validate_configuration(opts) do
case opts[:from] do
:file ->
if File.regular?(opts[:file] || "") do
:ok
else
"not a file"
end
_from ->
:ok
end
end
defp from_env(toggles) do
toggles
|> Enum.map(&env_toggle/1)
|> Enum.into(%{})
end
defp env_toggle(toggle) do
value =
toggle
|> get_key()
|> System.get_env()
|> to_boolean()
{elem(toggle, 0), value}
end
defp from_file(nil, _toggles), do: %{}
defp from_file(file, toggles) do
keys = Enum.map(toggles, &get_key/1)
key_toggles = Enum.map(toggles, &remap_keys/1)
values = load_file(file, keys)
key_toggles
|> Enum.map(fn {k, t} -> {t, values[k] |> to_boolean()} end)
|> Enum.into(%{})
end
defp load_file(file, keys) do
case File.read(file) do
{:error, _err} ->
[]
{:ok, content} ->
content
|> String.split("\n")
|> Enum.map(&String.split(&1, "="))
|> Enum.map(&List.to_tuple/1)
|> Enum.filter(&(elem(&1, 0) in keys))
|> Enum.into(%{})
end
end
defp remap_keys(toggle), do: {get_key(toggle), elem(toggle, 0)}
defp get_key({toggle, opts}), do: opts[:key] || toggle_key(toggle)
defp toggle_key(key) do
key
|> Atom.to_string()
|> String.replace("?", "")
|> String.upcase()
end
defp to_boolean("true"), do: true
defp to_boolean(_env), do: false
end
|
lib/shapt/adapters/env.ex
| 0.768038
| 0.575886
|
env.ex
|
starcoder
|
defmodule Beamchmark.Suite.Measurements.CpuInfo do
@moduledoc """
Module representing statistics about cpu usage.
Method of measuring:
- Take a snapshot of cpu usage every `cpu_interval` milliseconds
- Calculate the average CPU usage of the processor (combining each core's usage)
- At the end, combine the results and calculate the overall average
**Warning!**
This module can give unstable CPU usage values when measuring over a short time, because of high CPU volatility.
TODO: this could be improved by taking the average of 5-10 values for each snapshot.
"""
use Bunch.Access
alias Beamchmark.Math
@typedoc """
All information gathered via single snapshot + processor average
"""
@type cpu_snapshot_t ::
%{
cpu_usage: %{
(core_id :: integer()) => usage :: Math.percent_t()
},
average_all_cores: average_all_cores :: Math.percent_t()
}
@typedoc """
All information gathered via all snapshots.
`average_all` is the average across all snapshots.
"""
@type t :: %__MODULE__{
cpu_snapshots: [cpu_snapshot_t()] | nil,
average_by_core: %{
(core_id :: number()) => usage :: Math.percent_t() | float()
},
average_all: Math.percent_t() | float()
}
defstruct cpu_snapshots: [],
average_by_core: %{},
average_all: 0
@doc """
Converts a list of `cpu_snapshot_t` to `#{__MODULE__}.t()`
by calculating the averages.
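A sketch with hypothetical snapshot values:

    snapshots = [
      %{cpu_usage: %{0 => 10.0, 1 => 30.0}, average_all_cores: 20.0},
      %{cpu_usage: %{0 => 20.0, 1 => 40.0}, average_all_cores: 30.0}
    ]
    from_cpu_snapshots(snapshots)
    # average_all: 25.0, average_by_core: %{0 => 15.0, 1 => 35.0}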
"""
@spec from_cpu_snapshots([cpu_snapshot_t()]) :: t()
def from_cpu_snapshots(cpu_snapshots) do
average_all =
Enum.reduce(cpu_snapshots, 0, fn map, average_all_acc ->
average_all_acc + map.average_all_cores
end) / length(cpu_snapshots)
sum_by_core =
Enum.reduce(cpu_snapshots, %{}, fn %{cpu_usage: cpu_usage}, sum_cores_acc ->
reduce_cpu_usage(cpu_usage, sum_cores_acc)
end)
number_of_snapshots = length(cpu_snapshots)
average_by_core =
Enum.reduce(sum_by_core, %{}, fn {core_id, value}, average_by_core_acc ->
Map.put(average_by_core_acc, core_id, value / number_of_snapshots)
end)
%__MODULE__{
cpu_snapshots: cpu_snapshots,
average_by_core: average_by_core,
average_all: average_all
}
end
@spec diff(t(), t()) :: t()
def diff(base, new) do
average_by_core_diff =
Enum.reduce(new.average_by_core, %{}, fn {core_id, new_core_avg},
average_by_core_diff_acc ->
Map.put(
average_by_core_diff_acc,
core_id,
new_core_avg - Map.fetch!(base.average_by_core, core_id)
)
end)
%__MODULE__{
cpu_snapshots: nil,
average_all: new.average_all - base.average_all,
average_by_core: average_by_core_diff
}
end
defp reduce_cpu_usage(cpu_usage, sum_cores_acc) do
Enum.reduce(cpu_usage, sum_cores_acc, fn {core_id, usage}, sum_cores_acc ->
Map.update(sum_cores_acc, core_id, usage, &(&1 + usage))
end)
end
end
|
lib/beamchmark/suite/measurements/cpu_info.ex
| 0.787523
| 0.521837
|
cpu_info.ex
|
starcoder
|
defmodule Kitto do
@moduledoc """
This is the documentation for the Kitto project.
You can find documentation about developing with Kitto and configuration
options at the [wiki](https://github.com/kittoframework/kitto#support)
By default, Kitto applications depend on the following packages:
* [Plug](https://hexdocs.pm/plug) - a specification and conveniences
for composable modules in between web applications
* [Poison](https://hexdocs.pm/poison) - an Elixir JSON library
"""
use Application
import Supervisor.Spec, warn: false
require Logger
@defaults %{ip: {127, 0, 0, 1}, port: 4000}
def start(_type, _args) do
opts = [strategy: :one_for_one, name: Kitto.Supervisor]
Supervisor.start_link(children(), opts)
end
@spec start_server() :: {:ok, pid()}
def start_server do
Logger.info "Starting Kitto server, listening on #{ip_human(ip())}:#{port()}"
{:ok, _pid} = Plug.Adapters.Cowboy.http(Kitto.Router, [], ip: ip(), port: port())
end
@doc """
Returns the root path of the dashboard project
"""
@spec root() :: String.t() | no_return()
def root do
case Application.get_env(:kitto, :root) do
:otp_app -> Application.app_dir(Application.get_env(:kitto, :otp_app))
path when is_bitstring(path) -> path
nil ->
"""
Kitto config :root is nil.
It should normally be set to Path.dirname(__DIR__) in config/config.exs
""" |> Logger.error
exit(:shutdown)
end
end
@doc """
Returns true when the asset development server is set to be watching for changes
"""
@spec watch_assets?() :: any()
def watch_assets?, do: Application.get_env :kitto, :watch_assets?, true
@doc """
Returns the binding ip of the assets watcher server
"""
@spec asset_server_host() :: any()
def asset_server_host, do: Application.get_env :kitto, :assets_host, "127.0.0.1"
@doc """
Returns the binding port of the assets watcher server
"""
@spec asset_server_port() :: any()
def asset_server_port, do: Application.get_env :kitto, :assets_port, 8080
defp ip, do: ip(Application.get_env(:kitto, :ip, @defaults.ip))
defp ip({:system, var}) do
case System.get_env(var) do
nil ->
Logger.error "Configured binding ip via #{var} but no value is set"
exit(:shutdown)
address -> address
|> String.split(".")
|> Enum.map(&String.to_integer/1)
|> List.to_tuple
end
end
defp ip(address) when is_tuple(address), do: address
defp ip(_), do: @defaults.ip
defp ip_human(tup), do: tup |> Tuple.to_list |> Enum.join(".")
defp port, do: port(Application.get_env(:kitto, :port))
defp port({:system, var}), do: var |> System.get_env |> Integer.parse |> elem(0)
defp port(p) when is_integer(p), do: p
defp port(_), do: @defaults.port
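# Config sketch (values and variable names are illustrative), e.g. in
# config/config.exs:
#
#     config :kitto, ip: {127, 0, 0, 1}, port: 4000
#     # or resolve both from environment variables at runtime:
#     config :kitto, ip: {:system, "KITTO_IP"}, port: {:system, "KITTO_PORT"}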
defp children do
case Kitto.CodeReloader.reload_code? do
true -> children(:prod) ++ [worker(Kitto.CodeReloader, [[server: :runner]])]
false -> children(:prod)
end
end
defp children(:prod) do
[supervisor(__MODULE__, [], function: :start_server),
supervisor(Kitto.Notifier, []),
worker(Kitto.BackoffServer, [[]]),
worker(Kitto.StatsServer, [[]]),
worker(Kitto.Runner, [[name: :runner]])]
end
end
|
lib/kitto.ex
| 0.666497
| 0.466481
|
kitto.ex
|
starcoder
|
defmodule App do
@moduledoc """
This is a module that provides simple blockchain example
with Merkle trees.
Using the following technology mocks:
- Using :ets module for transaction store.
- Using sha256 for hashing.
Providing the following functionalities.
- create a block.
- add transactions to a block.
- Create Block Tree.
- verify block.
- Create Block Root Hash.
This is a raw implementation focused on performance and on understanding Merkle trees, not on security.
Consider checking out my article on this at https://zemuldo.com/blog
"""
alias App.Transaction
alias App.Block
alias App.MerkleTree
def create_block(transactions) do
block = %Block{id: UUID.uuid4()}
Enum.map(transactions, &Transaction.init(&1, block)) |> Enum.map(&Block.add_transaction/1)
[[merkele_root] | _] = Block.get_transaction_hashes(block) |> MerkleTree.create_tree()
block |> Map.put(:merkele_root, merkele_root)
end
def verify_block(block) do
[[merkele_root] | _] = Block.get_transaction_hashes(block) |> MerkleTree.create_tree()
block.merkele_root == merkele_root
end
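# A round-trip sketch (assuming `transactions` is a list that
# App.Transaction.init/2 accepts):
#
#     block = App.create_block(transactions)
#     true = App.verify_block(block)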
@doc """
Generates a Merkle Tree and returns the root hash.
Takes a .txt filepath for a file that contains hash strings in each line or a list of hashes.
Returns root hash :: `String.t()`.
## Examples
iex> hashes = [
iex> "8ca4e01e7e6604235b87793dbc0600a30690607b32b9803dbf1fd7c14c87ffa9",
iex> "4669795e97cc87cdfed515147b29ceb131c31604233f51c5ccb325e06fdd23d8",
iex> "72ed2d7304873d39f05784cd11e4a910a44e964ccc58c5550aca50cd14bcb085",
iex> "ddd9c1f94df79edf8c253e1266f5d456ca8123641b9e57186a80231c9867c711",
iex> "<KEY>"
iex> ]
["<KEY>",
"<KEY>",
"<KEY>",
"ddd9c1f94df79edf8c253e1266f5d456ca8123641b9e57186a80231c9867c711",
"<KEY>"]
iex> App.create_merkle_root(hashes)
"2df16be3e8f1bb5d469f0d3902d91716627872099f5a2f683e7828b26376a794"
iex> App.create_merkle_root("test/hashes.txt")
"c1ffa0ab32c7a472ec6400f6ecce10a0a10dab1840e9518ea0b9b5597675508c"
"""
@spec create_merkle_root((hashes_file_path :: String.t()) | (hashes :: list(String.t()))) :: String.t()
defdelegate create_merkle_root(hashes), to: MerkleTree, as: :create_root
@doc """
Generates a Merkle tree and returns the full tree as a list of levels, with the root level first.
Takes a .txt filepath for a file that contains hash strings in each line or a list of hashes.
Returns tree :: `list(list(String.t()))`.
## Examples
iex> hashes = [
iex> "8ca4e01e7e6604235b87793dbc0600a30690607b32b9803dbf1fd7c14c87ffa9",
iex> "4669795e97cc87cdfed515147b29ceb131c31604233f51c5ccb325e06fdd23d8",
iex> "72ed2d7304873d39f05784cd11e4a910a44e964ccc58c5550aca50cd14bcb085",
iex> "ddd9c1f94df79edf8c253e1266f5d456ca8123641b9e57186a80231c9867c711",
iex> "594c61e01fb1df9dab41d1455522d9b0fc2ca3eceddc7df6a7e9052b70b6a60e"
iex> ]
["<KEY>",
"4669795e97cc87cdfed515147b29ceb131c31604233f51c5ccb325e06fdd23d8",
"<KEY>",
"ddd9c1f94df79edf8c253e1266f5d456ca8123641b9e57186a80231c9867c711",
"594c61e01fb1df9dab41d1455522d9b0fc2ca3eceddc7df6a7e9052b70b6a60e"]
iex> App.create_merkle_tree(hashes) |> Enum.at(0)
["2df16be3e8f1bb5d469f0d3902d91716627872099f5a2f683e7828b26376a794"]
iex> App.create_merkle_tree("test/hashes.txt") |> Enum.at(0)
["c1ffa0ab32c7a472ec6400f6ecce10a0a10dab1840e9518ea0b9b5597675508c"]
"""
@spec create_merkle_tree((hashes_file_path :: String.t()) | (hashes :: list(String.t()))) :: list(list(String.t()))
defdelegate create_merkle_tree(hashes), to: MerkleTree, as: :create_tree
end
|
merkle-tree/elixir/lib/app.ex
| 0.883864
| 0.532243
|
app.ex
|
starcoder
|
defmodule Commanded.Commands.CompositeRouter do
@moduledoc """
Composite router allows you to combine multiple router modules into a single
router able to dispatch any registered command from an included child router.
One example usage is to define a router per context and then combine each
context's router into a single top-level composite app router used for all
command dispatching.
### Example
Define a composite router module which imports the commands from each included
router:
defmodule Bank.AppRouter do
use Commanded.Commands.CompositeRouter
router(Bank.Accounts.Router)
router(Bank.MoneyTransfer.Router)
end
One or more routers or composite routers can be included in a
`Commanded.Application` since it is also a composite router:
defmodule BankApp do
use Commanded.Application
router(Bank.AppRouter)
end
You can dispatch a command via the application which will then be routed to
the associated child router:
command = %OpenAccount{account_number: "ACC123", initial_balance: 1_000}
:ok = BankApp.dispatch(command)
Or via the composite router itself, specifying the application:
:ok = Bank.AppRouter.dispatch(command, application: BankApp)
A composite router can include composite routers.
"""
defmacro __using__(opts) do
quote do
require Logger
import unquote(__MODULE__)
@before_compile unquote(__MODULE__)
Module.register_attribute(__MODULE__, :registered_commands, accumulate: false)
application = Keyword.get(unquote(opts), :application)
default_dispatch_opts =
unquote(opts)
|> Keyword.get(:default_dispatch_opts, [])
|> Keyword.put(:application, application)
@default_dispatch_opts default_dispatch_opts
@registered_commands %{}
end
end
@doc """
Register a `Commanded.Commands.Router` module within this composite router.
Will allow the composite router to dispatch any commands registered by the
included router module. Multiple routers can be registered.
"""
defmacro router(router_module) do
quote location: :keep do
for command <- unquote(router_module).__registered_commands__() do
case Map.get(@registered_commands, command) do
nil ->
@registered_commands Map.put(@registered_commands, command, unquote(router_module))
existing_router ->
raise "duplicate registration for #{inspect(command)} command, registered in both #{
inspect(existing_router)
} and #{inspect(unquote(router_module))}"
end
end
end
end
defmacro __before_compile__(_env) do
quote generated: true do
@doc false
def __registered_commands__ do
Enum.map(@registered_commands, fn {command_module, _router} -> command_module end)
end
@doc false
def dispatch(command, opts \\ [])
@doc false
def dispatch(command, :infinity),
do: do_dispatch(command, timeout: :infinity)
@doc false
def dispatch(command, timeout) when is_integer(timeout),
do: do_dispatch(command, timeout: timeout)
@doc false
def dispatch(command, opts),
do: do_dispatch(command, opts)
for {command_module, router} <- @registered_commands do
@command_module command_module
@router router
defp do_dispatch(%@command_module{} = command, opts) do
opts = Keyword.merge(@default_dispatch_opts, opts)
@router.dispatch(command, opts)
end
end
# Catch unregistered commands, log and return an error.
defp do_dispatch(command, _opts) do
Logger.error(fn ->
"attempted to dispatch an unregistered command: " <> inspect(command)
end)
{:error, :unregistered_command}
end
end
end
end
|
lib/commanded/commands/composite_router.ex
| 0.852506
| 0.402715
|
composite_router.ex
|
starcoder
|
defmodule CBOR do
@moduledoc """
The Concise Binary Object Representation (CBOR) is a data format
whose design goals include the possibility of extremely small code
size, fairly small message size, and extensibility without the need
for version negotiation. These design goals make it different from
earlier binary serializations such as ASN.1 and MessagePack.
The objectives of CBOR, roughly in decreasing order of importance are:
1. The representation must be able to unambiguously encode most
common data formats used in Internet standards.
* It must represent a reasonable set of basic data types and
structures using binary encoding. "Reasonable" here is
largely influenced by the capabilities of JSON, with the major
addition of binary byte strings. The structures supported are
limited to arrays and trees; loops and lattice-style graphs
are not supported.
* There is no requirement that all data formats be uniquely
encoded; that is, it is acceptable that the number "7" might
be encoded in multiple different ways.
2. The code for an encoder or decoder must be able to be compact in
order to support systems with very limited memory, processor
power, and instruction sets.
* An encoder and a decoder need to be implementable in a very
small amount of code (for example, in class 1 constrained
nodes as defined in [CNN-TERMS]).
* The format should use contemporary machine representations of
data (for example, not requiring binary-to-decimal
conversion).
3. Data must be able to be decoded without a schema description.
* Similar to JSON, encoded data should be self-describing so
that a generic decoder can be written.
4. The serialization must be reasonably compact, but data
compactness is secondary to code compactness for the encoder and
decoder.
* "Reasonable" here is bounded by JSON as an upper bound in
size, and by implementation complexity maintaining a lower
bound. Using either general compression schemes or extensive
bit-fiddling violates the complexity goals.
5. The format must be applicable to both constrained nodes and high-
volume applications.
* This means it must be reasonably frugal in CPU usage for both
encoding and decoding. This is relevant both for constrained
nodes and for potential usage in applications with a very high
volume of data.
6. The format must support all JSON data types for conversion to and
from JSON.
* It must support a reasonable level of conversion as long as
the data represented is within the capabilities of JSON. It
must be possible to define a unidirectional mapping towards
JSON for all types of data.
7. The format must be extensible, and the extended data must be
decodable by earlier decoders.
* The format is designed for decades of use.
* The format must support a form of extensibility that allows
fallback so that a decoder that does not understand an
extension can still decode the message.
* The format must be able to be extended in the future by later
IETF standards.
"""
@doc """
Returns a binary encoding of the data in a format
that can be interpreted by other CBOR libraries.
## Examples
iex> CBOR.encode(["Hello", "World!"])
<<130, 101, 72, 101, 108, 108, 111, 102, 87, 111, 114, 108, 100, 33>>
iex> CBOR.encode([1, [2, 3]])
<<130, 1, 130, 2, 3>>
iex> CBOR.encode(%{"a" => 1, "b" => [2, 3]})
<<162, 97, 97, 1, 97, 98, 130, 2, 3>>
"""
@spec encode(any()) :: binary()
def encode(value), do: CBOR.Encoder.encode_into(value, <<>>)
@doc """
Converts a CBOR encoded binary into native elixir data structures
## Examples
iex> CBOR.decode(<<130, 101, 72, 101, 108, 108, 111, 102, 87, 111, 114, 108, 100, 33>>)
{:ok, ["Hello", "World!"], ""}
iex> CBOR.decode(<<130, 1, 130, 2, 3>>)
{:ok, [1, [2, 3]], ""}
iex> CBOR.decode(<<162, 97, 97, 1, 97, 98, 130, 2, 3>>)
{:ok, %{"a" => 1, "b" => [2, 3]}, ""}
"""
@spec decode(binary()) :: {:ok, any(), binary()} | {:error, atom}
def decode(binary) do
try do
perform_decoding(binary)
rescue
FunctionClauseError -> {:error, :cbor_function_clause_error}
end
end
defp perform_decoding(binary) when is_binary(binary) do
case CBOR.Decoder.decode(binary) do
{value, rest} -> {:ok, value, rest}
_other -> {:error, :cbor_decoder_error}
end
end
defp perform_decoding(_value), do: {:error, :cannot_decode_non_binary_values}
end
|
lib/cbor.ex
| 0.852736
| 0.837753
|
cbor.ex
|
starcoder
|
defprotocol Marker.Encoder do
@moduledoc """
The Marker Encoder protocol.
This protocol is used by Marker's compiler to convert different Elixir
data types to it's `Marker.Compiler.element` type.
Elements can be of the type `String.t`,
`Marker.Element.t`, or `Macro.t`, so any implementation of the
`Marker.Encoder` protocol needs to return one of these types.
Strings are expected to be escaped.
The following types are implemented by default:
`Integer`, `Float`, `Atom`, `Tuple`, `List`, `BitString`, `Date`, `Time`,
`DateTime`, `NaiveDateTime` and `Marker.Element`
You can easily implement a protocol implementation for a custom
type, by defining an `encode` function that receives the custom type
and outputs to `Marker.Compiler.element`.
### Example
iex> defmodule Customer do
...> defstruct [:name, :email, :phone]
...> end
iex> defimpl Marker.Encoder, for: Customer do
...> def encode(%Customer{name: name, email: email, phone: phone}) do
...> use Marker
...>
...> div class: "customer" do
...> div [span("name: "), span(name)]
...> div [span("email: "), span(email)]
...> div [span("phone: "), span(phone)]
...> end
...> end
...> end
iex> c = %Customer{name: "Fred", email: "<EMAIL>", phone: "+31 6 5678 1234"}
%Customer{email: "<EMAIL>", name: "Fred", phone: "+31 6 5678 1234"}
iex> Marker.Encoder.encode c
{:safe, "<div class='customer'><div><span>name: </span><span>Fred</span></div><div><span>email: </span><span><EMAIL></span></div><div><span>phone: </span><span>+31 6 5678 1234</span></div></div>"}
"""
@spec encode(Marker.Encoder.t()) :: Marker.Compiler.element()
def encode(value)
end
defimpl Marker.Encoder, for: BitString do
def encode(value), do: Marker.Compiler.escape(value)
end
defimpl Marker.Encoder, for: Marker.Element do
def encode(value), do: value
end
defimpl Marker.Encoder, for: Tuple do
def encode({:safe, value}) when is_binary(value) do
value
end
def encode(value) do
with {:safe, val} <- value,
:ok <- Macro.validate(val) do
val
else
_ ->
if Macro.validate(value) == :ok do
value
else
raise Protocol.UndefinedError, protocol: Marker.Encoder, value: value
end
end
end
end
defimpl Marker.Encoder, for: List do
def encode(list) do
Enum.reduce(list, "", fn value, acc ->
quote do: unquote(acc) <> Marker.Encoder.encode(unquote(value))
end)
end
end
defimpl Marker.Encoder, for: Atom do
def encode(nil), do: ""
def encode(value), do: Marker.Compiler.escape(Atom.to_string(value))
end
defimpl Marker.Encoder, for: Integer do
def encode(value), do: Integer.to_string(value)
end
defimpl Marker.Encoder, for: Float do
def encode(value), do: Float.to_string(value)
end
defimpl Marker.Encoder, for: Date do
def encode(value), do: Date.to_string(value)
end
defimpl Marker.Encoder, for: Time do
def encode(value), do: Time.to_string(value)
end
defimpl Marker.Encoder, for: DateTime do
def encode(value), do: DateTime.to_string(value)
end
defimpl Marker.Encoder, for: NaiveDateTime do
def encode(value), do: NaiveDateTime.to_string(value)
end
|
lib/marker/encoder.ex
| 0.900089
| 0.508544
|
encoder.ex
|
starcoder
|
defmodule FakeServer do
alias FakeServer.HTTP.Server
alias FakeServer.Agents.EnvAgent
@moduledoc """
Manage HTTP servers on your tests
"""
@doc """
Runs a test with an HTTP server.
If you need an HTTP server in your test, just write it using `test_with_server/3` instead of `ExUnit.Case.test/3`. Their arguments are similar: a description (the `test_description` argument), the implementation of the test case itself (the `do` block) and an optional list of parameters (the `opts` argument).
The server will start just before your test block and will stop just before the test exits. Each `test_with_server/3` has its own server. By default, all servers will start in a random unused port, which allows you to run your tests with `ExUnit.Case async: true` option enabled.
## Environment
FakeServer defines an environment for each `test_with_server/3`. This environment is stored inside a `FakeServer.Env` structure, which has the following fields:
- `:ip`: the current server IP
- `:port`: the current server port
- `:routes`: the list of server routes
- `:hits`: the number of requests made to the server
To access this environment, you can use `FakeServer.env/0`, which returns the environment for the current test. For convenience, you can also use `FakeServer.address/0`, `FakeServer.http_address/0` or `FakeServer.hits/0`.
## Server options
You can set some options to the server before it starts using the `opts` params. The following options are accepted:
`:default_response`: The response that will be given by the server if a route has no responses configured.
`:port`: The port that the server will listen.
## Usage:
```elixir
defmodule SomeTest do
use ExUnit.Case, async: true
import FakeServer
alias FakeServer.HTTP.Response
test_with_server "each test runs its own http server" do
IO.inspect FakeServer.env
# prints something like %FakeServer.Env{hits: 0, ip: "127.0.0.1", port: 5156, routes: []}
end
test_with_server "it is possible to configure the server to run on a specific port", [port: 5001] do
assert FakeServer.env.port == 5001
assert FakeServer.address == "127.0.0.1:5001"
end
test_with_server "it is possible to count how many requests the server received" do
route "/", fn(_) -> Response.ok end
assert FakeServer.hits == 0
HTTPoison.get! FakeServer.address <> "/"
assert FakeServer.hits == 1
HTTPoison.get! FakeServer.address <> "/"
assert FakeServer.hits == 2
end
test_with_server "adding body and headers to the response" do
route "/", do: Response.ok(~s<{"response": "ok"}>, [{'x-my-header', 'fake-server'}])
response = HTTPoison.get! FakeServer.address <> "/"
assert Enum.any?(response.headers, fn(header) -> header == {"x-my-header", "fake-server"} end)
end
end
```
"""
defmacro test_with_server(test_description, opts \\ [], do: test_block) do
quote do
test unquote(test_description) do
map_opts = Enum.into(unquote(opts), %{})
{:ok, server_id, port} = Server.run(map_opts)
env = FakeServer.Env.new(port)
EnvAgent.save_env(server_id, env)
var!(current_id, FakeServer) = server_id
unquote(test_block)
Server.stop(server_id)
EnvAgent.delete_env(server_id)
end
end
end
@doc """
Adds a route to a server and the response that will be given when a request reaches that route.
When the macro route is used, you are telling the server what to respond when a request is made for this route. If you run a `test_with_server/3` with no route configured, the server will always reply `404`.
```elixir
test_with_server "if you do not add any route, the server will reply 404 to all requests" do
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 404
response = HTTPoison.get! FakeServer.address <> "/test"
assert response.status_code == 404
response = HTTPoison.get! FakeServer.address <> "/test/1"
assert response.status_code == 404
assert FakeServer.env.hits == 0
end
```
## Adding routes
When you add a route, you have to say what will be answered by it when it receives a request. For each request, the server will use the appropriate `FakeServer.HTTP.Response` based on the way the route was configured.
### Routes with a single response
When the test expects the route to receive only one request, it is appropriate to configure this route with a single response.
```elixir
test_with_server "raises UserNotFound error when the user is not found on server" do
route "/user/" <> @user_id, Response.not_found
assert_raise, MyApp.Errors.UserNotFound, fn ->
MyApp.External.User.get(@user_id)
end
end
```
### Routes with lists
When the route is configured with a list of `FakeServer.HTTP.Response`s, the server will respond with the first element in the list and then remove it. This will be repeated for each request made for this route. If the list is empty, the server will respond with its `default_response`.
```
test_with_server "the server will always reply the first element and then remove it" do
route "/", [Response.ok, Response.not_found, Response.bad_request]
assert FakeServer.hits == 0
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 200
assert FakeServer.hits == 1
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 404
assert FakeServer.hits == 2
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 400
assert FakeServer.hits == 3
end
test_with_server "default response can be configured and will be replied when the response list is empty", [default_response: Response.bad_request] do
route "/", []
assert FakeServer.hits == 0
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 400
assert FakeServer.hits == 1
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 400
assert FakeServer.hits == 2
end
```
### Configuring a route with a function
You can configure a route to execute a function every time a request arrives. This function must accept a single argument: a `FakeServer.Request` struct, which holds several pieces of information about the request, such as the method, headers and query strings.
Configuring a route with a function is useful when you need to simulate timeouts, or to validate the presence of headers or mandatory parameters.
The function is called every time a request arrives at that route. If its return value is a `FakeServer.HTTP.Response`, that response is sent. Otherwise, the server's `default_response` is returned.
```elixir
test_with_server "the server will return the default_response if the function return is not a Response struct", [default_response: Response.not_found("Ops!")] do
route "/", fn(_) -> :ok end
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 404
assert response.body == "Ops!"
end
test_with_server "you can evaluate the request object to choose what to reply" do
route "/", fn(%{query: query} = _req) ->
case Map.get(query, "access_token") do
"1234" -> Response.ok("Welcome!")
nil -> Response.bad_request("You must provide an access_token!")
_ -> Response.forbidden("Invalid access token!")
end
end
response = HTTPoison.get! FakeServer.address <> "/"
assert response.status_code == 400
assert response.body == "You must provide an access_token!"
response = HTTPoison.get! FakeServer.address <> "/?access_token=4321"
assert response.status_code == 403
assert response.body == "Invalid access token!"
response = HTTPoison.get! FakeServer.address <> "/?access_token=<PASSWORD>"
assert response.status_code == 200
assert response.body == "Welcome!"
end
```
### Responses
The server will always use a struct to set the response. You can define the headers and body of this struct using `FakeServer.HTTP.Response` helpers like `FakeServer.HTTP.Response.ok/2` or `FakeServer.HTTP.Response.not_found/2`. There are helpers like these for most of the HTTP status codes.
You can also use `FakeServer.HTTP.Response.new/3` or even create the struct yourself. For more details see `FakeServer.HTTP.Response` docs.
```elixir
test_with_server "adding body and headers to the response" do
route "/", do: Response.ok(~s<{"response": "ok"}>, %{"x-my-header" => 'fake-server'})
response = HTTPoison.get! FakeServer.address <> "/"
assert Enum.any?(response.headers, fn(header) -> header == {"x-my-header", "fake-server"} end)
end
```
"""
# DEPRECATED: kept for backward compatibility
defmacro route(path, response_block \\ nil)
defmacro route(path, do: response_block) do
quote do
current_id = var!(current_id, FakeServer)
env = EnvAgent.get_env(current_id)
EnvAgent.save_env(current_id, %FakeServer.Env{env | routes: [unquote(path)|env.routes]})
Server.add_response(current_id, unquote(path), unquote(response_block))
end
end
defmacro route(path, response_block) do
quote do
current_id = var!(current_id, FakeServer)
env = EnvAgent.get_env(current_id)
EnvAgent.save_env(current_id, %FakeServer.Env{env | routes: [unquote(path)|env.routes]})
Server.add_response(current_id, unquote(path), unquote(response_block))
end
end
@doc """
Returns the current server environment.
You can only call `FakeServer.env/0` inside `test_with_server/3`.
## Usage
```elixir
test_with_server "Getting the server env", [port: 5001] do
assert FakeServer.env.ip == "127.0.0.1"
assert FakeServer.env.port == 5001
end
```
"""
defmacro env do
quote do
case var!(current_id, FakeServer) do
nil -> raise "You can call this macro inside test_with_server only"
current_id -> EnvAgent.get_env(current_id)
end
end
end
@doc """
Returns the current server address.
You can only call `FakeServer.address/0` inside `test_with_server/3`.
## Usage
```elixir
test_with_server "Getting the server address", [port: 5001] do
assert FakeServer.address == "127.0.0.1:5001"
end
```
"""
defmacro address do
quote do
case var!(current_id, FakeServer) do
nil -> raise "You can only call FakeServer.address inside test_with_server"
current_id ->
env = EnvAgent.get_env(current_id)
"#{env.ip}:#{env.port}"
end
end
end
@doc """
Returns the current server HTTP address.
You can only call `FakeServer.http_address/0` inside `test_with_server/3`.
## Usage
```elixir
test_with_server "Getting the server address", [port: 5001] do
assert FakeServer.address == "http://127.0.0.1:5001"
end
```
"""
defmacro http_address do
quote do
case var!(current_id, FakeServer) do
nil -> raise "You can only call FakeServer.address inside test_with_server"
current_id ->
env = EnvAgent.get_env(current_id)
"http://#{env.ip}:#{env.port}"
end
end
end
@doc """
Returns the number of requests made to the server.
You can only call `FakeServer.hits/0` inside `test_with_server/3`.
## Usage
```elixir
test_with_server "counting server hits" do
route "/", do: Response.ok
assert FakeServer.hits == 0
HTTPoison.get! FakeServer.address <> "/"
assert FakeServer.hits == 1
HTTPoison.get! FakeServer.address <> "/"
assert FakeServer.hits == 2
end
```
"""
defmacro hits do
quote do
case var!(current_id, FakeServer) do
nil -> raise "You can only call FakeServer.hits inside test_with_server"
current_id ->
EnvAgent.get_env(current_id).hits
end
end
end
end
|
lib/fake_server.ex
| 0.891999
| 0.79649
|
fake_server.ex
|
starcoder
|
defmodule AoC.Day10 do
@moduledoc false
def part_1 do
"data/day10-input.txt"
|> File.stream!()
|> Enum.map(&String.trim/1)
|> AoC.Day10.max_detected()
|> elem(1)
end
def part_2 do
map_data =
"data/day10-input.txt"
|> File.stream!()
|> Enum.map(&String.trim/1)
{base, _} = AoC.Day10.max_detected(map_data)
AoC.Day10.zap_asteroids(map_data, base)
|> Enum.at(199)
|> (fn {x, y} -> x * 100 + y end).()
end
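# Angle convention inferred from the clauses below: 0 points "up"
# (towards decreasing row) and angles grow clockwise, so east is pi/2,
# south is pi and west is 3 * pi / 2, matching the laser's rotation order.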
def angle({a_col, a_row}, {b_col, b_row}) when a_row == b_row and a_col == b_col,
do: raise("cannot find angle to itself")
def angle({a_col, a_row}, {b_col, b_row}) when a_row > b_row and a_col == b_col,
do: 0
def angle({a_col, a_row}, {b_col, b_row}) when a_row > b_row and a_col < b_col,
do: Math.atan((b_col - a_col) / (a_row - b_row))
def angle({a_col, a_row}, {b_col, b_row}) when a_row == b_row and a_col < b_col,
do: Math.pi() / 2
def angle({a_col, a_row}, {b_col, b_row}) when a_row < b_row and a_col < b_col,
do: Math.atan((b_row - a_row) / (b_col - a_col)) + Math.pi() / 2
def angle({a_col, a_row}, {b_col, b_row}) when a_row < b_row and a_col == b_col,
do: Math.pi()
def angle({a_col, a_row}, {b_col, b_row}) when a_row < b_row and a_col > b_col,
do: Math.atan((a_col - b_col) / (b_row - a_row)) + Math.pi()
def angle({a_col, a_row}, {b_col, b_row}) when a_row == b_row and a_col > b_col,
do: Math.pi() * 3 / 2
def angle({a_col, a_row}, {b_col, b_row}) when a_row > b_row and a_col > b_col,
do: Math.atan((a_row - b_row) / (a_col - b_col)) + Math.pi() * 3 / 2
def detected_count(a, bs) do
a
|> group_by_angle(bs)
|> map_size()
end
def distance({a_col, a_row}, {b_col, b_row}),
do: Math.sqrt(Math.pow(b_col - a_col, 2) + Math.pow(b_row - a_row, 2))
def filter(matrix, fun) do
matrix
|> Max.map(fn index, value ->
{row, col} = Max.index_to_position(matrix, index)
{{col, row}, value}
end)
|> Enum.filter(fun)
|> Enum.map(fn {position, _} -> position end)
end
def group_by_angle(a, bs) do
a
|> position_angles(bs)
|> Enum.group_by(fn {_, angle} -> angle end)
end
def map_data_to_matrix(data) do
data
|> Enum.map(&String.split(&1, "", trim: true))
|> Enum.map(
&Enum.map(&1, fn
"#" -> 1
_ -> 0
end)
)
|> Max.from_list_of_lists()
end
def max_detected(map_data) do
positions =
map_data
|> map_data_to_matrix()
|> filter(fn {_, value} -> value == 1 end)
positions
|> Enum.map(fn a ->
bs = List.delete(positions, a)
{a, AoC.Day10.detected_count(a, bs)}
end)
|> Enum.max_by(fn {_, count} -> count end)
end
def position_angles(a, bs), do: Enum.map(bs, fn b -> {b, angle(a, b)} end)
def zap([], zapped_order), do: Enum.reverse(zapped_order)
def zap([{_, []} | angle_tail], zapped_order), do: zap(angle_tail, zapped_order)
def zap([{angle, [{position, _} | range_tail]} | angle_tail], zapped_order) do
[angle_tail, {angle, range_tail}]
|> List.flatten()
|> zap([position | zapped_order])
end
def zap_asteroids(map_data, base) do
positions =
map_data
|> map_data_to_matrix()
|> filter(fn {_, value} -> value == 1 end)
others = List.delete(positions, base)
group_by_angle(base, others)
|> Map.to_list()
|> Enum.sort_by(fn {angle, _} -> angle end)
|> Enum.map(fn {angle, positions} ->
{angle, Enum.sort_by(positions, fn {position, _} -> distance(base, position) end)}
end)
|> zap([])
end
end
|
lib/aoc/day_10.ex
| 0.685107
| 0.522263
|
day_10.ex
|
starcoder
|
defmodule OpencensusEcto do
@moduledoc """
Telemetry handler for creating OpenCensus spans from Ecto query events.
"""
import Bitwise
require Record
Record.defrecordp(
:span,
Record.extract(:span, from_lib: "opencensus/include/opencensus.hrl")
)
Record.defrecordp(
:span_ctx,
Record.extract(:span_ctx, from_lib: "opencensus/include/opencensus.hrl")
)
@doc """
Attaches the OpencensusEcto handler to your repo events. This should be called
from your application behaviour on startup.
Example:
OpencensusEcto.setup([:blog, :repo, :query])
You may also supply the following options in the second argument:
* `:time_unit` - a time unit used to convert the values of query phase
timings, defaults to `:microsecond`. See `System.convert_time_unit/3`
* `:span_prefix` - the first part of the span name, as a `String.t`,
defaults to the concatenation of the event name with periods, e.g.
`"blog.repo.query"`. This will always be followed with a colon and the
source (the table name for SQL adapters).
* `:span_name` - a unary function that will be called with the event
metadata to compute the entire name of the span. Setting this will
override the `:span_prefix` option.
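For example (option values are illustrative):

    OpencensusEcto.setup([:blog, :repo, :query],
      time_unit: :millisecond,
      span_prefix: "blog.repo.query"
    )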
"""
def setup(event_name, config \\ []) do
:telemetry.attach({__MODULE__, event_name}, event_name, &__MODULE__.handle_event/4, config)
end
@doc false
def handle_event(event, measurements, metadata, config) do
{end_time, end_offset} = ending = :wts.timestamp()
with span_ctx(
trace_id: trace_id,
trace_options: trace_options,
tracestate: state,
span_id: parent_span
)
when is_integer(trace_options) and (trace_options &&& 1) != 0 <- :ocp.current_span_ctx() do
total_time = measurements[:total_time]
%{
query: query,
source: source,
result: query_result
} = metadata
time_unit = Keyword.get(config, :time_unit, :microsecond)
span_name =
with nil <- List.keyfind(config, :span_name, 0),
nil <- List.keyfind(config, :span_prefix, 0) do
Enum.join(event, ".") <> ":#{source}"
else
{:span_name, f} when is_function(f, 1) -> f.(metadata)
{:span_prefix, prefix} -> "#{prefix}:#{source}"
end
base_attributes =
Map.merge(
%{
"query" => query,
"source" => source,
"total_time_#{time_unit}s" => System.convert_time_unit(total_time, :native, time_unit)
},
case query_result do
{:ok, _} -> %{}
_ -> %{"error" => true}
end
)
attributes =
measurements
|> Enum.into(%{})
|> Map.take(~w(decode_time query_time queue_time)a)
|> Enum.reject(&is_nil(elem(&1, 1)))
|> Enum.map(fn {k, v} ->
{"#{k}_#{time_unit}s", System.convert_time_unit(v, :native, time_unit)}
end)
|> Enum.into(base_attributes)
span(
name: span_name,
trace_id: trace_id,
span_id: :opencensus.generate_span_id(),
tracestate: state,
start_time: {end_time - total_time, end_offset},
end_time: ending,
parent_span_id: parent_span,
attributes: attributes
)
|> :oc_reporter.store_span()
end
end
end
|
lib/opencensus_ecto.ex
| 0.823931
| 0.494995
|
opencensus_ecto.ex
|
starcoder
|
defmodule OMG.Watcher.State.Transaction.Payment do
@moduledoc """
Internal representation of a raw payment transaction done on Plasma chain.
This module holds the representation of a "raw" transaction, i.e. without signatures nor recovered input spenders
"""
alias OMG.Watcher.Crypto
alias OMG.Output
alias OMG.Watcher.RawData
alias OMG.Watcher.State.Transaction
alias OMG.Watcher.Utxo
require Transaction
require Utxo
@zero_metadata <<0::256>>
@payment_tx_type OMG.Watcher.WireFormatTypes.tx_type_for(:tx_payment_v1)
@payment_output_type OMG.Watcher.WireFormatTypes.output_type_for(:output_payment_v1)
defstruct [:tx_type, :inputs, :outputs, metadata: @zero_metadata]
@type t() :: %__MODULE__{
tx_type: non_neg_integer(),
inputs: list(OMG.Watcher.Utxo.Position.t()),
outputs: list(Output.t()),
metadata: Transaction.metadata()
}
@type currency() :: Crypto.address_t()
@max_inputs 4
@max_outputs 4
defmacro max_inputs() do
quote do
unquote(@max_inputs)
end
end
defmacro max_outputs() do
quote do
unquote(@max_outputs)
end
end
@doc """
Creates a new raw transaction structure from a list of inputs and a list of outputs, given in a succinct tuple form.
assumptions:
```
length(inputs) <= @max_inputs
length(outputs) <= @max_outputs
```
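A sketch (the position integers and the 20-byte binaries are illustrative):
```
new(
  [{1000, 1, 0}],
  [{<<1::160>>, <<2::160>>, 7}]
)
```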
"""
@spec new(
list({pos_integer, pos_integer, 0..unquote(@max_outputs - 1)}),
list({Crypto.address_t(), currency(), pos_integer}),
Transaction.metadata()
) :: t()
def new(inputs, outputs, metadata \\ @zero_metadata)
def new(inputs, outputs, metadata)
when Transaction.is_metadata(metadata) and length(inputs) <= @max_inputs and length(outputs) <= @max_outputs do
inputs = Enum.map(inputs, &new_input/1)
outputs = Enum.map(outputs, &new_output/1)
%__MODULE__{tx_type: @payment_tx_type, inputs: inputs, outputs: outputs, metadata: metadata}
end
@doc """
Transforms the structure of RLP items after a successful RLP decode of a raw transaction, into a structure instance
"""
def reconstruct([tx_type, inputs_rlp, outputs_rlp, tx_data_rlp, metadata_rlp]) do
with {:ok, inputs} <- reconstruct_inputs(inputs_rlp),
{:ok, outputs} <- reconstruct_outputs(outputs_rlp),
{:ok, tx_data} <- RawData.parse_uint256(tx_data_rlp),
:ok <- check_tx_data(tx_data),
{:ok, metadata} <- reconstruct_metadata(metadata_rlp),
do: {:ok, %__MODULE__{tx_type: tx_type, inputs: inputs, outputs: outputs, metadata: metadata}}
end
def reconstruct(_), do: {:error, :malformed_transaction}
# `new_input/1` and `new_output/1` are here just to help interpret the
# short-hand form of inputs/outputs when doing `new/3`
defp new_input({blknum, txindex, oindex}), do: Utxo.position(blknum, txindex, oindex)
defp new_output({owner, currency, amount}) do
%Output{
owner: owner,
currency: currency,
amount: amount,
output_type: @payment_output_type
}
end
defp reconstruct_inputs(inputs_rlp) do
with {:ok, inputs} <- parse_inputs(inputs_rlp),
do: {:ok, inputs}
end
defp reconstruct_outputs([]), do: {:error, :empty_outputs}
defp reconstruct_outputs(outputs_rlp) do
with {:ok, outputs} <- parse_outputs(outputs_rlp),
do: {:ok, outputs}
end
# txData is required to be zero in the contract
defp check_tx_data(0), do: :ok
defp check_tx_data(_), do: {:error, :malformed_tx_data}
defp reconstruct_metadata(metadata) when Transaction.is_metadata(metadata), do: {:ok, metadata}
defp reconstruct_metadata(_), do: {:error, :malformed_metadata}
defp parse_inputs(inputs_rlp) do
with true <- Enum.count(inputs_rlp) <= @max_inputs || {:error, :too_many_inputs},
# NOTE: workaround for https://github.com/omgnetwork/ex_plasma/issues/19.
# remove, when this is blocked on `ex_plasma` end
true <- Enum.all?(inputs_rlp, &(&1 != <<0::256>>)) || {:error, :malformed_inputs},
do: {:ok, Enum.map(inputs_rlp, &parse_input!/1)}
rescue
_ -> {:error, :malformed_inputs}
end
defp parse_outputs(outputs_rlp) do
outputs = Enum.map(outputs_rlp, &Output.reconstruct/1)
with true <- Enum.count(outputs) <= @max_outputs || {:error, :too_many_outputs},
nil <- Enum.find(outputs, &match?({:error, _}, &1)),
true <- only_allowed_output_types?(outputs) || {:error, :tx_cannot_create_output_type},
do: {:ok, outputs}
rescue
_ -> {:error, :malformed_outputs}
end
defp only_allowed_output_types?(outputs),
do: Enum.all?(outputs, &match?(%Output{}, &1))
defp parse_input!(encoded), do: OMG.Watcher.Utxo.Position.decode!(encoded)
end
defimpl OMG.Watcher.State.Transaction.Protocol, for: OMG.Watcher.State.Transaction.Payment do
alias OMG.Output
alias OMG.Watcher.State.Transaction
alias OMG.Watcher.Utxo
require Transaction
require Utxo
@empty_signature <<0::size(520)>>
@doc """
Turns a structure instance into a structure of RLP items, ready to be RLP encoded, for a raw transaction
"""
@spec get_data_for_rlp(Transaction.Payment.t()) :: list(any())
def get_data_for_rlp(%Transaction.Payment{tx_type: tx_type, inputs: inputs, outputs: outputs, metadata: metadata})
when Transaction.is_metadata(metadata),
do: [
tx_type,
Enum.map(inputs, &OMG.Watcher.Utxo.Position.get_data_for_rlp/1),
Enum.map(outputs, &Output.get_data_for_rlp/1),
# tx_data used to be optional and, as such, was `if`-appended here when not null.
# Now that it is mandatory, keeping the `if` makes dialyzer complain, so 0 is always emitted.
0,
metadata
]
@spec get_outputs(Transaction.Payment.t()) :: list(Output.t())
def get_outputs(%Transaction.Payment{outputs: outputs}), do: outputs
@spec get_inputs(Transaction.Payment.t()) :: list(OMG.Watcher.Utxo.Position.t())
def get_inputs(%Transaction.Payment{inputs: inputs}), do: inputs
@doc """
True if the witnesses provided follow some extra custom validation.
Currently this covers the requirement for all the inputs to be signed on predetermined positions.
"""
@spec valid?(Transaction.Payment.t(), Transaction.Signed.t()) :: true | {:error, atom}
def valid?(%Transaction.Payment{}, %Transaction.Signed{sigs: sigs} = tx) do
tx
|> Transaction.get_inputs()
|> all_inputs_signed?(sigs)
end
@doc """
True if a payment can be applied, given that the set of input UTXOs is present in the ledger.
Involves checking that inputs and outputs balance for every currency.
Returns the fees that this transaction is paying, mapped by currency.
"""
@spec can_apply?(Transaction.Payment.t(), list(Output.t())) ::
{:ok, map()} | {:error, :amounts_do_not_add_up}
def can_apply?(%Transaction.Payment{} = tx, outputs_spent) do
outputs = Transaction.get_outputs(tx)
input_amounts_by_currency = get_amounts_by_currency(outputs_spent)
output_amounts_by_currency = get_amounts_by_currency(outputs)
with :ok <- amounts_add_up?(input_amounts_by_currency, output_amounts_by_currency),
do: {:ok, fees_paid(input_amounts_by_currency, output_amounts_by_currency)}
end
defp all_inputs_signed?(non_zero_inputs, sigs) do
count_non_zero_signatures = Enum.count(sigs, &(&1 != @empty_signature))
count_non_zero_inputs = length(non_zero_inputs)
cond do
count_non_zero_signatures > count_non_zero_inputs -> {:error, :superfluous_signature}
count_non_zero_signatures < count_non_zero_inputs -> {:error, :missing_signature}
true -> true
end
end
defp fees_paid(input_amounts_by_currency, output_amounts_by_currency) do
Enum.into(
input_amounts_by_currency,
%{},
fn {input_currency, input_amount} ->
# fee is implicit - it's the difference between funds owned and spend
implicit_paid_fee = input_amount - Map.get(output_amounts_by_currency, input_currency, 0)
{input_currency, implicit_paid_fee}
end
)
end
defp get_amounts_by_currency(outputs) do
outputs
|> Enum.group_by(fn %{currency: currency} -> currency end, fn %{amount: amount} -> amount end)
|> Enum.map(fn {currency, amounts} -> {currency, Enum.sum(amounts)} end)
|> Map.new()
end
defp amounts_add_up?(input_amounts, output_amounts) do
for {output_currency, output_amount} <- Map.to_list(output_amounts) do
input_amount = Map.get(input_amounts, output_currency, 0)
input_amount >= output_amount
end
|> Enum.all?()
|> if(do: :ok, else: {:error, :amounts_do_not_add_up})
end
end
|
apps/omg_watcher/lib/omg_watcher/state/transaction/payment.ex
| 0.894421
| 0.625295
|
payment.ex
|
starcoder
|
defmodule AWS.Firehose do
@moduledoc """
Amazon Kinesis Data Firehose API Reference
Amazon Kinesis Data Firehose is a fully managed service that delivers real-time
streaming data to destinations such as Amazon Simple Storage Service (Amazon
S3), Amazon Elasticsearch Service (Amazon ES), Amazon Redshift, and Splunk.
"""
@doc """
Creates a Kinesis Data Firehose delivery stream.
By default, you can create up to 50 delivery streams per AWS Region.
This is an asynchronous operation that immediately returns. The initial status
of the delivery stream is `CREATING`. After the delivery stream is created, its
status is `ACTIVE` and it now accepts data. If the delivery stream creation
fails, the status transitions to `CREATING_FAILED`. Attempts to send data to a
delivery stream that is not in the `ACTIVE` state cause an exception. To check
the state of a delivery stream, use `DescribeDeliveryStream`.
If the status of a delivery stream is `CREATING_FAILED`, this status doesn't
change, and you can't invoke `CreateDeliveryStream` again on it. However, you
can invoke the `DeleteDeliveryStream` operation to delete it.
A Kinesis Data Firehose delivery stream can be configured to receive records
directly from providers using `PutRecord` or `PutRecordBatch`, or it can be
configured to use an existing Kinesis stream as its source. To specify a Kinesis
data stream as input, set the `DeliveryStreamType` parameter to
`KinesisStreamAsSource`, and provide the Kinesis stream Amazon Resource Name
(ARN) and role ARN in the `KinesisStreamSourceConfiguration` parameter.
To create a delivery stream with server-side encryption (SSE) enabled, include
`DeliveryStreamEncryptionConfigurationInput` in your request. This is optional.
You can also invoke `StartDeliveryStreamEncryption` to turn on SSE for an
existing delivery stream that doesn't have SSE enabled.
A delivery stream is configured with a single destination: Amazon S3, Amazon ES,
Amazon Redshift, or Splunk. You must specify only one of the following
destination configuration parameters: `ExtendedS3DestinationConfiguration`,
`S3DestinationConfiguration`, `ElasticsearchDestinationConfiguration`,
`RedshiftDestinationConfiguration`, or `SplunkDestinationConfiguration`.
When you specify `S3DestinationConfiguration`, you can also provide the
following optional values: BufferingHints, `EncryptionConfiguration`, and
`CompressionFormat`. By default, if no `BufferingHints` value is provided,
Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever
condition is satisfied first. `BufferingHints` is a hint, so there are some
cases where the service cannot adhere to these conditions strictly. For example,
record boundaries might be such that the size is a little over or under the
configured buffering size. By default, no encryption is performed. We strongly
recommend that you enable encryption to ensure secure data storage in Amazon S3.
A few notes about Amazon Redshift as a destination:
* An Amazon Redshift destination requires an S3 bucket as
intermediate location. Kinesis Data Firehose first delivers data to Amazon S3
and then uses `COPY` syntax to load data into an Amazon Redshift table. This is
specified in the `RedshiftDestinationConfiguration.S3Configuration` parameter.
* The compression formats `SNAPPY` or `ZIP` cannot be specified in
`RedshiftDestinationConfiguration.S3Configuration` because the Amazon Redshift
`COPY` operation that reads from the S3 bucket doesn't support these compression
formats.
* We strongly recommend that you use the user name and password you
provide exclusively with Kinesis Data Firehose, and that the permissions for the
account are restricted for Amazon Redshift `INSERT` permissions.
Kinesis Data Firehose assumes the IAM role that is configured as part of the
destination. The role should allow the Kinesis Data Firehose principal to assume
the role, and the role should have permissions that allow the service to deliver
the data. For more information, see [Grant Kinesis Data Firehose Access to an Amazon S3
Destination](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
in the *Amazon Kinesis Data Firehose Developer Guide*.
"""
def create_delivery_stream(client, input, options \\ []) do
request(client, "CreateDeliveryStream", input, options)
end
@doc """
Deletes a delivery stream and its data.
To check the state of a delivery stream, use `DescribeDeliveryStream`. You can
delete a delivery stream only if it is in one of the following states: `ACTIVE`,
`DELETING`, `CREATING_FAILED`, or `DELETING_FAILED`. You can't delete a delivery
stream that is in the `CREATING` state. While the deletion request is in
process, the delivery stream is in the `DELETING` state.
While the delivery stream is in the `DELETING` state, the service might continue
to accept records, but it doesn't make any guarantees with respect to delivering
the data. Therefore, as a best practice, first stop any applications that are
sending records before you delete a delivery stream.
"""
def delete_delivery_stream(client, input, options \\ []) do
request(client, "DeleteDeliveryStream", input, options)
end
@doc """
Describes the specified delivery stream and its status.
For example, after your delivery stream is created, call
`DescribeDeliveryStream` to see whether the delivery stream is `ACTIVE` and
therefore ready for data to be sent to it.
If the status of a delivery stream is `CREATING_FAILED`, this status doesn't
change, and you can't invoke `CreateDeliveryStream` again on it. However, you
can invoke the `DeleteDeliveryStream` operation to delete it. If the status is
`DELETING_FAILED`, you can force deletion by invoking `DeleteDeliveryStream`
again but with `DeleteDeliveryStreamInput$AllowForceDelete` set to true.
"""
def describe_delivery_stream(client, input, options \\ []) do
request(client, "DescribeDeliveryStream", input, options)
end
@doc """
Lists your delivery streams in alphabetical order of their names.
The number of delivery streams might be too large to return using a single call
to `ListDeliveryStreams`. You can limit the number of delivery streams returned,
using the `Limit` parameter. To determine whether there are more delivery
streams to list, check the value of `HasMoreDeliveryStreams` in the output. If
there are more delivery streams to list, you can request them by calling this
operation again and setting the `ExclusiveStartDeliveryStreamName` parameter to
the name of the last delivery stream returned in the last call.
"""
def list_delivery_streams(client, input, options \\ []) do
request(client, "ListDeliveryStreams", input, options)
end
@doc """
Lists the tags for the specified delivery stream.
This operation has a limit of five transactions per second per account.
"""
def list_tags_for_delivery_stream(client, input, options \\ []) do
request(client, "ListTagsForDeliveryStream", input, options)
end
@doc """
Writes a single data record into an Amazon Kinesis Data Firehose delivery
stream.
To write multiple data records into a delivery stream, use `PutRecordBatch`.
Applications using these operations are referred to as producers.
By default, each delivery stream can take in up to 2,000 transactions per
second, 5,000 records per second, or 5 MB per second. If you use `PutRecord` and
`PutRecordBatch`, the limits are an aggregate across these two operations for
each delivery stream. For more information about limits and how to request an
increase, see [Amazon Kinesis Data Firehose Limits](https://docs.aws.amazon.com/firehose/latest/dev/limits.html).
You must specify the name of the delivery stream and the data record when using
`PutRecord`. The data record consists of a data blob that can be up to 1,000 KB
in size, and any kind of data. For example, it can be a segment from a log file,
geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination.
To disambiguate the data blobs at the destination, a common solution is to use
delimiters in the data, such as a newline (`\n`) or some other character unique
within the data. This allows the consumer application to parse individual data
items when reading the data from the destination.
The `PutRecord` operation returns a `RecordId`, which is a unique string
assigned to each record. Producer applications can use this ID for purposes such
as auditability and investigation.
If the `PutRecord` operation throws a `ServiceUnavailableException`, back off
and retry. If the exception persists, it is possible that the throughput limits
have been exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time
they are added to a delivery stream as it tries to send the records to the
destination. If the destination is unreachable for more than 24 hours, the data
is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your
records. Instead, concatenate the raw data, then perform base64 encoding.
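A call sketch (the input map mirrors the API reference; building `client`
depends on your aws-elixir setup, and the stream name is illustrative):

    AWS.Firehose.put_record(client, %{
      "DeliveryStreamName" => "my-stream",
      "Record" => %{"Data" => Base.encode64("my data\n")}
    })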
"""
def put_record(client, input, options \\ []) do
request(client, "PutRecord", input, options)
end
@doc """
Writes multiple data records into a delivery stream in a single call, which can
achieve higher throughput per producer than when writing single records.
To write single data records into a delivery stream, use `PutRecord`.
Applications using these operations are referred to as producers.
For information about service quota, see [Amazon Kinesis Data Firehose Quota](https://docs.aws.amazon.com/firehose/latest/dev/limits.html).
Each `PutRecordBatch` request supports up to 500 records. Each record in the
request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4
MB for the entire request. These limits cannot be changed.
You must specify the name of the delivery stream and the data record when using
`PutRecord`. The data record consists of a data blob that can be up to 1,000 KB
in size, and any kind of data. For example, it could be a segment from a log
file, geographic location data, website clickstream data, and so on.
Kinesis Data Firehose buffers records before delivering them to the destination.
To disambiguate the data blobs at the destination, a common solution is to use
delimiters in the data, such as a newline (`\n`) or some other character unique
within the data. This allows the consumer application to parse individual data
items when reading the data from the destination.
The `PutRecordBatch` response includes a count of failed records,
`FailedPutCount`, and an array of responses, `RequestResponses`. Even if the
`PutRecordBatch` call succeeds, the value of `FailedPutCount` may be greater
than 0, indicating that there are records for which the operation didn't
succeed. Each entry in the `RequestResponses` array provides additional
information about the processed record. It directly correlates with a record in
the request array using the same ordering, from the top to the bottom. The
response array always includes the same number of records as the request array.
`RequestResponses` includes both successfully and unsuccessfully processed
records. Kinesis Data Firehose tries to process all records in each
`PutRecordBatch` request. A single record failure does not stop the processing
of subsequent records.
A successfully processed record includes a `RecordId` value, which is unique for
the record. An unsuccessfully processed record includes `ErrorCode` and
`ErrorMessage` values. `ErrorCode` reflects the type of error, and is one of the
following values: `ServiceUnavailableException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the error.
If there is an internal server error or a timeout, the write might have
completed or it might have failed. If `FailedPutCount` is greater than 0, retry
the request, resending only those records that might have failed processing.
This minimizes the possible duplicate records and also reduces the total bytes
sent (and corresponding charges). We recommend that you handle any duplicates at
the destination.
If `PutRecordBatch` throws `ServiceUnavailableException`, back off and retry. If
the exception persists, it is possible that the throughput limits have been
exceeded for the delivery stream.
Data records sent to Kinesis Data Firehose are stored for 24 hours from the time
they are added to a delivery stream as it attempts to send the records to the
destination. If the destination is unreachable for more than 24 hours, the data
is no longer available.
Don't concatenate two or more base64 strings to form the data fields of your
records. Instead, concatenate the raw data, then perform base64 encoding.
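A hedged sketch of the retry flow described above (the stream name and records
are illustrative; map keys follow the AWS API reference):

    records = [%{"Data" => Base.encode64("line 1\n")}, %{"Data" => Base.encode64("line 2\n")}]
    input = %{"DeliveryStreamName" => "my-stream", "Records" => records}
    {:ok, %{"FailedPutCount" => failed, "RequestResponses" => responses}, _} =
      put_record_batch(client, input)

    if failed > 0 do
      to_retry =
        records
        |> Enum.zip(responses)
        |> Enum.filter(fn {_record, resp} -> Map.has_key?(resp, "ErrorCode") end)
        |> Enum.map(fn {record, _resp} -> record end)
      # resend only `to_retry`, with backoff
    end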
"""
def put_record_batch(client, input, options \\ []) do
request(client, "PutRecordBatch", input, options)
end
@doc """
Enables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it,
Kinesis Data Firehose first sets the encryption status of the stream to
`ENABLING`, and then to `ENABLED`. The encryption status of a delivery stream is
the `Status` property in `DeliveryStreamEncryptionConfiguration`. If the
operation fails, the encryption status changes to `ENABLING_FAILED`. You can
continue to read and write data to your delivery stream while the encryption
status is `ENABLING`, but the data is not encrypted. It can take up to 5 seconds
after the encryption status changes to `ENABLED` before all records written to
the delivery stream are encrypted. To find out whether a record or a batch of
records was encrypted, check the response elements `PutRecordOutput$Encrypted`
and `PutRecordBatchOutput$Encrypted`, respectively.
To check the encryption status of a delivery stream, use
`DescribeDeliveryStream`.
Even if encryption is currently enabled for a delivery stream, you can still
invoke this operation on it to change the ARN of the CMK or both its type and
ARN. If you invoke this method to change the CMK, and the old CMK is of type
`CUSTOMER_MANAGED_CMK`, Kinesis Data Firehose schedules the grant it had on the
old CMK for retirement. If the new CMK is of type `CUSTOMER_MANAGED_CMK`,
Kinesis Data Firehose creates a grant that enables it to use the new CMK to
encrypt and decrypt data and to manage the grant.
If a delivery stream already has encryption enabled and then you invoke this
operation to change the ARN of the CMK or both its type and ARN and you get
`ENABLING_FAILED`, this only means that the attempt to change the CMK failed. In
this case, encryption remains enabled with the old CMK.
If the encryption status of your delivery stream is `ENABLING_FAILED`, you can
invoke this operation again with a valid CMK. The CMK must be enabled and the
key policy mustn't explicitly deny the permission for Kinesis Data Firehose to
invoke KMS encrypt and decrypt operations.
You can enable SSE for a delivery stream only if it uses `DirectPut` as its
source.
The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
operations have a combined limit of 25 calls per delivery stream per 24 hours.
For example, you reach the limit if you call `StartDeliveryStreamEncryption` 13
times and `StopDeliveryStreamEncryption` 12 times for the same delivery stream
in a 24-hour period.
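A hedged sketch of enabling SSE with a customer managed CMK (the stream name
and key ARN are illustrative; map keys follow the AWS API reference):

    input = %{
      "DeliveryStreamName" => "my-stream",
      "DeliveryStreamEncryptionConfigurationInput" => %{
        "KeyType" => "CUSTOMER_MANAGED_CMK",
        "KeyARN" => "arn:aws:kms:us-east-1:111122223333:key/example"
      }
    }
    start_delivery_stream_encryption(client, input)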
"""
def start_delivery_stream_encryption(client, input, options \\ []) do
request(client, "StartDeliveryStreamEncryption", input, options)
end
@doc """
Disables server-side encryption (SSE) for the delivery stream.
This operation is asynchronous. It returns immediately. When you invoke it,
Kinesis Data Firehose first sets the encryption status of the stream to
`DISABLING`, and then to `DISABLED`. You can continue to read and write data to
your stream while its status is `DISABLING`. It can take up to 5 seconds after
the encryption status changes to `DISABLED` before all records written to the
delivery stream are no longer subject to encryption. To find out whether a
record or a batch of records was encrypted, check the response elements
`PutRecordOutput$Encrypted` and `PutRecordBatchOutput$Encrypted`, respectively.
To check the encryption state of a delivery stream, use
`DescribeDeliveryStream`.
If SSE is enabled using a customer managed CMK and then you invoke
`StopDeliveryStreamEncryption`, Kinesis Data Firehose schedules the related KMS
grant for retirement and then retires it after it ensures that it is finished
delivering records to the destination.
The `StartDeliveryStreamEncryption` and `StopDeliveryStreamEncryption`
operations have a combined limit of 25 calls per delivery stream per 24 hours.
For example, you reach the limit if you call `StartDeliveryStreamEncryption` 13
times and `StopDeliveryStreamEncryption` 12 times for the same delivery stream
in a 24-hour period.
"""
def stop_delivery_stream_encryption(client, input, options \\ []) do
request(client, "StopDeliveryStreamEncryption", input, options)
end
@doc """
Adds or updates tags for the specified delivery stream.
A tag is a key-value pair that you can define and assign to AWS resources. If
you specify a tag that already exists, the tag value is replaced with the value
that you specify in the request. Tags are metadata. For example, you can add
friendly names and descriptions or other types of information that can help you
distinguish the delivery stream. For more information about tags, see [Using Cost Allocation
Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
in the *AWS Billing and Cost Management User Guide*.
Each delivery stream can have up to 50 tags.
This operation has a limit of five transactions per second per account.
"""
def tag_delivery_stream(client, input, options \\ []) do
request(client, "TagDeliveryStream", input, options)
end
@doc """
Removes tags from the specified delivery stream.
Removed tags are deleted, and you can't recover them after this operation
successfully completes.
If you specify a tag that doesn't exist, the operation ignores it.
This operation has a limit of five transactions per second per account.
"""
def untag_delivery_stream(client, input, options \\ []) do
request(client, "UntagDeliveryStream", input, options)
end
@doc """
Updates the specified destination of the specified delivery stream.
Use this operation to change the destination type (for example, to replace the
Amazon S3 destination with Amazon Redshift) or change the parameters associated
with a destination (for example, to change the bucket name of the Amazon S3
destination). The update might not occur immediately. The target delivery stream
remains active while the configurations are updated, so data writes to the
delivery stream can continue during this process. The updated configurations are
usually effective within a few minutes.
Switching between Amazon ES and other services is not supported. For an Amazon
ES destination, you can only update to another Amazon ES destination.
If the destination type is the same, Kinesis Data Firehose merges the
configuration parameters specified with the destination configuration that
already exists on the delivery stream. If any of the parameters are not
specified in the call, the existing values are retained. For example, in the
Amazon S3 destination, if `EncryptionConfiguration` is not specified, then the
existing `EncryptionConfiguration` is maintained on the destination.
If the destination type is not the same, for example, changing the destination
from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any
parameters. In this case, all parameters must be specified.
Kinesis Data Firehose uses `CurrentDeliveryStreamVersionId` to avoid race
conditions and conflicting merges. This is a required field, and the service
updates the configuration only if the existing configuration has a version ID
that matches. After the update is applied successfully, the version ID is
updated, and can be retrieved using `DescribeDeliveryStream`. Use the new
version ID to set `CurrentDeliveryStreamVersionId` in the next call.
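A hedged sketch of the version-checked update flow described above (the stream
name and destination ID are illustrative; map keys follow the AWS API
reference):

    {:ok, %{"DeliveryStreamDescription" => %{"VersionId" => version}}, _} =
      describe_delivery_stream(client, %{"DeliveryStreamName" => "my-stream"})

    update_destination(client, %{
      "DeliveryStreamName" => "my-stream",
      "CurrentDeliveryStreamVersionId" => version,
      "DestinationId" => "destinationId-000000000001"
      # plus the destination configuration to merge
    })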
"""
def update_destination(client, input, options \\ []) do
request(client, "UpdateDestination", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, map() | nil, map()}
| {:error, term()}
defp request(client, action, input, options) do
client = %{client | service: "firehose"}
host = build_host("firehose", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Firehose_20150804.#{action}"}
]
payload = encode!(client, input)
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
post(client, url, payload, headers, options)
end
defp post(client, url, payload, headers, options) do
case AWS.Client.request(client, :post, url, payload, headers, options) do
{:ok, %{status_code: 200, body: body} = response} ->
body = if body != "", do: decode!(client, body)
{:ok, body, response}
{:ok, response} ->
{:error, {:unexpected_response, response}}
error = {:error, _reason} -> error
end
end
defp build_host(_endpoint_prefix, %{region: "local", endpoint: endpoint}) do
endpoint
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
defp encode!(client, payload) do
AWS.Client.encode!(client, payload, :json)
end
defp decode!(client, payload) do
AWS.Client.decode!(client, payload, :json)
end
end
|
lib/aws/generated/firehose.ex
| 0.918613
| 0.684343
|
firehose.ex
|
starcoder
|
defmodule Tint.Distance do
@moduledoc """
A module providing functions for color distance calculations and a behavior
to implement custom distance algorithms.
"""
alias Tint.DistanceCache
@typedoc """
A function that allows calculating the distance between two colors.
"""
@type distance_fun :: (Tint.color(), Tint.color() -> number)
@typedoc """
A distance calculation module that implements this very behavior or a function
that allows calculating the distance between two colors.
"""
@type distance_algorithm :: module | {module, Keyword.t()} | distance_fun
@doc """
Calculate the distance of two colors using the given options.
"""
@callback distance(
color :: Tint.color(),
other_color :: Tint.color(),
opts :: Keyword.t()
) :: number
@doc """
Calculate the distance of two colors using the given distance algorithm.
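A hedged sketch, with `MyDistance` standing in for any module implementing
the `distance/3` callback above (`some_opt` is likewise hypothetical):

    Tint.Distance.distance(color, other_color, {MyDistance, some_opt: true})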
"""
@spec distance(Tint.color(), Tint.color(), distance_algorithm) :: number
def distance(color, other_color, distance_algorithm)
def distance(color, other_color, fun) when is_function(fun) do
maybe_cache({color, other_color, fun}, fn ->
fun.(color, other_color)
end)
end
def distance(color, other_color, {mod, opts}) do
maybe_cache({color, other_color, mod, opts}, fn ->
mod.distance(color, other_color, opts)
end)
end
def distance(color, other_color, mod) do
distance(color, other_color, {mod, []})
end
defp maybe_cache(key, calc_fun) do
if GenServer.whereis(DistanceCache) do
DistanceCache.get_or_put(key, calc_fun)
else
calc_fun.()
end
end
@doc """
Gets the nearest color from the specified palette using the given distance
algorithm.
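Returns `nil` if the palette is empty. A hedged sketch, with `MyDistance`
standing in for any distance module:

    Tint.Distance.nearest_color(color, palette, MyDistance)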
"""
@spec nearest_color(Tint.color(), [Tint.color()], distance_algorithm) ::
nil | Tint.color()
def nearest_color(color, palette, distance_algorithm) do
case nearest_colors(color, palette, 1, distance_algorithm) do
[nearest_color] -> nearest_color
_ -> nil
end
end
@doc """
Gets the nearest n colors from the specified palette using the given distance
algorithm.
"""
@spec nearest_colors(
Tint.color(),
[Tint.color()],
non_neg_integer,
distance_algorithm
) :: [Tint.color()]
def nearest_colors(color, palette, n, distance_algorithm) do
palette
|> Enum.sort_by(fn palette_color ->
distance(color, palette_color, distance_algorithm)
end)
|> Enum.take(n)
end
end
|
lib/tint/distance.ex
| 0.91183
| 0.82386
|
distance.ex
|
starcoder
|
defmodule MementoWeb.QsParamsValidator do
@default_page 1
@default_per_page 25
@default_type :all
@moduledoc """
This module validates and parses a map with string keys representing a query string
and returns a stable map that can be used with database queries.
When the params map doesn't have necessary keys, defaults are provided.
Defaults:
- `page`: `#{@default_page}`
- `per_page`: `#{@default_per_page}`
- `type`: `#{@default_type}`
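A usage sketch (the input map is illustrative):

    {:ok, params} = MementoWeb.QsParamsValidator.validate(%{"per_page" => "50"})
    # params == %{page: 1, per_page: 50, q: :not_provided, type: :all}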
"""
alias Memento.Schema.Entry
@type params :: %{
page: pos_integer(),
per_page: pos_integer(),
type: :all | Entry.Type.t(),
q: String.t() | :not_provided
}
@doc """
Validates a map with binary keys, returning either `{:ok, map_with_atom_keys}` or
`{:error, reason}`, where the returned map has a predictable structure.
iex> alias MementoWeb.QsParamsValidator, as: V
iex> V.validate(%{"page" => "10"})
{:ok, %{page: 10, per_page: 25, q: :not_provided, type: :all}}
"""
@spec validate(%{optional(String.t()) => term}) ::
{:ok, params} | {:error, term}
def validate(qs_params) do
steps = Saul.all_of([validator(), &transformer/1])
Saul.validate(qs_params, steps)
end
defp validator do
Saul.map(%{
"page" => {:optional, &page_validator/1},
"per_page" => {:optional, &per_page_validator/1},
"type" => {:optional, &type_validator/1},
"q" => {:optional, &q_validator/1}
})
end
defp transformer(map) do
result = %{
page: Map.get(map, "page", @default_page),
per_page: Map.get(map, "per_page", @default_per_page),
type: Map.get(map, "type", @default_type),
q: Map.get(map, "q", :not_provided)
}
{:ok, result}
end
defp page_validator(page), do: string_to_integer(page)
defp per_page_validator(per_page), do: string_to_integer(per_page)
defp string_to_integer(string) do
case Integer.parse(string) do
{int, ""} -> {:ok, int}
_other -> {:error, "not parsable as integer"}
end
end
defp type_validator("all") do
{:ok, :all}
end
defp type_validator(type_string) do
if type_string in Entry.Type.type_strings() do
Entry.Type.load(type_string)
else
{:error, "invalid type string"}
end
end
defp q_validator(q) do
is_binary(q)
end
end
|
lib/memento_web/qs_params_validator.ex
| 0.846371
| 0.401893
|
qs_params_validator.ex
|
starcoder
|
defmodule CodeCorps.ModelCase do
@moduledoc """
This module defines the test case to be used by
model tests.
You may define functions here to be used as helpers in
your model tests. See `errors_on/2`'s definition as reference.
Finally, if the test case interacts with the database,
it cannot be async. For this reason, every test runs
inside a transaction which is reset at the beginning
of the test unless the test case is marked as async.
"""
use ExUnit.CaseTemplate
using do
quote do
alias CodeCorps.Repo
import Ecto
import Ecto.Changeset
import Ecto.Query
import CodeCorps.Factories
import CodeCorps.ModelCase
end
end
setup tags do
:ok = Ecto.Adapters.SQL.Sandbox.checkout(CodeCorps.Repo)
unless tags[:async] do
Ecto.Adapters.SQL.Sandbox.mode(CodeCorps.Repo, {:shared, self()})
end
:ok
end
@doc """
Helper for returning list of errors in a struct when given certain data.
## Examples
Given a User schema that lists `:name` as a required field and validates
`:password` to be safe, it would return:
iex> errors_on(%User{}, %{password: "password"})
[password: "<PASSWORD>", name: "is blank"]
You could then write your assertion like:
assert {:password, "<PASSWORD>"} in errors_on(%User{}, %{password: "password"})
You can also create the changeset manually and retrieve the errors
field directly:
iex> changeset = User.changeset(%User{}, password: "password")
iex> {:password, "<PASSWORD>"} in changeset.errors
true
"""
def errors_on(struct, data) do
struct.__struct__.changeset(struct, data)
|> Ecto.Changeset.traverse_errors(&CodeCorpsWeb.ErrorHelpers.translate_error/1)
|> Enum.flat_map(fn {key, errors} -> for msg <- errors, do: {key, msg} end)
end
@doc """
Asserts if a specific error message has been added to a specific field on the
changeset. It is more flexible to use `error_message/2` directly instead of
this one.
```
assert_error_message(changeset, :foo, "bar")
```
Compared to
```
assert error_message(changeset, :foo) == "bar"
refute error_message(changeset, :foo) == "baz"
```
"""
def assert_error_message(changeset, field, expected_message) do
assert error_message(changeset, field) == expected_message
end
@doc """
Asserts if a specific validation type has been triggered on a specific field
on the changeset. It is more flexible to use `validation_triggered/2` directly
instead of this one.
```
assert_validation_triggered(changeset, :foo, "bar")
```
Compared to
```
assert validation_triggered(changeset, :foo) == :required
refute validation_triggered(changeset, :bar) == :required
```
"""
def assert_validation_triggered(changeset, field, type) do
assert validation_triggered(changeset, field) == type
end
@doc """
Returns an error message on a specific field on the specified changeset
"""
@spec error_message(Ecto.Changeset.t, Atom.t) :: String.t
def error_message(changeset, field) do
{message, _} = changeset.errors[field]
message
end
@doc """
Returns an atom indicating the type of validation that was triggered on a
field in a changeset.
"""
@spec validation_triggered(Ecto.Changeset.t, Atom.t) :: Atom.t
def validation_triggered(changeset, field) do
{_message, status} = changeset.errors[field]
status[:validation]
end
@doc """
Returns true or false depending on if an assoc_constraint validation has been
triggered in the provided changeset on the specified field.
"""
@spec assoc_constraint_triggered?(Ecto.Changeset.t, Atom.t) :: boolean
def assoc_constraint_triggered?(changeset, field) do
error_message(changeset, field) == "does not exist"
end
end
|
test/support/model_case.ex
| 0.909217
| 0.737158
|
model_case.ex
|
starcoder
|
defmodule Philosopher do
@moduledoc """
Documentation for `Philosopher`.
"""
@doc """
Hello world.
## Examples
iex> Philosopher.hello()
:world
"""
def hello do
:world
end
@dream 800
@eat 100
@delay 0
@timeout 1000
# Create a new philosopher process.
def start(hunger, strength, left, right, name, ctrl, seed) do
spawn_link(fn -> init(hunger, strength, left, right, name, ctrl, seed) end)
end
defp init(hunger, strength, left, right, name, ctrl, seed) do
gui = Gui.start(name)
:rand.seed(:exsss, {seed, seed, seed})
dreaming(hunger, strength, left, right, name, ctrl, gui)
end
# Philosopher is in a dreaming state.
defp dreaming(0, strength, _left, _right, name, ctrl, gui) do
IO.puts("#{name} is happy, strength is still #{strength}!")
send(gui, :stop)
send(ctrl, :done)
end
defp dreaming(hunger, 0, _left, _right, name, ctrl, gui) do
IO.puts("#{name} is starved to death, hunger is down to #{hunger}!")
send(gui, :stop)
send(ctrl, :done)
end
defp dreaming(hunger, strength, left, right, name, ctrl, gui) do
IO.puts("#{name} is dreaming!")
send(gui, :leave)
## this is where we sleep
delay(@dream)
IO.puts("#{name} wakes up")
waiting(hunger, strength, left, right, name, ctrl, gui)
end
# Philosopher is waiting for chopsticks.
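# Note: each philosopher requests the left stick and then blocks on the right
# one; if every philosopher is holding a left stick this deadlocks. The
# @timeout attribute defined above is not used here.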
defp waiting(hunger, strength, left, right, name, ctrl, gui) do
send(gui, :waiting)
IO.puts("#{name} is waiting, #{hunger} to go!")
case Chopstick.request(left) do
:ok ->
IO.puts("#{name} received left stick")
delay(@delay)
case Chopstick.request(right) do
:ok ->
IO.puts("#{name} received both sticks!")
eating(hunger, strength, left, right, name, ctrl, gui)
end
end
end
# Philosopher is eating.
defp eating(hunger, strength, left, right, name, ctrl, gui) do
send(gui, :enter)
IO.puts("#{name} is eating...")
delay(@eat)
Chopstick.return(left)
Chopstick.return(right)
dreaming(hunger - 1, strength, left, right, name, ctrl, gui)
end
defp delay(t), do: sleep(t)
defp sleep(0), do: :ok
defp sleep(t), do: :timer.sleep(:rand.uniform(t))
end
|
philosophers/lib/philosophers.ex
| 0.778313
| 0.446253
|
philosophers.ex
|
starcoder
|
defmodule Imglab do
@moduledoc """
Provides a set of functions to work with imglab services.
"""
alias Imglab.Source
alias Imglab.Signature
alias Imglab.Utils
@doc """
Returns a formatted URL `string` with the specified parameters.
* `source_name_or_source` must be a `string` indicating a source name or a [Source struct](`t:Imglab.Source.t/0`).
* `path` must be a `string` indicating the path of the resource.
* `params` must be an optional `keyword` list with the image parameters to use.
## Source
The first parameter can be a `string` with the name of the source in the case that no additional settings for the source are needed:
iex> Imglab.url("assets", "image.jpeg", width: 500, height: 600)
"https://assets.imglab-cdn.net/image.jpeg?width=500&height=600"
Or a [Source struct](`t:Imglab.Source.t/0`) created with `Imglab.Source.new/2` specifying additional settings for the source if needed:
iex> "assets"
iex> |> Imglab.Source.new()
iex> |> Imglab.url("image.jpeg", width: 500, height: 600)
"https://assets.imglab-cdn.net/image.jpeg?width=500&height=600"
### Secured sources
You can specify a secure source and use it to generate signed URLs:
iex> "assets"
iex> |> Imglab.Source.new(secure_key: "qxxKNvxRONOMklcGJBVczefrJnE=", secure_salt: "e9bXw6/HIMGTWcmAYArHA5jpIAE=")
iex> |> Imglab.url("image.jpeg", width: 500, height: 600)
"https://assets.imglab-cdn.net/image.jpeg?width=500&height=600&signature=MX0DlvzVo39-_Dh_YqPbOnrayWVabIWaSDzi-9PfGHQ"
The `signature` query string will be automatically added to the URL.
> Note: `secure_key` and `secure_salt` parameters are secrets that should not be added to the code. Please use environment variables or another secure method to use them in your project.
## Path
The second parameter must be a `string` defining the path to the resource.
iex> Imglab.url("assets", "path/to/myimage.jpeg", width: 500, height: 600)
"https://assets.imglab-cdn.net/path/to/myimage.jpeg?width=500&height=600"
## Params
The third optional parameter is a `keyword` list with any desired imglab transformation parameter.
Some imglab parameters use hyphens inside their names. You can use atoms with underscores; these will be normalized to the correct format used by the imglab API.
iex> Imglab.url("assets", "image.jpeg", width: 500, trim: "color", trim_color: "orange")
"https://assets.imglab-cdn.net/image.jpeg?width=500&trim=color&trim-color=orange"
Or you can define a quoted atom instead:
iex> Imglab.url("assets", "image.jpeg", width: 500, trim: "color", "trim-color": "orange")
"https://assets.imglab-cdn.net/image.jpeg?width=500&trim=color&trim-color=orange"
If no params are specified a URL without query params will be generated:
iex> Imglab.url("assets", "image.jpeg")
"https://assets.imglab-cdn.net/image.jpeg"
"""
@spec url(binary | Source.t(), binary, keyword) :: binary
def url(source_name_or_source, path, params \\ [])
def url(source_name, path, params) when is_binary(source_name) and is_binary(path) and is_list(params) do
url(Source.new(source_name), path, params)
end
def url(%Source{} = source, path, params) when is_binary(path) and is_list(params) do
normalized_path = Utils.normalize_path(path)
normalized_params = Utils.normalize_params(params)
URI.to_string(%URI{
scheme: Source.scheme(source),
host: Source.host(source),
port: source.port,
path: Path.join("/", Source.path(source, encode_path(normalized_path))),
query: encode_params(source, normalized_path, normalized_params)
})
end
@spec encode_path(binary) :: binary
defp encode_path(path) when is_binary(path) do
if Utils.web_uri?(path) do
encode_path_component(path)
else
path
|> String.split("/")
|> Enum.map(&encode_path_component/1)
|> Enum.join("/")
end
end
@spec encode_path_component(binary) :: binary
defp encode_path_component(path_component) when is_binary(path_component) do
URI.encode(path_component, &URI.char_unreserved?/1)
end
@spec encode_params(Source.t(), binary, list) :: binary
defp encode_params(%Source{} = source, path, params)
when is_binary(path) and is_list(params) and length(params) > 0 do
if Source.is_secure?(source) do
signature = Signature.generate(source, path, URI.encode_query(params))
URI.encode_query(params ++ [{"signature", signature}])
else
URI.encode_query(params)
end
end
defp encode_params(%Source{} = source, path, _params) when is_binary(path) do
if Source.is_secure?(source) do
signature = Signature.generate(source, path)
URI.encode_query(signature: signature)
else
nil
end
end
end
|
lib/imglab.ex
| 0.913126
| 0.651105
|
imglab.ex
|
starcoder
|
defmodule SingletonSupervisor do
@moduledoc ~S"""
A singleton supervisor within an erlang cluster.
`SingletonSupervisor` by default uses `{:global, SingletonSupervisor}` as its name; further
instances with the same name will not start a supervisor but a placeholder that monitors
the already started one.
When a `SingletonSupervisor` or its node fails or stops, all its placeholders also stop,
so the parent supervisors of those placeholders will try to restart the singleton supervisor;
one of them will take over and the others will become placeholders monitoring the globally
registered one.
`SingletonSupervisor` by default uses the `:global` registry, but any other distributed
registry can be used if it takes care of killing duplicate instances.
## Examples
SingletonSupervisor can be added as a child to a supervision tree but should not be used as the top level application supervisor:
defmodule MyApp.Application do
use Application
def start(_type, _args) do
children = [
{SingletonSupervisor,
strategy: :one_for_one,
name: {:global, MyApp.SingletonSupervisor},
children: [
# Children of SingletonSupervisor
]}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
end
SingletonSupervisor can also be used as a module-based supervisor:
defmodule MyApp.SingletonSupervisor do
use Supervisor
def start_link(init_arg) do
SingletonSupervisor.start_link(__MODULE__, init_arg)
end
def init(_init_arg) do
children = [
# Children of SingletonSupervisor
]
Supervisor.init(children, strategy: :one_for_one)
end
end
"""
def start_link(options) do
{children, options} = Keyword.pop(options, :children, [])
start_link(children, options)
end
def start_link(children, options) when is_list(children) do
options = Keyword.put_new(options, :name, {:global, __MODULE__})
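# If a supervisor with this name is already running in the cluster, start a
# placeholder that monitors it; otherwise the `with` falls through and
# returns the result of starting the supervisor.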
with {:error, {:already_started, pid}} <- Supervisor.start_link(children, options) do
SingletonSupervisor.Placeholder.start_link(pid, options[:name])
end
end
def start_link(module, init_arg, options \\ []) do
options = Keyword.put_new(options, :name, {:global, module})
with {:error, {:already_started, pid}} <- Supervisor.start_link(module, init_arg, options) do
SingletonSupervisor.Placeholder.start_link(pid, options[:name])
end
end
def child_spec(options) do
%{
id: Keyword.get(options, :name, __MODULE__),
start: {__MODULE__, :start_link, [options]},
type: :supervisor
}
end
end
|
lib/singleton_supervisor.ex
| 0.723016
| 0.419321
|
singleton_supervisor.ex
|
starcoder
|
defmodule Routemaster.Drains.IgnoreStale do
@moduledoc """
Drops stale events from the current payload to only include
events that reflect an entity state that is _more recent_ than
previously received events.
Helps to ignore events received out-of-order (e.g. an `update`
event about an entity received after the `delete` event for
that same entity), given that the Routemaster event bus server
makes no guarantee of in-order delivery of events.
## Implementation Details
First it removes duplicates by URL from the current payload
and only preserves the most recent event for each URL.
Then it compares the timestamp of the remaining events with
the Redis-backed `Routemaster.Drain.EventState` data, to
check if the event is newer than the latest known state of
the resource at that URL.
If an event is indeed fresher than the latest known state,
the state is updated in Redis with the timestamp of the
fresh event.
Since the output of this middleware should be a single event
per URL, filtering the events first and only checking the most
recent one will reduce the number of Redis calls.
The Redis calls to read and save the event states are executed
one by one. While reading them in bulk would be more efficient,
it would not work correctly because multiple batches can be
received and processed concurrently, and out-of-order events
for the same resources could be spread in more than one batch.
"""
alias Routemaster.Drain.EventState
alias Plug.Conn
def init(opts), do: opts
def call(conn, _opts) do
Conn.assign(conn, :events, filter(conn.assigns.events))
end
defp filter(events) do
events
|> newest_by_url()
|> remove_stale_and_update_state()
end
defp newest_by_url(events) do
events
|> Enum.group_by(fn(e) -> e.url end)
|> Enum.map(fn({_, list}) -> newest(list) end)
end
defp newest(list) do
Enum.max_by(list, &(&1.t))
end
defp remove_stale_and_update_state(events) do
Enum.filter events, fn(event) ->
if EventState.fresh?(event) do
EventState.save(event)
true
else
false
end
end
end
end
|
lib/routemaster/drain/drains/ignore_stale.ex
| 0.72526
| 0.547041
|
ignore_stale.ex
|
starcoder
|
defmodule ExploringMars.Mission do
@moduledoc """
This module defines a mission. A mission encapsulates a single run of a probe,
being defined by a single set of problem parameters (bounds, initial position,
and instructions).
Calling `ExploringMars.Mission.run/3` with these parameters will produce the
status in which the probe ended (mission outcome -- see below), together with
the final position of the probe.
This module should change if the way a mission is represented changes.
"""
alias ExploringMars.Mission.{Position, Coordinate, Instruction}
@typedoc """
A mission outcome represents the final status of a mission.
* `:ok` → All commands were executed successfully
* `:out_of_bounds` → The probe fell off the plateau. The position reported
will be the position the probe was attempting to move to when it fell off.
* `:invalid_instruction` → The probe tried to execute something that was not
a valid instruction. The position reported will be the position where the
probe was when it executed the invalid instruction.
"""
@type outcome :: :ok | :out_of_bounds | :invalid_instruction
@typedoc """
A mission's result is its outcome together with its final position.
See the `t:outcome/0` type's documentation above for further details.
"""
@type result :: {outcome, Position.t()}
@doc """
Run a mission by setting its parameters. Returns mission outcome and final
position. Each outcome specifies how its final position should be interpreted.
Check the `t:outcome/0` type's documentation above for further details.
## Examples
iex> Mission.run({2, 2}, {{0, 0}, :N}, [:M, :R, :M])
{:ok, {{1, 1}, :E}}
iex> Mission.run({1, 1}, {{0, 0}, :N}, [:M, :R, :M, :M])
{:out_of_bounds, {{2, 1}, :E}}
iex> Mission.run({1, 1}, {{0, 0}, :N}, [:M, :R, "%", :M, :M])
{:invalid_instruction, {{0, 1}, :E}}
"""
@spec run(
Coordinate.t(),
Position.t(),
list(Instruction.t())
) :: result
def run(bounds, position, instructions) do
coordinate = position |> elem(0)
if in_bounds(bounds, coordinate) do
do_run(bounds, position, instructions)
else
{:out_of_bounds, position}
end
end
# run a mission when we know the initial position is in bounds.
# the reason we delegate to this function is so that we only need
# to bounds check after running each instructions, instead of bounds
# checking before *and* after running each instruction.
@spec do_run(
Coordinate.t(),
Position.t(),
list(Instruction.t())
) :: result
defp do_run(_bounds, position, []), do: {:ok, position}
defp do_run(bounds, position, [instruction | rest]) do
case Instruction.run(position, instruction) do
:invalid_instruction ->
{:invalid_instruction, position}
{coord, direction} ->
if in_bounds(bounds, coord) do
do_run(bounds, {coord, direction}, rest)
else
{:out_of_bounds, {coord, direction}}
end
end
end
# Checks if a coordinate is in-bounds.
@spec in_bounds(Coordinate.t(), Coordinate.t()) :: as_boolean(atom)
defp in_bounds({x_max, y_max}, {x, y}) do
# IO.puts("#{x} <= #{x_max}, #{y} <= #{y_max}")
x >= 0 && y >= 0 && x <= x_max && y <= y_max
end
@doc """
Converts the result of a mission into a string suitable for user-facing
output.
## Examples
iex> Mission.result_to_string({:ok, {{1, 1}, :N}})
"1 1 N\\n"
iex> Mission.result_to_string({:out_of_bounds, {{1, 1}, :N}})
"OUT OF BOUNDS @ 1 1 N\\n"
iex> Mission.result_to_string({:invalid_instruction, {{1, 1}, :N}})
"INVALID INSTRUCTION @ 1 1 N\\n"
"""
@spec result_to_string(result) :: String.t()
def result_to_string(mission_result)
def result_to_string({outcome, position}) do
output =
case outcome do
:ok ->
Position.pretty_print(position)
:out_of_bounds ->
"OUT OF BOUNDS @ #{Position.pretty_print(position)}"
:invalid_instruction ->
"INVALID INSTRUCTION @ #{Position.pretty_print(position)}"
end
output <> "\n"
end
end
|
lib/exploring_mars/mission.ex
| 0.923876
| 0.876898
|
mission.ex
|
starcoder
|
defmodule Concentrate.VehiclePosition do
@moduledoc """
Structure for representing a transit vehicle's position.
"""
import Concentrate.StructHelpers
defstruct_accessors([
:id,
:trip_id,
:stop_id,
:label,
:license_plate,
:latitude,
:longitude,
:bearing,
:speed,
:odometer,
:stop_sequence,
:last_updated,
:consist,
:occupancy_status,
:occupancy_percentage,
status: :IN_TRANSIT_TO
])
defmodule Consist do
@moduledoc """
The consist is the set of individual cars which make up the vehicle we're tracking.
For example, a subway train can have 6 cars, each of which would have a separate item in the consist.
"""
defstruct_accessors([
:label
])
end
def new(opts) do
# required fields
_ = Keyword.fetch!(opts, :latitude)
_ = Keyword.fetch!(opts, :longitude)
super(opts)
end
defimpl Concentrate.Mergeable do
def key(%{id: id}), do: id
@doc """
Merging VehiclePositions takes the latest position for a given vehicle.
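A hedged sketch (only the relevant fields are shown; `new/1` requires
latitude and longitude):

    older = Concentrate.VehiclePosition.new(latitude: 1.0, longitude: 1.0, last_updated: 1)
    newer = Concentrate.VehiclePosition.new(latitude: 2.0, longitude: 2.0, last_updated: 2)
    Concentrate.Mergeable.merge(older, newer)
    # keeps the newer position, filling any nil fields from the older one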
"""
def merge(first, %{last_updated: nil}) do
first
end
def merge(%{last_updated: nil}, second) do
second
end
def merge(first, second) do
if first.last_updated <= second.last_updated do
do_merge(first, second)
else
do_merge(second, first)
end
end
defp do_merge(first, second) do
%{
second
| trip_id: first_value(second.trip_id, first.trip_id),
stop_id: first_value(second.stop_id, first.stop_id),
label: first_value(second.label, first.label),
license_plate: first_value(second.license_plate, first.license_plate),
latitude: first_value(second.latitude, first.latitude),
longitude: first_value(second.longitude, first.longitude),
bearing: first_value(second.bearing, first.bearing),
speed: first_value(second.speed, first.speed),
odometer: first_value(second.odometer, first.odometer),
stop_sequence: first_value(second.stop_sequence, first.stop_sequence),
occupancy_status: first_value(second.occupancy_status, first.occupancy_status),
occupancy_percentage:
first_value(second.occupancy_percentage, first.occupancy_percentage),
consist: first_list_value(second.consist, first.consist)
}
end
defp first_value(value, _) when not is_nil(value), do: value
defp first_value(_, value), do: value
defp first_list_value([_ | _] = value, _), do: value
defp first_list_value([], value), do: value
defp first_list_value(value1, value2), do: first_value(value1, value2)
end
end
|
lib/concentrate/vehicle_position.ex
| 0.804943
| 0.65055
|
vehicle_position.ex
|
starcoder
|
defmodule Visualizer.Data do
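@moduledoc """
Agent-backed store for a graph of nodes and links. The state has the shape
`%{nodes: nodes, links: links}`, where nodes are `%{name: name, group: group}`
maps and links are `%{from: from, to: to, workload: workload, distance:
distance}` maps. Add and update operations replace any existing entry for
the same node name or link endpoints.
"""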
require Logger
defp initial_state() do
%{nodes: [], links: []}
end
def start_link() do
Agent.start_link(fn -> initial_state() end, name: __MODULE__)
end
def add_node(name, group) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
nodes = Enum.filter(nodes, fn(x) -> x.name != name end)
nodes = nodes ++ [%{name: name, group: group}]
%{nodes: nodes, links: links}
end)
end
def add_node(name, group, parent, workload, distance) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
nodes = Enum.filter(nodes, fn(x) -> x.name != name end)
nodes = nodes ++ [%{name: name, group: group}]
links = Enum.filter(links, fn(x) -> x.from != name end)
links = Enum.filter(links, fn(x) -> x.to != name end)
links = links ++ [%{from: parent, to: name, workload: workload, distance: distance}]
%{nodes: nodes, links: links}
end)
end
def add_link(from, to, workload, distance) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
links = Enum.filter(links, fn(x) -> !(x.from == from and x.to == to) end)
links = Enum.filter(links, fn(x) -> !(x.from == to and x.to == from) end)
links = links ++ [%{from: from, to: to, workload: workload, distance: distance}]
%{nodes: nodes, links: links}
end)
end
def remove_node(name) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
links = Enum.filter(links, fn(x) -> x.from != name end)
links = Enum.filter(links, fn(x) -> x.to != name end)
nodes = Enum.filter(nodes, fn(x) -> x.name != name end)
%{nodes: nodes, links: links}
end)
end
def update_link(from, to, workload, distance) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
links = Enum.filter(links, fn(x) -> !(x.from == from and x.to == to) end)
links = Enum.filter(links, fn(x) -> !(x.from == to and x.to == from) end)
links = links ++ [%{from: from, to: to, workload: workload, distance: distance}]
%{nodes: nodes, links: links}
end)
end
def remove_link(from, to) do
Agent.update(__MODULE__, fn state ->
%{nodes: nodes, links: links} = state
links = Enum.filter(links, fn(x) -> !(x.from == from and x.to == to) end)
links = Enum.filter(links, fn(x) -> !(x.from == to and x.to == from) end)
%{nodes: nodes, links: links}
end)
end
def reset() do
Agent.update(__MODULE__, fn _state ->
initial_state()
end)
end
def get() do
Agent.get(__MODULE__, fn state -> state end)
end
def generate_uuid() do
token = <KEY>()
end
end
|
lib/visualizer/data.ex
| 0.551574
| 0.527925
|
data.ex
|
starcoder
|
defmodule Vow do
@moduledoc """
Vow describes and validates the shape of Elixir data. The main entry points
are `Vow.conform/2` (validate and possibly destructure a value),
`Vow.unform/2` (turn a conformed value back into its original shape), and
`Vow.gen/2` (build a generator for a vow). The underlying protocols are
`Vow.Conformable` and `Vow.Generatable`.
## Conformable Types
The following Elixir types have `Vow.Conformable` implementations and their
behavior will be discussed in their corresponding section below. They are
divided into two groups: leaf vows and composite vows.
Composite Vows:
* `List`
* `Tuple`
* `Map`
* `Struct`
Leaf Vows:
* `Function`
* `MapSet`
* `Regex`
* `Range`
* `Date.Range`
* `Any`
### Function
1-arity functions that return booleans are valid vows. Raises, throws, exits,
and non-`true` returns as a result of executing the given vow function will
fail to conform the given value.
```
iex> Vow.conform(fn _ -> true end, nil)
{:ok, nil}
# function returns false, so will never succeed
iex> Vow.valid?(fn _ -> false end, nil)
false
# function returns a non-boolean value, so it fails
iex> Vow.valid?(fn _ -> "not a boolean" end, nil)
false
# function raises, so it fails
iex> Vow.valid?(fn _ -> raise %ArgumentError{} end, nil)
false
# function throws, so it fails
iex> Vow.valid?(fn _ -> throw :foo end, nil)
false
# function exits, so it fails
iex> Vow.valid?(fn _ -> exit :bad end, nil)
false
```
### List, Tuple, and Map
`List`, `Tuple`, and `Map` all represent 'fixed' versions of `Vow.list_of/2`
and `Vow.map_of/2`. Each element in the vow list, tuple, or map is the vow
for the conformation of the corresponding element in the value list, tuple,
or map.
```
iex> vow = [&is_integer/1, &is_float/1]
...> Vow.valid?(vow, [1, 2.2])
true
# one of the elements does not conform
iex> vow = [&is_integer/1, &is_float/1]
...> Vow.valid?(vow, [:not_int, 2.2])
false
# value length does not match vow
iex> vow = [&is_integer/1, &is_float/1]
...> Vow.valid?(vow, [1, 2.2, 42])
false
```
Note that improper lists are valid vows.
```
iex> vow = [&is_integer/1 | &is_atom/1]
...> Vow.valid?(vow, [0 | :foo])
true
# the improper tail does not conform
iex> vow = [&is_integer/1 | &is_atom/1]
...> Vow.valid?(vow, [0 | "not atom"])
false
# one of the elements does not conform
iex> vow = [&is_integer/1 | &is_atom/1]
...> Vow.valid?(vow, [:not_int | :foo])
false
# value length does not match vow
iex> vow = [&is_integer/1 | &is_atom/1]
...> Vow.valid?(vow, [0, 1 | :foo])
false
```
This works for `Tuple` and `Map` in the same way.
### MapSet
A `MapSet` will behave in two different ways based on the type of value
it is comparing itself to.
If the value is also a `MapSet`, then the vow mapset is satisfied if the
value mapset is a subset of it (i.e. if every member in the value mapset
is also contained in the vow mapset).
```
# both :a and :c are contained within the vow
iex> vow = MapSet.new([:a, :b, :c])
...> Vow.valid?(vow, MapSet.new([:a, :c]))
true
# :d is not contained within the vow, therefore it will fail
iex> vow = MapSet.new([:a, :b, :c])
...> Vow.valid?(vow, MapSet.new([:b, :d]))
false
```
If the value is anything other than a `MapSet`, then the vow will be
satisfied if the value is a member of the vow.
```
iex> vow = MapSet.new([:a, :b, :c])
...> Vow.valid?(vow, :b)
true
iex> vow = MapSet.new([:a, :b, :c])
...> Vow.valid?(vow, :d)
false
```
### Regex
A `Regex` will successfully conform a value if that value is a string and
it matches the regex successfully.
```
iex> Vow.valid?(~r/^[a-zA-Z]+$/, "abc")
true
# value does not match the regex
iex> Vow.valid?(~r/^[a-zA-Z]+$/, "abc123")
false
# value is not a string
iex> Vow.valid?(~r/^[a-zA-Z]+$/, %{a: 1})
false
```
### Range
A `Range` will behave in two different ways based on the type of value
it is comparing itself to.
If the value is also a `Range` then the vow range is satisfied if the value
range is bounded by the vow range.
```
iex> Vow.valid?(1..10, 1..3)
true
iex> Vow.valid?(1..10, 5..11)
false
```
If the value is anything other than a `Range`, then the vow will be satisfied
if the value is contained within the range.
```
iex> Vow.valid?(1..10, 5)
true
iex> Vow.valid?(1..10, 0)
false
```
### Date.Range
A `Date.Range` will behave in two different ways based on the type of value
it is comparing itself to.
If the value is also a `Date.Range`, then the vow date range is satisfied if
the value date range is bounded by the vow date range.
```
iex> vow = Date.range(~D[2010-01-01], ~D[2010-03-01])
...> Vow.valid?(vow, Date.range(~D[2010-01-01], ~D[2010-02-01]))
true
iex> vow = Date.range(~D[2010-01-01], ~D[2010-03-01])
...> Vow.valid?(vow, Date.range(~D[2010-02-01], ~D[2010-03-02]))
false
```
If the value is anything other than a `Date.Range`, then the vow will be
satisfied if the value is a member of the date range.
```
iex> vow = Date.range(~D[2010-01-01], ~D[2010-03-01])
...> Vow.valid?(vow, ~D[2010-02-15])
true
iex> vow = Date.range(~D[2010-01-01], ~D[2010-03-01])
...> Vow.valid?(vow, ~D[2010-04-01])
false
```
### Structs
All structs that do not implement `Vow.Conformable` are conformed similarly to
maps after first validating that the vow struct and value struct share the
same module under the `:__struct__` key.
```
iex> vow = %ArgumentError{message: "foo"}
...> Vow.conform(vow, %ArgumentError{message: "foo"})
{:ok, %ArgumentError{message: "foo"}}
```
### Any
Any type not mentioned above is treated atomically for the purposes of
conforming values.
```
iex> Vow.conform(:foo, :foo)
{:ok, :foo}
iex> Vow.valid?(:foo, :bar)
false
```
## Conformed / Destructured Values
A conformed value (sometimes referred to as a potentially destructured value)
is the result of calling `Vow.conform/2` on a value.
The vows that do destructure the values given to them are:
* `Vow.Alt`
* `Vow.Cat`
* `Vow.OneOf`
But it's worth noting that any composite vow (i.e. a vow that contains vows)
may result in a destructured value, as it may contain a vow that does
destructure its value.
## Regex Operators
The following vows are regex operators:
* `Vow.cat/1` - a concatenation of vows
* `Vow.alt/1` - a choice of one among a set of vows
* `Vow.zom/1` - zero or more occurrences of a vow
* `Vow.oom/1` - one or more occurrences of a vow
* `Vow.maybe/1` - one occurrence or none
* `Vow.amp/1` - takes a vow and further constrains it with one or more vows
These nest arbitrarily to form complex expressions.
Nested regex vows compose to describe a single sequence / enumerable. Shown
below is an example of the different nesting behaviors of `Vow.also/1` and
`Vow.amp/1`.
```
# using `Vow.also/1`
iex> import Vow
...> import Vow.FunctionWrapper
...> vow = oom(alt(
...> n: &is_number/1,
...> s: also(
...> bs: oom(&is_bitstring/1),
...> ne: wrap(&Enum.all?(&1, fn s -> String.length(s) > 0 end))
...> )
...> ))
...> Vow.valid?(vow, [1, ["x", "a"], 2, ["y"], ["z"]])
true
# using `Vow.amp/1` (i.e. the regex operator)
iex> import Vow
...> import Vow.FunctionWrapper
...> regex_vow = oom(alt(
...> n: &is_number/1,
...> s: amp(
...> bs: oom(&is_bitstring/1),
...> ne: wrap(&Enum.all?(&1, fn s -> String.length(s) > 0 end))
...> )
...> ))
...> Vow.valid?(regex_vow, [1, "x", "a", 2, "y", "z"])
true
```
## Utilities
These modules and their associated macros are meant to aid in the
construction of your own vows. See their respecitive modules for more
details.
`Vow.FunctionWrapper` helps with better anonymous function inspection. It
conforms the same way a normal function does, but exposes the macro
`Vow.FunctionWrapper.wrap/2` to help display the function for errors.
`Vow.Ref` allows for a reference to a 0-arity function that returns a vow.
Since this resolves whenever a conform occurs, this enables recursive vow
definitions and greater reusability of vows.
`Vow.Pat` wraps the AST of a pattern and will use Elixir pattern matching
to validate when conformation occurs. This also supports `Expat` patterns.
## Notes
See [clojure.spec](https://clojure.org/about/spec) docs for more details
and rationale from the primary influence of this library.
"""
import Kernel, except: [get_in: 2, update_in: 3, put_in: 3, get_and_update_in: 3, pop_in: 2]
alias Vow.{Conformable, ConformError}
@type t :: Conformable.t()
@doc """
Given a `vow` and a `value`, returns an `{:error, conform_error}` if
`value` does not match the `vow`, otherwise returns `{:ok, conformed}`
where `conformed` is a possibly destructured value.
## Examples
```
iex> Vow.conform(&is_integer/1, 42)
{:ok, 42}
iex> Vow.conform(Vow.list_of(Vow.one_of([i: &is_integer/1, a: &is_atom/1, s: &is_bitstring/1])), [0, :b, "c"])
{:ok, [%{i: 0}, %{a: :b}, %{s: "c"}]}
iex> Vow.conform(&is_atom/1, 42)
{:error, %Vow.ConformError{problems: [%Vow.ConformError.Problem{path: [], pred: &is_atom/1, reason: nil, route: [], val: 42, via: []}], val: 42, vow: &is_atom/1}}
```
"""
@spec conform(t, value :: term) :: {:ok, Conformable.conformed()} | {:error, ConformError.t()}
def conform(vow, value) do
case Conformable.conform(vow, [], [], [], value) do
{:ok, conformed} ->
{:ok, conformed}
{:error, problems} ->
{:error, ConformError.new(problems, vow, value)}
end
end
@doc """
Given a `vow` and a `value`, raises a `Vow.ConformError.t` if `value`
does not match the `vow`, otherwise returns a (possibly destructured)
value.
See `Vow.conform/2` for more details.
"""
@spec conform!(t, value :: term) :: Conformable.conformed() | no_return
def conform!(vow, value) do
case conform(vow, value) do
{:ok, conformed} -> conformed
{:error, reason} -> raise reason
end
end
@doc """
Returns true if the `value` conforms to the `vow`.
"""
@spec valid?(t, value :: term) :: boolean
def valid?(vow, value) do
case conform(vow, value) do
{:ok, _} -> true
{:error, _} -> false
end
end
@doc """
Returns true if the `value` fails to conform to the `vow`.
"""
@spec invalid?(t, value :: term) :: boolean
def invalid?(vow, value) do
not valid?(vow, value)
end
defdelegate unform(vow, value), to: Vow.Conformable
defdelegate get_in(data, keys), to: Acs
defdelegate get_and_update_in(data, keys, fun), to: Acs
defdelegate update_in(data, keys, fun), to: Acs
defdelegate put_in(data, keys, fun), to: Acs
defdelegate pop_in(data, keys), to: Acs
@typedoc """
A generator override for a vow is the combination for a path
(i.e. the 'navigable' path to the sub-vow to be replaced) and
the generator function (i.e. 0-arity function that returns a
generator).
These are extremely useful for 'optimizing' the generation of
a vow. A common use-case for this would be with `Vow.one_of/1`.
`Vow` allows one to specify the allowed forms of data, whereas
a library like `StreamData` allows for more semantics around
what is 'likely' to be generated (i.e. `StreamData.one_of/1`
vs `StreamData.frequency/1`).
See also `Vow.with_gen/2`.
"""
@type override :: {path :: [term], Vow.Generatable.gen_fun()}
@type gen_opt :: Vow.Generatable.gen_opt() | {:overrides, [override]}
@doc """
Returns a generator for the specified `vow`.
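A hedged sketch (the concrete generator type depends on the
`Vow.Generatable` implementation in use):

```
{:ok, generator} = Vow.gen(Vow.list_of(&is_integer/1))
```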
"""
@spec gen(t, keyword) :: {:ok, Vow.Generatable.generator()} | {:error, reason :: term}
def gen(vow, opts \\ []) do
{overrides, opts} = Keyword.pop(opts, :overrides, [])
overridden_vow =
Enum.reduce(overrides, vow, fn {path, gen_fun}, acc ->
put_in(acc, path, gen_fun.())
end)
Vow.Generatable.gen(overridden_vow, opts)
end
defdelegate with_gen(vow, gen_fun), to: Vow.WithGen, as: :new
@doc """
Given a `vow` and a destructured `value`, returns the original value or
raises a `Vow.UnformError.t`.
See `Vow.unform/2` for more details.
"""
@spec unform!(t, Conformable.conformed()) :: value :: term | no_return
def unform!(vow, value) do
case unform(vow, value) do
{:ok, unformed} -> unformed
{:error, reason} -> raise reason
end
end
@doc """
Converts the specified `enumerable` into a `MapSet.t`.
"""
@spec set(Enum.t()) :: MapSet.t()
def set(enumerable), do: MapSet.new(enumerable)
@doc """
Returns true if given a term.
"""
@spec term?(term) :: true
def term?(_term), do: true
@doc """
Returns true if given anything.
"""
@spec any?(term) :: true
def any?(term), do: term?(term)
@doc """
Returns a vow that successfully conforms a value if all given `named_vows`
are successfully conformed. Successive, and possible destructured,
conformed values propagate through the rest of the vows, in order.
## Examples
```
iex> vow = Vow.also(list: &is_list/1, len: &(length(&1) > 1))
...> Vow.conform(vow, [1, 2, 3])
{:ok, [1, 2, 3]}
```
"""
@spec also([{atom, t}]) :: t
def also(named_vows) do
Vow.Also.new(named_vows)
end
@doc """
Returns a vow that successfully conforms a value if any of the given
`named_vows` successfully conform the value.
The returned value will always be a map containing the key of the first
vow that successfully conformed, with the value being the conformed value.
## Examples
```
iex> vow = Vow.one_of(int: &is_integer/1, float: &is_float/1, any: &Vow.any?/1)
...> Vow.conform(vow, 42)
{:ok, %{int: 42}}
iex> vow = Vow.one_of(int: &is_integer/1, float: &is_float/1, any: &Vow.any?/1)
...> Vow.conform(vow, 10.2)
{:ok, %{float: 10.2}}
iex> vow = Vow.one_of(int: &is_integer/1, float: &is_float/1, any: &Vow.any?/1)
...> Vow.conform(vow, :foo)
{:ok, %{any: :foo}}
```
"""
@spec one_of([{atom, t}, ...]) :: t | no_return
def one_of(named_vows)
when is_list(named_vows) and length(named_vows) > 0 do
Vow.OneOf.new(named_vows)
end
@doc """
Returns a vow that accepts `nil` and values satisfying the given `vow`.
## Examples
```
iex> vow = Vow.nilable(&is_integer/1)
...> Vow.conform(vow, nil)
{:ok, nil}
iex> vow = Vow.nilable(&is_integer/1)
...> Vow.conform(vow, 42)
{:ok, 42}
iex> vow = Vow.nilable(&is_integer/1)
...> Vow.valid?(vow, "not an integer or nil!")
false
```
"""
@spec nilable(t) :: t
def nilable(vow) do
Vow.Nilable.new(vow)
end
@typedoc """
Options for list, keyword, and map vow construction.
* `:length` - the length of the list specified as an integer or range
(will override `min_length` and/or `:max_length` if either are present)
* `:min_length` - the minimum acceptable length of the list (defaults to `0`)
* `:max_length` - the maximum acceptable length of the list
* `:distinct?` - specifies whether the elements of the list should be unique
(defaults to `false`)
"""
@type list_opt ::
{:length, Range.t() | non_neg_integer}
| {:min_length, non_neg_integer}
| {:max_length, non_neg_integer}
| {:distinct?, boolean}
@doc """
Returns a vow that accepts a list of elements that all conform to the given
`vow` in addition to whatever constraints have been specified in the `opts`.
See `list_opt` type for more details.
## Examples
```
iex> vow = Vow.list_of(Vow.one_of(i: &is_integer/1, s: &is_bitstring/1))
...> Vow.conform(vow, [1, 2, 3, "foo", 5, "bar"])
{:ok, [%{i: 1}, %{i: 2}, %{i: 3}, %{s: "foo"}, %{i: 5}, %{s: "bar"}]}
```
"""
@spec list_of(t, [list_opt]) :: t
def list_of(vow, opts \\ []) do
distinct? = Keyword.get(opts, :distinct?, false)
{min, max} = get_range(opts)
Vow.List.new(vow, min, max, distinct?)
end
@doc false
@spec get_range([list_opt]) :: {non_neg_integer, non_neg_integer | nil}
defp get_range(opts) do
with {:length, nil} <- {:length, Keyword.get(opts, :length)},
{:min, min} <- {:min, Keyword.get(opts, :min_length, 0)},
{:max, max} <- {:max, Keyword.get(opts, :max_length)} do
{min, max}
else
{:length, min..max} -> {min, max}
{:length, len} -> {len, len}
end
end
@doc """
Equivalent to `Vow.list_of({&is_atom/1, vow}, opts)`.
See `Vow.list_of/2` for more details.
"""
@spec keyword_of(t, [list_opt]) :: t
def keyword_of(vow, opts \\ []) do
list_of({&is_atom/1, vow}, opts)
end
@typedoc """
Options for map vow construction.
* `:conform_keys?` - `true` will result in the map keys being overridden by
the result of their conformation, while the default, `false`, will result
in no change to the map keys.
This distinction between conforming or not conforming keys is important
because of the potential for vows to destructure the values they conform,
which may not be desired for map keys.
See `list_opt` for more details.
"""
@type map_opt ::
list_opt
| {:conform_keys?, boolean}
@doc """
Returns a vow that successfully conforms a value if it is a map whose keys
all conform to the `key_vow`, and whose values all conform to the
`value_vow`.
## Examples
```
iex> vow = Vow.map_of(&is_atom/1, &is_integer/1)
...> Vow.conform(vow, %{a: 1, b: 2, c: 3})
{:ok, %{a: 1, b: 2, c: 3}}
iex> vow = Vow.map_of(&is_atom/1, &is_integer/1)
...> Vow.valid?(vow, %{a: 1, b: :not_integer, c: 3})
false
```
"""
@spec map_of(key_vow :: t, value_vow :: t, [map_opt]) :: t
def map_of(key_vow, value_vow, opts \\ []) do
conform_keys? = Keyword.get(opts, :conform_keys?, false)
{min, max} = get_range(opts)
Vow.Map.new(key_vow, value_vow, min, max, conform_keys?)
end
@typedoc """
Any `Vow.t` that represents a `Map` or `Keyword`.
"""
@type merged ::
Vow.Merge.t()
| Vow.Map.t()
| Vow.Keys.t()
| map
@doc """
Takes map-validating vows and returns a vow that returns a conformed
map satifying all of the vows.
## Examples
```
iex> vow = Vow.merge(
...> req: %{a: -100..0, b: 1..100},
...> opt: Vow.map_of(&is_atom/1, &is_integer/1)
...> )
...> Vow.conform(vow, %{a: -42, b: 35, c: 0, d: 10000})
{:ok, %{a: -42, b: 35, c: 0, d: 10000}}
```
## Notes
Unlike `Vow.also/1`, merge can generate maps satifying the union of
the `named_vows`.
"""
@spec merge([{atom, merged}]) :: t
def merge(named_vows) do
Vow.Merge.new(named_vows)
end
@typedoc """
Either a `Vow.Ref.t` or the module, function pair used to construct
the `Vow.Ref.t`. Also supports just the function name if the
`:default_module` is specified (see `key_opt` for more details).
"""
@type vow_ref :: atom | {module, atom} | Vow.Ref.t()
@typedoc """
This expression represents a set of valid `Vow.Ref` combinations and
supports nested `{:and, [...]}` and `{:or, []}` notation.
"""
@type vow_ref_expr ::
vow_ref
| {:and | :or, [vow_ref_expr, ...]}
@typedoc """
Options for keys vow construction:
* `:required` - the expression describing the required keys (defaults to `[]`)
* `:optional` - the expression describing the optional keys (defaults to `[]`)
* `:default_module` - the default module to use when a vow_ref is
unspecified (defaults to `nil`)
* `:regex?` - whether the vow behaves as a regex operator (defaults to `false`)
"""
@type key_opt ::
{:required, [vow_ref_expr]}
| {:optional, [vow_ref_expr]}
| {:default_module, module | nil}
| {:regex?, boolean}
@doc """
Returns a map validating vow that takes a set of vow reference
expressions in the `:required` and `:optional` `opts`.
A vow reference is a named vow (see `Vow.Ref` for more details),
and the expression part supports nested 'and' and 'or' operators
(see `vow_ref_expr` type).
The reference function name is implied to be the key name and the
value corresponding to that key must conform with the vow referenced.
## Examples
```
iex> defmodule Foo do
...> def x, do: &is_integer/1
...> def y, do: &is_float/1
...> end
...> vow = Vow.keys(required: [{Foo, :x}, {Foo, :y}])
...> Vow.conform(vow, %{x: 42, y: 42.0})
{:ok, %{x: 42, y: 42.0}}
```
## Notes
There is no support for inline vow specification, by design.
This is by default not a regex operator, but if the `:regex?` flag in the
`opts` is set to `true`, then it behaves as a regex operator.
"""
@spec keys([key_opt]) :: t | no_return
def keys(opts) do
required = Keyword.get(opts, :required, [])
optional = Keyword.get(opts, :optional, [])
default_module = Keyword.get(opts, :default_module, nil)
regex? = Keyword.get(opts, :regex?, false)
Vow.Keys.new(required, optional, default_module, regex?)
end
@doc """
This macro wraps `Vow.keys/1` with a default `:default_module`
value of the caller's module (via `__CALLER__.module`).
If having this default value is not useful, then using `Vow.keys/1`
is preferred.
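A hedged usage sketch (`mkeys/1` is a macro, so the caller must
`require Vow` first):
```
defmodule MyVows do
  require Vow
  def x, do: &is_integer/1
  def vow, do: Vow.mkeys(required: [:x])
end
```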
"""
@spec mkeys([key_opt]) :: Macro.t()
defmacro mkeys(opts) do
opts = Keyword.put(opts, :default_module, __CALLER__.module)
quote do
Vow.keys(unquote(opts))
end
end
@doc """
Returns `true` if the given `vow` is a regex operator,
otherwise returns `false`.
"""
@spec regex?(t) :: boolean
def regex?(vow) do
Conformable.regex?(vow)
end
@doc """
Returns a vow that consumes values and subjects them to the conjunction
of the `named_vows`, and any conforming they might perform.
## Notes
This is a regex operator that behaves similarly to `Vow.also/1`.
"""
@spec amp([{atom, t}]) :: t
def amp(named_vows) do
Vow.Amp.new(named_vows)
end
@doc """
Returns a vow that matches zero or one value matching the specified `vow`.
Produces either an empty list, or a list of a single element.
## Notes
This is a regex operator.
"""
@spec maybe(t) :: t
def maybe(vow) do
Vow.Maybe.new(vow)
end
@doc """
Returns a vow that matches one or more values matching the specified `vow`.
Produces a list of matches.
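## Examples
A hedged sketch; the result shape is inferred from "produces a list of matches":
```
iex> Vow.conform(Vow.one_or_more(&is_integer/1), [1, 2, 3])
{:ok, [1, 2, 3]}
```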
## Notes
This is a regex operator.
"""
@spec one_or_more(t) :: t
def one_or_more(vow) do
Vow.OneOrMore.new(vow)
end
@doc """
Shorthand for `Vow.one_or_more/1`.
"""
@spec oom(t) :: t
def oom(vow), do: one_or_more(vow)
@doc """
Returns a vow that matches zero or more values matching the specified `vow`.
Produces a list of matches.
## Notes
This is a regex operator.
"""
@spec zero_or_more(t) :: t
def zero_or_more(vow) do
Vow.ZeroOrMore.new(vow)
end
@doc """
Shorthand for `Vow.zero_or_more/1`.
"""
@spec zom(t) :: t
def zom(vow), do: zero_or_more(vow)
@doc """
Returns a vow that returns a map containing the key of the first matching
vow and the corresponding conformed value.
## Notes
This is a regex operator that behaves similarly to `Vow.one_of/1`.
"""
@spec alt([{atom, t}, ...]) :: t | no_return
def alt(named_vows)
when is_list(named_vows) and length(named_vows) > 0 do
Vow.Alt.new(named_vows)
end
@doc """
Returns a vow that matches all values in a list, returning a map containing
the keys of each name in `named_vows` and the corresponding conformed value.
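## Examples
A hedged sketch; the result shape is inferred from the description above:
```
iex> Vow.conform(Vow.cat(a: &is_integer/1, b: &is_binary/1), [1, "x"])
{:ok, %{a: 1, b: "x"}}
```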
## Notes
This is a regex operator.
"""
@spec cat([{atom, t}, ...]) :: t | no_return
def cat(named_vows)
when is_list(named_vows) and length(named_vows) > 0 do
Vow.Cat.new(named_vows)
end
defdelegate conform_function(vow, function, args \\ []), to: Vow.Function, as: :conform
end
|
lib/vow.ex
| 0.892399
| 0.924108
|
vow.ex
|
starcoder
|
defmodule ISO8583.DataTypes do
@moduledoc """
This module provides utilities for validating `ISO 8583` field data types, based on the description below, pulled
from a Postilion interface documentation. Each character gets validated against the regex that defines each data type.
- `a` - Alphabetic characters, `A` through `Z` and `a` through `z`
- `n` - Numeric digits, `0` through `9`
- `p` - Pad character, space
- `s` - Special characters, i.e. other printable characters
- `an` - Alphabetic and numeric characters
- `as` - Alphabetic and special characters
- `ns` - Numeric and special characters
- `anp` - Alphabetic, numeric and pad characters
- `ans` - Alphabetic, numeric and special characters
- `x` - `C` for credit, `D` for debit; always associated with a numeric amount field, i.e. `x+n16` means a prefix of `C` or `D` followed by `16`
numeric characters.
- `b` - Binary representation of data
- `z` - `Track 2` as defined in `ISO 7813`
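A field format is assumed (from its usage below) to be a map such as
`%{content_type: "n", len_type: "lvar", max_len: 19}` (see `ISO8583.Formats`).
`valid?/3` first checks every grapheme against the regex for the `content_type`,
then checks the byte size of the data against `max_len`.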
"""
defp test?("a", character) do
Regex.match?(~r/[a-z]/i, character)
end
defp test?("n", character) do
Regex.match?(~r/[0-9]/i, character)
end
defp test?("b", character) do
Regex.match?(~r/[0-9ABCDEF]/i, character)
end
defp test?("p", character) do
Regex.match?(~r/[*#]/i, character)
end
defp test?("an", character) do
Regex.match?(~r/[0-9a-z]/i, character)
end
defp test?("ans", character) do
Regex.match?(~r/[0-9a-z-!$%^&*()_+|~=`{}\[\]:";'<>?,.\/ ]/i, character)
end
defp test?("ns", character) do
Regex.match?(~r/[0-9-!$%^&*()_+|~=`{}\[\]:";'<>?,.\/ ]/i, character)
end
defp test?("s", character) do
Regex.match?(~r/[-!$%^&*()_+|~=`{}\[\]:";'<>?,.\/ ]/i, character)
end
defp test?("anp", character) do
Regex.match?(~r/[0-9a-z*#\x20]/i, character)
end
defp test?("x+n", character) do
Regex.match?(~r/[0-9CDcd*#]/i, character)
end
defp test?("z", _) do
true
end
defp test?(type, _) do
{:error, "Data type #{type} is not implemented"}
end
defp each(field, type, [head | tail]) do
case test?(type, head) do
true -> each(field, type, tail)
false -> {:error, "While processing field #{field} data provided is not of type '#{type}'"}
{:error, reason} -> {:error, reason}
end
end
defp each(_field, _type, []), do: true
defp run_validation(field, type, string_data) do
  case String.graphemes(string_data) do
    [] -> false
    chars_list -> each(field, type, chars_list)
  end
end
def check_data_length(field, data, %{len_type: "fixed"} = format) do
case byte_size(data) == format.max_len do
true ->
true
false ->
{:error,
 "Invalid length of data on field #{field}, expected #{format.max_len}, found #{byte_size(data)}"}
end
end
def check_data_length(field, data, %{len_type: _} = format) do
case byte_size(data) <= format.max_len do
true ->
true
false ->
{:error,
 "Invalid length of data on field #{field}, expected maximum of #{format.max_len}, found #{byte_size(data)}"}
end
end
@doc """
Validates the data type of a field. Returns `true` if all characters in the field match the type; otherwise returns an `{:error, reason}` tuple.
## Examples
iex> DataTypes.valid?("2", "440044444444444", ISO8583.Formats.format(:"2"))
true
iex> DataTypes.valid?("2", "440044444444444R", ISO8583.Formats.format(:"2"))
{:error, "While processing field 2 data provided is not of type 'n'"}
iex> DataTypes.valid?("2", "44004444444444499999999", ISO8583.Formats.format(:"2"))
{:error, "Invalid length of data on field 2, expected maximum of 19 , found 23"}
"""
def valid?(field, string_data, %{content_type: "x+n"} = format) do
with true <- Regex.match?(~r/[cd]/i, String.at(string_data, 0)),
true <- run_validation(field, "x+n", string_data),
true <- check_data_length(field, string_data, format) do
true
else
error ->
case error do
false -> {:error, "Data type x+n must be presceeded with c or d"}
_ -> error
end
end
end
def valid?(field, string_data, format) do
with true <- run_validation(field, format.content_type, string_data),
true <- check_data_length(field, string_data, format) do
true
else
error -> error
end
end
@doc false
def valid?(message, opts) do
for {key, value} <- message do
case valid?(key, value, opts[:formats][key]) do
true -> true
error -> throw(error)
end
end
{:ok, message}
catch
error -> error
end
end
|
lib/iso_8583/data_types/data_types.ex
| 0.863449
| 0.748076
|
data_types.ex
|
starcoder
|
defmodule Code do
@moduledoc """
The Code module is responsible to manage code compilation,
code evaluation and code loading.
It complements [Erlang's code module][1] to add behavior
which is specific to Elixir.
[1]: http://www.erlang.org/doc/man/code.html
"""
@doc """
Returns all the loaded files.
"""
def loaded_files do
server_call :loaded
end
@doc """
Appends a path to Erlang VM code path.
The path is expanded with `File.expand_path` before being added.
"""
def append_path(path) do
Erlang.code.add_pathz(File.expand_path to_char_list(path))
end
@doc """
Prepends a path to Erlang VM code path.
The path is expanded with `File.expand_path` before being added.
"""
def prepend_path(path) do
Erlang.code.add_patha(File.expand_path to_char_list(path))
end
@doc """
Evaluates the contents given by string. The second argument is the binding
(which should be a Keyword) followed by a keyword list of options. The
options can be:
* `:file` - the file to be considered in the evaluation
* `:line` - the line the script starts
* `:delegate_locals_to` - delegate local calls to the given module,
otherwise functions are evaluated inside Erlang's default scope.
## Examples
Code.eval "a + b", [a: 1, b: 2], file: __ENV__.file, line: __ENV__.line
#=> { 3, [ {:a, 1}, {:b, 2} ] }
When passing the __ENV__'s file and line, we could simply use its
location, which already returns both fields as a keyword list:
Code.eval "a + b", [a: 1, b: 2], __ENV__.location
#=> { 3, [ {:a, 1}, {:b, 2} ] }
"""
def eval(string, binding // [], opts // []) do
{ value, binding, _scope } =
Erlang.elixir.eval :unicode.characters_to_list(string), binding, opts
{ value, binding }
end
@doc """
Evaluates the quoted contents.
## Options
This function accepts a list of options. The supported
options are:
* `:file` - The filename to be used in stacktraces
and the file reported in the __ENV__ variable.
* `:line` - The line reported in the __ENV__ variable.
## Examples
contents = quote hygiene: false, do: a + b
Code.eval_quoted contents, [a: 1, b: 2], file: __ENV__.file, line: __ENV__.line
#=> { 3, [ {:a, 1}, {:b, 2} ] }
When passing the __ENV__'s file and line, we could simply use its
location, which already returns both fields as a keyword list:
Code.eval_quoted contents, [a: 1, b: 2], __ENV__.location
#=> { 3, [ {:a, 1}, {:b, 2} ] }
"""
def eval_quoted(quoted, binding // [], opts // []) do
{ value, binding, _scope } =
Erlang.elixir.eval_quoted [quoted], binding, opts
{ value, binding }
end
@doc """
Converts the given string to AST. It returns { :ok, ast }
if it succeeds, { :error, { line, error, token } } otherwise.
## Options
* `:file` - The filename to be used in stacktraces
and the file reported in the __ENV__ variable.
* `:line` - The line reported in the __ENV__ variable.
"""
def string_to_ast(string, opts // []) do
file = Keyword.get opts, :file, "nofile"
line = Keyword.get opts, :line, 1
res = :elixir_translator.raw_forms(:unicode.characters_to_list(string), line, file)
case res do
{ :ok, ast } -> { :ok, unpack_ast(line, ast) }
_ -> res
end
end
@doc """
Converts the given string to AST. It returns the ast if it succeeds,
raises an exception otherwise. The exception is a TokenMissingError
in case a token is missing (usually because the expression is incomplete),
SyntaxError otherwise.
## Options
* `:file` - The filename to be used in stacktraces
and the file reported in the __ENV__ variable.
* `:line` - The line reported in the __ENV__ variable.
"""
def string_to_ast!(string, opts // []) do
file = Keyword.get opts, :file, "nofile"
line = Keyword.get opts, :line, 1
res = :elixir_translator.forms(:unicode.characters_to_list(string), line, file)
unpack_ast(line, res)
end
defp unpack_ast(_line, []), do: nil
defp unpack_ast(_line, [forms]) when not is_list(forms), do: forms
defp unpack_ast(line, forms), do: { :__block__, line, forms }
@doc """
Loads the given `file`. Accepts `relative_to` as an argument to tell
where the file is located. If the file was already required/loaded,
loads it again. It returns the full path of the loaded file.
When loading a file, you may skip passing .exs as extension as Elixir
automatically adds it for you.
"""
def load_file(file, relative_to // nil) when is_binary(file) do
file = find_file(file, relative_to)
server_call { :loaded, file }
Erlang.elixir_compiler.file file
file
end
@doc """
Requires the given `file`. Accepts `relative_to` as an argument to tell
where the file is located. If the file was already required/loaded,
returns nil, otherwise the full path of the loaded file.
When requiring a file, you may skip passing .exs as extension as
Elixir automatically adds it for you.
"""
def require_file(file, relative_to // nil) when is_binary(file) do
file = find_file(file, relative_to)
case server_call({ :loaded, file }) do
:ok -> Erlang.elixir_compiler.file file
:duplicated -> []
end
end
@doc """
Loads the compilation options from the code server.
Check compiler_options/1 for more information.
"""
def compiler_options do
server_call :compiler_options
end
@doc """
Sets compilation options. Those options are global
since they are stored by Elixir's Code Server.
Available options are:
* docs - when true, retain documentation in the compiled module.
True by default;
* debug_info - when true, retain debug information in the compiled module.
This allows a developer to reconstruct the original source
code; for this reason, it is false by default;
* ignore_module_conflict - when true, override modules that were already defined
without raising errors, false by default;
"""
def compiler_options(opts) do
server_call { :compiler_options, opts }
end
@doc """
Compiles the given string and returns a list of tuples where
the first element is the module name and the second one is its
binary.
For compiling many files at once, check `Kernel.ParallelCompiler`.
"""
def compile_string(string, file // "nofile") when is_binary(file) do
Erlang.elixir_compiler.string :unicode.characters_to_list(string), to_binary(file)
end
@doc """
Ensures the given module is loaded. If the module is already
loaded, it works as a no-op. If the module was not loaded yet,
it tries to load it.
If it succeeds loading the module anyhow, it returns
`{ :module, module }`. If not, returns `{ :error, reason }` with
the error reason.
## Code loading on the Erlang VM
Erlang has two modes to load code: interactive and embedded.
By default, the Erlang VM runs on interactive mode, where modules
are loaded as needed. In embedded mode the opposite happens, as all
modules need to be loaded upfront or explicitly.
Therefore, this function is useful to check if a module is loaded
before using it and react accordingly. For example, the `URI` module
uses this function to check if a specific parser exists and is for the
given URI scheme.
## Code.ensure_compiled
Elixir also contains an `ensure_compiled/1` function that is a
superset of `ensure_loaded/1`.
Since Elixir's compilation happens in parallel, in some situations
you may need to use a module but it was not compiled yet, therefore
it can't even be loaded.
`ensure_compiled/1` puts a halt in the current process until the
module we are depending on is available.
In most cases, `ensure_loaded` is enough. `ensure_compiled`
must be used just in some rare conditions, usually involving macros
that need to invoke a module for callback information.
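## Examples
A minimal sketch; the return value mirrors Erlang's `code:ensure_loaded/1`:
    Code.ensure_loaded(:lists)
    #=> { :module, :lists }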
"""
def ensure_loaded(module) when is_atom(module) do
Erlang.code.ensure_loaded(module)
end
@doc """
Ensures the given module is compiled and loaded. If the module
is already loaded, it works as a no-op. If the module was not
loaded yet, it checks if it needs to be compiled first and just
then tries to load it.
If it succeeds loading the module anyhow, it returns
`{ :module, module }`. If not, returns `{ :error, reason }` with
the error reason.
Check `ensure_loaded/1` for more information on module loading
and when to use `ensure_loaded/1` or `ensure_compiled/1`.
"""
def ensure_compiled(module) when is_atom(module) do
case Erlang.code.ensure_loaded(module) do
{ :error, :nofile } = error ->
case :erlang.get(:elixir_compiler_pid) do
:undefined -> error
_ ->
try do
module.__info__(:self)
{ :module, module }
rescue
UndefinedFunctionError -> error
end
end
other -> other
end
end
## Helpers
# Finds the file given the relative_to path.
# If the file is found, returns its path in binary, fails otherwise.
defp find_file(file, relative_to) do
file = to_binary(file)
file = if relative_to do
File.expand_path(file, relative_to)
else
File.expand_path(file)
end
if File.regular?(file) do
file
else
prefix = "#{file}.exs"
if File.regular?(prefix) do
prefix
else
raise ArgumentError, message: "could not load #{file}"
end
end
end
defp server_call(args) do
Erlang.gen_server.call(:elixir_code_server, args)
end
end
|
lib/elixir/lib/code.ex
| 0.846117
| 0.446615
|
code.ex
|
starcoder
|
defmodule EthEvent do
@moduledoc """
`EthEvent` is a library to declare and request Solidity events easily.
## Queries
This project provides a simple way of querying events and other information
from the Ethereum blockchain.
### Query Solidity events
Solidity events are no more than encoded block logs. They are usually used to
record that something changed in the contract's state. The following contract
declares and `emit`s an event every time someone sends some tokens to another
address:
```solidity
contract SomeToken is ERC20 {
event Transfer(address indexed from, address indexed to, uint value);
...
function transfer(address to, uint value) {
...
emit Transfer(msg.sender, to, value);
}
}
```
Using the `EthEvent.Schema` behaviour you can query the events in the form of
Elixir structs. The `Transfer` event defined in the previous contract could be
defined as follows:
```
defmodule Transfer do
use EthEvent.Schema
event "Transfer" do
address :from, indexed: true
address :to, indexed: true
uint :value
end
end
```
The previously defined module includes the functions `Transfer.query/1`,
`Transfer.query/2` and a struct definition for the `Transfer` event.
For example, let's say we want to look for:
- The `Transfer` event in the contract of address
`0xd09de8b6b510aecd508a22811398f468e75c8c4d`.
- Where the tokens were transferred `from` the wallet address
`0x93ecb3962981e1ba2928297cb09c1932aa2c9c51`.
- And only between the blocks `0` and `100`.
We would do the following:
```
> contract_address = "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
> from = "0x93ecb3962981e1ba2928297cb09c1932aa2c9c51"
> query = %Transfer{address: contract_address, from: from}
> options = [from_block: 0, to_block: 100]
> Transfer.query(query, options)
{:ok,
[
%Transfer{
...
address: "0xd09de8b6b510aecd508a22811398f468e75c8c4d",
block_number: 42,
from: "0x93ecb3962981e1ba2928297cb09c1932aa2c9c51",
to: "0x1e529de18f95ad5a4f41ac5e159fa307d5a85967",
value: 100
}
]
}
```
The result is the list of events that match the query.
### Query block number
It is possible to query the block number as well using the function
`Block.query/1` e.g:
```
> use EthEvent
> Block.query(%Block{block_number: "latest"})
{:ok,
%Block{
...
block_number: 42,
timestamp: #DateTime<...>
}
}
```
### Query balance
Similarly to the two previous sections, balances are available as well using
the function `query/1` e.g:
```
> use EthEvent
> address = "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
> Balance.query(%Balance{address: address, block_number: "latest"})
{:ok,
%Balance{
address: "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
balance: 100
}
}
```
The `balance` field is in _wei_.
### Query composability
One important thing is that events are composable, though they only preserve
some fields when composed. For example, to query the balance of the wallet
address `0xd09de8b6b510aecd508a22811398f468e75c8c4d` at block 42, there are
two ways of doing it:
Without block hash or block_number:
```
> address = "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
> Balance.query(%Balance{address: address})
{:ok,
%Balance{
address: "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
block_number: nil,
block_hash: nil,
balance: 100
}
}
```
or with block hash:
```
> address = "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
> Block.query!(block_number: 42) |> Balance.query(address: address)
{:ok,
%Balance{
address: "0xd09de8b6b510aecd508a22811398f468e75c8c4d"
block_number: 42,
block_hash: "0x15feeab052b4bd65c8e3a2e3efab391debb9d8b5def6ced89ea772...",
balance: 100
}
}
```
## Installation
To install just add the following to your dependencies:
```elixir
def deps do
[
{:eth_event, "~> 0.1"}
]
end
```
"""
defmacro __using__(_) do
quote do
alias EthEvent.Api.{Block, Balance}
end
end
end
|
lib/eth_event.ex
| 0.903147
| 0.961858
|
eth_event.ex
|
starcoder
|
defmodule Calendar.DateTime.TzPeriod do
alias Calendar.TimeZoneData
@moduledoc """
DateTime.TzPeriod is for getting information about timezone periods.
A timezone period is a concept invented for Calendar: a period during which
the offsets of a given time zone stay the same. For instance, during summer
time in London, where Daylight Saving Time is in effect, the period runs from
the beginning of summer time until the fall, where DST is no longer in effect.
The functions in this module let you get the time instant where a period
begins and when the next begins, terminating the existing period.
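A hedged quick-start sketch (see the doctests below for exact shapes):
    Calendar.DateTime.from_erl!({{2015, 7, 1}, {12, 0, 0}}, "Europe/Copenhagen")
    |> Calendar.DateTime.TzPeriod.next_from
    #=> {:ok, %DateTime{...}} - the first instant of the following period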
"""
defp timezone_period(date_time) do
utc_greg_secs = date_time |> Calendar.DateTime.shift_zone!("Etc/UTC") |> Calendar.DateTime.gregorian_seconds
period_list = TimeZoneData.periods_for_time(date_time.time_zone, utc_greg_secs, :utc);
hd period_list
end
@doc """
Takes a DateTime. Returns another DateTime with the beginning of the next
timezone period. Or {:unlimited, :max} in case there are no planned changes
to the time zone.
See also `from`.
## Examples
Iceland does not observe DST and has no plans to do so. The period
that 2000 January 1st is in goes on "forever" and {:unlimited, :max} is
returned.
iex> Calendar.DateTime.from_erl!({{2000,1,1},{0,0,0}},"Atlantic/Reykjavik") |> next_from
{:unlimited, :max}
The provided DateTime is in summer of 2000 in New York. The period is in
DST. The returned DateTime is the first instance of winter time, where
DST is no longer in place:
iex> Calendar.DateTime.from_erl!({{2000,6,1},{0,0,0}},"America/New_York") |> next_from
{:ok,
%DateTime{zone_abbr: "EST", day: 29, hour: 1, microsecond: {0, 0}, minute: 0, month: 10, second: 0, std_offset: 0,
time_zone: "America/New_York", utc_offset: -18000, year: 2000}}
The provided DateTime is in winter 2000. The returned DateTime is the
first second of DST/summer time.
iex> Calendar.DateTime.from_erl!({{2000,1,1},{0,0,0}},"Europe/Copenhagen") |> next_from
{:ok,
%DateTime{zone_abbr: "CEST", day: 26, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600,
time_zone: "Europe/Copenhagen", utc_offset: 3600, year: 2000}}
"""
def next_from(date_time) do
period = date_time |> timezone_period
case is_integer(period.until.utc) do
true -> until = period.until.utc
|> :calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC")
|> Calendar.DateTime.shift_zone!(date_time.time_zone)
{:ok, until}
false -> {:unlimited, period.until.wall}
end
end
@doc """
Takes a DateTime. Returns the beginning of the timezone period that the
DateTime is in, as another DateTime in a tuple tagged with :ok.
In case it is the first timezone period, the beginning will be
"the beginning of time" so to speak. In that case {:unlimited, :min} will
be returned.
See also `next_from`.
## Examples
iex> Calendar.DateTime.from_erl!({{2000,1,1},{0,0,0}},"Atlantic/Reykjavik") |> from
{:ok,
%DateTime{zone_abbr: "GMT", day: 7, hour: 2, microsecond: {0, 0}, minute: 0, month: 4, second: 0, std_offset: 0,
time_zone: "Atlantic/Reykjavik", utc_offset: 0, year: 1968}}
iex> Calendar.DateTime.from_erl!({{1800,1,1},{0,0,0}},"Atlantic/Reykjavik") |> from
{:unlimited, :min}
"""
def from(date_time) do
period = date_time |> timezone_period
case is_integer(period.from.utc) do
true -> from = period.from.utc |> :calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC")
|> Calendar.DateTime.shift_zone!(date_time.time_zone)
{:ok, from}
false -> {:unlimited, period.from.wall}
end
end
@doc """
## Examples
iex> Calendar.DateTime.from_erl!({{2000,1,1},{0,0,0}},"Europe/Copenhagen") |> prev_from
{:ok,
%DateTime{zone_abbr: "CEST", day: 28, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600, time_zone: "Europe/Copenhagen", utc_offset: 3600, year: 1999}}
iex> Calendar.DateTime.from_erl!({{1800,1,1},{0,0,0}},"Atlantic/Reykjavik") |> prev_from
{:error, :already_at_first}
"""
def prev_from(date_time) do
{tag, val} = from(date_time)
case tag do
:unlimited -> {:error, :already_at_first}
_ -> val
|> Calendar.DateTime.shift_zone!("Etc/UTC")
|> Calendar.DateTime.gregorian_seconds
|> Kernel.-(1)
|> :calendar.gregorian_seconds_to_datetime
|> Calendar.DateTime.from_erl!("Etc/UTC")
|> Calendar.DateTime.shift_zone!(val.time_zone)
|> from
end
end
@doc """
Takes a DateTime and returns a stream of next timezone period
starts. Not including the "from" time of the current timezone period.
## Examples
A DateTime in winter is provided. We take the first 4 elements from the
stream. The first element is the first instance of the summer time period
that follows the standard/winter time period the provided DateTime was in.
The next is standard time. Then Daylight time and Standard time again.
iex> Calendar.DateTime.from_erl!({{2015,2,24},{13,0,0}}, "America/New_York") |> stream_next_from |> Enum.take(4)
[%DateTime{zone_abbr: "EDT", day: 8, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600, time_zone: "America/New_York",
utc_offset: -18000, year: 2015},
%DateTime{zone_abbr: "EST", day: 1, hour: 1, microsecond: {0, 0}, minute: 0, month: 11, second: 0, std_offset: 0, time_zone: "America/New_York",
utc_offset: -18000, year: 2015},
%DateTime{zone_abbr: "EDT", day: 13, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600, time_zone: "America/New_York",
utc_offset: -18000, year: 2016},
%DateTime{zone_abbr: "EST", day: 6, hour: 1, microsecond: {0, 0}, minute: 0, month: 11, second: 0, std_offset: 0, time_zone: "America/New_York",
utc_offset: -18000, year: 2016}]
"""
def stream_next_from(date_time) do
Stream.unfold(next_from(date_time), fn {tag, date_time} -> if tag == :ok do {date_time, date_time |> next_from} else nil end end)
end
@doc """
Takes a DateTime and returns a stream of previous "from" timezone period
starts. Plus the "from" time of the current timezone period.
## Examples
A DateTime in winter is provided. We take the first 4 elements from the
stream. The first element is the beginning of the period for the DateTime
provided. The next is the first instance of summer time aka. Eastern
Daylight Time earlier that year. The next one is standard time before that
which began in the previous year.
iex> Calendar.DateTime.from_erl!({{2015,2,24},{13,0,0}}, "America/New_York") |> stream_prev_from |> Enum.take(4)
[%DateTime{zone_abbr: "EST", day: 2, hour: 1, microsecond: {0, 0}, minute: 0, month: 11, second: 0, std_offset: 0, time_zone: "America/New_York",
utc_offset: -18000, year: 2014},
%DateTime{zone_abbr: "EDT", day: 9, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600, time_zone: "America/New_York",
utc_offset: -18000, year: 2014},
%DateTime{zone_abbr: "EST", day: 3, hour: 1, microsecond: {0, 0}, minute: 0, month: 11, second: 0, std_offset: 0, time_zone: "America/New_York",
utc_offset: -18000, year: 2013},
%DateTime{zone_abbr: "EDT", day: 10, hour: 3, microsecond: {0, 0}, minute: 0, month: 3, second: 0, std_offset: 3600, time_zone: "America/New_York",
utc_offset: -18000, year: 2013}]
"""
def stream_prev_from(date_time) do
Stream.unfold(from(date_time), fn {tag, date_time} -> if tag == :ok do {date_time, date_time |> prev_from} else nil end end)
end
end
|
data/web/deps/calendar/lib/calendar/date_time/tz_period.ex
| 0.920665
| 0.614423
|
tz_period.ex
|
starcoder
|
defmodule NeuralNetwork.Layer do
defstruct neurons: []
alias NeuralNetwork.Layer
alias NeuralNetwork.Neuron
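# Each neuron is a GenServer process; a layer is just a struct holding the
# list of neuron pids. `Parallel.each/2` is assumed to be a project-local
# helper that applies the given function to each element concurrently.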
def create(num) do
neurons = 1..num |> Enum.map(fn _ -> Neuron.create end)
%Layer{neurons: neurons}
end
def connect(src, dest) do
for src_pid <- src.neurons, dest_pid <- dest.neurons do
GenServer.call(src_pid, {:connect_to, dest_pid})
end
end
def set_outputs(layer, outputs) do
List.zip([layer.neurons, outputs])
|> Parallel.each(fn {neuron_pid, output} ->
GenServer.call(neuron_pid, {:set_output, output})
end)
end
def get_outputs(layer) do
Enum.map layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, :get_state).output
end
end
def prop_forward(layer) do
Parallel.each layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, :prop_forward)
end
end
def update_outputs(layer) do
Parallel.each layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, :update_output)
end
end
def prop_backward(layer, target_outputs) do
List.zip([layer.neurons, target_outputs])
|> Parallel.each(fn {neuron_pid, target_output} ->
GenServer.call(neuron_pid, {:prop_backward, target_output})
end)
end
def prop_backward(layer) do
Parallel.each layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, {:prop_backward, nil})
end
end
def adjust_weights(layer, target_outputs) do
List.zip([layer.neurons, target_outputs])
|> Parallel.each(fn {neuron_pid, target_output} ->
GenServer.call(neuron_pid, {:adjust_weights, target_output})
end)
end
def adjust_weights(layer) do
Parallel.each layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, {:adjust_weights, nil})
end
end
def get_in_conns(layer) do
Enum.map layer.neurons, fn neuron_pid ->
GenServer.call(neuron_pid, :get_in_conn)
end
end
def set_in_conns(layer, in_conns) do
List.zip([layer.neurons, in_conns])
|> Parallel.each(fn {neuron_pid, conn_list} ->
GenServer.call(neuron_pid, {:set_in_conn, conn_list})
end)
end
end
|
lib/neural_network/layer.ex
| 0.701202
| 0.552962
|
layer.ex
|
starcoder
|
defmodule ExPixBRCode.ValueObject do
@moduledoc """
Value object is a data structure that has no identity.
We use value objects as a means of defining a data structure with its validations.
Also, it has helpers for turning structs into maps (the opposite of
`ExPixBRCode.Changesets.cast_and_apply`).
"""
@iso8601_structs [
Date,
DateTime,
NaiveDateTime,
Time
]
defmacro __using__(_opts) do
quote do
use Ecto.Schema
import Ecto.Changeset
@typedoc """
Represent the schema type
"""
@type t :: %__MODULE__{}
@primary_key false
end
end
@doc "Transforms a struct and its inner fields to atom-maps"
def to_map(instance, opts \\ []) do
key_type = Keyword.get(opts, :key_type, :atom_keys)
serialize_timestamps = Keyword.get(opts, :serialize_timestamps, false)
instance
|> Map.drop([:__struct__, :__meta__])
|> Map.new(fn
{key, value} ->
{cast_key(key, key_type), do_cast_to_map(value, key_type, serialize_timestamps)}
end)
end
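# A hedged usage sketch (assuming some `%User{}` Ecto schema with an
# `:inserted_at` timestamp field):
#
#     ExPixBRCode.ValueObject.to_map(user, key_type: :string_keys, serialize_timestamps: true)
#     #=> %{"name" => "...", "inserted_at" => "2021-01-01T00:00:00Z"}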
defp do_cast_to_map(%schema{} = struct, key_type, serialize_timestamps) do
cond do
schema in @iso8601_structs and serialize_timestamps ->
date_format(struct)
schema in @iso8601_structs ->
struct
true ->
struct
|> Map.from_struct()
|> do_cast_to_map(key_type, serialize_timestamps)
end
end
defp do_cast_to_map(map, key_type, serialize_timestamps) when is_map(map) do
map
|> Map.drop([:__meta__])
|> Map.to_list()
|> Enum.map(fn
{k, v} -> {cast_key(k, key_type), do_cast_to_map(v, key_type, serialize_timestamps)}
end)
|> Enum.into(%{})
end
defp do_cast_to_map(list, key_type, serialize_timestamps) when is_list(list) do
Enum.map(list, fn
{k, v} -> {cast_key(k, key_type), do_cast_to_map(v, key_type, serialize_timestamps)}
v -> do_cast_to_map(v, key_type, serialize_timestamps)
end)
end
defp do_cast_to_map(value, _key_type, _serialize_timestamps), do: value
defp cast_key(key, :atom_keys), do: to_atom(key)
defp cast_key(key, :string_keys), do: to_string(key)
defp to_atom(v) when is_atom(v), do: v
defp to_atom(v), do: String.to_atom(v)
defp date_format(nil), do: nil
defp date_format(%Date{} = date), do: Date.to_iso8601(date)
defp date_format(%DateTime{} = datetime) do
datetime
|> DateTime.truncate(:second)
|> DateTime.to_iso8601()
end
defp date_format(%NaiveDateTime{} = datetime) do
datetime
|> NaiveDateTime.truncate(:second)
|> DateTime.from_naive!("Etc/UTC")
|> DateTime.to_iso8601()
end
end
|
lib/ex_pix_brcode/value_object.ex
| 0.820865
| 0.5119
|
value_object.ex
|
starcoder
|
defmodule Typo.Font.TrueTypeFont.Kern do
@moduledoc false
alias Typo, as: T
alias Typo.Font.TrueTypeFont
alias Typo.Font.TrueTypeFont.Kern
@type pairs_map :: %{optional({T.uint8(), T.uint16(), T.uint16()}) => number()}
@type t :: %__MODULE__{
kern_pairs: pairs_map()
}
defstruct kern_pairs: %{}
@doc """
Returns kerning adjustment for given glyph pair.
"""
@spec get_kern(TrueTypeFont.t(), Typo.gid(), Typo.gid()) :: number()
def get_kern(%TrueTypeFont{scale: s, tables: t}, left, right)
when (is_integer(left) or is_nil(left)) and is_integer(right) do
if Map.has_key?(t, :kern) do
kern = t.kern.kern_pairs
Map.get(kern, {1, left, right}, 0) * s
else
0
end
end
@spec parse(TrueTypeFont.t()) :: TrueTypeFont.t()
def parse(%TrueTypeFont{} = font) do
TrueTypeFont.load_table!(font, "kern", required: false)
|> parse_2(font)
end
@spec parse_2(nil | binary(), TrueTypeFont.t()) :: TrueTypeFont.t()
defp parse_2(nil, font), do: font
defp parse_2(<<0::16, n_subtables::16, data::binary>>, font) do
pairs = parse_3(%{}, n_subtables, data)
kern = %Kern{kern_pairs: pairs}
%TrueTypeFont{font | tables: Map.put(font.tables, :kern, kern)}
end
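# Binary layout matched by parse_2/2, parse_3/3 and parse_subtable/3
# (TrueType "kern" table, format 0 subtables):
#   header:   version::16, n_subtables::16
#   subtable: version::16, length::16, coverage::16,
#             n_pairs::16, search_range::16, entry_selector::16, range_shift::16,
#             then n_pairs of {left_gid::16, right_gid::16, value::signed-16}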
@spec parse_3(pairs_map(), non_neg_integer(), binary()) :: pairs_map()
defp parse_3(pairs, 0, <<_data::binary>>), do: pairs
defp parse_3(pairs, n_subtables, <<v::16, len::16, cov::16, data::binary>>) do
pairs_size = len - 6
with <<data::binary-size(pairs_size), rest::binary>> <- data do
case v do
0 ->
parse_subtable(pairs, cov, data)
|> parse_3(n_subtables - 1, rest)
_ ->
parse_3(pairs, n_subtables - 1, rest)
end
else
_ -> raise Typo.FontError, "invalid font \"kern\" table"
end
end
defp parse_3(_pairs, _n_subtables, <<>>),
do: raise(Typo.FontError, "invalid font \"kern\" table")
@spec parse_subtable(pairs_map(), non_neg_integer(), binary()) :: pairs_map()
defp parse_subtable(pairs, coverage, <<_n::16, _sr::16, _es::16, _rs::16, data::binary>>)
when is_map(pairs) and is_integer(coverage),
do: parse_subtable_pairs(pairs, coverage, data)
@spec parse_subtable_pairs(pairs_map(), non_neg_integer(), binary()) :: pairs_map()
defp parse_subtable_pairs(pairs, _coverage, <<>>), do: pairs
defp parse_subtable_pairs(
pairs,
coverage,
<<left::16, right::16, value::signed-16, rest::binary>>
) do
Map.put(pairs, {coverage, left, right}, value)
|> parse_subtable_pairs(coverage, rest)
end
end
|
lib/typo/font/true_type_font/kern.ex
| 0.804252
| 0.476701
|
kern.ex
|
starcoder
|
defmodule ChangesetMerger.ExpiresAt do
@moduledoc """
Several helper functions to generate date/time values to represent
an expiring value.
"""
@doc """
Generate a date/time value representing when something (e.g. a token) expires.
## Examples
iex> ChangesetMerger.ExpiresAt.generate("2017-09-21T04:50:34-05:00", 2, :days)
~U[2017-09-23 09:50:34Z]
iex> ChangesetMerger.ExpiresAt.generate("2017-09-21T04:50:34-05:00", 3, :minutes)
~U[2017-09-21 09:53:34Z]
iex> ChangesetMerger.ExpiresAt.generate("2019-02-04 21:40:15.397138Z", 3, :minutes)
~U[2019-02-04 21:43:15Z]
"""
def generate(num, units) do
DateTime.utc_now()
|> generate(num, units)
end
def generate(nil, num, units), do: generate(num, units)
def generate(start_date_time, num, units) when is_binary(start_date_time) do
start_date_time
|> from_iso8601()
|> generate(num, units)
end
def generate(start_date_time, num, :days) do
generate(start_date_time, num * 60 * 60 * 24, :second)
end
def generate(start_date_time, num, :minutes) do
generate(start_date_time, num * 60, :second)
end
def generate(start_date_time, num, :second) do
start_date_time
|> DateTime.add(num, :second)
|> DateTime.truncate(:second)
end
@doc """
Add an expiry timestamp to your changeset if none is already set
## Examples
iex> ChangesetMerger.create(%{"token_expires_at" => ChangesetMerger.ExpiresAt.from_iso8601("2015-09-21T04:50:34-05:00")}, %{token_expires_at: :utc_datetime})
...> |> ChangesetMerger.ExpiresAt.defaulted(:token_expires_at, "2017-09-21T04:50:34-05:00", 1, :days)
...> |> Map.get(:changes)
...> |> Map.get(:token_expires_at)
~U[2015-09-21 09:50:34Z]
iex> ChangesetMerger.create(%{"token_expires_at" => nil}, %{token_expires_at: :utc_datetime})
...> |> ChangesetMerger.ExpiresAt.defaulted(:token_expires_at, "2017-09-21T04:50:34-05:00", 1, :days)
...> |> Map.get(:changes)
...> |> Map.get(:token_expires_at)
~U[2017-09-22 09:50:34Z]
"""
def defaulted(changeset, field, num, units), do: defaulted(changeset, field, nil, num, units)
def defaulted(changeset, field, start_date_time, num, units) do
ChangesetMerger.defaulted(changeset, field, generate(start_date_time, num, units))
end
@doc """
Force a new expiry timestamp onto your changeset
## Examples
iex> ChangesetMerger.create(%{"token_expires_at" => ChangesetMerger.ExpiresAt.from_iso8601("2015-09-21T04:50:34-05:00")}, %{token_expires_at: :utc_datetime})
...> |> ChangesetMerger.ExpiresAt.force(:token_expires_at, "2017-09-21T04:50:34-05:00", 1, :days)
...> |> Map.get(:changes)
...> |> Map.get(:token_expires_at)
~U[2017-09-22 09:50:34Z]
iex> ChangesetMerger.create(%{"token_expires_at" => nil}, %{token_expires_at: :utc_datetime})
...> |> ChangesetMerger.ExpiresAt.force(:token_expires_at, "2017-09-21T04:50:34-05:00", 1, :days)
...> |> Map.get(:changes)
...> |> Map.get(:token_expires_at)
~U[2017-09-22 09:50:34Z]
"""
def force(changeset, field, num, units), do: force(changeset, field, nil, num, units)
def force(changeset, field, start_date_time, num, units) do
ChangesetMerger.force(changeset, field, generate(start_date_time, num, units))
end
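# Note: from_iso8601/1 deliberately matches only the success case, so an
# invalid ISO 8601 string raises a CaseClauseError instead of returning an
# error tuple.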
def from_iso8601(input) do
input
|> DateTime.from_iso8601()
|> case do
{:ok, dt, _offset} -> dt
end
end
end
|
lib/changeset_merger/expires_at.ex
| 0.826397
| 0.413714
|
expires_at.ex
|
starcoder
|
defmodule Attempt.Bucket.Dynamic do
@moduledoc """
Implementation of a Dynamic Token Bucket
This token bucket is designed to maximise
throughput without overloading the service
it is protecting.
For applications that require rate limiting
the token bucket `Attempt.Bucket.Token` is
recommended.
The Dynamic bucket maintains in its state the
current performance of the service it is
protecting by treating the `claim_token/1` call
as a proxy for service performance. Periodically
the number of requests per second is updated
and the bucket parameters adjusted to maximise
througput without overloading the external
service.
In addition Dynamic bucket provides a mechanism
to prevent retry storms. It does this by limiting
the number of retries as a percentage of overall
requests. Therefore as the failure rate goes up,
the number of retries will be throttled since
overall throughput will drop.
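A hedged usage sketch (`budget` being an `Attempt.Retry.Budget`; the option
name is taken from the struct defaults below):
    {:ok, bucket} = Attempt.Bucket.Dynamic.new(:api_bucket, burst_size: 10)
    Attempt.Bucket.Dynamic.claim_token(bucket, budget)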
"""
use GenServer
alias Attempt.Bucket
alias Attempt.Retry.Budget
require Logger
import Attempt.Errors
import Supervisor.Spec
defstruct name: nil,
# Add a token each per fill_rate milliseconds
fill_rate: 3,
# Don't allow the queue to expand forever
max_queue_length: 100,
# The pending queue
queue: nil,
# Available tokens
tokens: 0,
# Maximum number of tokens that can be consumed in a burst
burst_size: 10,
# Number of first requests
first_request_count: 0,
# Number of retry requests
retry_request_count: 0,
# Calculate performance over n milliseconds
performance_window: 2_000,
# Maximum percentage of retries
retry_percentage: 0.05
@type t :: struct()
@default_config %__MODULE__{}
@default_timeout 5_000
@spec new(atom(), Keyword.t() | t()) ::
        {:ok, t()} | {:error, {Exception.t(), String.t()}, t()}
def new(name, config \\ @default_config)
def new(name, config) when is_atom(name) and is_list(config) do
config =
@default_config
|> Map.delete(:__struct__)
|> Map.to_list()
|> Keyword.merge(config)
|> Enum.into(%{})
new(name, struct(__MODULE__, config))
end
def new(name, %__MODULE__{} = config) when is_atom(name) do
config = %__MODULE__{config | name: name}
bucket_worker = worker(__MODULE__, [name, config])
case DynamicSupervisor.start_child(Bucket.Supervisor, bucket_worker) do
{:ok, _pid} -> {:ok, config}
{:error, {:already_started, _}} -> {:error, already_started_error(config), config}
end
end
@spec new!(atom(), Keyword.t() | t()) :: t() | no_return()
def new!(name, config) do
case new(name, config) do
{:ok, bucket} -> bucket
error -> raise "Couldn't start bucket #{inspect(config.token_bucket)}: #{inspect(error)}"
end
end
@spec state(t()) :: {:ok, t()} | {:error, {Exception.t(), String.t()}}
def state(bucket) do
GenServer.call(bucket.name, :state)
end
def start_link(name, bucket \\ @default_config) do
bucket = %{bucket | tokens: bucket.burst_size, queue: :queue.new()}
GenServer.start_link(__MODULE__, bucket, name: name)
end
@spec stop(atom() | Retry.Budget.t() | Bucket.Token.t()) ::
:ok | {:error, {Exception.t(), String.t()}}
def stop(name) when is_atom(name) do
if pid = Process.whereis(name) do
DynamicSupervisor.terminate_child(Bucket.Supervisor, pid)
else
{:error, unknown_bucket_error(name)}
end
end
def stop(%Budget{token_bucket: %Bucket.Token{name: name}}) do
stop(name)
end
def stop(%Bucket.Token{name: name}) do
stop(name)
end
def init(budget) do
schedule_increment(budget)
{:ok, budget}
end
def claim_token(bucket, %Budget{} = budget) do
timeout = budget.timeout || @default_timeout
try do
GenServer.call(bucket.name, :claim_token, timeout)
catch
:exit, {:timeout, {GenServer, :call, [bucket_name, :claim_token, timeout]}} ->
{:error, timeout_error(bucket_name, timeout)}
end
end
def claim_token!(bucket, %Budget{} = budget) do
timeout = budget.timeout || @default_timeout
GenServer.call(bucket, :claim_token!, timeout)
end
# Callbacks
def handle_call(:claim_token, from, %{tokens: tokens} = bucket) when tokens > 0 do
bucket = process_queue(bucket)
if bucket.tokens > 0 do
bucket = decrement(bucket)
{:reply, {:ok, bucket.tokens}, bucket}
else
handle_call(:claim_token, from, bucket)
end
end
def handle_call(:claim_token, from, %{queue: queue} = bucket) do
if :queue.len(queue) >= bucket.max_queue_length do
{:reply, {:error, full_queue_error()}, bucket}
else
bucket = %{bucket | queue: :queue.in(from, queue)}
{:noreply, bucket}
end
end
def handle_call(:claim_token!, _from, bucket) do
if bucket.tokens > 0 do
bucket = decrement(bucket)
{:reply, {:ok, bucket.tokens}, bucket}
else
{:reply, {:error, no_tokens_error()}, bucket}
end
end
def handle_call(:state, _from, bucket) do
{:reply, {:ok, bucket}, bucket}
end
def handle_info(:increment_bucket, bucket) do
schedule_increment(bucket)
bucket = %{bucket | tokens: min(bucket.tokens + 1, bucket.burst_size)}
{:noreply, process_queue(bucket)}
end
defp process_queue(%{queue: queue, tokens: tokens} = bucket) do
if :queue.is_empty(queue) || tokens == 0 do
bucket
else
bucket = decrement(bucket)
{{:value, pid}, new_queue} = :queue.out(queue)
GenServer.reply(pid, {:ok, bucket.tokens})
process_queue(%{bucket | queue: new_queue})
end
end
defp decrement(bucket) do
%{bucket | tokens: bucket.tokens - 1}
end
defp schedule_increment(bucket) do
Process.send_after(self(), :increment_bucket, bucket.fill_rate)
end
end
|
lib/attempt/bucket/dynamic.ex
| 0.806243
| 0.4099
|
dynamic.ex
|
starcoder
|
defmodule Wonderland.Data.Either do
use Calculus
use Wonderland.TypeClass
use Wonderland.Combinator
@moduledoc """
Classic sum type which represents 2 alternatives
- Bifunctor
- Functor (as right)
- Monad (as right)
- Applicative (as right)
"""
@typep a :: term
@typep b :: term
@type t(a, b) :: __MODULE__.t(a, b)
defmacrop leftp(x) do
quote location: :keep do
{:leftp, unquote(x)}
end
end
defmacrop rightp(x) do
quote location: :keep do
{:rightp, unquote(x)}
end
end
defcalculus state,
export_return: false,
generate_opaque: false,
generate_return: false do
method when method in [:is_left?, :is_right?] ->
case state do
leftp(_) -> calculus(return: method == :is_left?)
rightp(_) -> calculus(return: method == :is_right?)
end
{:functor_fmap, f} ->
case state do
leftp(_) -> calculus(return: state |> construct)
rightp(x) -> calculus(return: f.(x) |> rightp |> construct)
end
{:monad_bind, f} ->
case state do
leftp(_) -> calculus(return: state |> construct)
rightp(x) -> calculus(return: f.(x))
end
{:applicative_ap, mf} ->
case unlift(mf) do
{:ok, f} ->
case state do
leftp(_) -> calculus(return: state |> construct)
rightp(x) -> calculus(return: f.(x) |> rightp |> construct)
end
{:error, _} ->
calculus(return: mf)
end
:wonder_unlift ->
case state do
leftp(x) -> calculus(return: {:error, x})
rightp(x) -> calculus(return: {:ok, x})
end
{:bifunctor_bimap, f, g} ->
case state do
leftp(x) -> calculus(return: f.(x) |> leftp |> construct)
rightp(x) -> calculus(return: g.(x) |> rightp |> construct)
end
end
@doc """
First constructor
## Examples
```
iex> x = Either.left(1)
iex> Either.is_left?(x)
true
```
"""
@spec left(a) :: t(a, b)
def left(x), do: x |> leftp |> construct
@doc """
Second constructor
## Examples
```
iex> x = Either.right(1)
iex> Either.is_right?(x)
true
```
"""
@spec right(b) :: t(a, b)
def right(x), do: x |> rightp |> construct
@doc """
If argument is `left(x)` then returns `true`
If argument is `right(x)` then returns `false`
Otherwise raise exception
## Examples
```
iex> x = Either.left(1)
iex> y = Either.right(1)
iex> Either.is_left?(x)
true
iex> Either.is_left?(y)
false
```
"""
@spec is_left?(t(a, b)) :: boolean
def is_left?(x), do: eval(x, :is_left?)
@doc """
If argument is `right(x)` then returns `true`
If argument is `left(x)` then returns `false`
Otherwise raise exception
## Examples
```
iex> x = Either.left(1)
iex> y = Either.right(1)
iex> Either.is_right?(x)
false
iex> Either.is_right?(y)
true
```
"""
@spec is_right?(t(a, b)) :: boolean
def is_right?(x), do: eval(x, :is_right?)
@behaviour Functor
@impl true
def functor_fmap(f, x), do: eval(x, {:functor_fmap, f})
@behaviour Monad
@impl true
def monad_bind(x, f), do: eval(x, {:monad_bind, f})
@behaviour Applicative
@impl true
def applicative_ap(mf, x), do: eval(x, {:applicative_ap, mf})
@behaviour Wonder
@impl true
def wonder_lift(x) when x in [nil, :undefined, :error, false], do: left(void())
def wonder_lift(x) when x in [:ok, true], do: right(x)
def wonder_lift({:error, x}), do: left(x)
def wonder_lift({:ok, x}), do: right(x)
def wonder_lift(x), do: right(x)
@impl true
def wonder_unlift(x), do: eval(x, :wonder_unlift)
@behaviour Bifunctor
@impl true
def bifunctor_bimap(f, g, x), do: eval(x, {:bifunctor_bimap, f, g})
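# A hedged sketch of the instances above (normally reached through the
# Wonderland type class dispatch rather than called directly):
#
#     Either.functor_fmap(&(&1 + 1), Either.right(1)) |> Either.wonder_unlift()
#     #=> {:ok, 2}
#
#     Either.functor_fmap(&(&1 + 1), Either.left(:oops)) |> Either.wonder_unlift()
#     #=> {:error, :oops}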
end
|
lib/wonderland/data/either.ex
| 0.898031
| 0.898944
|
either.ex
|
starcoder
|
defmodule SanbaseWeb.Graphql.BlockchainTypes do
use Absinthe.Schema.Notation
object :blockchain_metadata do
field(:blockchain, non_null(:string))
field(:slug, non_null(:string))
field(:ticker, non_null(:string))
field(:infrastructure, non_null(:string))
field(:created_on, :datetime)
# Metrics
@desc """
Exchange Metrics are those metrics that are computed by using both raw
on-chain data complemented by a labeled set of known exchange addresses. The
exchange addresses cannot all be known, so these metrics are always showing
an approximation of the real world. Exchange addresses are gathered by ...
"""
field(:has_exchange_metrics, :boolean)
@desc """
(NOTE: how do these differ from the exchange metrics? Both need some labels,
but some labels are harder to obtain than others, so just `label` metrics won't do it.)
Label Metrics are those metrics that are computed by using both raw on-chain
data complemented by a labeled set of addresses. The labels
"""
field(:has_label_metrics, :boolean)
@desc """
Top holders metrics are those metrics that show the current and historical
ranking of addresses according to the amount of coins/tokens they hold.
"""
field(:has_top_holders_metrics, :boolean)
@desc """
Exchange top holders metrics are those metrics that need both exchange address
labels and top holders metrics.
Examples for such metrics are `Amount held by top N exchange top holders` and
`Amount held by top N non-exchange top holders`.
"""
field(:has_exchange_top_holders_metrics, :boolean)
@desc """
On-Chain Financial metrics are those metrics that are computed by using both
raw on-chain data as well as financial data (price, marketcap or trading volume).
Examples for such metrics are MVRV and NVT.
"""
field(:has_onchain_financial_metrics, :boolean)
@desc """
Pure on-chain metrics are those metrics that are computed by using only raw
on-chain data. These metrics do not need any additional data to be known in
order to be computed.
Examples for such metrics are Transaction Volume and Daily Active Addresses.
"""
field(:has_pure_onchain_metrics, :boolean)
@desc """
Miners metrics are those metrics that show some statistics about on-chain
miners.
"""
field(:has_miners_metrics, :boolean)
@desc """
Balance metrics are those metrics that are showing the current and historical
balances of different addresses and assets.
"""
field(:has_balance_metrics, :boolean)
end
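# A hedged sketch of selecting this object in a GraphQL query (the parent
# field exposing :blockchain_metadata is assumed; Absinthe camelizes names):
#
#     {
#       blockchainMetadata {
#         blockchain
#         slug
#         hasExchangeMetrics
#       }
#     }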
end
|
lib/sanbase_web/graphql/schema/types/blockchain_types.ex
| 0.912864
| 0.473414
|
blockchain_types.ex
|
starcoder
|
defmodule StarkInfra.CreditNote.Transfer do
alias __MODULE__, as: Transfer
alias StarkInfra.Utils.Check
@moduledoc """
Groups Transfer related functions
"""
@doc """
CreditNote transfer payment information.
## Parameters (required):
- `:name` [string]: receiver full name. ex: "<NAME>"
- `:tax_id` [string]: receiver tax ID (CPF or CNPJ) with or without formatting. ex: "01234567890" or "20.018.183/0001-80"
- `:bank_code` [string]: code of the receiver Bank institution in Brazil.
- `:branch_code` [string]: receiver Bank account branch. Use '-' in case there is a verifier digit. ex: "1357-9"
- `:account_number` [string]: Receiver Bank account number. Use '-' before the verifier digit. ex: "876543-2"
## Options:
- `:account_type` [string, default "checking"]: Receiver Bank account type. This parameter only has effect on Pix Transfers. ex: "checking", "savings", "salary" or "payment"
- `:scheduled` [Date, DateTime or string, default now]: date or datetime when the transfer will be processed. May be pushed to next business day if necessary. ex: ~U[2020-03-26 19:32:35.418698Z]
- `:tags` [list of strings]: list of strings for reference when searching for transfers. ex: ["employees", "monthly"]
Attributes (return-only):
- `:id` [string, default nil]: unique id returned when Transfer is created. ex: "5656565656565656"
- `:amount` [integer]: amount in cents to be transferred. ex: 1234 (= R$ 12.34)
- `:status` [string, default nil]: current transfer status. ex: "success" or "failed"
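## Example
A minimal construction sketch (values are illustrative only):
    transfer = %StarkInfra.CreditNote.Transfer{
      name: "Jane Doe",
      tax_id: "012.345.678-90",
      bank_code: "341",
      branch_code: "1357-9",
      account_number: "876543-2"
    }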
"""
@enforce_keys [
:bank_code,
:branch_code,
:account_number,
:name,
:tax_id
]
defstruct [
:account_number,
:account_type,
:amount,
:bank_code,
:branch_code,
:id,
:name,
:scheduled,
:status,
:tags,
:tax_id
]
@type t() :: %__MODULE__{}
@doc false
def resource_maker(json) do
%Transfer{
account_number: json[:account_number],
account_type: json[:account_type],
amount: json[:amount],
bank_code: json[:bank_code],
branch_code: json[:branch_code],
id: json[:id],
name: json[:name],
scheduled: json[:scheduled] |> Check.datetime(),
status: json[:status],
tags: json[:tags],
tax_id: json[:tax_id]
}
end
end
|
lib/credit_note/transfer.ex
| 0.8618
| 0.550607
|
transfer.ex
|
starcoder
|
defmodule MultiFormat do
@moduledoc """
`MultiFormat` is a helper for `Phoenix.Router` when working with multi format
routes.
It allows routes to match for one or more extensions (or none) without
having to manually define all of them and assign pipelines with the
matching `plug :accepts, …`.
## Examples
The router:
defmodule MyAppWeb.Router do
use MyAppWeb, :router
# Use MultiFormat and supply default pipeline/ext pairs
use MultiFormat, match_html: "", match_json: "json"
pipeline :browser […]
pipeline :match_html do # :html would conflict with the Phoenix.Controller imports
plug(:accepts, ["html"])
end
pipeline :match_json do # :json would conflict with the Phoenix.Controller imports
plug(:accepts, ["json"])
end
scope "/", MyAppWeb do
# Use the default browser stack
pipe_through(:browser)
get("/", PageController, :index)
# Allows both `/test` and `/test.json`, based on the default pairs
# Does work with all the macros of Phoenix.Router
get("/test", PageController, :index) |> multi()
# Does allow only `/test2.json` based on the explicitly given pair
get("/test2", PageController, :index) |> multi(match_json: "json")
end
end
The controller:
defmodule MyAppWeb.PageController do
use MyAppWeb, :controller
# Overriding `action/2` makes matching on extensions easier
def action(conn, _) do
args = [conn, conn.params, conn.assigns]
apply(__MODULE__, action_name(conn), args)
end
# Match for the extensionless html setup
def index(conn, _params, %{match_ext: ""}) do
render(conn, "index.html")
end
# Match for the json route
def index(conn, _params, %{match_ext: "json"}) do
render(conn, "index.json")
end
end
"""
defmacro __using__(opts \\ []) do
opts = Enum.uniq_by(opts, fn {_pipeline, ext} -> ext end)
Module.put_attribute(__CALLER__.module, :multi_format_opts, opts)
quote do
import MultiFormat
end
end
defmacro multi(ast, opts \\ nil) do
opts =
case opts do
nil -> Module.get_attribute(__CALLER__.module, :multi_format_opts)
opts -> Enum.uniq_by(opts, fn {_pipeline, ext} -> ext end)
end
case ast do
{:match, meta, [method, path | rest]} ->
Enum.map(opts, fn {pipeline, ext} ->
{:match, meta, [method, handle_path(path, ext) | rest]}
|> build_scope(pipeline, ext)
end)
{method, meta, [path | rest]} ->
Enum.map(opts, fn {pipeline, ext} ->
{method, meta, [handle_path(path, ext) | rest]}
|> build_scope(pipeline, ext)
end)
end
end
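# For illustration, `get("/test", PageController, :index) |> multi(match_json: "json")`
# expands roughly to (see build_scope/3 and handle_path/2 below):
#
#     scope "/", assigns: %{multi_ext: "json"} do
#       pipe_through(:match_json)
#       get("/test.json", PageController, :index)
#     end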
defp build_scope(route, pipeline, ext) do
quote do
scope "/", assigns: %{multi_ext: unquote(ext)} do
pipe_through(unquote(pipeline))
unquote(route)
end
end
end
defp handle_path("/", _) do
raise ArgumentError, "Does only work with non-root paths."
end
defp handle_path(path, ext) do
case Path.basename(path) do
"*" <> _ ->
raise ArgumentError, "Does not work with wildcard paths."
":" <> _ ->
raise ArgumentError, "Does not work with paths ending with params."
_ ->
dotted_ext = if ext == "", do: "", else: ".#{ext}"
path <> dotted_ext
end
end
end
|
lib/multi_format.ex
| 0.870198
| 0.467636
|
multi_format.ex
|
starcoder
|
defmodule EEx.SyntaxError do
defexception [:message, :file, :line, :column]
@impl true
def message(exception) do
"#{exception.file}:#{exception.line}:#{exception.column}: #{exception.message}"
end
end
defmodule EEx do
@moduledoc ~S"""
EEx stands for Embedded Elixir. It allows you to embed
Elixir code inside a string in a robust way.
iex> EEx.eval_string("foo <%= bar %>", bar: "baz")
"foo baz"
## API
This module provides 3 main APIs for you to use:
1. Evaluate a string (`eval_string`) or a file (`eval_file`)
directly. This is the simplest API to use but also the
slowest, since the code is evaluated and not compiled before.
2. Define a function from a string (`function_from_string`)
or a file (`function_from_file`). This allows you to embed
the template as a function inside a module which will then
be compiled. This is the preferred API if you have access
to the template at compilation time.
3. Compile a string (`compile_string`) or a file (`compile_file`)
into Elixir syntax tree. This is the API used by both functions
above and is available to you if you want to provide your own
ways of handling the compiled template.
## Options
All functions in this module accept EEx-related options.
They are:
* `:file` - the file to be used in the template. Defaults to the given
file the template is read from or to "nofile" when compiling from a string.
* `:line` - the line to be used as the template start. Defaults to 1.
* `:indentation` - (since v1.11.0) an integer added to the column after every
new line. Defaults to 0.
* `:engine` - the EEx engine to be used for compilation.
* `:trim` - if true, trims whitespace left/right of quotation tags. If a
quotation tag appears on its own in a given line, line endings are also
removed. Defaults to false.
## Engine
EEx has the concept of engines which allows you to modify or
transform the code extracted from the given string or file.
By default, `EEx` uses the `EEx.SmartEngine` that provides some
conveniences on top of the simple `EEx.Engine`.
### Tags
`EEx.SmartEngine` supports the following tags:
<% Elixir expression - inline with output %>
<%= Elixir expression - replace with result %>
<%% EEx quotation - returns the contents inside %>
<%# Comments - they are discarded from source %>
All expressions that output something to the template
**must** use the equals sign (`=`). Since everything in
Elixir is an expression, there are no exceptions for this rule.
For example, while some template languages would special-case
`if/2` clauses, they are treated the same in EEx and
also require `=` in order to have their result printed:
<%= if true do %>
It is obviously true
<% else %>
This will never appear
<% end %>
To escape an EEx expression in EEx use `<%% content %>`. For example:
<%%= x + 3 %>
will be rendered as `<%= x + 3 %>`.
Notice that different engines may have different rules
for each tag. Other tags may be added in future versions.
### Macros
`EEx.SmartEngine` also adds some macros to your template.
An example is the `@` macro which allows easy data access
in a template:
iex> EEx.eval_string("<%= @foo %>", assigns: [foo: 1])
"1"
In other words, `<%= @foo %>` translates to:
<%= {:ok, v} = Access.fetch(assigns, :foo); v %>
The `assigns` extension is useful when the number of variables
required by the template is not specified at compilation time.
"""
@doc """
Generates a function definition from the string.
The kind (`:def` or `:defp`) must be given, the
function name, its arguments and the compilation options.
## Examples
iex> defmodule Sample do
...> require EEx
...> EEx.function_from_string(:def, :sample, "<%= a + b %>", [:a, :b])
...> end
iex> Sample.sample(1, 2)
"3"
"""
defmacro function_from_string(kind, name, source, args \\ [], options \\ []) do
quote bind_quoted: binding() do
info = Keyword.merge([file: __ENV__.file, line: __ENV__.line], options)
args = Enum.map(args, fn arg -> {arg, [line: info[:line]], nil} end)
compiled = EEx.compile_string(source, info)
case kind do
:def -> def unquote(name)(unquote_splicing(args)), do: unquote(compiled)
:defp -> defp unquote(name)(unquote_splicing(args)), do: unquote(compiled)
end
end
end
@doc """
Generates a function definition from the file contents.
The kind (`:def` or `:defp`) must be given, followed by the
function name, its arguments, and the compilation options.
This function is useful when you have templates that you
want to precompile inside a module for speed.
## Examples
# sample.eex
<%= a + b %>
# sample.ex
defmodule Sample do
require EEx
EEx.function_from_file(:def, :sample, "sample.eex", [:a, :b])
end
# iex
Sample.sample(1, 2)
#=> "3"
"""
defmacro function_from_file(kind, name, file, args \\ [], options \\ []) do
quote bind_quoted: binding() do
info = Keyword.merge(options, file: file, line: 1)
args = Enum.map(args, fn arg -> {arg, [line: 1], nil} end)
compiled = EEx.compile_file(file, info)
@external_resource file
@file file
case kind do
:def -> def unquote(name)(unquote_splicing(args)), do: unquote(compiled)
:defp -> defp unquote(name)(unquote_splicing(args)), do: unquote(compiled)
end
end
end
@doc """
Gets a string `source` and generates a quoted expression
that can be evaluated by Elixir or compiled to a function.
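## Examples
Evaluating the compiled quoted expression renders the template:
iex> EEx.compile_string("<%= 1 + 2 %>") |> Code.eval_quoted() |> elem(0)
"3"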
"""
@spec compile_string(String.t(), keyword) :: Macro.t()
def compile_string(source, options \\ []) when is_binary(source) and is_list(options) do
EEx.Compiler.compile(source, options)
end
@doc """
Gets a `filename` and generates a quoted expression
that can be evaluated by Elixir or compiled to a function.
"""
@spec compile_file(String.t(), keyword) :: Macro.t()
def compile_file(filename, options \\ []) when is_binary(filename) and is_list(options) do
options = Keyword.merge(options, file: filename, line: 1)
compile_string(File.read!(filename), options)
end
@doc """
Gets a string `source` and evaluates it using the given `bindings`.
## Examples
iex> EEx.eval_string("foo <%= bar %>", bar: "baz")
"foo baz"
"""
@spec eval_string(String.t(), keyword, keyword) :: any
def eval_string(source, bindings \\ [], options \\ [])
when is_binary(source) and is_list(bindings) and is_list(options) do
compiled = compile_string(source, options)
do_eval(compiled, bindings, options)
end
@doc """
Gets a `filename` and evaluates its contents using the given `bindings`.
## Examples
# sample.eex
foo <%= bar %>
# iex
EEx.eval_file("sample.eex", bar: "baz")
#=> "foo baz"
"""
@spec eval_file(String.t(), keyword, keyword) :: any
def eval_file(filename, bindings \\ [], options \\ [])
when is_binary(filename) and is_list(bindings) and is_list(options) do
options = Keyword.put(options, :file, filename)
compiled = compile_file(filename, options)
do_eval(compiled, bindings, options)
end
### Helpers
defp do_eval(compiled, bindings, options) do
{result, _} = Code.eval_quoted(compiled, bindings, options)
result
end
end
|
lib/eex/lib/eex.ex
| 0.828939
| 0.524943
|
eex.ex
|
starcoder
|
defmodule Mix.Project do
@moduledoc """
Defines and manipulates Mix projects.
A Mix project is defined by calling `use Mix.Project` in a module, usually
placed in `mix.exs`:
defmodule MyApp.MixProject do
use Mix.Project
def project do
[
app: :my_app,
version: "1.0.0"
]
end
end
## Configuration
In order to configure Mix, the module that `use`s `Mix.Project` should export
a `project/0` function that returns a keyword list representing configuration
for the project.
This configuration can be read using `Mix.Project.config/0`. Note that
`config/0` won't fail if a project is not defined; this allows many Mix tasks
to work without a project.
If a task requires a project to be defined or needs to access a
special function within the project, the task can call `Mix.Project.get!/0`
which fails with `Mix.NoProjectError` in the case a project is not
defined.
There isn't a comprehensive list of all the options that can be returned by
`project/0` since many Mix tasks define their own options that they read from
this configuration. For example, look at the "Configuration" section in the
documentation for the `Mix.Tasks.Compile` task.
These are a few options that are not used by just one Mix task (and will thus
be documented here):
* `:build_per_environment` - if `true`, builds will be *per-environment*. If
`false`, builds will go in `_build/shared` regardless of the Mix
environment. Defaults to `true`.
* `:aliases` - a list of task aliases. For more information, check out the
"Aliases" section in the documentation for the `Mix` module. Defaults to
`[]`.
* `:config_path` - a string representing the path of the main config
file. See `config_files/0` for more information. Defaults to
`"config/config.exs"`.
* `:default_task` - a string representing the default task to be run by
`mix` when no task is specified. Defaults to `"run"`.
* `:deps` - a list of dependencies of this project. Refer to the
documentation for the `Mix.Tasks.Deps` task for more information. Defaults
to `[]`.
* `:deps_path` - directory where dependencies are stored. Also see
`deps_path/1`. Defaults to `"deps"`.
* `:lockfile` - the name of the lockfile used by the `mix deps.*` family of
tasks. Defaults to `"mix.lock"`.
* `:preferred_cli_env` - a keyword list of `{task, env}` tuples where `task`
is the task name as an atom (for example, `:"deps.get"`) and `env` is the
preferred environment (for example, `:test`). This option overrides what
is specified by the tasks with the `@preferred_cli_env` attribute (see the
docs for `Mix.Task`). Defaults to `[]`.
* `:preferred_cli_target` - a keyword list of `{task, target}` tuples where
`task` is the task name as an atom (for example, `:test`) and `target`
is the preferred target (for example, `:host`). Defaults to `[]`.
For more options, keep an eye on the documentation for single Mix tasks; good
examples are the `Mix.Tasks.Compile` task and all the specific compiler tasks
(such as `Mix.Tasks.Compile.Elixir` or `Mix.Tasks.Compile.Erlang`).
Note that sometimes the same configuration option is mentioned in the
documentation for different tasks; this is just because it's common for many
tasks to read and use the same configuration option (for example,
`:erlc_paths` is used by `mix compile.erlang`, `mix compile.yecc`, and other
tasks).
## Erlang projects
Mix can be used to manage Erlang projects that don't have any Elixir code. To
ensure Mix tasks work correctly for an Erlang project, `language: :erlang` has
to be part of the configuration returned by `project/0`. This setting also
makes sure Elixir is not added as a dependency to the generated `.app` file or
to the escript generated with `mix escript.build`, and so on.
"""
@doc false
defmacro __using__(_) do
quote do
@after_compile Mix.Project
end
end
# Invoked after each Mix.Project is compiled.
@doc false
def __after_compile__(env, _binary) do
push(env.module, env.file)
end
# Push a project onto the project stack.
# Only the top of the stack can be accessed.
@doc false
def push(module, file \\ nil, app \\ nil) when is_atom(module) do
file = file || (module && List.to_string(module.__info__(:compile)[:source]))
config = Keyword.merge([app: app] ++ default_config(), get_project_config(module))
case Mix.ProjectStack.push(module, config, file) do
:ok ->
:ok
{:error, other} when is_binary(other) ->
Mix.raise(
"Trying to load #{inspect(module)} from #{inspect(file)}" <>
" but another project with the same name was already defined at #{inspect(other)}"
)
end
end
# Pops a project from the stack.
@doc false
def pop do
Mix.ProjectStack.pop()
end
# The configuration that is pushed down to dependencies.
@doc false
def deps_config(config \\ config()) do
[
build_embedded: config[:build_embedded],
build_per_environment: config[:build_per_environment],
consolidate_protocols: false,
consolidation_path: consolidation_path(config),
deps_path: deps_path(config),
env_path: build_path(config)
]
end
@doc """
Retrieves the current project if there is one.
If there is no current project, `nil` is returned. This
may happen in cases there is no `mix.exs` in the current
directory.
If you expect a project to be defined, i.e., it is a
requirement of the current task, you should call
`get!/0` instead.
"""
@spec get() :: module | nil
def get do
case Mix.ProjectStack.peek() do
%{name: name} -> name
_ -> nil
end
end
@doc """
Same as `get/0`, but raises an exception if there is no current project.
This is usually called by tasks that need additional
functions on the project to be defined. Since such
tasks usually depend on a project being defined, this
function raises a `Mix.NoProjectError` exception in
case no project is available.
"""
@spec get!() :: module
def get! do
get() || raise Mix.NoProjectError, []
end
@doc """
Returns the path to the file that defines the current project.
The majority of the time, it will point to a `mix.exs` file.
Returns `nil` if not inside a project.
"""
@doc since: "1.13.0"
@spec project_file() :: binary | nil
defdelegate project_file(), to: Mix.ProjectStack
@doc """
Returns the project configuration.
If there is no project defined, it still returns a keyword
list with default values. This allows many Mix tasks to work
without the need for an underlying project.
Note this configuration is cached once the project is
pushed onto the stack. Calling it multiple times won't
cause it to be recomputed.
Do not use `Mix.Project.config/0` to find the runtime configuration.
Use it only to configure aspects of your project (like
compilation directories) and not your application runtime.
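## Examples
The values depend on the project's `mix.exs`, for example:
Mix.Project.config()[:app]
#=> :my_app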
"""
@spec config() :: keyword
def config do
case Mix.ProjectStack.peek() do
%{config: config} -> config
_ -> default_config()
end
end
@doc """
Returns a list of project configuration files for this project.
This function is usually used in compilation tasks to trigger
a full recompilation whenever such configuration files change.
It returns the lock manifest, and all config files in the `config`
directory that do not start with a leading period (a file such as
`.my_config.exs` is therefore skipped).
Note: before Elixir v1.13.0, the `mix.exs` file was also included
as a config file, but since then it has been moved to its own
function called `project_file/0`.
"""
@spec config_files() :: [Path.t()]
def config_files do
Mix.ProjectStack.config_files()
end
@doc """
Returns the latest modification time from config files.
This function is usually used in compilation tasks to trigger
a full recompilation whenever such configuration files change.
For this reason, the mtime is cached to avoid file system lookups.
Note: before Elixir v1.13.0, the `mix.exs` file was also included
in the mtimes, but not anymore. You can compute its modification
date from the path returned by `project_file/0`.
"""
@doc since: "1.7.0"
@spec config_mtime() :: posix_mtime when posix_mtime: integer()
def config_mtime do
Mix.ProjectStack.config_mtime()
end
@doc """
Returns `true` if `config` is the configuration for an umbrella project.
When called with no arguments, tells whether the current project is
an umbrella project.
"""
@spec umbrella?(keyword) :: boolean
def umbrella?(config \\ config()) do
config[:apps_path] != nil
end
@doc """
Returns a map with the umbrella child applications paths.
These paths are based on the `:apps_path` and `:apps` configurations.
If the given project configuration identifies an umbrella project, the return
value is a map of `app => path` where `app` is a child app of the umbrella and
`path` is its path relative to the root of the umbrella project.
If the given project configuration does not identify an umbrella project,
`nil` is returned.
## Examples
Mix.Project.apps_paths()
#=> %{my_app1: "apps/my_app1", my_app2: "apps/my_app2"}
"""
@doc since: "1.4.0"
@spec apps_paths(keyword) :: %{optional(atom) => Path.t()} | nil
def apps_paths(config \\ config()) do
if apps_path = config[:apps_path] do
key = {:apps_paths, Mix.Project.get!()}
if cache = Mix.State.read_cache(key) do
cache
else
cache = config[:apps] |> umbrella_apps(apps_path) |> to_apps_paths(apps_path)
Mix.State.write_cache(key, cache)
end
end
end
defp umbrella_apps(nil, apps_path) do
case File.ls(apps_path) do
{:ok, apps} -> Enum.map(apps, &String.to_atom/1)
{:error, _} -> []
end
end
defp umbrella_apps(apps, _apps_path) when is_list(apps) do
apps
end
defp to_apps_paths(apps, apps_path) do
for app <- apps,
path = path_with_mix_exs_otherwise_warn(app, apps_path),
do: {app, path},
into: %{}
end
defp path_with_mix_exs_otherwise_warn(app, apps_path) do
path = Path.join(apps_path, Atom.to_string(app))
cond do
File.regular?(Path.join(path, "mix.exs")) ->
path
File.dir?(path) ->
Mix.shell().error(
"warning: path #{inspect(Path.relative_to_cwd(path))} is a directory but " <>
"it has no mix.exs. Mix won't consider this directory as part of your " <>
"umbrella application. Please add a \"mix.exs\" or set the \":apps\" key " <>
"in your umbrella configuration with all relevant apps names as atoms"
)
nil
true ->
# If it is a stray file, we just ignore it.
nil
end
end
@doc ~S"""
Runs the given `fun` inside the given project.
This function changes the current working directory and
loads the project at the given directory onto the project
stack.
A `post_config` can be passed that will be merged into
the project configuration.
`fun` is called with the module name of the given `Mix.Project`.
The return value of this function is the return value of `fun`.
## Examples
Mix.Project.in_project(:my_app, "/path/to/my_app", fn module ->
"Mix project is: #{inspect(module)}"
end)
#=> "Mix project is: MyApp.MixProject"
"""
@spec in_project(atom, Path.t(), keyword, (module -> result)) :: result when result: term
def in_project(app, path, post_config \\ [], fun)
def in_project(app, ".", post_config, fun) when is_atom(app) do
cached =
try do
load_project(app, post_config)
rescue
any ->
Mix.shell().error("Error while loading project #{inspect(app)} at #{File.cwd!()}")
reraise any, __STACKTRACE__
end
try do
fun.(cached)
after
Mix.Project.pop()
end
end
def in_project(app, path, post_config, fun) when is_atom(app) do
File.cd!(path, fn ->
in_project(app, ".", post_config, fun)
end)
end
@doc """
Returns the path where dependencies are stored for the given project.
If no configuration is given, the one for the current project is used.
The returned path will be expanded.
## Examples
Mix.Project.deps_path()
#=> "/path/to/project/deps"
"""
@spec deps_path(keyword) :: Path.t()
def deps_path(config \\ config()) do
dir = System.get_env("MIX_DEPS_PATH") || config[:deps_path]
Path.expand(dir)
end
@doc """
Returns all dependency app names.
The returned order is guaranteed to be sorted for proper
dependency resolution: for example, if A
depends on B, then B will be listed before A.
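## Examples
The exact names depend on the project's dependency graph, for example:
Mix.Project.deps_apps()
#=> [:jason, :plug_crypto, :plug]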
"""
@doc since: "1.11.0"
@spec deps_apps() :: [atom()]
def deps_apps() do
Mix.Dep.cached() |> Enum.map(& &1.app)
end
@doc """
Returns the SCMs of all dependencies as a map.
See `Mix.SCM` module documentation to learn more about SCMs.
## Options
* `:depth` - only returns dependencies to the depth level,
a depth of 1 will only return top-level dependencies
* `:parents` - starts the dependency traversal from the
given parents instead of the application root
## Examples
Mix.Project.deps_scms()
#=> %{foo: Mix.SCM.Path, bar: Mix.SCM.Git}
"""
@doc since: "1.10.0"
@spec deps_scms(keyword) :: %{optional(atom) => Mix.SCM.t()}
def deps_scms(opts \\ []) do
traverse_deps(opts, fn %{scm: scm} -> scm end)
end
@doc """
Returns the full path of all dependencies as a map.
## Options
* `:depth` - only returns dependencies to the depth level,
a depth of 1 will only return top-level dependencies
* `:parents` - starts the dependency traversal from the
given parents instead of the application root
## Examples
Mix.Project.deps_paths()
#=> %{foo: "deps/foo", bar: "custom/path/dep"}
"""
@spec deps_paths(keyword) :: %{optional(atom) => Path.t()}
def deps_paths(opts \\ []) do
traverse_deps(opts, fn %{opts: opts} -> opts[:dest] end)
end
defp traverse_deps(opts, fun) do
all_deps = Mix.Dep.cached()
parents = opts[:parents]
depth = opts[:depth]
if parents || depth do
parent_filter = if parents, do: &(&1.app in parents), else: & &1.top_level
all_deps
|> Enum.filter(parent_filter)
|> traverse_deps_map(fun)
|> traverse_deps_depth(all_deps, 1, depth || :infinity)
else
traverse_deps_map(all_deps, fun)
end
end
defp traverse_deps_map(deps, fun) do
for %{app: app} = dep <- deps, do: {app, fun.(dep)}, into: %{}
end
defp traverse_deps_depth(deps, _all_deps, depth, depth) do
deps
end
defp traverse_deps_depth(parents, all_deps, depth, target_depth) do
children =
for parent_dep <- all_deps,
Map.has_key?(parents, parent_dep.app),
%{app: app, opts: opts} <- parent_dep.deps,
do: {app, opts[:dest]},
into: %{}
case Map.merge(parents, children) do
^parents -> parents
new_parents -> traverse_deps_depth(new_parents, all_deps, depth + 1, target_depth)
end
end
@doc """
Clears the cached dependencies for the current environment.
Useful when dependencies need to be reloaded due to change of global state.
For example, Nerves uses this function to force all dependencies to be
reloaded after it updates the system environment. It goes roughly like
this:
1. Nerves fetches all dependencies and looks for the system specific deps
2. Once the system specific dep is found, it loads it alongside env vars
3. Nerves then clears the cache, forcing dependencies to be loaded again
4. Dependencies are loaded again, now with an updated environment
"""
@doc since: "1.7.0"
@spec clear_deps_cache() :: :ok
def clear_deps_cache() do
Mix.Dep.clear_cached()
:ok
end
@doc """
Returns the build path for the given project.
If no configuration is given, the one for the current project is used.
The returned path will be expanded.
## Examples
Mix.Project.build_path()
#=> "/path/to/project/_build/shared"
If `:build_per_environment` is set to `true`, it will create a new build per
environment:
Mix.env()
#=> :dev
Mix.Project.build_path()
#=> "/path/to/project/_build/dev"
"""
@spec build_path(keyword) :: Path.t()
def build_path(config \\ config()) do
System.get_env("MIX_BUILD_PATH") || config[:env_path] || env_path(config)
end
defp env_path(config) do
dir = System.get_env("MIX_BUILD_ROOT") || config[:build_path] || "_build"
subdir = build_target() <> build_per_environment(config)
Path.expand(dir <> "/" <> subdir)
end
defp build_target do
case Mix.target() do
:host -> ""
other -> "#{other}_"
end
end
defp build_per_environment(config) do
case config[:build_per_environment] do
true ->
Atom.to_string(Mix.env())
false ->
"shared"
other ->
Mix.raise("The :build_per_environment option should be a boolean, got: #{inspect(other)}")
end
end
@doc """
Returns the path where manifests are stored.
By default they are stored in the app path inside
the build directory. Umbrella applications have
the manifest path set to the root of the build directory.
Directories may be changed in future releases.
The returned path will be expanded.
## Examples
Mix.Project.manifest_path()
#=> "/path/to/project/_build/shared/lib/app/.mix"
"""
@spec manifest_path(keyword) :: Path.t()
def manifest_path(config \\ config()) do
app_path =
config[:app_path] ||
if app = config[:app] do
Path.join([build_path(config), "lib", Atom.to_string(app)])
else
build_path(config)
end
Path.join(app_path, ".mix")
end
@doc """
Returns the application path inside the build.
The returned path will be expanded.
## Examples
Mix.Project.app_path()
#=> "/path/to/project/_build/shared/lib/app"
"""
@spec app_path(keyword) :: Path.t()
def app_path(config \\ config()) do
config[:app_path] ||
cond do
app = config[:app] ->
Path.join([build_path(config), "lib", Atom.to_string(app)])
config[:apps_path] ->
raise "trying to access Mix.Project.app_path for an umbrella project but umbrellas have no app"
true ->
Mix.raise(
"Cannot access build without an application name, " <>
"please ensure you are in a directory with a mix.exs file and it defines " <>
"an :app name under the project configuration"
)
end
end
@doc """
Returns the paths the given project compiles to.
If no configuration is given, the one for the current project will be used.
The returned path will be expanded.
## Examples
Mix.Project.compile_path()
#=> "/path/to/project/_build/dev/lib/app/ebin"
"""
@spec compile_path(keyword) :: Path.t()
def compile_path(config \\ config()) do
Path.join(app_path(config), "ebin")
end
@doc """
Returns the path where protocol consolidations are stored.
The returned path will be expanded.
## Examples
Mix.Project.consolidation_path()
#=> "/path/to/project/_build/dev/lib/my_app/consolidated"
Inside umbrellas:
Mix.Project.consolidation_path()
#=> "/path/to/project/_build/dev/consolidated"
"""
@spec consolidation_path(keyword) :: Path.t()
def consolidation_path(config \\ config()) do
config[:consolidation_path] ||
if umbrella?(config) do
Path.join(build_path(config), "consolidated")
else
Path.join(app_path(config), "consolidated")
end
end
@doc false
@deprecated "Use Mix.Task.run(\"compile\", args) instead"
def compile(args, _config \\ []) do
Mix.Task.run("compile", args)
end
@doc """
Builds the project structure for the given application.
## Options
* `:symlink_ebin` - symlink ebin instead of copying it
"""
@spec build_structure(keyword, keyword) :: :ok
def build_structure(config \\ config(), opts \\ []) do
source = opts[:source] || File.cwd!()
target = app_path(config)
File.mkdir_p!(target)
target_ebin = Path.join(target, "ebin")
hard_copy? = config[:build_embedded]
_ =
cond do
opts[:symlink_ebin] ->
_ = Mix.Utils.symlink_or_copy(hard_copy?, Path.join(source, "ebin"), target_ebin)
match?({:ok, _}, :file.read_link(target_ebin)) ->
_ = File.rm_rf!(target_ebin)
File.mkdir_p!(target_ebin)
true ->
File.mkdir_p!(target_ebin)
end
for dir <- ~w(include priv) do
Mix.Utils.symlink_or_copy(hard_copy?, Path.join(source, dir), Path.join(target, dir))
end
:ok
end
@doc """
Ensures the project structure for the given project exists.
In case it does exist, it is a no-op. Otherwise, it is built.
"""
@spec ensure_structure(keyword, keyword) :: :ok
def ensure_structure(config \\ config(), opts \\ []) do
if File.exists?(app_path(config)) do
:ok
else
build_structure(config, opts)
end
end
@deprecated "Use Mix.Project.compile_path/1 instead"
def load_paths(config \\ config()) do
if umbrella?(config) do
[]
else
[compile_path(config)]
end
end
# Loads mix.exs in the current directory or loads the project from the
# mixfile cache and pushes the project onto the project stack.
defp load_project(app, post_config) do
Mix.ProjectStack.post_config(post_config)
if cached = Mix.State.read_cache({:app, app}) do
{project, file} = cached
push(project, file, app)
project
else
file = Path.expand("mix.exs")
old_proj = get()
{new_proj, file} =
if File.regular?(file) do
old_undefined = Code.get_compiler_option(:no_warn_undefined)
try do
Code.compiler_options(relative_paths: false, no_warn_undefined: :all)
_ = Code.compile_file(file)
get()
else
^old_proj -> Mix.raise("Could not find a Mix project at #{file}")
new_proj -> {new_proj, file}
after
Code.compiler_options(relative_paths: true, no_warn_undefined: old_undefined)
end
else
push(nil, file, app)
{nil, "nofile"}
end
Mix.State.write_cache({:app, app}, {new_proj, file})
new_proj
end
end
defp default_config do
[
aliases: [],
build_embedded: false,
build_per_environment: true,
build_scm: Mix.SCM.Path,
config_path: "config/config.exs",
consolidate_protocols: true,
default_task: "run",
deps: [],
deps_path: "deps",
elixirc_paths: ["lib"],
erlc_paths: ["src"],
erlc_include_path: "include",
erlc_options: [],
lockfile: "mix.lock",
preferred_cli_env: [],
start_permanent: false
]
end
@private_config [:app_path, :build_scm, :env_path]
defp get_project_config(nil), do: []
defp get_project_config(atom), do: atom.project |> Keyword.drop(@private_config)
end
|
lib/mix/lib/mix/project.ex
| 0.872239
| 0.536191
|
project.ex
|
starcoder
|
defmodule Edeliver do
@moduledoc """
Execute edeliver tasks on the production / staging nodes.
This internal module provides functions on the nodes which are
used by some edeliver tasks e.g. to get the running release version
(`edeliver version`), show the pending migrations
(`edeliver show migrations`) or install pending migrations
(`edeliver migrate`).
In addition, it is the edeliver application callback module
and starts a process registered locally as `Edeliver`, whose only
purpose is to make it possible to detect whether the release was
started successfully. This requires edeliver to be started as the
last application in the release.
"""
use Application
use GenServer
@doc """
Starts the edeliver application
including the `Edeliver.Supervisor` process supervising the
`Edeliver` generic server.
"""
@spec start(term, term) :: {:ok, pid}
def start(_type, _args) do
import Supervisor.Spec, warn: false
children = [worker(__MODULE__, [], [name: __MODULE__])]
options = [strategy: :one_for_one, name: Edeliver.Supervisor]
Supervisor.start_link(children, options)
end
@doc "Starts this gen-server registered locally as `Edeliver`"
@spec start_link() :: {:ok, pid}
def start_link(), do: GenServer.start_link(__MODULE__, [], name: __MODULE__)
@doc """
Runs the edeliver command on the erlang node.
It is invoked as:
```
bin/$APP rpc 'Elixir.Edeliver.run_command([command_name, \"$APP\", arguments...])'
```
The first argument must be the name of the command, the second argument the
name of the main application, and all further arguments are passed to the
function whose name is equal to the command name.
"""
@spec run_command(args::[term]) :: no_return
def run_command([:monitor_startup_progress, application_name, :verbose]) do
:error_logger.add_report_handler Edeliver.StartupProgress
monitor_startup_progress(application_name)
:error_logger.delete_report_handler Edeliver.StartupProgress
end
def run_command([:monitor_startup_progress, application_name | _]) do
monitor_startup_progress(application_name)
end
def run_command([command_name, application_name | arguments]) when is_atom(command_name) do
application_name = String.to_atom(application_name)
{^application_name, _description, application_version} = :application.which_applications |> List.keyfind(application_name, 0)
application_version = to_string application_version
apply __MODULE__, command_name, [application_name, application_version | arguments]
end
@doc """
Waits until the edeliver application is started.
If the edeliver application is added as the last application in the `:applications` section of
the `application/0` fun in `mix.exs`, this waits until all applications are started.
This can be used as an rpc call after running the asynchronous `bin/$APP start` command to
wait until all applications have started and then return `:ok`.
"""
@spec monitor_startup_progress(application_name::atom) :: :ok
def monitor_startup_progress(application_name) do
edeliver_pid = Process.whereis __MODULE__
if is_pid(edeliver_pid) do
:ok
else
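# `receive ... after` acts as a 500 ms sleep between polls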
receive do after 500 -> :ok end
monitor_startup_progress(application_name)
end
end
@doc """
Returns the running release version
which is either the `:current` version or the `:permanent` version.
"""
@spec release_version(application_name::atom, application_version::String.t) :: String.t
def release_version(application_name, _application_version \\ nil) do
releases = :release_handler.which_releases
application_name = Atom.to_charlist application_name
case (for {name, version, _apps, status} <- releases, status == :current and name == application_name, do: to_string(version)) do
[current_version] -> current_version
_ ->
case (for {name, version, _apps, status} <- releases, status == :permanent and name == application_name, do: to_string(version)) do
# return the version as a string, matching the @spec
[permanent_version] -> permanent_version
end
end
end
@doc """
Prints the pending ecto migrations
"""
def list_pending_migrations(application_name, application_version, ecto_repository \\ '') do
repository = ecto_repository!(application_name, ecto_repository)
migrator = Ecto.Migrator
versions = migrator.migrated_versions(repository)
pending_migrations = migrations_for(migrations_dir(application_name, application_version))
|> Enum.filter(fn {version, _name, _file} -> not (version in versions) end)
|> Enum.reverse
|> Enum.map(fn {version, name, _file} -> {version, name} end)
pending_migrations |> Enum.each(fn {version, name} ->
warning "pending: #{name} (#{version})"
end)
end
@doc """
Runs the pending ecto migrations
"""
def migrate(application_name, application_version, ecto_repository, direction, migration_version \\ :all) when is_atom(direction) do
options = if migration_version == :all, do: [all: true], else: [to: to_string(migration_version)]
migrator = Ecto.Migrator
migrator.run(ecto_repository!(application_name, ecto_repository), migrations_dir(application_name, application_version), direction, options)
end
@doc """
Returns the current directory containing the ecto migrations.
"""
def migrations_dir(application_name, application_version) do
# use priv dir from installed version
lib_dir = :code.priv_dir(application_name) |> to_string |> Path.dirname |> Path.dirname
application_with_version = "#{Atom.to_string(application_name)}-#{application_version}"
Path.join([lib_dir, application_with_version, "priv", "repo", "migrations"])
end
def init(args) do
{:ok, args}
end
defp ecto_repository!(_application_name, ecto_repository = [_|_] ) do
# repository name was passed as ECTO_REPOSITORY env by the erlang-node-execute rpc call
List.to_atom ecto_repository
end
defp ecto_repository!(application_name, _ecto_repository) do
case System.get_env "ECTO_REPOSITORY" do # ECTO_REPOSITORY env was set when the node was started
ecto_repository = <<_,_::binary>> ->
ecto_repository_module = ecto_repository |> to_charlist |> List.to_atom
if maybe_ecto_repo?(ecto_repository_module) do
ecto_repository_module
else
error! "Module '#{ecto_repository_module}' is not an ecto repository.\n Please set the correct repository module in the edeliver config as ECTO_REPOSITORY env\n or remove that value to use autodetection of that module."
end
_ ->
case ecto_repos_from_config(application_name) do
{:ok, [ecto_repository_module]} -> ecto_repository_module
{:ok, modules = [_|_]} -> error! "Found several ecto repository modules (#{inspect modules}).\n Please specify the repository to use in the edeliver config as ECTO_REPOSITORY env."
:error ->
case Enum.filter(:erlang.loaded |> Enum.reverse, &ecto_1_0_repo?/1) do
[ecto_repository_module] -> ecto_repository_module
[] -> error! "No ecto repository module found.\n Please specify the repository in the edeliver config as ECTO_REPOSITORY env."
modules = [_|_] -> error! "Found several ecto repository modules (#{inspect modules}).\n Please specify the repository to use in the edeliver config as ECTO_REPOSITORY env."
end
end
end
end
defp ecto_repos_from_config(application_name) do
Application.fetch_env(application_name, :ecto_repos)
end
defp maybe_ecto_repo?(module) do
if :erlang.module_loaded(module) do
exports = module.module_info(:exports)
# :__adapter__ for ecto versions >= 2.0, :__repo__ for ecto versions < 2.0
Keyword.get(exports, :__adapter__, nil) || Keyword.get(exports, :__repo__, false)
else
false
end
end
defp ecto_1_0_repo?(module) do
if :erlang.module_loaded(module) do
module.module_info(:exports)
|> Keyword.get(:__repo__, false)
else
false
end
end
# taken from https://github.com/elixir-lang/ecto/blob/master/lib/ecto/migrator.ex#L183
defp migrations_for(directory) do
query = Path.join(directory, "*")
for entry <- Path.wildcard(query),
info = extract_migration_info(entry),
do: info
end
defp extract_migration_info(file) do
base = Path.basename(file)
ext = Path.extname(base)
case Integer.parse(Path.rootname(base)) do
{integer, "_" <> name} when ext == ".exs" -> {integer, name, file}
_ -> nil
end
end
# defp info(message), do: IO.puts "==> #{IO.ANSI.green}#{message}#{IO.ANSI.reset}"
defp warning(message), do: IO.puts "==> #{IO.ANSI.yellow}#{message}#{IO.ANSI.reset}"
defp error!(message) do
IO.puts "==> #{IO.ANSI.red}#{message}#{IO.ANSI.reset}"
throw "error"
end
end
|
lib/edeliver.ex
| 0.90372
| 0.788543
|
edeliver.ex
|
starcoder
|
defmodule ExACN.PDU.Flags do
defstruct length: false, vector: false, header: false, data: false
@moduledoc """
Functions for processing flags at the beginning of PDUs.
These flags are encoded in the first four bits of the PDU and indicate if it
shares common data with the previous packet or requires a longer field to
store the length.
"""
@typedoc """
Named PDU flags
"""
@type t :: %ExACN.PDU.Flags{length: boolean(), vector: boolean(), header: boolean(), data: boolean()}
# Converts an integer 1 or 0 to a boolean value
@spec int_to_bool(integer()) :: boolean()
defp int_to_bool(1), do: true
defp int_to_bool(0), do: false
# Converts a boolean value into an integer 1 or 0 for encoding
@spec bool_to_int(boolean()) :: integer()
defp bool_to_int(true), do: 1
defp bool_to_int(false), do: 0
@doc """
Reads flags from the beginning of a binary or bit string.
Only the first four bits are checked; the rest is ignored. It will fail if the
bitstring is fewer than 4 bits long.
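## Examples
Decoding the four-bit pattern `1010`:
iex> ExACN.PDU.Flags.decode_flags(<<0b1010::size(4)>>)
%ExACN.PDU.Flags{length: true, vector: false, header: true, data: false}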
"""
@spec decode_flags(binary() | bitstring()) :: t
def decode_flags(data) do
<< length_int::size(1), vector_int::size(1), header_int::size(1), data_int::size(1), _::bits >> = data
length_flag = int_to_bool(length_int)
vector_flag = int_to_bool(vector_int)
header_flag = int_to_bool(header_int)
data_flag = int_to_bool(data_int)
%ExACN.PDU.Flags{length: length_flag, vector: vector_flag, header: header_flag, data: data_flag}
end
@doc """
Encodes the flags as a bitstring.
The resulting bitstring will be four bits long.
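## Examples
Setting only the `length` flag yields the bits `1000`:
iex> ExACN.PDU.Flags.encode_flags(%ExACN.PDU.Flags{length: true})
<<8::size(4)>>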
"""
@spec encode_flags(t) :: bitstring()
def encode_flags(flags) do
length_int = bool_to_int(flags.length)
vector_int = bool_to_int(flags.vector)
header_int = bool_to_int(flags.header)
data_int = bool_to_int(flags.data)
<< length_int::size(1), vector_int::size(1), header_int::size(1), data_int::size(1) >>
end
@doc """
Calculate the number of bits required to encode the length value for a PDU with the given
flags.
"""
@spec length_bits(t) :: integer()
def length_bits(%ExACN.PDU.Flags{length: true}), do: 20
def length_bits(%ExACN.PDU.Flags{length: false}), do: 12
@doc """
Calculate the number of bytes required to encode the preamble (flags and length) for a PDU
with the given flags.
"""
@spec preamble_bytes(t) :: integer()
def preamble_bytes(%ExACN.PDU.Flags{length: true}), do: 3
def preamble_bytes(%ExACN.PDU.Flags{length: false}), do: 2
end
|
lib/ex_acn/pdu/flags.ex
| 0.81309
| 0.717556
|
flags.ex
|
starcoder
|
defmodule Sparklinex.MogrifyDraw do
import Mogrify
def create_canvas(width, height, background_color) do
%Mogrify.Image{path: "test.png", ext: "png"}
|> custom("size", "#{width}x#{height}")
|> canvas(background_color)
end
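# Note: the `x / 1` divisions below coerce integers to floats, since
# :io_lib.format/2's ~f directive accepts only floats; ~f prints six
# decimal places by default (e.g. 10 -> "10.000000").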
def draw_line(canvas, {{x1, y1}, {x2, y2}}) do
custom(
canvas,
"draw",
"line #{to_string(:io_lib.format("~f, ~f ~f, ~f", [x1 / 1, y1 / 1, x2 / 1, y2 / 1]))}"
)
end
def draw_line(canvas, {{x1, y1}, {x2, y2}}, color) do
canvas
|> set_line_color(color)
|> draw_line({{x1, y1}, {x2, y2}})
end
def draw_lines(canvas, coord_pairs) do
Enum.reduce(coord_pairs, canvas, fn {p1, p2}, canvas -> draw_line(canvas, {p1, p2}) end)
end
def polygon(canvas, coords, color) do
canvas
|> custom("fill", color)
|> polygon(coords)
end
def polygon(canvas, coords) do
coords_string =
coords
|> Enum.map(fn {x, y} -> to_string(:io_lib.format("~f, ~f", [x / 1, y / 1])) end)
|> Enum.join(" ")
canvas
|> custom("draw", "polygon #{coords_string}")
end
def draw_box(canvas, {x, y}, offset, color) do
rectangle(canvas, {x - offset, y - offset}, {x + offset, y + offset}, color)
end
def draw_box(canvas, {x, y}, offset) do
rectangle(canvas, {x - offset, y - offset}, {x + offset, y + offset})
end
def rectangle(canvas, {upper_left_x, upper_left_y}, {lower_right_x, lower_right_y}, color) do
canvas
|> custom("fill", color)
|> rectangle({upper_left_x, upper_left_y}, {lower_right_x, lower_right_y})
end
def rectangle(canvas, {upper_left_x, upper_left_y}, {lower_right_x, lower_right_y}) do
canvas
|> custom(
"draw",
"rectangle #{
to_string(
:io_lib.format("~f,~f ~f,~f", [
upper_left_x / 1,
upper_left_y / 1,
lower_right_x / 1,
lower_right_y / 1
])
)
}"
)
end
def set_line_color(canvas, color) do
canvas
|> custom("stroke", color)
end
end
|
lib/sparklinex/mogrify_draw.ex
| 0.627495
| 0.619155
|
mogrify_draw.ex
|
starcoder
|
defmodule DataMatrix.Encode do
@moduledoc false
@symbol_capacity Code.eval_file("lib/datamatrix/static/total_data_codewords.tuple") |> elem(0)
@ascii_digit 48..57
@ascii_pad 129
@upper_shift 235
@doc """
## Examples
iex> DataMatrix.Encode.encode("1234569")
{:ok, 1, <<142, 164, 186, 58, 129>>}
iex> DataMatrix.Encode.encode("Aa999")
{:ok, 1, <<66, 98, 229, 58, 129>>}
iex> DataMatrix.Encode.encode("AAAAAAAAA")
{:ok, 3, <<66, 66, 66, 66, 66, 66, 66, 66, 66, 129, 101, 251>>}
"""
def encode(binary), do: encode(binary, :square)
@doc """
"""
def encode(binary, version) do
data_codewords = encode_ascii(binary)
data_codewords_length = byte_size(data_codewords)
case find_symbol(data_codewords_length, version) do
{:ok, version} ->
{:ok, version,
data_codewords <>
randomize_253_state(
data_codewords_length,
elem(@symbol_capacity, version) - data_codewords_length
)}
{:error, error} ->
{:error, error}
end
end
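# ASCII encodation: a digit pair packs into one codeword as 130 + value
# (e.g. "12" -> <<142>>); other ASCII bytes become codepoint + 1, and
# bytes >= 128 are prefixed with the upper-shift codeword.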
defp encode_ascii(<<tens, unit, rest::binary>>)
when tens in @ascii_digit and unit in @ascii_digit do
encoded = 130 + (tens - 48) * 10 + (unit - 48)
<<encoded>> <> encode_ascii(rest)
end
defp encode_ascii(<<character, rest::binary>>) when character in 0..127 do
<<character + 1>> <> encode_ascii(rest)
end
defp encode_ascii(<<character, rest::binary>>) when character in 128..255 do
encoded = character - 128 + 1
<<@upper_shift>> <> <<encoded>> <> encode_ascii(rest)
end
defp encode_ascii(<<>>), do: <<>>
defp find_symbol(data_length, version) when version in 0..29 do
if data_length <= elem(@symbol_capacity, version) do
{:ok, version}
else
{:error, "Specified version is too small for the data."}
end
end
defp find_symbol(data_length, :rectangle) do
find_symbol_by_offset(data_length, 24)
end
defp find_symbol(data_length, _) do
find_symbol_by_offset(data_length, 0)
end
defp find_symbol_by_offset(data_length, offset) do
version =
@symbol_capacity
|> Tuple.to_list()
|> Stream.drop(offset)
|> Enum.find_index(fn capacity ->
capacity >= data_length
end)
if version do
{:ok, version + offset}
else
{:error, "The data is too long."}
end
end
defp randomize_253_state(_, 0), do: <<>>
defp randomize_253_state(position, n) do
<<@ascii_pad>> <> do_randomize_253_state(position + 1, n - 1)
end
defp do_randomize_253_state(_, 0), do: <<>>
defp do_randomize_253_state(position, n) do
pseudo_random = @ascii_pad + rem(149 * position, 253) + 1
pseudo_random =
if pseudo_random <= 254 do
pseudo_random
else
pseudo_random - 254
end
<<pseudo_random>> <> do_randomize_253_state(position + 1, n - 1)
end
end
|
lib/datamatrix/encode.ex
| 0.591605
| 0.405449
|
encode.ex
|
starcoder
|
defmodule Appsignal.TransactionRegistry do
@moduledoc """
Internal module which keeps a registry of the transaction handles
linked to their originating process.
This is used on various places to link a calling process to its transaction.
For instance, the `Appsignal.ErrorHandler` module uses it to be able to
complete the transaction in case the originating process crashed.
The transactions are stored in an ETS table (with
`{:write_concurrency, true}`, so no bottleneck is created); and the
originating process is monitored to clean up the ETS table when the
process has finished.
"""
use GenServer
require Logger
@table :"$appsignal_transaction_registry"
alias Appsignal.Transaction
@spec start_link :: {:ok, pid}
def start_link do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@doc """
Register the current process as the owner of the given transaction.
"""
@spec register(Transaction.t()) :: :ok
def register(transaction) do
pid = self()
if Appsignal.Config.active?() && registry_alive?() do
monitor_reference = GenServer.call(__MODULE__, {:monitor, pid})
true = :ets.insert(@table, {pid, transaction, monitor_reference})
:ok
else
:ok
end
end
@doc """
Given a process ID, return its associated transaction.
"""
@spec lookup(pid) :: Transaction.t() | nil
def lookup(pid) do
case Appsignal.Config.active?() && registry_alive?() && :ets.lookup(@table, pid) do
[{^pid, %Transaction{} = transaction, _}] -> transaction
[{^pid, %Transaction{} = transaction}] -> transaction
[{^pid, :ignore}] -> :ignored
_ -> nil
end
end
@spec lookup(pid, boolean) :: Transaction.t() | nil | :removed
@doc false
def lookup(pid, return_removed) do
IO.warn(
"Appsignal.TransactionRegistry.lookup/2 is deprecated. Use Appsignal.TransactionRegistry.lookup/1 instead"
)
case registry_alive?() && :ets.lookup(@table, pid) do
[{^pid, :removed}] ->
case return_removed do
false -> nil
true -> :removed
end
[{^pid, transaction, _}] ->
transaction
[{^pid, transaction}] ->
transaction
false ->
nil
[] ->
nil
end
end
@doc """
Unregister the current process as the owner of the given transaction.
"""
@spec remove_transaction(Transaction.t()) :: :ok | {:error, :not_found} | {:error, :no_registry}
def remove_transaction(%Transaction{} = transaction) do
if registry_alive?() do
GenServer.cast(__MODULE__, {:demonitor, transaction})
GenServer.call(__MODULE__, {:remove, transaction})
else
{:error, :no_registry}
end
end
@doc """
Ignore a process in the error handler.
"""
@spec ignore(pid()) :: :ok
def ignore(pid) do
if registry_alive?() do
:ets.insert(@table, {pid, :ignore})
:ok
else
{:error, :no_registry}
end
end
@doc """
Check if a process is ignored.
"""
@deprecated "Use Appsignal.TransactionRegistry.lookup/1 instead."
@spec ignored?(pid()) :: boolean()
def ignored?(pid) do
case registry_alive?() && :ets.lookup(@table, pid) do
[{^pid, :ignore}] -> true
_ -> false
end
end
defmodule State do
@moduledoc false
defstruct table: nil
end
def init([]) do
table =
:ets.new(@table, [:set, :named_table, {:keypos, 1}, :public, {:write_concurrency, true}])
{:ok, %State{table: table}}
end
def handle_call({:remove, transaction}, _from, state) do
reply =
case pids_and_monitor_references(transaction) do
[[_pid, _reference] | _] = pids_and_refs ->
delete(pids_and_refs)
[[_pid] | _] = pids ->
delete(pids)
[] ->
{:error, :not_found}
end
{:reply, reply, state}
end
def handle_call({:monitor, pid}, _from, state) do
monitor_reference = Process.monitor(pid)
{:reply, monitor_reference, state}
end
def handle_cast({:demonitor, %Transaction{} = transaction}, state) do
transaction
|> pids_and_monitor_references()
|> demonitor
{:noreply, state}
end
def handle_info({:DOWN, _ref, :process, pid, _reason}, state) do
# we give the error handler some time to process the error report
Process.send_after(self(), {:delete, pid}, 5000)
{:noreply, state}
end
def handle_info({:delete, pid}, state) do
:ets.delete(@table, pid)
{:noreply, state}
end
def handle_info(_msg, state) do
{:noreply, state}
end
defp delete([[pid, _] | tail]) do
:ets.delete(@table, pid)
delete(tail)
end
defp delete([[pid] | tail]) do
:ets.delete(@table, pid)
delete(tail)
end
defp delete([]), do: :ok
defp demonitor([[_, reference] | tail]) do
Process.demonitor(reference)
demonitor(tail)
end
defp demonitor([_ | tail]), do: demonitor(tail)
defp demonitor([]), do: :ok
defp registry_alive? do
pid = Process.whereis(__MODULE__)
!is_nil(pid) && Process.alive?(pid)
end
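# Entries are written either as {pid, transaction, monitor_ref} (by register/1)
# or as two-tuples such as {pid, :ignore}, so both match shapes are queried.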
defp pids_and_monitor_references(transaction) do
:ets.match(@table, {:"$1", transaction, :"$2"}) ++ :ets.match(@table, {:"$1", transaction})
end
end
|
lib/appsignal/transaction/registry.ex
| 0.858274
| 0.505005
|
registry.ex
|
starcoder
|
defmodule EctoImmigrant.Migration do
@moduledoc """
Data migrations are used to modify the data in your database over time.
This module provides many helpers for migrating the database,
allowing developers to use Elixir to alter their storage in
a way that is database independent.
Here is an example:
defmodule ExampleApp.Repo.DataMigrations.AddJohn do
use EctoImmigrant.Migration
alias ExampleApp.Repo
alias ExampleApp.Person
def up do
Repo.insert(%Person{id: 123, first_name: "John", last_name: "Doe", age: 78})
end
def down do
Repo.delete(%Person{id: 123, first_name: "John", last_name: "Doe", age: 78})
end
end
Note that data migrations have `up/0` and `down/0` functions, which update your data
and revert the update, respectively.
EctoImmigrant provides some mix tasks to help developers work with migrations:
* `mix ecto_immigrant.gen.migration` # Generates a new data migration for the repo
* `mix ecto_immigrant.migrate` # Runs the repository data migrations
* `mix ecto_immigrant.rollback` # Reverts applied data migrations from the repository
* `mix ecto_immigrant.migrations` # Displays the repository data migration status
Run the `mix help COMMAND` for more information.
## Transactions
By default, Ecto_Immigrant runs all migrations inside a transaction. That's not always
ideal: for example, PostgreSQL allows creating/dropping indexes concurrently but
only outside of any transaction (see the [PostgreSQL
docs](http://www.postgresql.org/docs/9.2/static/sql-createindex.html#SQL-CREATEINDEX-CONCURRENTLY)).
Data migrations can be forced to run outside a transaction by setting the
`@disable_ddl_transaction` module attribute to `true`:
defmodule ExampleApp.Repo.DataMigrations.AddJohn do
use EctoImmigrant.Migration
@disable_ddl_transaction true
alias ExampleApp.Repo
alias ExampleApp.Person
def up do
Repo.insert(%Person{id: 123, first_name: "John", last_name: "Doe", age: 78})
end
def down do
Repo.delete(%Person{id: 123, first_name: "John", last_name: "Doe", age: 78})
end
end
Since running migrations outside a transaction can be dangerous, consider
performing very few operations in such migrations.
"""
@doc false
defmacro __using__(_) do
quote location: :keep do
import EctoImmigrant.Migration
@disable_ddl_transaction false
@before_compile EctoImmigrant.Migration
end
end
@doc false
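# Injects __data_migration__/0 into each migration module so the
# @disable_ddl_transaction setting can be read back at runtime.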
defmacro __before_compile__(_env) do
quote do
def __data_migration__,
do: [disable_ddl_transaction: @disable_ddl_transaction]
end
end
end
|
lib/migration.ex
| 0.841647
| 0.535949
|
migration.ex
|
starcoder
|
defmodule Cog.Command.Service.Memory do
@moduledoc """
Stores state isolated to a specific pipeline. Tokens originally created by
the token service are used to namespace each key, so pipelines have their own
keyspace. When the pipeline ends successfully or crashes, the memory service
is notified, and keys owned by that pipeline are removed.
There are a few operations that can be performed on keys: fetch, accum, join,
replace and delete. Each operation will always return an ok-error tuple.
"""
use GenServer
import Cog.Command.Service.PipelineMonitor
alias Cog.Util.ETSWrapper
require Logger
@dead_pipeline_cleanup_interval 30000 # 30 seconds
defstruct [:memory_table, :monitor_table]
@doc """
Starts the #{inspect __MODULE__} service. Accepts two public ets table
ids: one for storing pipeline state and one for keeping track of monitored pids.
"""
def start_link(memory_table, monitor_table),
do: GenServer.start_link(__MODULE__, [memory_table, monitor_table], name: __MODULE__)
@doc """
Fetches the given key. Returns `{:ok, value}` if the key exists or `{:error,
:unknown_key}` if it doesn't exist.
"""
def fetch(token, key),
do: GenServer.call(__MODULE__, {:fetch, token, key})
@doc """
Accumulates values in the given key. Returns `{:ok, accumulated_value}`. If
the key exists and is a list, the value is appended. If the key exists and
is not a list, we wrap it in a list before appending the value. If the key
does not exist, we append the value to an empty list.
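For example, given a `token` issued by the token service (illustrative values):
Cog.Command.Service.Memory.accum(token, "results", 1)
#=> {:ok, [1]}
Cog.Command.Service.Memory.accum(token, "results", 2)
#=> {:ok, [1, 2]}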
"""
def accum(token, key, value),
do: GenServer.call(__MODULE__, {:accum, token, key, value})
@doc """
Joins values in the given key. Returns `{:ok, joined_value}` when the value
is successfully joined. If the key exists and is a list and the value is a
list, the value is joined to the end of the list. If the key does not exist,
the value is joined to an empty list. If either value is not a list `{:error,
:value_not_list}` is returned.
"""
def join(token, key, value) when is_list(value),
do: GenServer.call(__MODULE__, {:join, token, key, value})
def join(_token, _key, _value),
do: {:error, :value_not_list}
@doc """
Replaces or sets the given key with the value. Returns `{:ok, value}`.
"""
def replace(token, key, value),
do: GenServer.call(__MODULE__, {:replace, token, key, value})
@doc """
Deletes the given key. Returns `{:ok, deleted_value}` when successfully
deleted or `{:error, :unknown_key}` if it doesn't exist.
"""
def delete(token, key),
do: GenServer.call(__MODULE__, {:delete, token, key})
def init([memory_table, monitor_table]) do
# Cleanup processes that died between restarts and monitor processes that
# are still alive after a restart. If anything dies between restarting and
# monitoring, the dead process cleanup will catch it.
account_for_existing_pipelines(monitor_table, memory_table, &{&1, :_})
Logger.info("Dead pipeline token cleanup interval set to #{round(@dead_pipeline_cleanup_interval / 1000)} seconds.")
schedule_dead_pipeline_cleanup(@dead_pipeline_cleanup_interval)
state = %__MODULE__{memory_table: memory_table, monitor_table: monitor_table}
{:ok, state}
end
def handle_call({:fetch, token, key}, _from, state) do
result = ETSWrapper.lookup(state.memory_table, {token, key})
{:reply, result, state}
end
def handle_call({:accum, token, key, value}, _from, state) do
monitor_pipeline(state.monitor_table, token)
result = with {:ok, existing_value} <- lookup_accum_list(state.memory_table, {token, key}),
accumulated_value = existing_value ++ [value],
{:ok, result} <- ETSWrapper.insert(state.memory_table, {token, key}, accumulated_value),
do: {:ok, result}
{:reply, result, state}
end
def handle_call({:join, token, key, value}, _from, state) when is_list(value) do
monitor_pipeline(state.monitor_table, token)
result = with {:ok, existing_value} <- lookup_join_list(state.memory_table, {token, key}),
accumulated_value = existing_value ++ value,
{:ok, result} <- ETSWrapper.insert(state.memory_table, {token, key}, accumulated_value),
do: {:ok, result}
{:reply, result, state}
end
def handle_call({:replace, token, key, value}, _from, state) do
monitor_pipeline(state.monitor_table, token)
result = ETSWrapper.insert(state.memory_table, {token, key}, value)
{:reply, result, state}
end
def handle_call({:delete, token, key}, _from, state) do
result = ETSWrapper.delete(state.memory_table, {token, key})
{:reply, result, state}
end
def handle_info({:DOWN, _monitor_ref, :process, pid, _reason}, state) do
case ETSWrapper.lookup(state.monitor_table, pid) do
{:ok, token} ->
cleanup_pipeline(state.monitor_table, state.memory_table, pid, {token, :_})
{:error, :unknown_key} ->
Logger.warn("Unknown pid #{inspect pid} was monitored; ignoring")
end
{:noreply, state}
end
def handle_info(:dead_process_cleanup, state) do
dead_pipeline_cleanup(state.monitor_table, state.memory_table)
schedule_dead_pipeline_cleanup(@dead_pipeline_cleanup_interval)
{:noreply, state}
end
defp lookup_accum_list(table, key) do
case ETSWrapper.lookup(table, key) do
{:ok, value} ->
{:ok, List.wrap(value)}
{:error, :unknown_key} ->
{:ok, []}
end
end
defp lookup_join_list(table, key) do
case ETSWrapper.lookup(table, key) do
{:ok, value} when is_list(value) ->
{:ok, value}
{:ok, _value} ->
{:error, :value_not_list}
{:error, :unknown_key} ->
{:ok, []}
end
end
end
|
lib/cog/command/service/memory.ex
| 0.82347
| 0.54153
|
memory.ex
|
starcoder
|
defmodule Membrane.Dashboard.Charts.Full do
@moduledoc """
Module responsible for preparing data for uPlot charts when they are being entirely reloaded.
Every chart visualizes one particular metric of pipelines. Chart data returned
by query is a list of maps (one for every chart) which consist of:
- series - list of maps with labels. Used as legend in uPlot;
- data - list of lists. Represents points on the chart. First list contains timestamps in UNIX time (x axis ticks).
Each following list consists of metric values, each value corresponds to a timestamp from the first list.
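A single chart may therefore look like this (illustrative labels and values):
%{
series: [%{label: "time"}, %{label: "pipeline1/element"}],
data: [[1_650_000_000, 1_650_000_001], [10, 12]]
}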
"""
import Membrane.Dashboard.Charts.Helpers
alias Membrane.Dashboard.Charts
alias Membrane.Dashboard.Charts.Context
@doc """
Queries the database and marshals the data into a format suitable for uPlot.
Returns separate chart data for each provided metric, a list of
chart paths (each chart has its own list of component paths), and a list
of chart accumulators that can be reused by the chart in case of a real-time update (e.g. to cache some necessary information).
"""
@spec query(Context.t()) ::
Charts.chart_query_result_t()
def query(%Context{time_from: time_from, time_to: time_to, metric: metric, accuracy: accuracy}) do
case query_measurements(time_from, time_to, metric, accuracy) do
{:ok, rows, paths_mapping} ->
rows
|> prepare_chart(time_from, time_to, metric, accuracy, paths_mapping)
|> then(&{:ok, &1})
_error ->
chart = %{series: [], data: [[]]}
{:ok, {chart, _paths_mapping = %{}, Explorer.DataFrame.from_map(%{})}}
end
end
# prepares a single chart based on raw data from TimescaleDB
defp prepare_chart(rows, time_from, time_to, metric, accuracy, paths_mapping) do
df = Membrane.Dashboard.Charts.ChartDataFrame.from_rows(rows, time_from, time_to, accuracy)
chart =
cond do
metric in ["caps", "event"] ->
Membrane.Dashboard.Charts.ChartDataFrame.to_cumulative_chart(df, paths_mapping)
metric in ["buffer", "bitrate"] ->
Membrane.Dashboard.Charts.ChartDataFrame.to_changes_per_second_chart(
df,
paths_mapping,
accuracy
)
true ->
Membrane.Dashboard.Charts.ChartDataFrame.to_simple_chart(df, paths_mapping)
end
{chart, paths_mapping, df}
end
end
|
lib/membrane_dashboard/charts/full.ex
| 0.925592
| 0.682765
|
full.ex
|
starcoder
|
defmodule Xgit.DirCache do
@moduledoc ~S"""
A directory cache records the current (intended) contents of a working tree
when last scanned or created by git.
In Xgit, the `DirCache` structure is an abstract, in-memory data structure
without any tie to a specific persistence mechanism. Persistence is implemented
by a specific implementation of the `Xgit.Repository.Storage` behaviour.
This content is commonly persisted on disk as an `index` file at the root of
the git tree. This file format can be read using the `from_iodevice/1` function
and written using `to_iodevice/1`. If a different persistence mechanism is
required, you may provide it elsewhere.
Changes in the working tree can be detected by comparing the modification times
to the cached modification time within the dir cache.
Index files are also used during merges, where the merge happens within the
index file first, and the working directory is updated as a post-merge step.
Conflicts are stored in the index file to allow tool (and human) based
resolutions to be easily performed.
"""
use Bitwise
use Xgit.FileMode
import Xgit.Util.ForceCoverage
require Logger
alias Xgit.FilePath
alias Xgit.ObjectId
alias Xgit.Tree
alias Xgit.Util.Comparison
alias Xgit.Util.NB
alias Xgit.Util.TrailingHashDevice
@typedoc ~S"""
Version number for an index file.
"""
@type version :: 2..4
@typedoc ~S"""
This struct describes an entire working tree as understood by git.
## Struct Members
* `:version`: the version number as read from disk (typically 2, 3, or 4)
* `:entry_count`: the number of items in `entries`
* `:entries`: a list of `Entry` structs in sorted order
* `:extensions`: a list of `Extension` structs (not yet implemented)
"""
@type t :: %__MODULE__{
version: version,
entry_count: non_neg_integer,
entries: [__MODULE__.Entry.t()]
# extensions: [Extension.t()]
}
@enforce_keys [:version, :entry_count, :entries]
defstruct [:version, :entry_count, :entries]
defmodule Entry do
@moduledoc ~S"""
A single file (or stage of a file) in a directory cache.
An entry represents exactly one stage of a file. If a file path is unmerged
then multiple instances may appear for the same path name.
"""
use Xgit.FileMode
alias Xgit.FileMode
alias Xgit.FilePath
alias Xgit.ObjectId
@typedoc ~S"""
Merge status (stage).
"""
@type stage :: 0..3
@typedoc ~S"""
Merge status (stage) for matching a remove request. (Includes `:all` to match any stage.)
"""
@type stage_match :: 0..3 | :all
@typedoc ~S"""
A single file (or stage of a file) in a directory cache.
An entry represents exactly one stage of a file. If a file path is unmerged
then multiple instances may appear for the same path name.
Consult the [documentation for git index file format](https://github.com/git/git/blob/master/Documentation/technical/index-format.txt)
for a more detailed description of each item.
## Struct Members
* `name`: (`FilePath.t`) entry path name, relative to top-level directory (without leading slash)
* `stage`: (`0..3`) merge status
* `object_id`: (`ObjectId.t`) SHA-1 for the represented object
* `mode`: (`FileMode.t`)
* `size`: (integer) on-disk size, possibly truncated to 32 bits
* `ctime`: (integer) the last time the file's metadata changed
* `ctime_ns`: (integer) nanosecond fraction of `ctime` (if available)
* `mtime`: (integer) the last time a file's contents changed
* `mtime_ns`: (integer) nanosecond fraction of `mtime` (if available)
* `dev`: (integer)
* `ino`: (integer)
* `uid`: (integer)
* `gid`: (integer)
* `assume_valid?`: (boolean)
* `extended?`: (boolean)
* `skip_worktree?`: (boolean)
* `intent_to_add?`: (boolean)
"""
@type t :: %__MODULE__{
name: FilePath.t(),
stage: stage,
object_id: ObjectId.t(),
mode: FileMode.t(),
size: non_neg_integer,
ctime: integer,
ctime_ns: non_neg_integer,
mtime: integer,
mtime_ns: non_neg_integer,
dev: integer,
ino: integer,
uid: integer,
gid: integer,
assume_valid?: boolean,
extended?: boolean,
skip_worktree?: boolean,
intent_to_add?: boolean
}
@enforce_keys [:name, :stage, :object_id, :size, :mode, :ctime, :mtime]
defstruct [
:name,
:stage,
:object_id,
:size,
:mode,
:ctime,
:mtime,
ctime_ns: 0,
mtime_ns: 0,
dev: 0,
ino: 0,
uid: 0,
gid: 0,
assume_valid?: false,
extended?: false,
skip_worktree?: false,
intent_to_add?: false
]
@doc ~S"""
Return `true` if this entry struct describes a valid dir cache entry.
"""
@spec valid?(entry :: any) :: boolean
def valid?(entry)
# credo:disable-for-lines:30 Credo.Check.Refactor.CyclomaticComplexity
def valid?(
%__MODULE__{
name: name,
stage: stage,
object_id: object_id,
mode: mode,
size: size,
ctime: ctime,
ctime_ns: ctime_ns,
mtime: mtime,
mtime_ns: mtime_ns,
dev: dev,
ino: ino,
uid: uid,
gid: gid,
assume_valid?: assume_valid?,
extended?: extended?,
skip_worktree?: skip_worktree?,
intent_to_add?: intent_to_add?
} = _entry
)
when is_list(name) and is_integer(stage) and stage >= 0 and stage <= 3 and
is_binary(object_id) and
is_file_mode(mode) and
is_integer(size) and
size >= 0 and
is_integer(ctime) and
is_integer(ctime_ns) and ctime_ns >= 0 and
is_integer(mtime) and
is_integer(mtime_ns) and mtime_ns >= 0 and
is_integer(dev) and
is_integer(ino) and
is_integer(uid) and
is_integer(gid) and
is_boolean(assume_valid?) and
is_boolean(extended?) and
is_boolean(skip_worktree?) and
is_boolean(intent_to_add?) do
FilePath.valid?(name) && ObjectId.valid?(object_id) && object_id != ObjectId.zero()
end
def valid?(_), do: cover(false)
@doc ~S"""
Compare two entries according to git dir cache entry sort ordering rules.
For this purpose, only the following fields are considered (in this priority order):
* `:name`
* `:stage`
## Return Value
* `:lt` if `entry1` sorts before `entry2`.
* `:eq` if they are the same.
* `:gt` if `entry1` sorts after `entry2`.
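Since only `:name` and `:stage` are inspected, a quick sketch with bare
maps (which these clauses also accept) shows the ordering:

    Entry.compare(%{name: 'a.txt', stage: 0}, %{name: 'a.txt', stage: 2})
    #=> :lt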
"""
@spec compare(entry1 :: t | nil, entry2 :: t) :: Comparison.result()
def compare(entry1, entry2)
def compare(nil, _entry2), do: cover(:lt)
def compare(
%{name: name1, stage: stage1} = _entry1,
%{name: name2, stage: stage2} = _entry2
) do
cond do
name1 < name2 -> cover :lt
name2 < name1 -> cover :gt
stage1 < stage2 -> cover :lt
stage2 < stage1 -> cover :gt
true -> cover :eq
end
end
end
@doc ~S"""
Returns a dir cache that is the canonical "empty" dir cache (i.e. contains no entries).
"""
@spec empty() :: t
def empty, do: %__MODULE__{version: 2, entry_count: 0, entries: []}
@doc ~S"""
Return `true` if the value is a `DirCache` struct that is valid.
All of the following must be true for this to occur:
* The value is a `DirCache` struct.
* The version is supported by Xgit. (Currently, only version 2 is supported.)
* The `entry_count` matches the actual number of entries.
* The entries are properly sorted.
* All entries are valid, as determined by `Xgit.DirCache.Entry.valid?/1`.
"""
@spec valid?(dir_cache :: any) :: boolean
def valid?(dir_cache)
def valid?(%__MODULE__{version: version, entry_count: entry_count, entries: entries})
when version == 2 and is_integer(entry_count) and is_list(entries) do
Enum.count(entries) == entry_count &&
Enum.all?(entries, &Entry.valid?/1) &&
entries_sorted?([nil | entries])
end
def valid?(_), do: cover(false)
defp entries_sorted?([entry1, entry2 | tail]) do
Entry.compare(entry1, entry2) == :lt &&
(entry1 == nil ||
not FilePath.starts_with?(entry2.name, FilePath.ensure_trailing_separator(entry1.name))) &&
entries_sorted?([entry2 | tail])
end
defp entries_sorted?([_]), do: cover(true)
@doc ~S"""
Return `true` if all of the entries in this dir cache are fully merged (stage 0).
"""
@spec fully_merged?(dir_cache :: t) :: boolean
def fully_merged?(%__MODULE__{entries: entries} = _dir_cache) do
Enum.all?(entries, fn %__MODULE__.Entry{stage: stage} -> stage == 0 end)
end
@typedoc ~S"""
Error reason codes returned by `add_entries/2`.
"""
@type add_entries_reason :: :invalid_dir_cache | :invalid_entries | :duplicate_entries
@doc ~S"""
Returns a dir cache that has new directory entries added in.
In the event of a collision between entries (same path and stage), the existing
entry will be replaced by the new one.
## Parameters
`entries` a list of entries to add to the existing dir cache
## Return Value
`{:ok, dir_cache}` where `dir_cache` is the original `dir_cache` with the new
entries added (and properly sorted).
`{:error, :invalid_dir_cache}` if the original `dir_cache` was invalid.
`{:error, :invalid_entries}` if one or more of the entries is invalid.
`{:error, :duplicate_entries}` if one or more of the entries in the _new_ list
are duplicates of other entries in the _new_ list. (As stated earlier, duplicates
from the original list are acceptable; in that event, the new entry will replace
the old one.)
"""
@spec add_entries(dir_cache :: t, new_entries :: [Entry.t()]) ::
{:ok, t} | {:error, add_entries_reason}
def add_entries(%__MODULE__{entries: existing_entries} = dir_cache, new_entries)
when is_list(new_entries) do
with {:dir_cache_valid?, true} <- {:dir_cache_valid?, valid?(dir_cache)},
{:entries_valid?, true} <- {:entries_valid?, Enum.all?(new_entries, &Entry.valid?/1)},
sorted_new_entries <- Enum.sort_by(new_entries, &{&1.name, &1.stage}),
{:duplicates, ^sorted_new_entries} <-
{:duplicates, Enum.dedup_by(sorted_new_entries, &{&1.name, &1.stage})} do
combined_entries = combine_entries(existing_entries, sorted_new_entries)
cover {:ok,
%{dir_cache | entry_count: Enum.count(combined_entries), entries: combined_entries}}
else
{:dir_cache_valid?, _} -> cover {:error, :invalid_dir_cache}
{:entries_valid?, _} -> cover {:error, :invalid_entries}
{:duplicates, _} -> cover {:error, :duplicate_entries}
end
end
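# Merge two entry lists that are already in sorted order, preferring the
# new entry when both contain the same {name, stage} key (the merge step
# of a merge sort).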
defp combine_entries(existing_entries, sorted_new_entries)
defp combine_entries(existing_entries, []), do: cover(existing_entries)
defp combine_entries([], sorted_new_entries), do: cover(sorted_new_entries)
defp combine_entries(
[existing_head | existing_tail] = existing_entries,
[new_head | new_tail] = sorted_new_entries
) do
case Entry.compare(existing_head, new_head) do
:lt -> cover [existing_head | combine_entries(existing_tail, sorted_new_entries)]
:eq -> cover [new_head | combine_entries(existing_tail, new_tail)]
:gt -> cover [new_head | combine_entries(existing_entries, new_tail)]
end
end
@typedoc ~S"""
An entry for the `remove` option for `remove_entries/2`.
"""
@type entry_to_remove :: {path :: FilePath.t(), stage :: Entry.stage_match()}
@typedoc ~S"""
Error reason codes returned by `remove_entries/2`.
"""
@type remove_entries_reason :: :invalid_dir_cache | :invalid_entries
@doc ~S"""
Returns a dir cache that has some directory entries removed.
## Parameters
`entries_to_remove` is a list of `{path, stage}` tuples identifying entries to be removed.
* `path` should be a byte list for the path.
* `stage` should be `0..3` or `:all`, meaning any entry that matches the path,
regardless of stage, should be removed.
## Return Value
`{:ok, dir_cache}` where `dir_cache` is the original `dir_cache` with any matching
entries removed.
`{:error, :invalid_dir_cache}` if the original `dir_cache` was invalid.
`{:error, :invalid_entries}` if one or more of the entries is invalid.
"""
@spec remove_entries(dir_cache :: t, entries_to_remove :: [entry_to_remove]) ::
{:ok, t} | {:error, remove_entries_reason}
def remove_entries(%__MODULE__{entries: existing_entries} = dir_cache, entries_to_remove)
when is_list(entries_to_remove) do
with {:dir_cache_valid?, true} <- {:dir_cache_valid?, valid?(dir_cache)},
{:entries_valid?, true} <-
{:entries_valid?, Enum.all?(entries_to_remove, &valid_remove_entry?/1)} do
updated_entries = remove_matching_entries(existing_entries, Enum.sort(entries_to_remove))
cover {:ok,
%{dir_cache | entry_count: Enum.count(updated_entries), entries: updated_entries}}
else
{:dir_cache_valid?, _} -> cover {:error, :invalid_dir_cache}
{:entries_valid?, _} -> cover {:error, :invalid_entries}
end
end
defp valid_remove_entry?({path, :all}) when is_list(path), do: cover(true)
defp valid_remove_entry?({path, stage})
when is_list(path) and is_integer(stage) and stage >= 0 and stage <= 3,
do: cover(true)
defp valid_remove_entry?(_), do: cover(false)
defp remove_matching_entries(sorted_existing_entries, sorted_entries_to_remove)
defp remove_matching_entries([], _sorted_entries_to_remove), do: cover([])
defp remove_matching_entries(sorted_existing_entries, []), do: cover(sorted_existing_entries)
defp remove_matching_entries([%__MODULE__.Entry{name: path} | existing_tail], [
{path, :all} | remove_tail
]),
do:
remove_matching_entries(Enum.drop_while(existing_tail, &(&1.name == path)), remove_tail)
defp remove_matching_entries([%__MODULE__.Entry{name: path, stage: stage} | existing_tail], [
{path, stage} | remove_tail
]),
do: remove_matching_entries(existing_tail, remove_tail)
defp remove_matching_entries([existing_head | existing_tail], sorted_entries_to_remove),
do: cover([existing_head | remove_matching_entries(existing_tail, sorted_entries_to_remove)])
@typedoc ~S"""
Error reason codes returned by `to_tree_objects/2`.
"""
@type to_tree_objects_reason :: :invalid_dir_cache | :prefix_not_found
@doc ~S"""
Convert this `DirCache` to one or more `tree` objects.
## Parameters
`prefix`: (`Xgit.FilePath`) if present, return the object ID for the tree
pointed to by `prefix`. All tree objects will be generated, regardless of `prefix`.
## Return Value
`{:ok, objects, prefix_tree}` where `objects` is a list of `Xgit.Object`
structs of type `tree`. All others must be written or must be present in the
object database for the top-level tree to be valid. `prefix_tree` is the
tree for the subtree specified by `prefix` or the top-level tree if no prefix
was specified.
`{:error, :invalid_dir_cache}` if the `DirCache` is not valid.
`{:error, :prefix_not_found}` if no tree matching `prefix` exists.
"""
@spec to_tree_objects(dir_cache :: t, prefix :: Xgit.FilePath.t()) ::
{:ok, [Xgit.Object.t()], Xgit.Object.t()} | {:error, to_tree_objects_reason}
def to_tree_objects(dir_cache, prefix \\ [])
def to_tree_objects(%__MODULE__{entries: entries} = dir_cache, prefix)
when is_list(entries) and is_list(prefix) do
with {:valid?, true} <- {:valid?, valid?(dir_cache)},
{_entries, tree_for_prefix, _this_tree} <- to_tree_objects_inner(entries, [], %{}, []),
{:prefix, prefix_tree} when prefix_tree != nil <-
{:prefix, Map.get(tree_for_prefix, FilePath.ensure_trailing_separator(prefix))} do
objects =
tree_for_prefix
|> Enum.sort()
|> Enum.map(fn {_prefix, object} -> object end)
cover {:ok, objects, prefix_tree}
else
{:valid?, _} -> cover {:error, :invalid_dir_cache}
{:prefix, _} -> cover {:error, :prefix_not_found}
end
end
defp to_tree_objects_inner(entries, prefix, tree_for_prefix, tree_entries_acc)
defp to_tree_objects_inner([], prefix, tree_for_prefix, tree_entries_acc),
do: make_tree_and_continue([], prefix, tree_for_prefix, tree_entries_acc)
defp to_tree_objects_inner(
[%__MODULE__.Entry{name: name, object_id: object_id, mode: mode} | tail] = entries,
prefix,
tree_for_prefix,
tree_entries_acc
) do
if FilePath.starts_with?(name, prefix) do
name_after_prefix = Enum.drop(name, Enum.count(prefix))
{next_entries, new_tree_entry, tree_for_prefix} =
if Enum.any?(name_after_prefix, &(&1 == ?/)) do
make_subtree(entries, prefix, tree_for_prefix, tree_entries_acc)
else
cover {tail, %Tree.Entry{name: name_after_prefix, object_id: object_id, mode: mode},
tree_for_prefix}
end
to_tree_objects_inner(next_entries, prefix, tree_for_prefix, [
new_tree_entry | tree_entries_acc
])
else
make_tree_and_continue(entries, prefix, tree_for_prefix, tree_entries_acc)
end
end
defp make_tree_and_continue(entries, prefix, tree_for_prefix, tree_entries_acc) do
tree_object = Tree.to_object(%Tree{entries: Enum.reverse(tree_entries_acc)})
{entries, Map.put(tree_for_prefix, prefix, tree_object), tree_object}
end
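# An entry whose name contains "/" below the current prefix belongs to a
# subtree: recurse with the extended prefix, record that subtree's object,
# and reference it from the parent via a tree-mode entry.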
defp make_subtree(
[%__MODULE__.Entry{name: name} | _tail] = entries,
existing_prefix,
tree_for_prefix,
_tree_entries_acc
) do
first_segment_after_prefix =
name
|> Enum.drop(Enum.count(existing_prefix))
|> Enum.drop_while(&(&1 == ?/))
|> Enum.take_while(&(&1 != ?/))
tree_name =
cover '#{FilePath.ensure_trailing_separator(existing_prefix)}#{first_segment_after_prefix}'
new_prefix = cover '#{tree_name}/'
{entries, tree_for_prefix, tree_object} =
to_tree_objects_inner(entries, new_prefix, tree_for_prefix, [])
new_tree_entry = %Tree.Entry{
name: first_segment_after_prefix,
object_id: tree_object.id,
mode: FileMode.tree()
}
cover {entries, new_tree_entry, tree_for_prefix}
end
@typedoc ~S"""
Error codes which can be returned by `from_iodevice/1`.
"""
@type from_iodevice_reason ::
:not_sha_hash_device
| :invalid_format
| :unsupported_version
| :too_many_entries
| :unsupported_extension
| :sha_hash_mismatch
| File.posix()
@doc ~S"""
Read a git `index` file from an `IO.device` (typically an opened file) and returns a
corresponding `DirCache` struct.
_IMPORTANT:_ The `IO.device` must be created using `Xgit.Util.TrailingHashDevice`.
## Return Value
`{:ok, dir_cache}` if the iodevice contains a valid index file.
`{:error, :not_sha_hash_device}` if the iodevice was not created using
`Xgit.Util.TrailingHashDevice`.
`{:error, :invalid_format}` if the iodevice can not be parsed as an index file.
`{:error, :unsupported_version}` if the index file is not a version 2 index file.
Other versions are not supported at this time.
`{:error, :too_many_entries}` if the index file contains more than 100,000
entries. This is an arbitrary limit to guard against malformed files and to
prevent overconsumption of memory. With experience, it could be revisited.
`{:error, :unsupported_extension}` if any index file extensions are present
that can not be parsed. Optional extensions will be skipped, but no required
extensions are understood at this time. (See
[issue #172](https://github.com/elixir-git/xgit/issues/172).)
`{:error, :sha_hash_mismatch}` if the SHA-1 hash written at the end of the file
does not match the file contents.
"""
@spec from_iodevice(iodevice :: IO.device()) ::
{:ok, dir_cache :: t()} | {:error, reason :: from_iodevice_reason}
def from_iodevice(iodevice) do
with {:sha_hash_device, true} <- {:sha_hash_device, TrailingHashDevice.valid?(iodevice)},
{:dirc, true} <- {:dirc, read_dirc(iodevice)},
{:version, version = 2} <- {:version, read_uint32(iodevice)},
{:entry_count, entry_count}
when is_integer(entry_count) and entry_count <= 100_000 <-
{:entry_count, read_uint32(iodevice)},
{:entries, entries} when is_list(entries) <-
{:entries, read_entries(iodevice, version, entry_count)},
{:extensions, :ok} <- {:extensions, read_extensions(iodevice)},
{:sha_valid?, true} <- {:sha_valid?, TrailingHashDevice.valid_hash?(iodevice)} do
cover {:ok,
%__MODULE__{
version: version,
entry_count: entry_count,
entries: entries
}}
else
{:sha_hash_device, _} -> cover {:error, :not_sha_hash_device}
{:dirc, _} -> cover {:error, :invalid_format}
{:version, _} -> cover {:error, :unsupported_version}
{:entry_count, :invalid} -> cover {:error, :invalid_format}
{:entry_count, _} -> cover {:error, :too_many_entries}
{:entries, _} -> cover {:error, :invalid_format}
{:extensions, error} -> cover {:error, error}
{:sha_valid?, _} -> cover {:error, :sha_hash_mismatch}
end
end
defp read_dirc(iodevice) do
case IO.binread(iodevice, 4) do
"DIRC" -> cover true
_ -> cover false
end
end
defp read_entries(_iodevice, _version, 0), do: cover([])
defp read_entries(iodevice, version, entry_count) do
entries =
Enum.map(1..entry_count, fn _ ->
read_entry(iodevice, version)
end)
if Enum.all?(entries, &valid_entry?/1) do
cover entries
else
cover :invalid
end
end
defp read_entry(iodevice, 2 = _version) do
with ctime when is_integer(ctime) <- read_uint32(iodevice),
ctime_ns when is_integer(ctime_ns) <- read_uint32(iodevice),
mtime when is_integer(mtime) <- read_uint32(iodevice),
mtime_ns when is_integer(mtime_ns) <- read_uint32(iodevice),
dev when is_integer(dev) <- read_uint32(iodevice),
ino when is_integer(ino) <- read_uint32(iodevice),
mode when is_integer(mode) <- read_uint32(iodevice),
uid when is_integer(uid) <- read_uint32(iodevice),
gid when is_integer(gid) <- read_uint32(iodevice),
size when is_integer(size) <- read_uint32(iodevice),
object_id
when is_binary(object_id) and object_id != "0000000000000000000000000000000000000000" <-
read_object_id(iodevice),
flags when is_integer(flags) and flags > 0 <- read_uint16(iodevice),
name when is_list(name) <- read_name(iodevice, flags &&& 0xFFF) do
%__MODULE__.Entry{
name: name,
stage: bsr(flags &&& 0x3000, 12),
object_id: object_id,
size: size,
mode: mode,
ctime: ctime,
ctime_ns: ctime_ns,
mtime: mtime,
mtime_ns: mtime_ns,
dev: dev,
ino: ino,
uid: uid,
gid: gid,
assume_valid?: to_boolean(flags &&& 0x8000),
extended?: to_boolean(flags &&& 0x4000),
skip_worktree?: false,
intent_to_add?: false
}
else
_ -> cover :invalid
end
end
defp valid_entry?(%__MODULE__.Entry{}), do: cover(true)
defp valid_entry?(_), do: cover(false)
defp read_extensions(iodevice) do
case IO.binread(iodevice, 1) do
:eof ->
:ok
char when byte_size(char) == 1 and char >= "A" and char <= "Z" ->
read_optional_extension(iodevice, char)
char ->
read_required_extension(iodevice, char)
end
end
defp read_optional_extension(iodevice, char) do
signature = "#{char}#{IO.binread(iodevice, 3)}"
length = read_uint32(iodevice)
Logger.info(fn ->
"skipping extension with signature #{inspect(signature)}, #{length} bytes"
end)
IO.binread(iodevice, length)
read_extensions(iodevice)
end
defp read_required_extension(iodevice, char) do
signature = "#{char}#{IO.binread(iodevice, 3)}"
length = read_uint32(iodevice)
Logger.info(fn ->
"don't know how to read required extension with signature #{inspect(signature)}, #{length} bytes"
end)
:unsupported_extension
end
defp read_uint16(iodevice) do
case IO.binread(iodevice, 2) do
x when is_binary(x) and byte_size(x) == 2 ->
x
|> :binary.bin_to_list()
|> NB.decode_uint16()
|> elem(0)
_ ->
cover :invalid
end
end
defp read_uint32(iodevice) do
case IO.binread(iodevice, 4) do
x when is_binary(x) and byte_size(x) == 4 ->
x
|> :binary.bin_to_list()
|> NB.decode_uint32()
|> elem(0)
_ ->
cover :invalid
end
end
defp read_object_id(iodevice) do
case IO.binread(iodevice, 20) do
x when is_binary(x) and byte_size(x) == 20 -> ObjectId.from_binary_iodata(x)
_ -> cover :invalid
end
end
defp read_name(iodevice, length) when length < 0xFFF do
bytes_to_read = length + padding_size(Integer.mod(length + 4, 8))
case IO.binread(iodevice, bytes_to_read) do
x when is_binary(x) and byte_size(x) == bytes_to_read ->
x
|> :binary.bin_to_list()
|> Enum.take_while(&(&1 != 0))
_ ->
cover :invalid
end
end
defp read_name(_iodevice, _length), do: :invalid
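# Entry names are NUL-terminated and padded so that each entry's on-disk
# length is a multiple of 8 bytes; padding_size/1 returns how many NUL
# bytes (1-8) follow the name.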
defp padding_size(length_mod_8) when length_mod_8 < 6, do: 6 - length_mod_8
defp padding_size(6), do: cover(8)
defp padding_size(7), do: cover(7)
defp to_boolean(0), do: cover(false)
defp to_boolean(_), do: cover(true)
@typedoc ~S"""
Error codes which can be returned by `to_iodevice/1`.
"""
@type to_iodevice_reason ::
:not_sha_hash_device | :invalid_dir_cache | :unsupported_version | File.posix()
@doc ~S"""
Write index file to an `iodevice` (typically an opened file) from a
`DirCache` struct.
_IMPORTANT:_ The `iodevice` must be created using `Xgit.Util.TrailingHashDevice`.
## Return Value
`:ok` if written successfully.
`{:error, :not_sha_hash_device}` if the iodevice was not created using
`Xgit.Util.TrailingHashDevice`.
`{:error, :invalid_dir_cache}` if `valid?/1` does not return
`true` for this struct.
`{:error, :unsupported_version}` if the `version` flag in the dir cache struct
is not version 2. Other versions are not supported at this time.
`{:error, posix_reason}` if an I/O error occurs.
"""
@spec to_iodevice(dir_cache :: t(), iodevice :: IO.device()) ::
:ok | {:error, reason :: to_iodevice_reason}
def to_iodevice(
%__MODULE__{version: version, entry_count: entry_count, entries: entries} = dir_cache,
iodevice
) do
with {:version, 2} <- {:version, version},
{:valid?, true} <- {:valid?, valid?(dir_cache)},
{:sha_hash_device, true} <- {:sha_hash_device, TrailingHashDevice.valid?(iodevice)},
:ok <- write_v2_header(iodevice, entry_count),
:ok <- write_v2_entries(iodevice, entries) do
# TO DO: Write extensions. https://github.com/elixir-git/xgit/issues/114
cover :ok
else
{:version, _} -> cover {:error, :unsupported_version}
{:valid?, _} -> cover {:error, :invalid_dir_cache}
{:sha_hash_device, _} -> cover {:error, :not_sha_hash_device}
{:error, reason} -> cover {:error, reason}
end
end
defp write_v2_header(iodevice, entry_count),
do: IO.binwrite(iodevice, ['DIRC', 0, 0, 0, 2, NB.encode_uint32(entry_count)])
defp write_v2_entries(_iodevice, []), do: cover(:ok)
defp write_v2_entries(iodevice, [entry | tail]) do
case write_v2_entry(iodevice, entry) do
:ok -> write_v2_entries(iodevice, tail)
error -> error
end
end
defp write_v2_entry(
iodevice,
%__MODULE__.Entry{
name: name,
stage: stage,
object_id: object_id,
size: size,
mode: mode,
ctime: ctime,
ctime_ns: ctime_ns,
mtime: mtime,
mtime_ns: mtime_ns,
dev: dev,
ino: ino,
uid: uid,
gid: gid,
assume_valid?: assume_valid?,
extended?: extended?,
skip_worktree?: false,
intent_to_add?: false
}
) do
name_length = Enum.count(name)
IO.binwrite(iodevice, [
NB.encode_uint32(ctime),
NB.encode_uint32(ctime_ns),
NB.encode_uint32(mtime),
NB.encode_uint32(mtime_ns),
NB.encode_uint32(dev),
NB.encode_uint32(ino),
NB.encode_uint32(mode),
NB.encode_uint32(uid),
NB.encode_uint32(gid),
NB.encode_uint32(size),
ObjectId.to_binary_iodata(object_id),
encode_v2_flags(stage, assume_valid?, extended?, name_length),
name,
padding(name_length)
])
end
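# v2 "flags" layout (16 bits): bit 15 = assume-valid, bit 14 = extended,
# bits 13-12 = stage, bits 11-0 = name length (capped at 0xFFF for longer
# names).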
defp encode_v2_flags(stage, assume_valid?, extended?, name_length) do
value =
value_if_boolean(assume_valid?, 0x8000) +
value_if_boolean(extended?, 0x4000) +
bsl(stage &&& 3, 12) +
min(name_length, 0xFFF)
NB.encode_uint16(value)
end
defp value_if_boolean(true, value), do: value
defp value_if_boolean(false, _value), do: cover(0)
defp padding(name_length) do
padding_size = padding_size(Integer.mod(name_length + 4, 8))
Enum.map(1..padding_size, fn _ -> 0 end)
end
end
| lib/xgit/dir_cache.ex | 0.908933 | 0.659309 | dir_cache.ex | starcoder |
defmodule IncrementalSlug do
@moduledoc """
Store a unique slug.
Append an increment (1-10), if this slug is already taken.
## Example
| id | title | slug |
|----|--------------|------------------|
| 1 | Slug Doe | Slug-Doe |
| 2 | Slug Doe | Slug-Doe-1 |
| 3 | Slug Doe | Slug-Doe-2 |
| 4 | Slug Doe 1 | Slug-Doe-1-1 |
| 5 | Slug Doe 1 | Slug-Doe-1-2 |
| 6 | Slug Doe 2 | Slug-Doe-2-1 |
| 7 | Slug Doe 2 | Slug-Doe-2-2 |
| 8 | Slug Doe 1 1 | Slug-Doe-1-1-1 |
| 9 | Slug Doe 1 1 | Slug-Doe-1-1-2 |
## How to start?
Add this code to a module's `changeset/2`
```ex
|> IncrementalSlug.put(__MODULE__, :title, :slug)
```
or, if the default fields have been configured,
```ex
|> IncrementalSlug.put(__MODULE__)
```
The result may look like this
```ex
def changeset(post, attrs) do
post
|> cast(attrs, [:title, :slug])
|> IncrementalSlug.put(__MODULE__, :title, :slug)
|> validate_required([:title, :slug])
end
```
For more information see `put/4`.
## Config
### Repo
```ex
config :incremental_slug, repo: PROJECT.Repo
```
See `repo/0`.
### Fields
```ex
config :incremental_slug, fields: %{from: :title, to: :slug}
```
These can be overridden when calling a function.
## Dependencies
* [github.com/h4cc/slugger](https://github.com/h4cc/slugger)
* [github.com/elixir-ecto/ecto_sql](https://github.com/elixir-ecto/ecto_sql)
"""
import Ecto.Query, warn: false
import Ecto.Changeset,
only: [
put_change: 3,
get_change: 2
]
@incremental_slug Application.get_env(:incremental_slug, :fields, %{from: :title, to: :slug})
# @repo Application.get_env(:incremental_slug, :repo)
@doc ~S"""
Append the increment to the slug.
iex> "Slug-Doe" |> IncrementalSlug.append(7)
"Slug-Doe-7"
iex> "Henry" |> IncrementalSlug.append(123)
"Henry-123"
"""
@spec append(slug :: String.t(), increment :: integer) :: String.t()
def append(slug, increment), do: "#{slug}-#{increment}"
@doc ~S"""
Exclude this ID from the query.
## Examples
iex> import Ecto.Query, warn: false
iex> query = TestPost |> select(count("*")) |> limit(1)
#Ecto.Query<from t0 in IncrementalSlug.TestPost, limit: 1, select: count("*")>
iex> IncrementalSlug.exclude_id(query, nil)
#Ecto.Query<from t0 in IncrementalSlug.TestPost, limit: 1, select: count("*")>
iex> IncrementalSlug.exclude_id(query, 123)
#Ecto.Query<from t0 in IncrementalSlug.TestPost, where: t0.id != ^123, limit: 1, select: count("*")>
"""
@spec exclude_id(queryable :: Ecto.Queryable.t(), id :: integer()) :: Ecto.Query.t()
def exclude_id(queryable, id) when is_nil(id), do: queryable
def exclude_id(queryable, id), do: queryable |> where([a], a.id != ^id)
@doc ~S"""
Find the taken slug in the database. It may contain an increment.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Return value
A slug with an increment or `nil`.
If multiple items are found, the one with the greatest increment is returned.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.find("Slug-Doe", nil, TestPost)
nil
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.find("Slug-Doe", nil, TestPost)
nil
iex> post2 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post2.slug == "Slug-Doe-1"
true
iex> IncrementalSlug.find("Slug-Doe", nil, TestPost)
"Slug-Doe-1"
iex> post3 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post3.slug == "Slug-Doe-2"
true
iex> IncrementalSlug.find("Slug-Doe", nil, TestPost)
"Slug-Doe-2"
"""
@spec find(slug :: String.t(), id :: integer(), queryable :: Ecto.Queryable.t(), to :: atom()) ::
String.t() | nil
@spec find(slug :: nil, id :: integer(), queryable :: Ecto.Queryable.t(), to :: atom()) :: nil
@spec find(slug :: String.t(), id :: 0, queryable :: Ecto.Queryable.t(), to :: atom()) :: nil
@spec find(slug :: String.t(), id :: integer(), queryable :: nil, to :: atom()) :: nil
def find(slug, id, queryable, to \\ @incremental_slug.to)
def find(slug, id, queryable, _to) when is_nil(slug) or id == 0 or is_nil(queryable), do: nil
def find(slug, id, queryable, to),
do:
queryable
|> select_field(to)
|> where_slug_with_increment(slug, to)
|> exclude_id(id)
|> find_item_with_greatest_increment(to)
@doc ~S"""
Find the item that has the slug with a greatest increment.
## Parameters
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.find_item_with_greatest_increment(TestPost)
nil
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> post2 = IncrementalSlug.find_item_with_greatest_increment(TestPost)
iex> post2.slug == "Slug-Doe"
true
iex> post3 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post3.slug == "Slug-Doe-1"
true
iex> post4 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post4.slug == "Slug-Doe-2"
true
iex> post5 = IncrementalSlug.find_item_with_greatest_increment(TestPost)
iex> post5.slug == "Slug-Doe-2"
true
"""
@spec find_item_with_greatest_increment(queryable :: Ecto.Queryable.t(), atom()) ::
Ecto.Schema.t() | nil
def find_item_with_greatest_increment(queryable, to \\ @incremental_slug.to)
def find_item_with_greatest_increment(queryable, to),
do: queryable |> order_by(desc: ^to) |> limit(1) |> repo().one()
@doc ~S"""
Get a count of how many items have taken this exact slug.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.get_count("Slug-Doe", nil, TestPost)
0
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.get_count("Slug-Doe", nil, TestPost)
1
iex> post1 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post1.slug == "Slug-Doe-1"
true
iex> IncrementalSlug.get_count("Slug-Doe", nil, TestPost)
1
"""
@spec get_count(
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: integer()
def get_count(slug, id, queryable, to \\ @incremental_slug.to)
def get_count(slug, id, queryable, to),
do:
queryable
|> select(count("*"))
|> limit(1)
|> where([a], field(a, ^to) == ^slug)
|> exclude_id(id)
|> repo().one()
@doc ~S"""
Find the greatest increment from the items that have taken this slug.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Return value
The greatest increment or `0` if the slug is not taken.
## Useful to know
`9` is the greatest increment that can be found. See why in `where_slug_with_increment/3`.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.get_greatest_increment("Slug-Doe", nil, TestPost)
0
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.get_greatest_increment("Slug-Doe", nil, TestPost)
0
iex> post1 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post1.slug == "Slug-Doe-1"
true
iex> IncrementalSlug.get_greatest_increment("Slug-Doe", nil, TestPost)
1
"""
@spec get_greatest_increment(
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: integer()
def get_greatest_increment(slug, id, queryable, to \\ @incremental_slug.to)
def get_greatest_increment(slug, id, queryable, to),
do: find(slug, id, queryable, to) |> get_greatest_increment()
@doc ~S"""
Extract an increment from the slug.
## Parameters
* `slug` - A slug with an increment.
## Return value
An increment or `0`.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.get_greatest_increment(nil)
0
iex> IncrementalSlug.get_greatest_increment("Slug-Doe-1")
1
iex> IncrementalSlug.get_greatest_increment("Slug-Doe-5")
5
"""
@spec get_greatest_increment(slug :: String.t() | nil) :: integer()
def get_greatest_increment(slug) when is_nil(slug), do: 0
def get_greatest_increment(slug),
do: slug |> String.split("-") |> List.last() |> String.to_integer()
@doc ~S"""
Find an increment that can make this slug unique.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Return value
The greatest increment `+1` or `1` if the slug is not taken.
## Useful to know
`10` is the greatest available increment. See why in `where_slug_with_increment/3`.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.get_increment("Slug-Doe", nil, TestPost)
1
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.get_increment("Slug-Doe", nil, TestPost)
1
iex> post1 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post1.slug == "Slug-Doe-1"
true
iex> IncrementalSlug.get_increment("Slug-Doe", nil, TestPost)
2
"""
@spec get_increment(
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: integer()
def get_increment(slug, id, queryable, to \\ @incremental_slug.to)
def get_increment(slug, id, queryable, to),
do: slug |> get_greatest_increment(id, queryable, to) |> get_increment()
# @doc false
@spec get_increment(last_increment :: integer()) :: integer()
defp get_increment(last_increment), do: last_increment + 1
@doc ~S"""
Get a slug from the passed string.
Trim and pass it to [`Slugger.slugify/2`](https://github.com/h4cc/slugger)
## Examples
iex> IncrementalSlug.get_slug("Slug Doe")
"Slug-Doe"
iex> IncrementalSlug.get_slug(" z e ā Č Ф А - Б В Г Д š \ / * ^ % ! + ) |")
"z-e-a-C-F-A-B-V-G-D-s-or"
"""
@spec get_slug(string :: nil) :: nil
def get_slug(string) when is_nil(string), do: nil
@spec get_slug(string :: String.t()) :: String.t()
def get_slug(string), do: string |> String.trim() |> Slugger.slugify()
@doc ~S"""
Get a unique slug from the selected changeset's field.
## Parameter
* `changeset` - Take the value from a field, and put back the slug in another.
* `queryable` - In which table to look?
* `from` - From which changeset's field generate the slug?
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> changeset = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"})
iex> changeset |> IncrementalSlug.get_slug_from_field(TestPost)
"Slug-Doe"
iex> post = changeset |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> changeset |> IncrementalSlug.get_slug_from_field(TestPost)
"Slug-Doe-1"
"""
@spec get_slug_from_field(
changeset :: Ecto.Changeset.t(),
queryable :: Ecto.Queryable.t(),
from :: atom(),
to :: atom()
) :: String.t()
def get_slug_from_field(
changeset,
queryable,
from \\ @incremental_slug.from,
to \\ @incremental_slug.to
)
def get_slug_from_field(changeset, queryable, from, to) do
string = changeset |> get_change(from)
id = changeset |> get_change(:id)
string |> get_unique(id, queryable, to)
end
@doc ~S"""
Get a unique slug from a string.
## Parameter
* `string` - Generate the slug from this string.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Return value
A slug (with an increment, if it was taken).
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.get_unique("Slug Doe", nil, TestPost)
"Slug-Doe"
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.get_unique("Slug Doe", nil, TestPost)
"Slug-Doe-1"
"""
@spec get_unique(
string :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: String.t()
def get_unique(string, id, queryable, to \\ @incremental_slug.to)
def get_unique(string, id, queryable, _to) when is_nil(string) or id == 0 or is_nil(queryable),
do: nil
def get_unique(string, id, queryable, to),
do: string |> get_slug |> make_slug_unique(id, queryable, to)
@doc ~S"""
Check if another item has taken this slug.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.is_taken("Slug-Doe", nil, TestPost)
false
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.is_taken("Slug-Doe", nil, TestPost)
true
"""
@spec is_taken(
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: boolean()
def is_taken(slug, id, queryable, to \\ @incremental_slug.to)
def is_taken(slug, id, queryable, _to) when is_nil(slug) or id == 0 or is_nil(queryable),
do: false
def is_taken(slug, id, queryable, to), do: slug |> get_count(id, queryable, to) > 0
@doc ~S"""
Append an increment (1-10), if this slug is already taken.
## Parameters
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.make_slug_unique("Slug-Doe", nil, TestPost)
"Slug-Doe"
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.make_slug_unique("Slug-Doe", nil, TestPost)
"Slug-Doe-1"
"""
@spec make_slug_unique(
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: String.t()
def make_slug_unique(slug, id, queryable, to \\ @incremental_slug.to)
def make_slug_unique(slug, id, queryable, to),
do: slug |> is_taken(id, queryable, to) |> make_slug_unique_if_taken(slug, id, queryable, to)
@doc ~S"""
Append an increment (1-10), if this slug is already taken.
## Parameters
* `taken` - is this slug already taken?
* `slug` - A regular slug without an increment.
* `id` - Exclude this ID from the query.
* `queryable` - If it is taken, then get the last increment.
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.make_slug_unique_if_taken(false, "Slug-Doe", nil, TestPost)
"Slug-Doe"
iex> post = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> IncrementalSlug.make_slug_unique_if_taken(false, "Slug-Doe", nil, TestPost)
"Slug-Doe"
iex> IncrementalSlug.make_slug_unique_if_taken(true, "Slug-Doe", nil, TestPost)
"Slug-Doe-1"
iex> post1 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.repo().insert!()
iex> post1.slug == "Slug-Doe-1"
true
iex> IncrementalSlug.make_slug_unique_if_taken(true, "Slug-Doe", nil, TestPost)
"Slug-Doe-2"
"""
@spec make_slug_unique_if_taken(
taken :: boolean(),
slug :: String.t(),
id :: integer(),
queryable :: Ecto.Queryable.t(),
to :: atom()
) :: String.t()
def make_slug_unique_if_taken(taken, slug, id, queryable, to \\ @incremental_slug.to)
def make_slug_unique_if_taken(taken, slug, id, queryable, to) when taken === true do
increment = slug |> get_increment(id, queryable, to)
slug |> append(increment)
end
def make_slug_unique_if_taken(_taken, slug, _id, _queryable, _to), do: slug
@doc ~S"""
Get a slug and put it in the changeset.
## Parameter
* `changeset` - Take the value from a field, and put back the slug in another.
* `queryable` - In which table to look?
* `from` - From which changeset's field generate the slug?
* `to` - In which column is the slug stored?
## Return values
If everything went well, return the same changeset with a new slug, otherwise without.
## Examples
iex> TestPost.truncate
iex> changeset = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.put(TestPost)
iex> post = changeset |> IncrementalSlug.repo().insert!()
iex> post.slug == "Slug-Doe"
true
iex> changeset2 = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"}) |> IncrementalSlug.put(TestPost)
iex> post1 = changeset2 |> IncrementalSlug.repo().insert!()
iex> post1.slug == "Slug-Doe-1"
true
"""
@spec put(
changeset :: Ecto.Changeset.t() | nil,
queryable :: Ecto.Queryable.t() | nil,
from :: atom(),
to :: atom()
) :: Ecto.Changeset.t()
def put(
changeset,
queryable,
from \\ @incremental_slug.from,
to \\ @incremental_slug.to
)
def put(changeset, queryable, _from, _to) when is_nil(changeset) or is_nil(queryable),
do: changeset
def put(changeset, queryable, from, to),
do: changeset |> get_slug_from_field(queryable, from, to) |> put_slug(changeset, to)
@doc ~S"""
Put this slug into the selected changeset's field.
## Parameters
* `slug` - A regular slug without an increment.
* `changeset` - Take the value from a field, and put back the slug in another.
* `to` - In which column is the slug stored?
## Examples
iex> TestPost.truncate
iex> changeset = TestPost.changeset(%TestPost{}, %{title: "Slug Doe"})
iex> changeset2 = "Slug-Doe" |> IncrementalSlug.put_slug(changeset)
iex> changeset2.changes
%{title: "Slug Doe", slug: "Slug-Doe"}
"""
@spec put_slug(slug :: String.t(), changeset :: Ecto.Changeset.t(), to :: atom()) ::
Ecto.Changeset.t()
def put_slug(slug, changeset, to \\ @incremental_slug.to),
do: changeset |> put_change(to, slug)
@doc ~S"""
Connect to the project's repository.
The connection is used when checking if the slug is taken in the table.
Must be defined in the project's config file:
```ex
config :incremental_slug, repo: PROJECT.Repo
```
"""
@spec repo() :: Ecto.Repo.t()
def repo() do
Application.get_env(:incremental_slug, :repo)
end
@doc ~S"""
Specify the field where to look for a slug in a query.
## Parameters
* `queryable` - In which table to look?
* `to` - In which column is the slug stored?
## Return value
A query with a selected field.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.select_field(TestPost, :slug)
#Ecto.Query<from t0 in IncrementalSlug.TestPost, select: t0.slug>
iex> IncrementalSlug.select_field(TestPost, :slug)
#Ecto.Query<from t0 in IncrementalSlug.TestPost, select: t0.slug>
"""
@spec select_field(queryable :: Ecto.Queryable.t(), to :: atom()) :: Ecto.Query.t()
def select_field(queryable, to \\ @incremental_slug.to)
def select_field(queryable, to), do: queryable |> select([a], field(a, ^to))
@doc ~S"""
Search for slugs that start just like this one and end with '-' and exactly 1 character.
* [MySQL pattern matching](https://dev.mysql.com/doc/refman/8.0/en/pattern-matching.htm)
* [PostgreSQL pattern matching](https://www.postgresql.org/docs/8.3/functions-matching.html#FUNCTIONS-LIKE)
## Parameters
* `queryable` - In which table to look?
* `slug` - A regular slug without an increment.
* `to` - In which column is the slug stored?
## Return value
A query with a `WHERE LIKE` condition.
## Examples
iex> TestPost.truncate
iex> IncrementalSlug.where_slug_with_increment(TestPost, "Slug-Doe")
#Ecto.Query<from t0 in IncrementalSlug.TestPost, where: like(t0.slug, ^"Slug-Doe-_")>
iex> IncrementalSlug.where_slug_with_increment(TestPost, "Henry")
#Ecto.Query<from t0 in IncrementalSlug.TestPost, where: like(t0.slug, ^"Henry-_")>
"""
@spec where_slug_with_increment(queryable :: Ecto.Queryable.t(), slug :: String.t(), atom()) ::
Ecto.Query.t()
def where_slug_with_increment(queryable, slug, to \\ @incremental_slug.to)
def where_slug_with_increment(queryable, slug, to),
do: queryable |> where([a], like(field(a, ^to), ^"#{slug}-_"))
end
| backend/my_deps/incremental_slug/lib/incremental_slug.ex | 0.893913 | 0.765681 | incremental_slug.ex | starcoder |
defmodule Attempt.Bucket.Token do
@moduledoc """
Implementation of a Token Bucket
A token bucket provides a form of [rate limiting](https://en.wikipedia.org/wiki/Token_bucket)
This implementation is designed to allow for both
synchronous and asynchronous token requests. The
intent is to simplify the higher level APIs by giving
them a soft guarantee of token return.
Since the implementation uses timers (via `Process.send_after/3`),
neither the timing precision nor the minimum time window
is likely to suit every application.
The primary purpose of this token bucket is to
support "longer lived" functions such as 3rd party
API calls and calls to other external services
like databases.
## Implementation
* A bucket is defined to hold a maximum number of tokens
* The token count is reduced by each call to `get_token/2`
* When the token count reaches 0, the request is placed in
a queue.
* Every `:fill_rate` milliseconds a new token is created.
* When the timer is reached and a new token is added the pending
queue is processed
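## Example
A minimal sketch; it assumes `Attempt.Bucket.Supervisor` is running and
that `Attempt.Retry.Budget` carries the `:token_bucket` and `:timeout`
fields used below:

    {:ok, bucket} = Attempt.Bucket.Token.new(:api_bucket, burst_size: 10, fill_rate: 100)
    budget = %Attempt.Retry.Budget{token_bucket: bucket, timeout: 5_000}
    {:ok, tokens_left} = Attempt.Bucket.Token.claim_token(bucket, budget)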
"""
use GenServer
alias Attempt.Bucket
alias Attempt.Retry.Budget
require Logger
import Attempt.Errors
import Supervisor.Spec
defstruct name: nil,
# Add a token each per fill_rate milliseconds
fill_rate: 3,
# Don't allow the queue to expand forever
max_queue_length: 100,
# The pending queue
queue: nil,
# Available tokens
tokens: 0,
# Maximum number of tokens that can be consumed in a burst
burst_size: 10
@type t :: struct()
@default_config %__MODULE__{}
@default_timeout 5_000
@spec new(atom(), Keyword.t() | Bucket.Token.t()) ::
{:ok, Bucket.Token.t()} | {:error, {Exception.t(), String.t()}}
def new(name, config \\ @default_config)
def new(name, config) when is_atom(name) and is_list(config) do
config =
@default_config
|> Map.delete(:__struct__)
|> Map.to_list()
|> Keyword.merge(config)
|> Enum.into(%{})
new(name, struct(__MODULE__, config))
end
def new(name, %Bucket.Token{} = config) when is_atom(name) do
config = %Bucket.Token{config | name: name}
bucket_worker = worker(__MODULE__, [name, config])
case DynamicSupervisor.start_child(Bucket.Supervisor, bucket_worker) do
{:ok, _pid} -> {:ok, config}
{:error, {:already_started, _}} -> {:error, already_started_error(config), config}
end
end
@spec new!(atom(), Keyword.t() | Bucket.Token.t()) :: Bucket.Token.t() | no_return()
def new!(name, config) do
case new(name, config) do
{:ok, bucket} -> bucket
error -> raise "Couldn't start bucket #{inspect(config)}: #{inspect(error)}"
end
end
@spec state(Bucket.Token.t()) :: {:ok, Bucket.Token.t()} | {:error, {Exception.t(), String.t()}}
def state(bucket) do
GenServer.call(bucket.name, :state)
end
def start_link(name, bucket \\ @default_config) do
bucket = %{bucket | tokens: bucket.burst_size, queue: :queue.new()}
GenServer.start_link(__MODULE__, bucket, name: name)
end
@spec stop(atom() | Budget.t() | Bucket.Token.t()) ::
:ok | {:error, {Exception.t(), String.t()}}
def stop(name) when is_atom(name) do
if pid = Process.whereis(name) do
DynamicSupervisor.terminate_child(Bucket.Supervisor, pid)
else
{:error, unknown_bucket_error(name)}
end
end
def stop(%Budget{token_bucket: %Bucket.Token{name: name}}) do
stop(name)
end
def stop(%Bucket.Token{name: name}) do
stop(name)
end
def init(budget) do
schedule_increment(budget)
{:ok, budget}
end
def claim_token(bucket, %Budget{} = budget) do
timeout = budget.timeout || @default_timeout
try do
GenServer.call(bucket.name, :claim_token, timeout)
catch
:exit, {:timeout, {GenServer, :call, [bucket_name, :claim_token, timeout]}} ->
{:error, timeout_error(bucket_name, timeout)}
end
end
def claim_token!(bucket, %Budget{} = budget) do
timeout = budget.timeout || @default_timeout
GenServer.call(bucket, :claim_token!, timeout)
end
# Callbacks
def handle_call(:claim_token, from, %{tokens: tokens} = bucket) when tokens > 0 do
bucket = process_queue(bucket)
if bucket.tokens > 0 do
bucket = decrement(bucket)
{:reply, {:ok, bucket.tokens}, bucket}
else
handle_call(:claim_token, from, bucket)
end
end
def handle_call(:claim_token, from, %{queue: queue} = bucket) do
if :queue.len(queue) >= bucket.max_queue_length do
{:reply, {:error, full_queue_error()}, bucket}
else
bucket = %{bucket | queue: :queue.in(from, queue)}
{:noreply, bucket}
end
end
def handle_call(:claim_token!, _from, bucket) do
if bucket.tokens > 0 do
bucket = decrement(bucket)
{:reply, {:ok, bucket.tokens}, bucket}
else
{:reply, {:error, no_tokens_error()}, bucket}
end
end
def handle_call(:state, _from, bucket) do
{:reply, {:ok, bucket}, bucket}
end
def handle_info(:increment_bucket, bucket) do
schedule_increment(bucket)
bucket = %{bucket | tokens: min(bucket.tokens + 1, bucket.burst_size)}
{:noreply, process_queue(bucket)}
end
defp process_queue(%{queue: queue, tokens: tokens} = bucket) do
if :queue.is_empty(queue) || tokens == 0 do
bucket
else
bucket = decrement(bucket)
{{:value, pid}, new_queue} = :queue.out(queue)
GenServer.reply(pid, {:ok, bucket.tokens})
process_queue(%{bucket | queue: new_queue})
end
end
defp decrement(bucket) do
%{bucket | tokens: bucket.tokens - 1}
end
defp schedule_increment(bucket) do
Process.send_after(self(), :increment_bucket, bucket.fill_rate)
end
end
| lib/attempt/bucket/token.ex | 0.817246 | 0.47524 | token.ex | starcoder |
defmodule Rules.Graph.Brut.Inversion_ do
def rules_result, do: Enum.random(["", "12"])
def generate_zones do
[
%{id: "123", name: "Cartier EU"},
%{id: "11", name: "Cartier Asia"},
%{id: "41", name: "Cartier NA"},
]
end
def generate_stocks do
[
%{location: "PW", location_type: "supply", country: "FR", quantity: 999},
%{location: "RW", location_type: "store", country: "FR", quantity: 999},
%{location: "GD", location_type: "store", country: "UK", quantity: 999},
%{location: "GCD", location_type: "supply", country: "UK", quantity: 999},
]
end
def generate_regions do
[
%{location: "PW", location_type: "supply", country: "FR", quantity: 999, rank: 1},
%{location: "RW", location_type: "store", country: "FR", quantity: 999, rank: 2},
]
end
# (input -> Log output) -> (input -> Log bool) -> (input -> Log output) -> (input -> Log output) -> (input -> Log output)
def split2(f, condition, true_branch, false_branch) do
fn input ->
input = f.(input)
cond? = condition.(input)
if cond?, do: true_branch.(input), else: false_branch.(input)
end
end
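# Example: split2(&Function.identity/1, &(&1 > 0), &{:pos, &1}, &{:neg, &1}).(5)
# returns {:pos, 5}: the input is first threaded through `f`, then routed
# to whichever branch `condition` selects.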
# (input -> Log output) -> (input -> Log output)
def straight(f, single_branch) do
fn input ->
input = f.(input)
single_branch.(input)
end
end
@name "start"
@coordinates {0, 200}
# (input -> output) -> input -> output
def start(f) do
straight(f, fulfillement(fn input -> %{order: input.order, warehouses: []} end))
end
@name "fulfillement"
@coordinates {200, 200}
# (input -> output) -> (input -> output)
def fulfillement(f) do
split2(f,
fn input -> input.order == "0" end,
to_end(fn input -> %{order: input.order, stock: nil, leadtime: nil} end),
exclusion(fn input -> %{order: input.order, zones: generate_zones()} end)
)
end
@name "exclusion"
@coordinates {400, 400}
# (input -> output) -> input -> output
def exclusion(f) do
split2(f,
fn input -> input.order == "1" end,
to_end(fn input -> %{order: input.order, stock: nil, leadtime: nil} end),
sourcing(fn input -> %{order: input.order, zones: input.zones, stocks: generate_stocks()} end)
)
end
@name "sourcing"
@coordinates {600, 600}
# (input -> output) -> (input -> output)
def sourcing(f) do
split2(f,
fn input -> input.order == "2" end,
to_end(fn input -> %{order: input.order, stock: nil, leadtime: nil} end),
allocation(fn input -> %{order: input.order, zones: input.zones, stocks: input.stocks, regions: generate_regions()} end)
)
end
@name "allocation"
@coordinates {800, 600}
# (input -> output) -> (input -> output)
def allocation(f) do
straight(f, leadtime(fn input -> %{order: input.order, stock: List.first(input.stocks), zone: List.first(input.zones), region: List.first(input.regions)} end))
end
@name "leadtime"
@coordinates {1000, 600}
# (input -> output) -> (input -> output)
def leadtime(f) do
straight(f, to_end(fn input -> %{order: input.order, stock: input.stock, zone: input.zone, region: input.region, leadtime: 61} end))
end
@name "to_end"
@coordinates {1200, 200}
# (input -> output) -> (input -> output)
def to_end(f) do
straight(f, fn input -> input.order end)
end
_ = @name
_ = @coordinates
end
| rules/lib/rules_inversion.ex | 0.540681 | 0.512876 | rules_inversion.ex | starcoder |
defmodule FusionAuth do
require Logger
@moduledoc """
The `FusionAuth` module provides functions for building a dynamic HTTP client as well as standardizing the responses returned from the FusionAuth API.
## Examples
iex> FusionAuth.client("http://localhost:9011", "<KEY>", "6b40f9d6-cfd8-4312-bff8-b082ad45e93c")
%Tesla.Client{
adapter: {Tesla.Adapter.Hackney, :call, [[recv_timeout: 30000]]},
fun: nil,
post: [],
pre: [
{Tesla.Middleware.BaseUrl, :call, ["http://localhost:9011"]},
{Tesla.Middleware.JSON, :call, [[]]},
{Tesla.Middleware.Headers, :call,
[
[
{"Authorization", "<KEY>"},
{"X-FusionAuth-TenantId", "6b40f9d6-cfd8-4312-bff8-b082ad45e93c"}
]
]}
]
}
"""
@type client() :: Tesla.Client.t()
@type result() :: {:ok, map() | String.t(), Tesla.Env.t()} | {:error, map(), any}
@doc """
Builds a dynamic client for executing HTTP requests to the FusionAuth API using Application variables
specified in `config/{env}.exs`.
```
config :fusion_auth,
api_key: "FUSION_AUTH_API_KEY",
api_url: "FUSION_AUTH_URL",
tenant_id: "FUSION_AUTH_TENANT_ID"
```
"""
@spec client() :: client()
def client(),
do:
client(
Application.get_env(:fusion_auth, :api_url),
Application.get_env(:fusion_auth, :api_key),
Application.get_env(:fusion_auth, :tenant_id)
)
@doc """
Builds a dynamic client for executing HTTP requests to the FusionAuth API based on runtime arguments.
"""
@spec client(String.t(), String.t(), String.t()) :: client()
def client(base_url, api_key, tenant_id) do
middleware = [
{Tesla.Middleware.BaseUrl, base_url},
Tesla.Middleware.JSON,
Tesla.Middleware.Telemetry,
{Tesla.Middleware.Headers,
[
{"Authorization", api_key},
{"X-FusionAuth-TenantId", tenant_id}
]}
]
Tesla.client(middleware, adapter())
end
@doc """
Standardizes the response to be returned from the FusionAuth API request.
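A typical call flow (a sketch; the endpoint path is illustrative):

    FusionAuth.client()
    |> Tesla.get("/api/user")
    |> FusionAuth.result()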
"""
@spec result({:ok, Tesla.Env.t()}) :: result()
def result({:ok, %{status: status, body: body} = env}) when status < 300 do
{:ok, body, env}
end
@doc """
Standardizes the response to be returned from the FusionAuth API request.
"""
@spec result({:ok, Tesla.Env.t()}) :: result()
def result({:ok, %{status: status, body: body} = env}) when status >= 300 do
Logger.warn("""
FusionAuth request resulted in a status code >= 300.
Env: #{inspect(env)}
""")
{:error, body, env}
end
@doc """
Standardizes the response to be returned from the FusionAuth API request.
"""
@spec result({:error, any}) :: result()
def result({:error, any}) do
Logger.error("""
FusionAuth request resulted in an error.
Error: #{inspect(any)}
""")
{:error, %{}, any}
end
@doc false
def adapter do
case Application.get_env(:fusion_auth, :tesla) do
nil -> {Tesla.Adapter.Hackney, [recv_timeout: 30_000]}
tesla -> tesla[:adapter]
end
end
end
| lib/fusion_auth.ex | 0.929544 | 0.585723 | fusion_auth.ex | starcoder |
defmodule Google.Bigtable.Admin.V2.Instance do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
display_name: String.t(),
state: integer,
type: integer,
labels: %{String.t() => String.t()}
}
defstruct [:name, :display_name, :state, :type, :labels]
field :name, 1, type: :string
field :display_name, 2, type: :string
field :state, 3, type: Google.Bigtable.Admin.V2.Instance.State, enum: true
field :type, 4, type: Google.Bigtable.Admin.V2.Instance.Type, enum: true
field :labels, 5, repeated: true, type: Google.Bigtable.Admin.V2.Instance.LabelsEntry, map: true
end
defmodule Google.Bigtable.Admin.V2.Instance.LabelsEntry do
@moduledoc false
use Protobuf, map: true, syntax: :proto3
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field :key, 1, type: :string
field :value, 2, type: :string
end
defmodule Google.Bigtable.Admin.V2.Instance.State do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
field :STATE_NOT_KNOWN, 0
field :READY, 1
field :CREATING, 2
end
defmodule Google.Bigtable.Admin.V2.Instance.Type do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
field :TYPE_UNSPECIFIED, 0
field :PRODUCTION, 1
field :DEVELOPMENT, 2
end
defmodule Google.Bigtable.Admin.V2.Cluster do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
name: String.t(),
location: String.t(),
state: integer,
serve_nodes: integer,
default_storage_type: integer
}
defstruct [:name, :location, :state, :serve_nodes, :default_storage_type]
field :name, 1, type: :string
field :location, 2, type: :string
field :state, 3, type: Google.Bigtable.Admin.V2.Cluster.State, enum: true
field :serve_nodes, 4, type: :int32
field :default_storage_type, 5, type: Google.Bigtable.Admin.V2.StorageType, enum: true
end
defmodule Google.Bigtable.Admin.V2.Cluster.State do
@moduledoc false
use Protobuf, enum: true, syntax: :proto3
field :STATE_NOT_KNOWN, 0
field :READY, 1
field :CREATING, 2
field :RESIZING, 3
field :DISABLED, 4
end
defmodule Google.Bigtable.Admin.V2.AppProfile do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
routing_policy: {atom, any},
name: String.t(),
etag: String.t(),
description: String.t()
}
defstruct [:routing_policy, :name, :etag, :description]
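# routing_policy is a proto3 oneof: at most one of the fields tagged
# `oneof: 0` below (multi_cluster_routing_use_any or single_cluster_routing)
# is set at any time.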
oneof :routing_policy, 0
field :name, 1, type: :string
field :etag, 2, type: :string
field :description, 3, type: :string
field :multi_cluster_routing_use_any, 5,
type: Google.Bigtable.Admin.V2.AppProfile.MultiClusterRoutingUseAny,
oneof: 0
field :single_cluster_routing, 6,
type: Google.Bigtable.Admin.V2.AppProfile.SingleClusterRouting,
oneof: 0
end
defmodule Google.Bigtable.Admin.V2.AppProfile.MultiClusterRoutingUseAny do
@moduledoc false
use Protobuf, syntax: :proto3
defstruct []
end
defmodule Google.Bigtable.Admin.V2.AppProfile.SingleClusterRouting do
@moduledoc false
use Protobuf, syntax: :proto3
@type t :: %__MODULE__{
cluster_id: String.t(),
allow_transactional_writes: boolean
}
defstruct [:cluster_id, :allow_transactional_writes]
field :cluster_id, 1, type: :string
field :allow_transactional_writes, 2, type: :bool
end
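# Oneof sketch (added for illustration): with protobuf-elixir, a oneof is
# held in a single struct key as a `{field_name, value}` tuple, matching the
# `routing_policy: {atom, any}` typespec above.
#
#     alias Google.Bigtable.Admin.V2.AppProfile
#
#     profile = %AppProfile{
#       name: "projects/p/instances/i/appProfiles/default",
#       routing_policy:
#         {:single_cluster_routing,
#          %AppProfile.SingleClusterRouting{
#            cluster_id: "my-cluster",
#            allow_transactional_writes: false
#          }}
#     }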
lib/grpc/admin/instance.pb.ex | 0.692538 | 0.406774 | instance.pb.ex | starcoder
if Code.ensure_loaded?(Decorator.Define) do
defmodule Nebulex.Hook do
@moduledoc """
Pre/Post Hooks
Since `v2.0.0`, pre/post hooks are no longer supported or handled by `Nebulex`
itself. Hooks are not a common use case, and they can be easily implemented
on top of the Cache at the application level. Nevertheless, to preserve some
backward compatibility, `Nebulex` provides the following decorators for
implementing pre/post hooks easily.
## `before` decorator
The `before` decorator is declared for performing a hook action or callback
before the annotated function is executed.
@decorate before(fn %Nebulex.Hook{} = hook -> inspect(hook) end)
def some_fun(var) do
# logic ...
end
## `after_return` decorator
The `after_return` decorator is declared for performing a hook action or
callback after the annotated function is executed; the function's return
value is passed to the hook through the `return:` attribute.
@decorate after_return(&inspect(&1.return))
def some_fun(var) do
# logic ...
end
## `around` decorator
The final kind of hook is the `around` decorator. The `around` decorator runs
"around" the annotated function's execution: it has the opportunity to do
work both **before** and **after** the function executes. This means the
given hook function is invoked twice, before and after the code block is
evaluated.
@decorate around(&inspect(&1.step))
def some_fun(var) do
# logic ...
end
## Putting all together
Suppose we want to track all cache calls (before and after they are called)
by logging them (including the execution time). In this case, we need to
provide a pre/post hook to log these calls.
First of all, we have to create a module implementing the hook function:
defmodule MyApp.Tracker do
use GenServer
alias Nebulex.Hook
require Logger
@actions [:get, :put]
## API
def start_link(opts \\\\ []) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
def track(%Hook{step: :before, name: name}) when name in @actions do
System.system_time(:microsecond)
end
def track(%Hook{step: :after_return, name: name} = event) when name in @actions do
GenServer.cast(__MODULE__, {:track, event})
end
def track(hook), do: hook
## GenServer Callbacks
@impl true
def init(_opts) do
{:ok, %{}}
end
@impl true
def handle_cast({:track, %Hook{acc: start} = hook}, state) do
diff = System.system_time(:microsecond) - start
Logger.info("#=> #\{hook.module}.#\{hook.name}/#\{hook.arity}, Duration: #\{diff}")
{:noreply, state}
end
end
And then, in the Cache:
defmodule MyApp.Cache do
use Nebulex.Hook
@decorate_all around(&MyApp.Tracker.track/1)
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
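Note that `MyApp.Tracker` is a `GenServer`, so it must be running before the
hooks fire (the `after_return` events are dispatched via `GenServer.cast/2`).
A typical place to start it (assumed application layout) is the supervision
tree:

    children = [
      MyApp.Tracker,
      MyApp.Cache
    ]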
Try it out:
iex> MyApp.Cache.put 1, 1
10:19:47.736 [info] Elixir.MyApp.Cache.put/3, Duration: 27
iex> MyApp.Cache.get 1
10:20:14.941 [info] Elixir.MyApp.Cache.get/2, Duration: 11
"""
use Decorator.Define, before: 1, after_return: 1, around: 1
@enforce_keys [:step, :module, :name, :arity]
defstruct [:step, :module, :name, :arity, :return, :acc]
@type t :: %__MODULE__{
step: :before | :after_return,
module: Nebulex.Cache.t(),
name: atom,
arity: non_neg_integer,
return: term,
acc: term
}
@type hook_fun :: (t -> term)
alias Nebulex.Hook
@doc """
Before decorator.
Intercepts any call to the annotated function and calls the given `fun`
before the logic is executed.
## Example
defmodule MyApp.Example do
use Nebulex.Hook
@decorate before(&inspect(&1))
def some_fun(var) do
# logic ...
end
end
"""
@spec before(hook_fun, term, map) :: term
def before(fun, block, context) do
with_hook([:before], fun, block, context)
end
@doc """
After-return decorator.
Intercepts any call to the annotated function and calls the given `fun`
after the logic is executed, and the returned result is passed through
the `return:` attribute.
## Example
defmodule MyApp.Example do
use Nebulex.Hook
@decorate after_return(&inspect(&1))
def some_fun(var) do
# logic ...
end
end
"""
@spec after_return(hook_fun, term, map) :: term
def after_return(fun, block, context) do
with_hook([:after_return], fun, block, context)
end
@doc """
Around decorator.
Intercepts any call to the annotated function and calls the given `fun`
before and after the logic is executed. The result of the first call to
the hook function is passed through the `acc:` attribute, so it can be
used in the next call (after return). Finally, as with the `after_return`
decorator, the result of the code-block evaluation is passed through the
`return:` attribute.
## Example
defmodule MyApp.Profiling do
alias Nebulex.Hook
def prof(%Hook{step: :before}) do
System.system_time(:microsecond)
end
def prof(%Hook{step: :after_return, acc: start} = hook) do
:telemetry.execute(
[:my_app, :profiling],
%{duration: System.system_time(:microsecond) - start},
%{module: hook.module, name: hook.name}
)
end
end
defmodule MyApp.Example do
use Nebulex.Hook
@decorate around(&MyApp.Profiling.prof/1)
def some_fun(var) do
# logic ...
end
end
"""
@spec around(hook_fun, term, map) :: term
def around(fun, block, context) do
with_hook([:before, :after_return], fun, block, context)
end
defp with_hook(hooks, fun, block, context) do
quote do
hooks = unquote(hooks)
fun = unquote(fun)
hook = %Nebulex.Hook{
step: :before,
module: unquote(context.module),
name: unquote(context.name),
arity: unquote(context.arity)
}
# eval before
acc =
if :before in hooks do
Hook.eval_hook(:before, fun, hook)
end
# eval code-block
return = unquote(block)
# eval after_return
if :after_return in hooks do
Hook.eval_hook(
:after_return,
fun,
%{hook | step: :after_return, return: return, acc: acc}
)
end
return
end
end
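# Added commentary: `with_hook/4` returns quoted code. `Decorator.Define`
# splices this AST in place of the annotated function's body at compile
# time, so `unquote(block)` above is the original body and is evaluated
# exactly once, between the :before and :after_return hook invocations.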
@doc """
This function is intended for internal use only.
"""
@spec eval_hook(:before | :after_return, hook_fun, t) :: term
def eval_hook(step, fun, hook) do
fun.(hook)
rescue
e ->
msg = "hook execution failed on step #{inspect(step)} with error #{inspect(e)}"
reraise RuntimeError, msg, __STACKTRACE__
end
end
end
lib/nebulex/hook.ex | 0.894417 | 0.556821 | hook.ex | starcoder