| code (stringlengths 114..1.05M) | path (stringlengths 3..312) | quality_prob (float64 0.5..0.99) | learning_prob (float64 0.2..1) | filename (stringlengths 3..168) | kind (stringclasses 1) |
|---|---|---|---|---|---|
defmodule E24 do
def maybe_move(location, path) do
if MapSet.member?(path, location), do: [location], else: []
end
def adjacent_pairs(vertex, vertices) do
[
%{x: vertex.pos.x - 1, y: vertex.pos.y},
%{x: vertex.pos.x + 1, y: vertex.pos.y},
%{x: vertex.pos.x, y: vertex.pos.y - 1},
%{x: vertex.pos.x, y: vertex.pos.y + 1}
]
|> Enum.filter(fn(v) -> Map.has_key?(vertices, v) end)
|> Enum.map(fn(v) -> Map.get(vertices, v) end)
end
defp edges(vertices) do
pos_to_n = vertices |> Map.new(fn(v) -> {v.pos, v} end)
vertices |> Enum.flat_map(
fn(vertex) ->
Enum.zip(
Stream.cycle([vertex]),
adjacent_pairs(vertex, pos_to_n)
)
end
)
end
defp to_graph_where_up_down_left_right_are_adjacent(vertices) do
g = Graph.new()
g = Enum.reduce(vertices, g, fn(v, g) -> Graph.add_vertex(g, v, "#{v.n} (#{v.pos.x},#{v.pos.y})") end)
# add each edge in both directions so the graph behaves as undirected
g = Enum.reduce(edges(vertices), g, fn({a, b}, g) -> Graph.add_edge(g, a, b) end)
Enum.reduce(edges(vertices), g, fn({a, b}, g) -> Graph.add_edge(g, b, a) end)
end
defp number_vertices(vertices) do
vertices |> Enum.filter(fn(v) ->
case Integer.parse(v.n) do
{_, ""} -> true
_ -> false
end
end
)
end
defp path(input, width) do
Regex.scan(~r/[0-9\.]/, input, return: :index)
|> Enum.map(
fn([{i, _}]) -> %{
n: String.at(input, i),
pos: %{
x: rem(i, width + 1),
y: div(i, width + 1)
}
}
end
)
|> MapSet.new
end
defp width(input) do
input
|> String.split("\n")
|> Enum.at(0)
|> String.length
end
defp pair_up(list) do
# pair up
# [:a, :b, :c] => [[:a, :b], [:b, :c]]
Enum.chunk_every(list, 2, 1, :discard)
end
defp full_path(vertices, opts) do
case opts do
:return_to_start -> vertices ++ [hd(vertices)]
_ -> vertices
end
end
defp shortest_path_visiting_all(graph, vertices, opts) do
vertices
|> full_path(opts)
|> pair_up()
|> Enum.reduce(0, fn([from, to], acc) ->
# Graph.dijkstra returns the vertices on the path, so the edge count is one less
acc + Enum.count(Graph.dijkstra(graph, from, to)) - 1
end)
end
def shortest(input, opts \\ nil) do
width = width(input)
path = path(input, width)
graph = to_graph_where_up_down_left_right_are_adjacent(path)
number_vertices = number_vertices(path)
starting_location = Enum.find(number_vertices, fn(v) -> v.n == "0" end)
non_starting_locations = Enum.filter(number_vertices, fn(v) -> v.n != "0" end)
permutations = non_starting_locations
|> Combination.permutate
|> Enum.map(fn(permutation) -> [starting_location | permutation] end)
stream = Task.async_stream(permutations, fn(permutation) -> shortest_path_visiting_all(graph, permutation, opts) end)
stream
|> Enum.map(fn({:ok, length}) -> length end)
|> Enum.min()
end
end
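# Usage sketch (hypothetical maze input in the Advent of Code day 24 style;
# assumes the `libgraph` and `combination` dependencies):
#
#     input = "###########\n#0.1.....2#\n#.#######.#\n#4.......3#\n###########"
#     E24.shortest(input)                   # shortest route visiting all digits
#     E24.shortest(input, :return_to_start) # same, but ending back at "0"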
| 24/lib/e24.ex | 0.5794 | 0.70369 | e24.ex | starcoder |
defmodule Vivid.Path do
alias Vivid.{Path, Point, Line}
defstruct vertices: []
@moduledoc ~S"""
Describes a path as a series of vertices.
Path implements both the `Enumerable` and `Collectable` protocols.
## Example
iex> use Vivid
...> 0..3
...> |> Stream.map(fn
...> i when rem(i, 2) == 0 -> Point.init(i * 3, i * 4)
...> i -> Point.init(i * 3, i * 2)
...> end)
...> |> Enum.into(Path.init())
...> |> to_string()
"@@@@@@@@@@@@\n" <>
"@@@@@@@ @@@@\n" <>
"@@@@@@@ @@\n" <>
"@@@@@@ @@@ @\n" <>
"@@@@@@ @@@@@\n" <>
"@@@@@ @@@@@@\n" <>
"@@@@@ @@@@@@\n" <>
"@@@@ @@@@@@@\n" <>
"@@ @@@@@@@@\n" <>
"@ @@@@@@@@@@\n" <>
"@@@@@@@@@@@@\n"
"""
@opaque t :: %Path{vertices: [Point.t()]}
@doc """
Initialize an empty path.
## Example
iex> Vivid.Path.init
%Vivid.Path{vertices: []}
"""
@spec init() :: Path.t()
def init, do: %Path{vertices: []}
@doc """
Initialize a path from a list of points.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(1,2), Vivid.Point.init(2,2), Vivid.Point.init(2,1)])
%Vivid.Path{vertices: [
%Vivid.Point{x: 1, y: 1},
%Vivid.Point{x: 1, y: 2},
%Vivid.Point{x: 2, y: 2},
%Vivid.Point{x: 2, y: 1}
]}
"""
@spec init([Point.t()]) :: Path.t()
def init(points) when is_list(points), do: %Path{vertices: points}
@doc """
Convert a path into a list of lines joined by the vertices.
## Examples
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(1,2), Vivid.Point.init(2,2), Vivid.Point.init(2,1)]) |> Vivid.Path.to_lines
[%Vivid.Line{origin: %Vivid.Point{x: 1, y: 1},
termination: %Vivid.Point{x: 1, y: 2}},
%Vivid.Line{origin: %Vivid.Point{x: 1, y: 2},
termination: %Vivid.Point{x: 2, y: 2}},
%Vivid.Line{origin: %Vivid.Point{x: 2, y: 2},
termination: %Vivid.Point{x: 2, y: 1}}]
"""
@spec to_lines(Path.t()) :: [Line.t()]
def to_lines(%Path{vertices: points}) do
points_to_lines([], points)
end
@doc """
Remove a vertex from a Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2)]) |> Vivid.Path.delete(Vivid.Point.init(2,2))
%Vivid.Path{vertices: [%Vivid.Point{x: 1, y: 1}]}
"""
@spec delete(Path.t(), Point.t()) :: Path.t()
def delete(%Path{vertices: points}, %Point{} = point) do
points
|> List.delete(point)
|> init
end
@doc """
Remove a vertex at a specific index in the Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2)]) |> Vivid.Path.delete_at(1)
%Vivid.Path{vertices: [%Vivid.Point{x: 1, y: 1}]}
"""
@spec delete_at(Path.t(), integer) :: Path.t()
def delete_at(%Path{vertices: points}, index) do
points
|> List.delete_at(index)
|> init
end
@doc """
Return the first vertex in the Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2)]) |> Vivid.Path.first
%Vivid.Point{x: 1, y: 1}
"""
@spec first(Path.t()) :: Point.t()
def first(%Path{vertices: points}) do
points
|> List.first()
end
@doc """
Insert a vertex at a specific index in the Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2)]) |> Vivid.Path.insert_at(1, Vivid.Point.init(3,3))
%Vivid.Path{vertices: [
%Vivid.Point{x: 1, y: 1},
%Vivid.Point{x: 3, y: 3},
%Vivid.Point{x: 2, y: 2}
]}
"""
@spec insert_at(Path.t(), integer, Point.t()) :: Path.t()
def insert_at(%Path{vertices: points}, index, %Point{} = point) do
points
|> List.insert_at(index, point)
|> init
end
@doc """
Return the last vertex in the Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2)]) |> Vivid.Path.last
%Vivid.Point{x: 2, y: 2}
"""
@spec last(Path.t()) :: Point.t()
def last(%Path{vertices: points}) do
points
|> List.last()
end
@doc """
Replace a vertex at a specific index in the Path.
## Example
iex> Vivid.Path.init([Vivid.Point.init(1,1), Vivid.Point.init(2,2), Vivid.Point.init(3,3)]) |> Vivid.Path.replace_at(1, Vivid.Point.init(4,4))
%Vivid.Path{vertices: [
%Vivid.Point{x: 1, y: 1},
%Vivid.Point{x: 4, y: 4},
%Vivid.Point{x: 3, y: 3}
]}
"""
@spec replace_at(Path.t(), integer, Point.t()) :: Path.t()
def replace_at(%Path{vertices: points}, index, %Point{} = point) do
points
|> List.replace_at(index, point)
|> init
end
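# Builds lines pairwise from consecutive vertices: the first clause ends the
# recursion, the second seeds the accumulator with the first line, and the
# third chains each new line from the previous line's termination point.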
defp points_to_lines(lines, []), do: lines
defp points_to_lines([], [origin | [term | points]]) do
line = Line.init(origin, term)
points_to_lines([line], points)
end
defp points_to_lines(lines, [point | rest]) do
origin = lines |> List.last() |> Line.termination()
term = point
lines = lines ++ [Line.init(origin, term)]
points_to_lines(lines, rest)
end
end
| lib/vivid/path.ex | 0.918063 | 0.414751 | path.ex | starcoder |
defmodule RDF.Vocabulary.Namespace do
@moduledoc """
An RDF vocabulary as a `RDF.Namespace`.
`RDF.Vocabulary.Namespace` modules represent an RDF vocabulary as a `RDF.Namespace`.
They can be defined with the `defvocab/2` macro of this module.
RDF.ex comes with predefined modules for some fundamental vocabularies in
the `RDF.NS` module.
"""
alias RDF.Utils.ResourceClassifier
@vocabs_dir "priv/vocabs"
defmacro __using__(_opts) do
quote do
import unquote(__MODULE__)
end
end
@doc """
Defines a `RDF.Namespace` module for an RDF vocabulary.
"""
defmacro defvocab(name, opts) do
strict = strict?(opts)
base_iri = base_iri!(opts)
file = filename!(opts)
{terms, data} =
case source!(opts) do
{:terms, terms} -> {terms, nil}
{:data, data} -> {rdf_data_vocab_terms(data, base_iri), data}
end
unless Mix.env() == :test do
IO.puts("Compiling vocabulary namespace for #{base_iri}")
end
ignored_terms = ignored_terms!(opts)
terms =
terms
|> term_mapping!(opts)
|> Map.drop(MapSet.to_list(ignored_terms))
|> validate_terms!
|> validate_characters!(opts)
|> validate_case!(data, base_iri, opts)
case_separated_terms = group_terms_by_case(terms)
lowercased_terms = Map.get(case_separated_terms, :lowercased, %{})
quote do
vocabdoc = Module.delete_attribute(__MODULE__, :vocabdoc)
defmodule unquote(name) do
@moduledoc vocabdoc
@behaviour Elixir.RDF.Namespace
if unquote(file) do
@external_resource unquote(file)
end
@base_iri unquote(base_iri)
@spec __base_iri__ :: String.t
def __base_iri__, do: @base_iri
@strict unquote(strict)
@spec __strict__ :: boolean
def __strict__, do: @strict
@terms unquote(Macro.escape(terms))
@impl Elixir.RDF.Namespace
def __terms__, do: @terms |> Map.keys
@ignored_terms unquote(Macro.escape(ignored_terms))
@doc """
Returns all known IRIs of the vocabulary.
"""
@spec __iris__ :: [Elixir.RDF.IRI.t]
def __iris__ do
@terms
|> Enum.map(fn
{term, true} -> term_to_iri(@base_iri, term)
{_alias, term} -> term_to_iri(@base_iri, term)
end)
|> Enum.uniq
end
define_vocab_terms unquote(lowercased_terms), unquote(base_iri)
@impl Elixir.RDF.Namespace
@dialyzer {:nowarn_function, __resolve_term__: 1}
def __resolve_term__(term) do
case @terms[term] do
nil ->
if @strict or MapSet.member?(@ignored_terms, term) do
{:error,
%Elixir.RDF.Namespace.UndefinedTermError{
message: "undefined term #{term} in strict vocabulary #{__MODULE__}"
}
}
else
{:ok, term_to_iri(@base_iri, term)}
end
true ->
{:ok, term_to_iri(@base_iri, term)}
original_term ->
{:ok, term_to_iri(@base_iri, original_term)}
end
end
if not @strict do
def unquote(:"$handle_undefined_function")(term, []) do
if MapSet.member?(@ignored_terms, term) do
raise UndefinedFunctionError
else
term_to_iri(@base_iri, term)
end
end
def unquote(:"$handle_undefined_function")(term, [subject | objects]) do
if MapSet.member?(@ignored_terms, term) do
raise UndefinedFunctionError
else
RDF.Description.new(subject, term_to_iri(@base_iri, term), objects)
end
end
end
end
end
end
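# A minimal usage sketch (hypothetical vocabulary; the option names match the
# option handling below):
#
#     defmodule MyApp.NS do
#       use RDF.Vocabulary.Namespace
#
#       defvocab EX,
#         base_iri: "http://example.com/ns#",
#         terms: ~w[Foo bar]
#     end
#
#     MyApp.NS.EX.bar() # the RDF.IRI for "http://example.com/ns#bar"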
@doc false
defmacro define_vocab_terms(terms, base_iri) do
terms
|> Stream.filter(fn
{term, true} -> valid_term?(term)
{_, _} -> true
end)
|> Stream.map(fn
{term, true} -> {term, term}
{term, original_term} -> {term, original_term}
end)
|> Enum.map(fn {term, iri_suffix} ->
iri = term_to_iri(base_iri, iri_suffix)
quote do
@doc "<#{unquote(to_string(iri))}>"
def unquote(term)(), do: unquote(Macro.escape(iri))
@doc "`RDF.Description` builder for `#{unquote(term)}/0`"
def unquote(term)(subject, object) do
RDF.Description.new(subject, unquote(Macro.escape(iri)), object)
end
# Is there a better way to support multiple objects via arguments?
@doc false
def unquote(term)(subject, o1, o2),
do: unquote(term)(subject, [o1, o2])
@doc false
def unquote(term)(subject, o1, o2, o3),
do: unquote(term)(subject, [o1, o2, o3])
@doc false
def unquote(term)(subject, o1, o2, o3, o4),
do: unquote(term)(subject, [o1, o2, o3, o4])
@doc false
def unquote(term)(subject, o1, o2, o3, o4, o5),
do: unquote(term)(subject, [o1, o2, o3, o4, o5])
end
end)
end
defp strict?(opts),
do: Keyword.get(opts, :strict, true)
defp base_iri!(opts) do
base_iri = Keyword.fetch!(opts, :base_iri)
if is_binary(base_iri) and String.ends_with?(base_iri, ["/", "#"]) do
base_iri
else
raise RDF.Namespace.InvalidVocabBaseIRIError,
"a base_iri without a trailing '/' or '#' is invalid"
end
end
defp source!(opts) do
cond do
Keyword.has_key?(opts, :file) -> {:data, filename!(opts) |> RDF.read_file!()}
rdf_data = Keyword.get(opts, :data) -> {:data, raw_rdf_data(rdf_data)}
terms = Keyword.get(opts, :terms) -> {:terms, terms_from_user_input!(terms)}
true ->
raise KeyError, key: ~w[terms data file], term: opts
end
end
defp terms_from_user_input!(terms) do
# TODO: find an alternative to Code.eval_quoted - We want to support that the terms can be given as sigils ...
{terms, _ } = Code.eval_quoted(terms, [], rdf_data_env())
Enum.map terms, fn
term when is_atom(term) -> term
term when is_binary(term) -> String.to_atom(term)
term ->
raise RDF.Namespace.InvalidTermError,
"'#{term}' is not a valid vocabulary term"
end
end
defp raw_rdf_data(%RDF.Description{} = rdf_data), do: rdf_data
defp raw_rdf_data(%RDF.Graph{} = rdf_data), do: rdf_data
defp raw_rdf_data(%RDF.Dataset{} = rdf_data), do: rdf_data
defp raw_rdf_data(rdf_data) do
# TODO: find an alternative to Code.eval_quoted
{rdf_data, _} = Code.eval_quoted(rdf_data, [], rdf_data_env())
rdf_data
end
defp ignored_terms!(opts) do
# TODO: find an alternative to Code.eval_quoted - We want to support that the terms can be given as sigils ...
with terms = Keyword.get(opts, :ignore, []) do
{terms, _ } = Code.eval_quoted(terms, [], rdf_data_env())
terms
|> Enum.map(fn
term when is_atom(term) -> term
term when is_binary(term) -> String.to_atom(term)
term -> raise RDF.Namespace.InvalidTermError, inspect(term)
end)
|> MapSet.new
end
end
defp term_mapping!(terms, opts) do
terms = Map.new terms, fn
term when is_atom(term) -> {term, true}
term -> {String.to_atom(term), true}
end
Keyword.get(opts, :alias, [])
|> Enum.reduce(terms, fn {alias, original_term}, terms ->
term = String.to_atom(original_term)
cond do
not valid_characters?(alias) ->
raise RDF.Namespace.InvalidAliasError,
"alias '#{alias}' contains invalid characters"
Map.get(terms, alias) == true ->
raise RDF.Namespace.InvalidAliasError,
"alias '#{alias}' already defined"
strict?(opts) and not Map.has_key?(terms, term) ->
raise RDF.Namespace.InvalidAliasError,
"term '#{original_term}' is not a term in this vocabulary"
Map.get(terms, term, true) != true ->
raise RDF.Namespace.InvalidAliasError,
"'#{original_term}' is already an alias"
true ->
Map.put(terms, alias, to_string(original_term))
end
end)
end
defp aliased_terms(terms) do
terms
|> Map.values
|> MapSet.new
|> MapSet.delete(true)
|> Enum.map(&String.to_atom/1)
end
@invalid_terms MapSet.new ~w[
and
or
xor
in
fn
def
when
if
for
case
with
quote
unquote
unquote_splicing
alias
import
require
super
__aliases__
]a
def invalid_terms, do: @invalid_terms
defp validate_terms!(terms) do
with aliased_terms = aliased_terms(terms) do
for {term, _} <- terms, term not in aliased_terms and not valid_term?(term) do
term
end
|> handle_invalid_terms!
end
terms
end
defp valid_term?(term), do: term not in @invalid_terms
defp handle_invalid_terms!([]), do: nil
defp handle_invalid_terms!(invalid_terms) do
raise RDF.Namespace.InvalidTermError, """
The following terms cannot be used, because they conflict with Elixir semantics:
- #{Enum.join(invalid_terms, "\n- ")}
You have the following options:
- define an alias with the :alias option on defvocab
- ignore the resource with the :ignore option on defvocab
"""
end
defp validate_characters!(terms, opts) do
if (handling = Keyword.get(opts, :invalid_characters, :fail)) == :ignore do
terms
else
terms
|> detect_invalid_characters
|> handle_invalid_characters(handling, terms)
end
end
defp detect_invalid_characters(terms) do
with aliased_terms = aliased_terms(terms) do
for {term, _} <- terms, term not in aliased_terms and not valid_characters?(term),
do: term
end
end
defp handle_invalid_characters([], _, terms), do: terms
defp handle_invalid_characters(invalid_terms, :fail, _) do
raise RDF.Namespace.InvalidTermError, """
The following terms contain invalid characters:
- #{Enum.join(invalid_terms, "\n- ")}
You have the following options:
- if you are in control of the vocabulary, consider renaming the resource
- define an alias with the :alias option on defvocab
- change the handling of invalid characters with the :invalid_characters option on defvocab
- ignore the resource with the :ignore option on defvocab
"""
end
defp handle_invalid_characters(invalid_terms, :warn, terms) do
Enum.each invalid_terms, fn term ->
IO.warn "'#{term}' is not valid term, since it contains invalid characters"
end
terms
end
defp valid_characters?(term) when is_atom(term),
do: valid_characters?(Atom.to_string(term))
defp valid_characters?(term),
do: Regex.match?(~r/^[a-zA-Z_]\w*$/, term)
defp validate_case!(terms, nil, _, _), do: terms
defp validate_case!(terms, data, base_iri, opts) do
if (handling = Keyword.get(opts, :case_violations, :warn)) == :ignore do
terms
else
terms
|> detect_case_violations(data, base_iri)
|> group_case_violations
|> handle_case_violations(handling, terms, base_iri, opts)
end
end
defp detect_case_violations(terms, data, base_iri) do
aliased_terms = aliased_terms(terms)
terms
|> Enum.filter(fn {term, _} ->
not(Atom.to_string(term) |> String.starts_with?("_"))
end)
|> Enum.filter(fn
{term, true} ->
if term not in aliased_terms do
proper_case?(term, base_iri, Atom.to_string(term), data)
end
{term, original_term} ->
proper_case?(term, base_iri, original_term, data)
end)
end
defp proper_case?(term, base_iri, iri_suffix, data) do
case ResourceClassifier.property?(term_to_iri(base_iri, iri_suffix), data) do
true -> not lowercase?(term)
false -> lowercase?(term)
nil -> lowercase?(term)
end
end
defp group_case_violations(violations) do
violations
|> Enum.group_by(fn
{term, true} ->
if lowercase?(term),
do: :lowercased_term,
else: :capitalized_term
{term, _original} ->
if lowercase?(term),
do: :lowercased_alias,
else: :capitalized_alias
end)
end
defp handle_case_violations(%{} = violations, _, terms, _, _) when map_size(violations) == 0,
do: terms
defp handle_case_violations(violations, :fail, _, base_iri, _) do
resource_name_violations = fn violations ->
violations
|> Enum.map(fn {term, true} -> term_to_iri(base_iri, term) end)
|> Enum.map(&to_string/1)
|> Enum.join("\n- ")
end
alias_violations = fn violations ->
violations
|> Enum.map(fn {term, original} ->
"alias #{term} for #{term_to_iri(base_iri, original)}"
end)
|> Enum.join("\n- ")
end
violation_error_lines =
violations
|> Enum.map(fn
{:capitalized_term, violations} ->
"""
Terms for properties should be lowercased, but the following properties are
capitalized:
- #{resource_name_violations.(violations)}
"""
{:lowercased_term, violations} ->
"""
Terms for non-property resources should be capitalized, but the following
non-properties are lowercased:
- #{resource_name_violations.(violations)}
"""
{:capitalized_alias, violations} ->
"""
Terms for properties should be lowercased, but the following aliases for
properties are capitalized:
- #{alias_violations.(violations)}
"""
{:lowercased_alias, violations} ->
"""
Terms for non-property resources should be capitalized, but the following
aliases for non-properties are lowercased:
- #{alias_violations.(violations)}
"""
end)
|> Enum.join
raise RDF.Namespace.InvalidTermError, """
Case violations detected
#{violation_error_lines}
You have the following options:
- if you are in control of the vocabulary, consider renaming the resource
- define a properly cased alias with the :alias option on defvocab
- change the handling of case violations with the :case_violations option on defvocab
- ignore the resource with the :ignore option on defvocab
"""
end
defp handle_case_violations(violations, :warn, terms, base_iri, _) do
for {type, violations} <- violations,
{term, original} <- violations do
case_violation_warning(type, term, original, base_iri)
end
terms
end
defp case_violation_warning(:capitalized_term, term, _, base_iri) do
IO.warn "'#{term_to_iri(base_iri, term)}' is a capitalized property"
end
defp case_violation_warning(:lowercased_term, term, _, base_iri) do
IO.warn "'#{term_to_iri(base_iri, term)}' is a lowercased non-property resource"
end
defp case_violation_warning(:capitalized_alias, term, _, _) do
IO.warn "capitalized alias '#{term}' for a property"
end
defp case_violation_warning(:lowercased_alias, term, _, _) do
IO.warn "lowercased alias '#{term}' for a non-property resource"
end
defp filename!(opts) do
if filename = Keyword.get(opts, :file) do
cond do
File.exists?(filename) ->
filename
File.exists?(expanded_filename = Path.expand(filename, @vocabs_dir)) ->
expanded_filename
true ->
raise File.Error, path: filename, action: "find", reason: :enoent
end
end
end
defp rdf_data_env do
import RDF.Sigils, warn: false
__ENV__
end
defp rdf_data_vocab_terms(data, base_iri) do
data
|> RDF.Data.resources
|> Stream.filter(fn
%RDF.IRI{} -> true
_ -> false
end)
|> Stream.map(&to_string/1)
|> Stream.map(&(strip_base_iri(&1, base_iri)))
|> Stream.filter(&vocab_term?/1)
|> Enum.map(&String.to_atom/1)
end
defp group_terms_by_case(terms) do
terms
|> Enum.group_by(fn {term, _} ->
if lowercase?(term),
do: :lowercased,
else: :capitalized
end)
|> Map.new(fn {group, term_mapping} ->
{group, Map.new(term_mapping)}
end)
end
defp lowercase?(term) when is_atom(term),
do: Atom.to_string(term) |> lowercase?
defp lowercase?(term),
do: term =~ ~r/^(_|\p{Ll})/u
defp strip_base_iri(iri, base_iri) do
if String.starts_with?(iri, base_iri) do
String.replace_prefix(iri, base_iri, "")
end
end
defp vocab_term?(""), do: false
defp vocab_term?(term) when is_binary(term) do
not String.contains?(term, "/")
end
defp vocab_term?(_), do: false
@doc false
@spec term_to_iri(String.t, String.t | atom) :: RDF.IRI.t
def term_to_iri(base_iri, term) when is_atom(term),
do: term_to_iri(base_iri, Atom.to_string(term))
def term_to_iri(base_iri, term),
do: RDF.iri(base_iri <> term)
@doc false
@spec vocabulary_namespace?(module) :: boolean
def vocabulary_namespace?(name) do
case Code.ensure_compiled(name) do
{:module, name} -> function_exported?(name, :__base_iri__, 0)
_ -> false
end
end
end
| lib/rdf/vocabulary_namespace.ex | 0.836855 | 0.549641 | vocabulary_namespace.ex | starcoder |
defmodule Plug.Bunyan do
@moduledoc """
A plug for logging JSON messages.
This [Plug](https://github.com/elixir-lang/plug) wraps the standard
[Elixir Logger](http://elixir-lang.org/docs/stable/logger/Logger.html)
and automatically generates JSON log messages.
All requests that are processed by your plug pipeline will log the following
fields, when available:
* `level`
* `timestamp` (UTC)
* `request_id` (when used in conjunction with `Plug.RequestId`)
* `method`
* `host`
* `path`
* `status`
* `logger_name`
* `params`
* `duration`
* `controller` (when used in a Phoenix application)
* `action` (when used in a Phoenix application)
* `format` (when used in a Phoenix application)
To avoid logging sensitive information passed in via HTTP headers or
params, configure headers/params to be filtered within config.exs using
the `filter_parameters` key, e.g.:
```
config :bunyan,
filter_parameters: ["password", "<PASSWORD>"]
```
Parameter filtering is case insensitive and will replace filtered values with
the string `"[FILTERED]"`.
If you wish to log any environment variables with `Plug.Bunyan`, provide
Bunyan config with a list of environment variables to log along with your
desired output names.
For example:
```
config :bunyan,
env_vars: [{"CUSTOM_ENV_VAR", "our_env_var"}]
```
...will output the value of `CUSTOM_ENV_VAR` under a JSON key of `"our_env_var"`
"""
alias Plug.Conn
alias Bunyan.{Params, Timestamp}
@behaviour Plug
require Logger
@env_vars Application.get_env(:bunyan, :env_vars, [])
def init(_), do: false
@spec call(Plug.Conn.t, any) :: Plug.Conn.t
def call(conn, _opts) do
start = :os.timestamp
Conn.register_before_send(conn, fn connection ->
:ok = log(connection, start)
connection
end)
end
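# Typical pipeline usage (a sketch; Plug.RequestId is optional but enables
# the request_id field mentioned in the moduledoc):
#
#     plug Plug.RequestId
#     plug Plug.Bunyan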
@spec log(Plug.Conn.t, {non_neg_integer, non_neg_integer, non_neg_integer}) :: atom
defp log(conn, start) do
Logger.info fn ->
stop = :os.timestamp
duration = :timer.now_diff(stop, start)
%{
"level" => :info,
"method" => conn.method,
"timestamp" => Timestamp.format_string(stop),
"host" => conn.host,
"path" => conn.request_path,
"status" => conn.status |> Integer.to_string,
"duration" => duration |> format_duration |> List.to_string,
"logger_name" => "Plug.Bunyan"
}
|> merge_request_id(Logger.metadata[:request_id])
|> merge_params(conn)
|> merge_phoenix_attributes(conn)
|> merge_headers(conn)
|> merge_env_vars
|> Poison.encode!
end
end
@spec merge_params(map, Plug.Conn.t) :: map
defp merge_params(log, %{params: params}) when params == %{}, do: log
defp merge_params(log, %{params: params}) do
Map.put(log, :params, Params.filter(params))
end
@spec merge_headers(map, Plug.Conn.t) :: map
defp merge_headers(log, %{req_headers: headers}) when headers == [], do: log
defp merge_headers(log, %{req_headers: headers}) do
request_headers = headers
|> Enum.into(%{})
|> Params.filter
Map.put(log, :headers, request_headers)
end
@spec merge_phoenix_attributes(map, Plug.Conn.t) :: map
defp merge_phoenix_attributes(log, %{
private: %{
phoenix_controller: controller,
phoenix_action: action,
phoenix_format: format
}
}) do
Map.merge(log, %{"controller" => controller, "action" => action, "format" => format})
end
defp merge_phoenix_attributes(log, _), do: log
@spec merge_env_vars(map) :: map
defp merge_env_vars(log) do
vars = Enum.reduce(@env_vars, %{}, fn({env_var, output_name}, m) ->
Map.put(m, output_name, System.get_env(env_var))
end)
Map.put(log, :env_vars, vars)
end
@spec format_duration(non_neg_integer) :: IO.chardata
defp format_duration(duration) when duration > 1000 do
[duration |> div(1000) |> Integer.to_string, "ms"]
end
defp format_duration(duration) do
[duration |> Integer.to_string, "Β΅s"]
end
defp merge_request_id(log, nil), do: log
defp merge_request_id(log, request_id) do
Map.put(log, :request_id, request_id)
end
end
| lib/plug_bunyan.ex | 0.867528 | 0.849971 | plug_bunyan.ex | starcoder |
defmodule Trash.Query do
@moduledoc """
Provides query methods for working with records that implement `Trash`.
Schemas should first include `Trash.Schema` and/or manually add the necessary
fields for these to work.
"""
require Ecto.Query
alias Ecto.Query
alias Ecto.Queryable
@doc """
Adds trashable fields to select.
This ensures that both trashable fields are included in the select statement
by using `Ecto.Query.select_merge/3` to merge in the fields.
For a list of the current trashable fields, see
`Trash.Schema.trashable_fields/0`.
This loads `discarded_at` from the database and computes the boolean for
`discarded?` from the SQL expression `discarded_at IS NOT NULL`.
Note: Since `discarded?` is a virtual field, without using this function,
it'll be `nil` by default.
## Examples
iex> Trash.Query.select_trashable(Post) |> Repo.all()
[%Post{title: "<NAME>", discarded_at: %DateTime{}, discarded?: true}]
iex> Trash.Query.select_trashable(Post) |> Repo.all()
[%Post{title: "<NAME>", discarded_at: nil, discarded?: false}]
"""
@spec select_trashable(queryable :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def select_trashable(queryable) do
queryable
|> Queryable.to_query()
|> Query.select_merge([t], %{
discarded_at: t.discarded_at,
discarded?: not is_nil(t.discarded_at)
})
end
@doc """
Adds a where clause for returning discarded records.
This adds a where clause equivalent to the SQL expression `discarded_at IS NOT
NULL` which denotes a record that has been discarded.
## Examples
iex> Trash.Query.where_discarded(Post) |> Repo.all()
[%Post{title: "<NAME>", discarded_at: %DateTime{}, discarded?: nil}]
"""
@spec where_discarded(queryable :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def where_discarded(queryable) do
queryable
|> Queryable.to_query()
|> Query.where([t], not is_nil(t.discarded_at))
end
@doc """
Adds a where clause for returning kept records.
This adds a where clause equivalent to the SQL expression `discarded_at IS
NULL` which denotes a record that has been kept.
## Examples
iex> Trash.Query.where_kept(Post) |> Repo.all()
[%Post{title: "<NAME>", discarded_at: nil, discarded?: nil}]
"""
@spec where_kept(queryable :: Ecto.Queryable.t()) :: Ecto.Queryable.t()
def where_kept(queryable) do
queryable
|> Queryable.to_query()
|> Query.where([t], is_nil(t.discarded_at))
end
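# Composition sketch (hypothetical `Post` schema and `Repo`): the builders
# compose with each other and with regular Ecto queries:
#
#     Post
#     |> Trash.Query.where_kept()
#     |> Trash.Query.select_trashable()
#     |> Repo.all()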
end
| lib/trash/query.ex | 0.908056 | 0.568386 | query.ex | starcoder |
defmodule Screens.DupScreenData.Data do
@moduledoc false
alias Screens.Config.Dup
def choose_alert([]), do: nil
def choose_alert(alerts) do
# Prioritize shuttle alerts when one exists; otherwise just choose the first in the list.
Enum.find(alerts, hd(alerts), &(&1.effect == :shuttle))
end
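# Note: the :dup_alert_headsign_matchers config read below is expected to map
# a parent stop id to a list of {informed_stops, not_informed_stops, headsign}
# tuples (shape inferred from the Enum.find_value/3 call in interpret_alert/3).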
def interpret_alert(alert, [parent_stop_id], pill) do
informed_stop_ids = Enum.into(alert.informed_entities, MapSet.new(), & &1.stop)
{region, headsign} =
:screens
|> Application.get_env(:dup_alert_headsign_matchers)
|> Map.get(parent_stop_id)
|> Enum.find_value({:inside, nil}, fn {informed, not_informed, headsign} ->
if alert_region_match?(to_set(informed), to_set(not_informed), informed_stop_ids),
do: {:boundary, headsign},
else: false
end)
%{
cause: alert.cause,
effect: alert.effect,
region: region,
headsign: headsign,
pill: pill
}
end
def alert_routes_at_station(alert, [parent_stop_id]) do
filter_fn = fn
%{stop: stop_id} -> stop_id == parent_stop_id
_ -> false
end
route_fn = fn
%{route: route_id} -> [route_id]
_ -> []
end
alert.informed_entities
|> Enum.filter(filter_fn)
|> Enum.flat_map(route_fn)
end
def station_line_count(%Dup.Departures{sections: [section | _]}) do
stop_id = hd(section.stop_ids)
if stop_id in Application.get_env(:screens, :two_line_stops), do: 2, else: 1
end
def limit_three_departures([[d1, d2], [d3, _d4]]), do: [[d1, d2], [d3]]
def limit_three_departures([[d1, d2, d3, _d4]]), do: [[d1, d2, d3]]
def limit_three_departures(sections), do: sections
def response_type([], _, _), do: :departures
def response_type(alerts, line_count, rotation_index) do
if Enum.any?(alerts, &(&1.effect == :station_closure)) do
:fullscreen_alert
else
response_type_helper(alerts, line_count, rotation_index)
end
end
defp to_set(stop_id) when is_binary(stop_id), do: MapSet.new([stop_id])
defp to_set(stop_ids) when is_list(stop_ids), do: MapSet.new(stop_ids)
defp to_set(%MapSet{} = already_a_set), do: already_a_set
defp alert_region_match?(informed, not_informed, informed_stop_ids) do
MapSet.subset?(informed, informed_stop_ids) and
MapSet.disjoint?(not_informed, informed_stop_ids)
end
defp response_type_helper([alert], 1, rotation_index) do
case {alert.region, rotation_index} do
{:inside, _} -> :fullscreen_alert
{:boundary, "0"} -> :partial_alert
{:boundary, "1"} -> :fullscreen_alert
end
end
defp response_type_helper([_alert], 2, rotation_index) do
case rotation_index do
"0" -> :partial_alert
"1" -> :fullscreen_alert
end
end
defp response_type_helper([_alert1, _alert2], 2, _rotation_index) do
:fullscreen_alert
end
end
| lib/screens/dup_screen_data/data.ex | 0.584153 | 0.420391 | data.ex | starcoder |
defmodule AutoApi.DiagnosticsState do
@moduledoc """
Keeps Diagnostics state
engine_oil_temperature: Engine oil temperature in Celsius, which can be negative
"""
alias AutoApi.{CommonData, State, UnitType}
use AutoApi.State, spec_file: "diagnostics.json"
@type check_control_message :: %{
id: integer,
remaining_time: UnitType.duration(),
text: String.t(),
status: String.t()
}
@type confirmed_trouble_code :: %{
id: String.t(),
ecu_address: String.t(),
ecu_variant_name: String.t(),
status: String.t()
}
@type diesel_exhaust_filter_status :: %{
status: :unknown | :normal_operation | :overloaded | :at_limit | :over_limit,
component:
:unknown
| :exhaust_filter
| :diesel_particulate_filter
| :overboost_code_regulator
| :off_board_regeneration,
cleaning: :unknown | :in_progress | :complete | :interrupted
}
@type fluid_level :: :low | :filled
@type location_wheel ::
:front_left
| :front_right
| :rear_right
| :rear_left
| :rear_right_outer
| :rear_left_outer
@type oem_trouble_code_value :: %{
id: String.t(),
key_value: %{
key: String.t(),
value: String.t()
}
}
@type tire_pressure :: %{
location: location_wheel(),
pressure: UnitType.pressure()
}
@type tire_pressure_status :: %{
location: location_wheel(),
status: :normal | :low | :alert
}
@type tire_temperature :: %{
location: location_wheel(),
temperature: UnitType.temperature()
}
@type trouble_code :: %{
occurrences: integer,
id: String.t(),
ecu_id: String.t(),
status: String.t(),
system: :unknown | :body | :chassis | :powertrain | :network
}
@type wheel_rpm :: %{
location: location_wheel(),
rpm: UnitType.angular_velocity()
}
@type t :: %__MODULE__{
# Deprecated
mileage: State.property(UnitType.length()),
engine_oil_temperature: State.property(UnitType.temperature()),
speed: State.property(UnitType.speed()),
engine_rpm: State.property(UnitType.angular_velocity()),
fuel_level: State.property(float),
estimated_range: State.property(UnitType.length()),
washer_fluid_level: State.property(fluid_level()),
battery_voltage: State.property(UnitType.electric_potential_difference()),
adblue_level: State.property(float),
distance_since_reset: State.property(UnitType.length()),
distance_since_start: State.property(UnitType.length()),
fuel_volume: State.property(UnitType.volume()),
anti_lock_braking: State.property(CommonData.activity()),
engine_coolant_temperature: State.property(UnitType.temperature()),
# Deprecated
engine_total_operating_hours: State.property(UnitType.duration()),
engine_total_fuel_consumption: State.property(UnitType.volume()),
brake_fluid_level: State.property(fluid_level()),
engine_torque: State.property(float),
engine_load: State.property(float),
wheel_based_speed: State.property(UnitType.length()),
battery_level: State.property(float),
check_control_messages: State.multiple_property(check_control_message()),
tire_pressures: State.multiple_property(tire_pressure()),
tire_temperatures: State.multiple_property(tire_temperature()),
wheel_rpms: State.multiple_property(wheel_rpm()),
trouble_codes: State.multiple_property(trouble_code()),
# Deprecated
mileage_meters: State.property(UnitType.length()),
odometer: State.property(UnitType.length()),
engine_total_operating_time: State.property(UnitType.duration()),
tire_pressure_statuses: State.multiple_property(tire_pressure_status()),
brake_lining_wear_pre_warning: State.property(CommonData.activity()),
engine_oil_life_remaining: State.property(float),
oem_trouble_code_values: State.multiple_property(oem_trouble_code_value()),
diesel_exhaust_fluid_range: State.property(UnitType.length()),
diesel_particulate_filter_soot_level: State.property(float),
confirmed_trouble_codes: State.multiple_property(confirmed_trouble_code()),
diesel_exhaust_filter_status: State.property(diesel_exhaust_filter_status())
}
@doc """
Build state based on binary value
iex> bin = <<22, 0, 11, 1, 0, 8, 64, 40, 0, 0, 0, 0, 0, 0>>
iex> AutoApi.DiagnosticsState.from_bin(bin)
%AutoApi.DiagnosticsState{engine_load: %AutoApi.Property{data: 12.0}}
"""
@spec from_bin(binary) :: __MODULE__.t()
def from_bin(bin) do
parse_bin_properties(bin, %__MODULE__{})
end
@spec to_bin(__MODULE__.t()) :: binary
@doc """
Parse state to bin
iex> state = %AutoApi.DiagnosticsState{engine_load: %AutoApi.Property{data: 12}}
iex> AutoApi.DiagnosticsState.to_bin(state)
<<22, 0, 11, 1, 0, 8, 64, 40, 0, 0, 0, 0, 0, 0>>
"""
def to_bin(%__MODULE__{} = state) do
parse_state_properties(state)
end
end
| lib/auto_api/states/diagnostics_state.ex | 0.786787 | 0.424173 | diagnostics_state.ex | starcoder |
defmodule Timex.Parse.DateTime.Parser do
@moduledoc """
This is the base plugin behavior for all Timex date/time string parsers.
"""
import Combine.Parsers.Base, only: [eof: 0, map: 2, pipe: 2]
alias Timex.{Timezone, TimezoneInfo, AmbiguousDateTime, AmbiguousTimezoneInfo}
alias Timex.Parse.ParseError
alias Timex.Parse.DateTime.Tokenizers.{Directive, Default, Strftime}
@doc """
Parses a date/time string using the default parser.
## Examples
iex> use Timex
...> {:ok, dt} = #{__MODULE__}.parse("2014-07-29T00:20:41.196Z", "{ISO:Extended:Z}")
...> dt.year
2014
iex> dt.month
7
iex> dt.day
29
iex> dt.time_zone
"Etc/UTC"
"""
@spec parse(binary, binary) :: {:ok, DateTime.t() | NaiveDateTime.t()} | {:error, term}
def parse(date_string, format_string)
when is_binary(date_string) and is_binary(format_string),
do: parse(date_string, format_string, Default)
def parse(_, _),
do: {:error, :badarg}
@doc """
Parses a date/time string using the provided tokenizer. Tokenizers must implement the
`Timex.Parse.DateTime.Tokenizer` behaviour.
## Examples
iex> use Timex
...> {:ok, dt} = #{__MODULE__}.parse("2014-07-29T00:30:41.196-02:00", "{ISO:Extended}", Timex.Parse.DateTime.Tokenizers.Default)
...> dt.year
2014
iex> dt.month
7
iex> dt.day
29
iex> dt.time_zone
"Etc/UTC-2"
"""
@spec parse(binary, binary, atom) :: {:ok, DateTime.t() | NaiveDateTime.t()} | {:error, term}
def parse(date_string, format_string, tokenizer)
when is_binary(date_string) and is_binary(format_string) do
try do
{:ok, parse!(date_string, format_string, tokenizer)}
rescue
err in [ParseError] ->
{:error, err.message}
end
end
def parse(_, _, _), do: {:error, :badarg}
@doc """
Same as `parse/2` and `parse/3`, but raises on error.
"""
@spec parse!(String.t(), String.t(), atom | nil) :: DateTime.t() | NaiveDateTime.t() | no_return
def parse!(date_string, format_string, tokenizer \\ Default)
def parse!(date_string, format_string, :strftime),
do: parse!(date_string, format_string, Strftime)
def parse!(date_string, format_string, tokenizer)
when is_binary(date_string) and is_binary(format_string) and is_atom(tokenizer) do
case tokenizer.tokenize(format_string) do
{:error, err} when is_binary(err) ->
raise ParseError, message: err
{:error, err} ->
raise ParseError, message: err
{:ok, []} ->
raise ParseError,
message: "There were no parsing directives in the provided format string."
{:ok, directives} ->
case date_string do
"" ->
raise ParseError, message: "Input datetime string cannot be empty!"
_ ->
case do_parse(date_string, directives, tokenizer) do
{:ok, dt} ->
dt
{:error, reason} when is_binary(reason) ->
raise ParseError, message: reason
{:error, reason} ->
raise ParseError, message: reason
end
end
end
end
# Special case iso8601/rfc3339 for performance
defp do_parse(str, [%Directive{:type => type}], _tokenizer)
when type in [:iso_8601_extended, :iso_8601_extended_z, :rfc_3339, :rfc_3339z] do
case Combine.parse(str, Timex.Parse.DateTime.Parsers.ISO8601Extended.parse()) do
{:error, _} = err ->
err
[parts] when is_list(parts) ->
case Enum.into(parts, %{}) do
%{year4: y, month: m, day: d, hour24: h, zname: tzname} = mapped ->
mm = Map.get(mapped, :min, 0)
ss = Map.get(mapped, :sec, 0)
us = Map.get(mapped, :sec_fractional, {0, 0})
naive = Timex.NaiveDateTime.new!(y, m, d, h, mm, ss, us)
with %DateTime{} = datetime <- Timex.Timezone.convert(naive, tzname) do
{:ok, datetime}
end
%{year4: y, month: m, day: d, hour24: h} = mapped ->
mm = Map.get(mapped, :min, 0)
ss = Map.get(mapped, :sec, 0)
us = Map.get(mapped, :sec_fractional, {0, 0})
NaiveDateTime.new(y, m, d, h, mm, ss, us)
end
end
end
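# The generic path below runs every directive's parser, then replays the
# extracted tokens in weight order so that timezone tokens and :force_utc
# are applied after the date/time fields they qualify.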
defp do_parse(str, directives, tokenizer) do
parsers =
directives
|> Stream.map(fn %Directive{weight: weight, parser: parser} ->
map(parser, &{&1, weight})
end)
|> Stream.filter(fn
nil -> false
_ -> true
end)
|> Enum.reverse()
case Combine.parse(str, pipe([eof() | parsers] |> Enum.reverse(), & &1)) do
[results] when is_list(results) ->
results
|> extract_parse_results
|> Stream.with_index()
|> Enum.sort_by(fn
# If :force_utc exists, make sure it is applied last
{{{:force_utc, true}, _}, _} -> 9999
# Timezones must always be applied after other date/time tokens ->
{{{tz, _}, _}, _} when tz in [:zname, :zoffs, :zoffs_colon, :zoffs_sec] -> 9998
# If no weight is set, use the index as its weight
{{{_token, _value}, 0}, i} -> i
# Use the directive weight
{{{_token, _value}, weight}, _} -> weight
end)
|> Stream.flat_map(fn {{token, _}, _} -> [token] end)
|> Enum.filter(&Kernel.is_tuple/1)
|> apply_directives(tokenizer)
{:error, _} = err ->
err
end
end
defp extract_parse_results(parse_results), do: extract_parse_results(parse_results, [])
defp extract_parse_results([], acc), do: Enum.reverse(acc)
defp extract_parse_results([{tokens, weight} | rest], acc) when is_list(tokens) do
extracted =
extract_parse_results(tokens)
|> Enum.map(fn {{token, value}, _weight} -> {{token, value}, weight} end)
|> Enum.reverse()
extract_parse_results(rest, extracted ++ acc)
end
defp extract_parse_results([{{token, value}, weight} | rest], acc) when is_atom(token) do
extract_parse_results(rest, [{{token, value}, weight} | acc])
end
defp extract_parse_results([{token, value} | rest], acc) when is_atom(token) do
extract_parse_results(rest, [{{token, value}, 0} | acc])
end
defp extract_parse_results([[{token, value}] | rest], acc) when is_atom(token) do
extract_parse_results(rest, [{{token, value}, 0} | acc])
end
defp extract_parse_results([h | rest], acc) when is_list(h) do
extracted = Enum.reverse(extract_parse_results(h))
extract_parse_results(rest, extracted ++ acc)
end
defp extract_parse_results([_ | rest], acc) do
extract_parse_results(rest, acc)
end
# Constructs a DateTime from the parsed tokens
defp apply_directives([], _),
do: {:ok, Timex.DateTime.Helpers.empty()}
defp apply_directives(tokens, tokenizer),
do: apply_directives(tokens, Timex.DateTime.Helpers.empty(), tokenizer)
defp apply_directives([], datetime, _) do
with :ok <- validate_datetime(datetime) do
{:ok, datetime}
end
end
defp apply_directives([{token, value} | tokens], date, tokenizer) do
case update_date(date, token, value, tokenizer) do
{:error, _} = error ->
error
updated ->
apply_directives(tokens, updated, tokenizer)
end
end
defp validate_datetime(%{year: y, month: m, day: d} = datetime) do
with {:date, true} <- {:date, :calendar.valid_date(y, m, d)},
{:ok, %Time{}} <-
Time.new(datetime.hour, datetime.minute, datetime.second, datetime.microsecond) do
:ok
else
{:date, _} ->
{:error, :invalid_date}
{:error, _} = err ->
err
end
end
defp validate_datetime(%AmbiguousDateTime{before: before_dt, after: after_dt}) do
with :ok <- validate_datetime(before_dt),
:ok <- validate_datetime(after_dt) do
:ok
else
{:error, _} = err ->
err
end
end
# Given a date, a token, and the value for that token, update the
# date according to the rules for that token and the provided value
defp update_date(%AmbiguousDateTime{} = adt, token, value, tokenizer) when is_atom(token) do
bd = update_date(adt.before, token, value, tokenizer)
ad = update_date(adt.after, token, value, tokenizer)
%{adt | :before => bd, :after => ad}
end
defp update_date(%{year: year, hour: hh} = date, token, value, tokenizer) when is_atom(token) do
case token do
# Formats
clock when clock in [:kitchen, :strftime_iso_kitchen] ->
date =
cond do
date == Timex.DateTime.Helpers.empty() ->
{{y, m, d}, _} = :calendar.universal_time()
%{date | :year => y, :month => m, :day => d}
true ->
date
end
case apply_directives(value, date, tokenizer) do
{:error, _} = err ->
err
{:ok, date} when clock == :kitchen ->
%{date | :second => 0, :microsecond => {0, 0}}
{:ok, date} ->
%{date | :microsecond => {0, 0}}
end
# Years
:century ->
century = Timex.century(%{date | :year => year})
year_shifted = year + (value - century) * 100
%{date | :year => year_shifted}
y when y in [:year2, :iso_year2] ->
{{y, _, _}, _} = :calendar.universal_time()
current_century = Timex.century(y)
year_shifted = value + (current_century - 1) * 100
%{date | :year => year_shifted}
y when y in [:year4, :iso_year4] ->
date = %{date | :year => value}
# Special case for UNIX format dates, where the year is parsed after the timezone,
# so we must lookup the timezone again to ensure it's properly set
case Map.get(date, :time_zone) do
time_zone when is_binary(time_zone) ->
# Need to validate the date/time before doing timezone operations
with :ok <- validate_datetime(date) do
seconds_from_zeroyear = Timex.to_gregorian_seconds(date)
case Timezone.resolve(time_zone, seconds_from_zeroyear) do
%TimezoneInfo{} = tz ->
Timex.to_datetime(date, tz)
%AmbiguousTimezoneInfo{before: b, after: a} ->
bd = Timex.to_datetime(date, b)
ad = Timex.to_datetime(date, a)
%AmbiguousDateTime{before: bd, after: ad}
end
end
nil ->
date
end
# Months
:month ->
%{date | :month => value}
month when month in [:mshort, :mfull] ->
%{date | :month => Timex.month_to_num(value)}
# Days
:day ->
%{date | :day => value}
:oday when is_integer(value) and value >= 0 ->
Timex.from_iso_day(value, date)
:wday_mon ->
current_day = Timex.weekday(date)
cond do
current_day == value -> date
current_day > value -> Timex.shift(date, days: current_day - value)
current_day < value -> Timex.shift(date, days: value - current_day)
end
:wday_sun ->
current_day = Timex.weekday(date) - 1
cond do
current_day == value -> date
current_day > value -> Timex.shift(date, days: current_day - value)
current_day < value -> Timex.shift(date, days: value - current_day)
end
day when day in [:wdshort, :wdfull] ->
%{date | :day => Timex.day_to_num(value)}
# Weeks
:iso_weeknum ->
{year, _, weekday} = Timex.iso_triplet(date)
%Date{year: y, month: m, day: d} = Timex.from_iso_triplet({year, value, weekday})
%{date | :year => y, :month => m, :day => d}
week_num when week_num in [:week_mon, :week_sun] ->
reset = %{date | :month => 1, :day => 1}
reset |> Timex.shift(weeks: value)
:weekday ->
current_dow = Timex.Date.day_of_week(date, :monday)
if current_dow == value do
date
else
Timex.shift(date, days: value - current_dow)
end
# Hours
hour when hour in [:hour24, :hour12] ->
%{date | :hour => value}
:min ->
%{date | :minute => value}
:sec ->
case value do
60 ->
Timex.shift(date, minutes: 1)
value ->
%{date | :second => value}
end
:sec_fractional ->
case value do
"" ->
date
n when is_number(n) ->
%{date | :microsecond => Timex.DateTime.Helpers.construct_microseconds(n, -1)}
{_n, _precision} = us ->
%{date | :microsecond => us}
end
:us ->
%{date | :microsecond => Timex.DateTime.Helpers.construct_microseconds(value, -1)}
:ms ->
%{date | :microsecond => Timex.DateTime.Helpers.construct_microseconds(value * 1_000, -1)}
:sec_epoch ->
DateTime.from_unix!(value)
am_pm when am_pm in [:am, :AM] ->
cond do
hh == 24 ->
%{date | :hour => 0}
hh == 12 and String.downcase(value) == "am" ->
%{date | :hour => 0}
hh in 1..11 and String.downcase(value) == "pm" ->
%{date | :hour => hh + 12}
true ->
date
end
# Timezones
:zoffs ->
with :ok <- validate_datetime(date) do
case value do
<<sign::utf8, _::binary-size(2)-unit(8)>> = zone when sign in [?+, ?-] ->
Timex.to_datetime(date, zone)
<<sign::utf8, _::binary-size(4)-unit(8)>> = zone when sign in [?+, ?-] ->
Timex.to_datetime(date, zone)
_ ->
{:error, {:invalid_zoffs, value}}
end
end
:zname ->
with :ok <- validate_datetime(date) do
Timex.to_datetime(date, value)
end
:zoffs_colon ->
with :ok <- validate_datetime(date) do
case value do
<<sign::utf8, _::binary-size(2)-unit(8), ?:, _::binary-size(2)-unit(8)>> = zone
when sign in [?+, ?-] ->
Timex.to_datetime(date, zone)
_ ->
{:error, {:invalid_zoffs_colon, value}}
end
end
:zoffs_sec ->
with :ok <- validate_datetime(date) do
case value do
<<sign::utf8, _::binary-size(2)-unit(8), ?:, _::binary-size(2)-unit(8), ?:,
_::binary-size(2)-unit(8)>> = zone
when sign in [?+, ?-] ->
Timex.to_datetime(date, zone)
_ ->
{:error, {:invalid_zoffs_sec, value}}
end
end
:force_utc ->
with :ok <- validate_datetime(date) do
Timex.to_datetime(date, "Etc/UTC")
end
:literal ->
date
:week_of_year_iso ->
shift_to_week_of_year(:iso, date, value)
:week_of_year_mon ->
shift_to_week_of_year(:monday, date, value)
:week_of_year_sun ->
shift_to_week_of_year(:sunday, date, value)
_ ->
case tokenizer.apply(date, token, value) do
{:ok, date} ->
date
{:error, _} = err ->
err
_ ->
{:error, "Unrecognized token: #{token}"}
end
end
end
defp shift_to_week_of_year(:iso, %{year: y} = datetime, value) when is_integer(value) do
{dow11, _, _} = Timex.Date.day_of_week(y, 1, 1, :monday)
{dow14, _, _} = Timex.Date.day_of_week(y, 1, 4, :monday)
# See https://en.wikipedia.org/wiki/ISO_week_date#Calculating_an_ordinal_or_month_date_from_a_week_date
ordinal = value * 7 + dow11 - (dow14 + 3)
{year, month, day} = Timex.Helpers.iso_day_to_date_tuple(y, ordinal)
%Date{year: year, month: month, day: day} =
Timex.Date.beginning_of_week(Timex.Date.new!(year, month, day))
%{datetime | year: year, month: month, day: day}
end
defp shift_to_week_of_year(weekstart, %{year: y} = datetime, value) when is_integer(value) do
new_year = Timex.Date.new!(y, 1, 1)
week_start = Timex.Date.beginning_of_week(new_year, weekstart)
# This date can be calculated by taking the day number of the year,
# shifting the day number of the year down by the number of days which
# occurred in the previous year, then dividing by 7
day_num =
if Date.compare(week_start, new_year) == :lt do
prev_year_day_start = Date.day_of_year(week_start)
prev_year_day_end = Date.day_of_year(Timex.Date.new!(week_start.year, 12, 31))
shift = prev_year_day_end - prev_year_day_start
shift + value * 7
else
value * 7
end
datetime = Timex.to_naive_datetime(datetime)
Timex.shift(%{datetime | month: 1, day: 1}, days: day_num)
end
end
| lib/parse/datetime/parser.ex | 0.912548 | 0.502869 | parser.ex | starcoder |
defmodule Scrabble do
@moduledoc """
An Elixir version of the famous game
"""
def default_range do
7
end
@doc """
The board template, i.e. top-left part of the game board
"""
def blank_board_template(range) do
for i <- 0..range, j <- 0..range do
[i, j]
end
end
def make_triple_word_square(x, y) do
[x, y, :word_triple]
end
def make_double_word_square(x, y) do
[x, y, :word_double]
end
def make_double_letter_square(x, y) do
[x, y, :letter_double]
end
def make_triple_letter_square(x, y) do
[x, y, :letter_triple]
end
def make_regular_square(x, y) do
[x, y, :regular]
end
def make_diagonal_squares(x) do
case x do
0 -> make_triple_word_square(x, x)
5 -> make_triple_letter_square(x, x)
6 -> make_double_letter_square(x, x)
_ -> make_double_word_square(x, x)
end
end
def make_other_special_squares(x, y) do
case x + y do
6 -> make_triple_letter_square(x, y)
_ -> make_double_letter_square(x, y)
end
end
@doc """
The default board template, i.e. top-left part of the classic game board
"""
def default_board_template do
for [x, y] <- default_range() |> blank_board_template() do
case abs(x - y) do
0 ->
make_diagonal_squares(x)
3 when x == 0 or y == 0 ->
make_double_letter_square(x, y)
4 when x != 0 and y != 0 ->
make_other_special_squares(x, y)
7 ->
make_triple_word_square(x, y)
_ ->
make_regular_square(x, y)
end
end
end
def apply_x_symetry(template, x_length) do
Enum.flat_map(template, fn square ->
[x, y, type] = square
[square, [2 * x_length - x, y, type]]
end)
|> Enum.uniq()
end
# I know: it repeats previous logic
def apply_y_symetry(template, y_length) do
Enum.flat_map(template, fn square ->
[x, y, type] = square
[square, [x, 2 * y_length - y, type]]
end)
|> Enum.uniq()
end
@doc """
Creates the board. It takes a list defining the top-left quarter of the board and generates the whole board by applying two consecutive axial symmetries.
template_range is the size of your board's sides/4 + 1 (to account for the center, which can't be replicated by symmetry).
"""
def create_board(template, template_range) do
apply_x_symetry(template, template_range) |> apply_y_symetry(template_range)
end
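# Sketch: with the default range of 7, the 8x8 top-left template is mirrored
# across x and then y, and Enum.uniq/1 de-duplicates the shared centre
# row/column, yielding the full 15x15 classic board:
#
#     Scrabble.default_board_template() |> Scrabble.create_board(7)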
def show_triple(board) do
Enum.filter(board, fn [_x, _y, type] ->
type == :letter_triple
end)
end
def show_double(board) do
Enum.filter(board, fn [_x, _y, type] ->
type == :letter_double
end)
end
@doc """
Creates a classic scrabble board
"""
def create_default_board() do
default_board_template() |> create_board(default_range())
end
end
| lib/scrabble.ex | 0.781456 | 0.688743 | scrabble.ex | starcoder |
defmodule SpandexOTLP.Conversion do
@moduledoc false
alias Spandex.Span
alias SpandexOTLP.Opentelemetry.Proto.Trace.V1.{InstrumentationLibrarySpans, ResourceSpans}
alias SpandexOTLP.Opentelemetry.Proto.Common.V1.{AnyValue, KeyValue}
alias SpandexOTLP.Opentelemetry.Proto.Trace.V1.Span, as: OTLPSpan
@resources Application.compile_env(:spandex_otlp, SpandexOTLP)[:resources] || []
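# Config sketch (hypothetical values) for the compile-time resource list read
# above; each {key, value} pair becomes a resource attribute:
#
#     config :spandex_otlp, SpandexOTLP,
#       resources: [{"service.name", "my-app"}]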
@doc false
@spec traces_to_resource_spans([Spandex.Trace.t()]) :: [ResourceSpans.t()]
def traces_to_resource_spans(spandex_traces) do
Enum.map(spandex_traces, fn spandex_trace ->
%ResourceSpans{
resource: resource(),
instrumentation_library_spans: [
instrumentation_library_spans(spandex_trace)
],
schema_url: ""
}
end)
end
defp instrumentation_library_spans(spandex_trace) do
%InstrumentationLibrarySpans{
instrumentation_library: %{
name: "SpandexOTLP",
version: library_version()
},
spans: spans(spandex_trace),
schema_url: ""
}
end
def convert_span(span) do
%SpandexOTLP.Opentelemetry.Proto.Trace.V1.Span{
trace_id: span.trace_id,
span_id: span.id,
trace_state: "",
parent_span_id: span.parent_id,
name: span.name,
kind: :SPAN_KIND_INTERNAL,
start_time_unix_nano: span.start,
end_time_unix_nano: span.completion_time,
attributes: attributes_from_span_tags(span),
dropped_attributes_count: 0,
events: [],
dropped_events_count: 0,
links: [],
dropped_links_count: 0,
status: convert_status(span)
}
end
defp convert_status(%Span{error: nil}) do
%SpandexOTLP.Opentelemetry.Proto.Trace.V1.Status{
deprecated_code: :DEPRECATED_STATUS_CODE_OK,
message: nil,
code: :STATUS_CODE_OK
}
end
defp convert_status(%Span{error: error}) do
%SpandexOTLP.Opentelemetry.Proto.Trace.V1.Status{
deprecated_code: :DEPRECATED_STATUS_CODE_UNAVAILABLE,
message: error_message(error),
code: :STATUS_CODE_ERROR
}
end
@spec error_message(keyword()) :: String.t() | nil
defp error_message(error) do
cond do
Keyword.has_key?(error, :exception) ->
Exception.message(error[:exception])
Keyword.has_key?(error, :message) ->
error[:message]
true ->
nil
end
end
@spec spans(Spandex.Trace.t()) :: [OTLPSpan.t()]
defp spans(spandex_trace) do
Enum.map(spandex_trace.spans, &convert_span/1)
end
defp resource_attribute(%Span{resource: nil}), do: []
defp resource_attribute(%Span{resource: resource}), do: [key_value("resource", resource)]
defp sql_attributes(%Span{sql_query: nil}), do: []
defp sql_attributes(%Span{sql_query: sql_query}) do
[
key_value("sql.query", sql_query[:query]),
key_value("sql.rows", sql_query[:rows]),
key_value("sql.db", sql_query[:db])
]
end
defp error_attributes(%Span{error: error}) do
%{}
|> add_error_type(error[:exception])
|> add_error_message(error[:exception])
|> add_error_stacktrace(error[:stacktrace])
|> Map.to_list()
|> Enum.map(fn {key, value} -> key_value(key, value) end)
end
@spec add_error_type(map(), Exception.t() | nil) :: map()
defp add_error_type(attrs, nil), do: attrs
defp add_error_type(attrs, exception) do
Map.put(attrs, "exception.type", inspect(exception.__struct__))
end
@spec add_error_message(map(), Exception.t() | nil) :: map()
defp add_error_message(attrs, nil), do: attrs
defp add_error_message(attrs, exception) do
Map.put(attrs, "exception.message", Exception.message(exception))
end
@spec add_error_stacktrace(map(), Exception.stacktrace() | nil) :: map()
defp add_error_stacktrace(attrs, nil), do: attrs
defp add_error_stacktrace(attrs, stacktrace) do
Map.put(attrs, "exception.stacktrace", Exception.format_stacktrace(stacktrace))
end
defp attributes_from_span_tags(spandex_span) do
spandex_span.tags
|> Enum.map(fn {key, value} ->
key_value(key, value)
end)
|> Kernel.++(resource_attribute(spandex_span))
|> Kernel.++(sql_attributes(spandex_span))
|> Kernel.++(error_attributes(spandex_span))
end
def convert_key(key) when is_atom(key), do: Atom.to_string(key)
def convert_key(key) when is_binary(key), do: key
def convert_key(key), do: inspect(key)
defp convert_value(value) when is_binary(value) do
%AnyValue{value: {:string_value, value}}
end
defp convert_value(value) when is_integer(value) do
%AnyValue{value: {:int_value, value}}
end
defp convert_value(value) when is_boolean(value) do
%AnyValue{value: {:bool_value, value}}
end
defp convert_value(value), do: convert_value(inspect(value))
defp key_value(key, value) do
%KeyValue{
key: convert_key(key),
value: convert_value(value)
}
end
defp resource do
config_resources = Enum.map(@resources, fn {k, v} -> key_value(k, v) end)
%SpandexOTLP.Opentelemetry.Proto.Resource.V1.Resource{
attributes:
config_resources ++
[
key_value("library.name", "SpandexOTLP"),
key_value("library.language", "elixir"),
key_value("library.version", library_version())
],
dropped_attributes_count: 0
}
end
defp library_version do
{:ok, version} = :application.get_key(:spandex_otlp, :vsn)
:binary.list_to_bin(version)
end
end
| lib/spandex_otlp/conversion.ex | 0.671578 | 0.424382 | conversion.ex | starcoder |
defmodule Stargate.Receiver.Supervisor do
@moduledoc """
Defines a supervisor for the `Stargate.Receiver` reader
and consumer connections and the associated GenStage pipeline
for processing and acknowledging messages received on the connection.
The top-level `Stargate.Supervisor` passes the shared connection and
`:consumer` or `:reader` configurations to the receiver supervisor
to delegate management of all receiving processes.
"""
use Supervisor
import Stargate.Supervisor, only: [via: 2]
@doc """
Starts a `Stargate.Receiver.Supervisor` and links it to the calling
process.
"""
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(args) do
type = Keyword.fetch!(args, :type)
registry = Keyword.fetch!(args, :registry)
tenant = Keyword.fetch!(args, :tenant)
namespace = Keyword.fetch!(args, :namespace)
topic = Keyword.fetch!(args, :topic)
Supervisor.start_link(__MODULE__, args,
name: via(registry, :"sg_#{type}_sup_#{tenant}_#{namespace}_#{topic}")
)
end
@doc """
  Generates the list of child processes and starts them under the
  supervisor with a `:one_for_all` strategy, ensuring messages are not
  dropped if any single stage in the pipeline fails.
The processors stage is configurable to a desired number of processes
for parallelizing complex or long-running message handling operations.
"""
@impl Supervisor
def init(init_args) do
children =
[
{Stargate.Receiver.Dispatcher, init_args},
processors(init_args),
{Stargate.Receiver.Acknowledger, init_args}
]
|> List.flatten()
Supervisor.init(children, strategy: :one_for_all)
end
defp processors(args) do
count = Keyword.get(args, :processors, 1)
Enum.map(0..(count - 1), &to_child_spec(&1, args))
end
defp to_child_spec(number, init_args) do
tenant = Keyword.fetch!(init_args, :tenant)
ns = Keyword.fetch!(init_args, :namespace)
topic = Keyword.fetch!(init_args, :topic)
name = :"sg_processor_#{tenant}_#{ns}_#{topic}_#{number}"
named_args = Keyword.put(init_args, :processor_name, name)
Supervisor.child_spec({Stargate.Receiver.Processor, named_args}, id: name)
end
end
|
lib/stargate/receiver/supervisor.ex
| 0.786377
| 0.461684
|
supervisor.ex
|
starcoder
|
defmodule Estated.Property.Owner do
@moduledoc "Current owner details taken from either the assessment."
@moduledoc since: "0.2.0"
defstruct [
:name,
:formatted_street_address,
:unit_type,
:unit_number,
:city,
:state,
:zip_code,
:zip_plus_four_code,
:owner_occupied
]
@typedoc "Current owner details taken from either the assessment."
@typedoc since: "0.2.0"
@type t :: %__MODULE__{
name: name() | nil,
formatted_street_address: formatted_street_address() | nil,
unit_type: unit_type() | nil,
unit_number: unit_number() | nil,
city: city() | nil,
state: state() | nil,
zip_code: zip_code() | nil,
zip_plus_four_code: zip_plus_four_code() | nil,
owner_occupied: owner_occupied() | nil
}
@typedoc """
Assessed owner names.
Eg. **"<NAME>; <NAME>"** or **"<NAME>"**
"""
@typedoc since: "0.2.0"
@type name :: String.t()
@typedoc """
The address where the current tax bill is mailed (not including unit).
Eg. **123 MAIN ST**
"""
@typedoc since: "0.2.0"
@type formatted_street_address :: String.t()
@typedoc """
The unit type.
Eg. [**APT**](https://estated.com/developers/docs/v4/property/enum-overview#unit_type)
"""
@typedoc since: "0.2.0"
@type unit_type :: String.t()
@typedoc """
The unit number.
Eg. **104**
"""
@typedoc since: "0.2.0"
@type unit_number :: String.t()
@typedoc """
The city where the current tax bill is mailed.
  If the address is outside the USA, the country will also be located here.
Eg. **ATLANTA**
"""
@typedoc since: "0.2.0"
@type city :: String.t()
@typedoc """
The state abbreviation where the current tax bill is mailed; XX for out of country addresses.
Eg. **GA**
"""
@typedoc since: "0.2.0"
@type state :: String.t()
@typedoc """
The zip code where the current tax bill is mailed.
Eg. **30342**
"""
@typedoc since: "0.2.0"
@type zip_code :: String.t()
@typedoc """
Four digit postal zip extension for where the tax bill is mailed.
Eg. **3019**
"""
@typedoc since: "0.2.0"
@type zip_plus_four_code :: String.t()
@typedoc """
Description of the owner occupancy.
Can be "YES" or "PROBABLE". Data not available if null.
Eg. **YES**
"""
@typedoc since: "0.2.0"
@type owner_occupied :: String.t()
@doc false
@doc since: "0.2.0"
@spec cast(map()) :: t()
def cast(%{} = owner) do
Enum.reduce(owner, %__MODULE__{}, &cast_field/2)
end
@spec cast(nil) :: nil
def cast(nil) do
nil
end
defp cast_field({"name", name}, acc) do
%__MODULE__{acc | name: name}
end
defp cast_field({"formatted_street_address", formatted_street_address}, acc) do
%__MODULE__{acc | formatted_street_address: formatted_street_address}
end
defp cast_field({"unit_type", unit_type}, acc) do
%__MODULE__{acc | unit_type: unit_type}
end
defp cast_field({"unit_number", unit_number}, acc) do
%__MODULE__{acc | unit_number: unit_number}
end
defp cast_field({"city", city}, acc) do
%__MODULE__{acc | city: city}
end
defp cast_field({"state", state}, acc) do
%__MODULE__{acc | state: state}
end
defp cast_field({"zip_code", zip_code}, acc) do
%__MODULE__{acc | zip_code: zip_code}
end
defp cast_field({"zip_plus_four_code", zip_plus_four_code}, acc) do
%__MODULE__{acc | zip_plus_four_code: zip_plus_four_code}
end
defp cast_field({"owner_occupied", owner_occupied}, acc) do
%__MODULE__{acc | owner_occupied: owner_occupied}
end
defp cast_field(_map_entry, acc) do
acc
end
end
|
lib/estated/property/owner.ex
| 0.866331
| 0.466603
|
owner.ex
|
starcoder
|
defmodule Tesseract.Tree.R.Validation do
alias Tesseract.Tree.R.Util
alias Tesseract.Ext.MathExt
  # The rules for a root that is also a leaf are a little different:
  # a leaf node can have between MIN and MAX entries, EXCEPT when the
  # leaf node is also the root node. We take care of that exception
  # with a looser validation rule.
def tree_valid?({:leaf, entries}, %{max_entries: max_entries}) do
length(entries) <= max_entries
end
def tree_valid?(root, %{min_entries: min_entries} = cfg) do
# Validate depth
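    # An R-tree with n entries and minimum fanout min_entries has depth at
    # most ceil(log_min_entries(n)) - 1, which is what we check below.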
n = Util.count_entries(root)
d = Util.depth(root)
depth_valid = if n > 1, do: d <= Float.ceil(MathExt.log(n, min_entries)) - 1, else: true
depth_valid && node_valid_by_configuration?(root, cfg)
end
defp node_valid_by_configuration?({:leaf, entries}, %{
max_entries: max_entries,
min_entries: min_entries
}) do
length(entries) >= min_entries && length(entries) <= max_entries
end
defp node_valid_by_configuration?(
{:internal, entries},
%{max_entries: max_entries, min_entries: min_entries} = cfg
) do
self_valid? = length(entries) >= min_entries && length(entries) <= max_entries
children_valid? =
entries
|> Enum.map(&Util.entry_value/1)
|> Enum.all?(&node_valid_by_configuration?(&1, cfg))
self_valid? && children_valid?
end
def tree_valid!({:leaf, entries} = node, %{max_entries: max_entries} = cfg) do
if length(entries) > max_entries do
raise Tesseract.Tree.Error.NodeOverflowError, node: node, cfg: cfg
end
true
end
def tree_valid!(root, %{min_entries: min_entries} = cfg) do
# Validate depth
n = Util.count_entries(root)
d = Util.depth(root)
depth_valid = if n > 1, do: d <= Float.ceil(MathExt.log(n, min_entries)) - 1, else: true
if not depth_valid do
raise Tesseract.Tree.Error.DepthInvalidError, [root, d]
end
node_valid_by_configuration!(root, cfg)
end
defp node_valid_by_configuration!({:leaf, _} = node, cfg) do
node_properties_valid!(node, cfg)
end
defp node_valid_by_configuration!({:internal, entries} = node, cfg) do
true = node_properties_valid!(node, cfg)
entries
|> Enum.map(&Util.entry_value/1)
|> Enum.each(&node_valid_by_configuration!(&1, cfg))
end
defp node_properties_valid!({_, entries} = node, cfg) do
%{max_entries: max_entries, min_entries: min_entries} = cfg
if length(entries) > max_entries do
raise Tesseract.Tree.Error.NodeOverflowError, node: node, cfg: cfg
end
if length(entries) < min_entries do
raise Tesseract.Tree.Error.NodeUnderflowError, node: node, cfg: cfg
end
true
end
end
|
lib/tree/r/validation.ex
| 0.681833
| 0.438845
|
validation.ex
|
starcoder
|
defmodule Timeout do
@moduledoc """
An module for manipulating configurable timeouts.
Comes with the following features.
* Randomizing within +/- of a given percent range
* Backoffs with an optional maximum.
* Timer management using the above configuration.
### Backoff
Backoffs can be configured using the `:backoff` and `:backoff_max` options
  when creating a timeout with `new/1`. Each call to `next/1` grows the
  timeout by the configured backoff factor and stores that new value as the
  current timeout.
This comes in handy when you might want to backoff a reconnect attempt or a
polling process during times of low activity.
t = Timeout.new(100, backoff: 1.25)
Timeout.current(t) # => 100
t = Timeout.next(t)
Timeout.current(t) # => 100
t = Timeout.next(t)
Timeout.current(t) # => 125
t = Timeout.next(t)
Timeout.current(t) # => 156
*Note* how the first call to next returns the initial value. If we incremented
it on the first call, the initial value would never be used.
### Randomizing
  This module is capable of randomizing within `+/-` of a given percent range.
This feature can be especially useful if you want to avoid something like the
  [Thundering Herd Problem][thp] when multiple processes might be sending
requests to a remote service. For example:
t = Timeout.new(100, random: 0.10)
Timeout.current(t) # => 95
Timeout.current(t) # => 107
Timeout.current(t) # => 108
Timeout.current(t) # => 99
Timeout.current(t) # => 100
This works in combination with the `backoff` configuration as well:
t = Timeout.new(100, backoff: 1.25, random: 0.10)
t = Timeout.next(t)
Timeout.current(t) # => Within +/- 10% of 100
t = Timeout.next(t)
Timeout.current(t) # => Within +/- 10% of 125
t = Timeout.next(t)
Timeout.current(t) # => Within +/- 10% of 156
### Timers
The main reason for writing this library was to be able to configure a timeout
  once, then be able to schedule server messages without having to keep track
of the timeout values being used.
After configuring your timeout using the options above, you can start
scheduling messages using the following workflow:
t = Timeout.new(100, backoff: 1.25, backoff_max: 1_250, random: 0.10)
{t, delay} = Timeout.send_after(t, self(), :message)
IO.puts("Message delayed for: \#{delay}")
receive do
:message -> IO.puts("Received message!")
end
The timer API methods include:
* `send_after/3`: Sends the message, returns `{timeout, delay}`.
* `send_after!/3`: Same as above, but just returns the timeout.
* `cancel_timer/1`: Cancels the stored timer, returns `{timeout, result}`.
* `cancel_timer!/1`: Same as above, but just returns the timeout.
[thp]: https://en.wikipedia.org/wiki/Thundering_herd_problem
"""
@type timeout_value :: pos_integer
@typedoc "Represents timeout growth factor. Should be `> 1`."
@type backoff :: pos_integer | float | nil
@typedoc "Represents the max growth of a timeout using backoff."
  @type backoff_max :: pos_integer | nil
@typedoc "Represents a % range when randomizing. Should be `0 < x < 1`."
@type random :: float | nil
@type options :: [backoff: backoff, backoff_max: backoff_max, random: random]
@type t :: %__MODULE__{
base: timeout_value,
timeout: timeout_value,
backoff: backoff,
backoff_round: non_neg_integer,
backoff_max: backoff_max,
random: {float, float} | nil,
timer: reference | nil
}
defstruct ~w(base timeout backoff backoff_round backoff_max random timer)a
@doc """
Builds a `Timeout` struct.
Accepts an integer timeout value and the following optional configuration:
* `:backoff` - A backoff growth factor for growing a timeout period over time.
* `:backoff_max` - Given `:backoff`, will never grow past max.
* `:random` - A float indicating the `%` timeout values will be randomized
within. Expects `0 < :random < 1` or raises an `ArgumentError`. For example,
use `0.10` to randomize within +/- 10% of the desired timeout.
For more information, see `Timeout`.
"""
@spec new(timeout_value, options) :: t
def new(timeout, opts \\ []) when is_integer(timeout) do
%__MODULE__{
base: timeout,
timeout: timeout,
backoff: Keyword.get(opts, :backoff),
backoff_round: 0,
backoff_max: Keyword.get(opts, :backoff_max),
random: opts |> Keyword.get(:random) |> parse_random_max_min()
}
end
@doc """
Resets the current timeout.
"""
@spec reset(t) :: t
def reset(t = %__MODULE__{base: base}) do
%{t | backoff_round: 0, timeout: base}
end
@doc """
Increments the current timeout based on the `backoff` configuration.
If there is no `backoff` configured, this function simply returns the timeout
as is. If `backoff_max` is configured, the timeout will never be incremented
above that value.
**Note:** The first call to `next/1` will always return the initial timeout
first.
"""
@spec next(t) :: t
def next(t = %__MODULE__{backoff: nil}), do: t
def next(t = %__MODULE__{base: base, timeout: nil}), do: %{t | timeout: base}
def next(t = %__MODULE__{timeout: c, backoff_max: c}), do: t
def next(t = %__MODULE__{base: c, backoff: b, backoff_round: r, backoff_max: m}) do
timeout = round(c * :math.pow(b, r))
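    # Cap the grown timeout at backoff_max (m) when one is configured:
    # `(m && (timeout > m and m)) || timeout` evaluates to m once the cap is hit.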
%{t | backoff_round: r + 1, timeout: (m && (timeout > m and m)) || timeout}
end
@doc """
Returns the timeout value represented by the current state.
iex> Timeout.new(100) |> Timeout.current()
100
If `backoff` was configured, returns the current timeout with backoff applied:
iex> t = Timeout.new(100, backoff: 1.25) |> Timeout.next() |> Timeout.next()
...> Timeout.current(t)
125
If `random` was configured, the current timeout out is randomized within the
configured range:
iex> t = Timeout.new(100, random: 0.10)
...> if Timeout.current(t) in 91..110, do: true, else: false
true
"""
@spec current(t) :: timeout_value
def current(%__MODULE__{base: base, timeout: nil, random: random}),
do: calc_current(base, random)
def current(%__MODULE__{timeout: timeout, random: random}),
do: calc_current(timeout, random)
@doc """
Sends a process a message with `Process.send_after/3` using the given timeout,
  then stores the resulting timer on the struct.
Sends the message to `self()` if pid is omitted, otherwise sends to the given
`pid`.
Always calls `next/1` first on the given timer, then uses the return value of
`current/1` to delay the message.
  This function is a convenience wrapper around the following workflow:
t = Timeout.new(100, backoff: 1.25) |> Timeout.next()
timer = Process.send_after(self(), :message, Timeout.current(t))
t = %{t | timer: timer}
Returns `{%Timeout{}, delay}` where delay is the message schedule delay.
"""
  @spec send_after(t, pid, term) :: {t, pos_integer}
def send_after(t = %__MODULE__{}, pid \\ self(), message) do
t = next(t)
delay = current(t)
{%{t | timer: Process.send_after(pid, message, delay)}, delay}
end
@doc """
Calls `send_after/3`, but returns only the timeout struct.
"""
@spec send_after!(t, pid, term) :: t
def send_after!(t = %__MODULE__{}, pid \\ self(), message) do
with {timeout, _delay} <- send_after(t, pid, message), do: timeout
end
@doc """
Cancels the stored timer.
Returns `{%Timeout{}, result}` where result is the value returned by calling
`Process.cancel_timer/1` on the stored timer reference.
"""
@spec cancel_timer(t) :: {t, non_neg_integer | false | :ok}
def cancel_timer(t = %__MODULE__{timer: nil}), do: {t, false}
def cancel_timer(t = %__MODULE__{timer: timer}) when is_reference(timer) do
{%{t | timer: nil}, Process.cancel_timer(timer)}
end
@doc """
Calls `cancel_timer/1` but returns only the timeout struct.
"""
@spec cancel_timer!(t) :: t
def cancel_timer!(t = %__MODULE__{}) do
with {timeout, _result} <- cancel_timer(t), do: timeout
end
defp calc_current(timeout, nil), do: timeout
defp calc_current(timeout, {rmax, rmin}) do
max = round(timeout * rmax)
min = round(timeout * rmin)
min + do_rand(max - min)
end
defp parse_random_max_min(nil), do: nil
defp parse_random_max_min(range) when is_float(range) and range > 0 and range < 1 do
{1.0 + range, 1.0 - range}
end
defp parse_random_max_min(range) do
raise ArgumentError, "Invalid option for :random. Expected 0 < float < 1, got: #{range}"
end
defp do_rand(0), do: 0
defp do_rand(n), do: :rand.uniform(n)
end
|
lib/timeout.ex
| 0.918306
| 0.655956
|
timeout.ex
|
starcoder
|
defmodule Cacherl do
alias Cacherl.Cache
alias Cacherl.Store
@default_lease_time 60*60*24
@moduledoc """
The main APIs for the cache system. It provides an interface
  to manipulate the cache without knowing its inner workings.
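
  ## Example

  A minimal usage sketch (the key, value and lease time are illustrative):

      Cacherl.insert({:user, 42}, %{name: "Ada"}, 300)
      Cacherl.lookup({:user, 42}) # => {:ok, %{name: "Ada"}}
      Cacherl.delete({:user, 42})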
"""
@doc """
Inserts a {key, value} pair into the cache, setting an expiry
if set. If the cache exists, update it; otherwise, create a
new cache, and keep track of its pid in the store.
"""
  def insert(key, value, lease_time \\ @default_lease_time) do
case Store.lookup(key) do
{:ok, pid} ->
Cache.replace(pid, value)
{:error, _} ->
{:ok, pid} = Cache.create(value, lease_time)
Store.insert(key, pid)
end
end
@doc """
Looks up the value for the key in the cache.
Acquires the pid of the key from the store, and fetch the value
  from it. Since the cache may have expired, which raises an exception,
  the call is wrapped in a `try`/`rescue`.
"""
def lookup(key) do
try do
{:ok, pid} = Store.lookup(key)
{:ok, _value} = Cache.fetch(pid)
rescue
_ -> {:error, :not_found}
end
end
@doc """
  Looks up a range of values based on a matcher on the key.
This is mostly used when the key is any composite data types
such as list or tuple to keep track a cache for each property
of a target. For example, we may have a cache for each of the
user's email and we store with the key `{username, category}`.
We can then use this to lookup all the emails in a certain
category.
  This function takes a matcher based on ETS's match spec. Since
  the store is a KVS, the matcher applies to the key only, because
  the user does not need to know the `pid`. It takes as its second
  argument a function that is called on each match. The return
  value of that function is used as the key for a lookup inside a
  reduce, which accumulates only the values that were found.
## Example
      caches = match({username, :'$1'}, fn([cat]) ->
# Since we only match one variable, the argument for
# the `fn` is a list of one element.
{username, cat}
end)
"""
def match(key_pattern, key_generator) do
case Store.match({key_pattern, :'_'}) do
[] -> []
result ->
Enum.reduce(result, [], fn(match, acc) ->
key = key_generator.(match)
case Cacherl.lookup(key) do
{:ok, value} -> [value | acc]
_ -> acc
end
end)
end
end
def keys() do
Store.match({:'$1', :'_'})
|> Enum.map(fn([key]) -> key end)
end
def last_updated(key) do
case Store.lookup(key) do
{:ok, pid} ->
{:ok, last_updated} = Cache.last_updated(pid)
last_updated
{:error, _} -> 0
end
end
@doc """
Delete the cache associated with the provided key.
"""
def delete(key) do
case Store.lookup(key) do
{:ok, pid} ->
        Cache.delete(pid)
        # Also remove the key -> pid mapping from the store.
        Store.delete(key)
{:error, _reason} ->
:ok
end
end
end
|
lib/cacherl/cacherl.ex
| 0.556882
| 0.597344
|
cacherl.ex
|
starcoder
|
defmodule Crutches.Enum do
@moduledoc ~s"""
Convenience functions for enums.
This module provides several convenience functions operating on enums.
Simply call any function (with any options if applicable) to make use of it.
"""
@type t :: Enumerable.t
@type element :: any
@doc ~S"""
Returns a copy of the `collection` without the specified `elements`.
## Examples
iex> Enum.without(["David", "Rafael", "Aaron", "Todd"], ["Aaron", "Todd"])
["David", "Rafael"]
iex> Enum.without([1, 1, 2, 1, 4], [1, 2])
[4]
iex> Enum.without(%{ movie: "Inception", release: 2010 }, [:release])
%{ movie: "Inception" }
      iex> Enum.without([ answer: 42 ], [:answer])
      []
"""
@spec without(list(any), list(any)) :: list(any)
def without(collection, elements) when is_list(collection) do
if Keyword.keyword? collection do
Keyword.drop collection, elements
else
Enum.reject collection, &Enum.member?(elements, &1)
end
end
@spec without(map, list(any)) :: map
defdelegate without(map, keys), to: Map, as: :drop
@doc ~S"""
Shorthand for length(collection) > 1
## Examples
iex> Enum.many?([])
false
iex> Enum.many?([nil, nil, nil])
true
iex> Enum.many?([1, 2, 3])
true
iex> Enum.many?(%{})
false
iex> Enum.many?(%{ name: "Kash" })
false
iex> Enum.many?([ answer: 42 ])
false
"""
@spec many?(list(any)) :: boolean
def many?([]), do: false
def many?([_ | tail]), do: !Enum.empty?(tail)
@spec many?(map) :: boolean
  def many?(collection) when is_map(collection), do: map_size(collection) > 1
@doc """
Invokes the given `fun` for each item in the enumerable and returns `true` if
none of the invocations return a truthy value.
Returns `false` otherwise.
## Examples
iex> Enum.none?([2, 4, 6], fn(x) -> rem(x, 2) == 1 end)
true
iex> Enum.none?([2, 3, 4], fn(x) -> rem(x, 2) == 1 end)
false
If no function is given, it defaults to checking if all items in the
enumerable are a falsy value.
iex> Enum.none?([false, false, false])
true
iex> Enum.none?([false, true, false])
false
"""
@spec none?(t) :: boolean
@spec none?(t, (element -> as_boolean(term))) :: boolean
def none?(enumerable, fun \\ fn(x) -> x end)
def none?(enumerable, fun) do
not Enum.any?(enumerable, fun)
end
@doc """
Invokes the given `fun` for each item in the enumerable and returns `true` if
exactly one invocation returns a truthy value.
Returns `false` otherwise.
## Examples
iex> Enum.one?([1, 2, 3], fn(x) -> rem(x, 2) == 0 end)
true
iex> Enum.one?([1, 3, 5], fn(x) -> rem(x, 2) == 0 end)
false
iex> Enum.one?([2, 4, 6], fn(x) -> rem(x, 2) == 0 end)
false
If no function is given, it defaults to checking if exactly one item in the
enumerable is a truthy value.
iex> Enum.one?([1, 2, 3])
false
iex> Enum.one?([1, nil, false])
true
"""
@spec one?(t) :: boolean
@spec one?(t, (element -> as_boolean(term))) :: boolean
def one?(enumerable, fun \\ fn(x) -> x end) do
    match?([_], enumerable |> Stream.filter(fun) |> Enum.take(2))
end
@doc ~S"""
Returns a copy of the `collection` with any and all nils removed.
## Examples
iex> Enum.compact(["David", "Rafael", nil, "Todd"])
["David", "Rafael", "Todd"]
iex> Enum.compact([1, 1, 2, nil, 4, nil])
[1, 1, 2, 4]
iex> Enum.compact(%{ movie: "Frozen", rating: nil })
%{ movie: "Frozen" }
"""
@spec compact(list(any)) :: list(any)
@spec compact(map) :: map
def compact(list) when is_list(list) do
Enum.reject(list, &is_nil(&1))
end
def compact(map) when is_map(map) do
    for {key, value} <- map, !is_nil(value), into: %{}, do: {key, value}
end
end
|
lib/crutches/enum.ex
| 0.882003
| 0.478529
|
enum.ex
|
starcoder
|
defmodule RateLimiter do
@moduledoc """
  A high-performance rate limiter implemented on top of Erlang `:atomics`,
  which uses only atomic hardware instructions without any software-level locking.
As a result RateLimiter is ~20x faster than `ExRated` and ~80x faster than `Hammer`.
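
  ## Example

  A minimal sketch of the expected workflow (the id, scale and limit are
  illustrative): a limiter allowing 10 hits per 1000 ms.

      RateLimiter.init()
      limiter = RateLimiter.new(:my_api, 1_000, 10)
      :ok = RateLimiter.hit(limiter)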
"""
@ets_table Application.get_env(:rate_limiter, :ets_table, :rate_limiters)
@enforce_keys [:ref, :limit, :scale]
defstruct [:id, :ref, :limit, :scale]
def init() do
:ets.new(@ets_table, [:named_table, :ordered_set, :public])
:ok
end
def new(scale, limit) do
%RateLimiter{scale: scale, limit: limit, ref: atomics()}
end
def new(id, scale, limit) do
case get(id) do
rate_limiter = %RateLimiter{scale: ^scale, limit: ^limit} ->
reset(rate_limiter)
rate_limiter = %RateLimiter{} ->
rate_limiter
|> update(scale, limit)
|> reset()
nil ->
rate_limiter = %RateLimiter{id: id, scale: scale, limit: limit, ref: atomics()}
:ets.insert(@ets_table, {id, rate_limiter})
rate_limiter
end
end
def update(rate_limiter = %RateLimiter{id: nil}, scale, limit) do
%{rate_limiter | scale: scale, limit: limit}
end
def update(rate_limiter = %RateLimiter{id: id}, scale, limit) do
rate_limiter = %{rate_limiter | scale: scale, limit: limit}
:ets.insert(@ets_table, {id, rate_limiter})
rate_limiter
end
def update(id, scale, limit) do
get!(id) |> update(scale, limit)
end
def delete(%RateLimiter{id: id}) do
delete(id)
end
def delete(id) do
:ets.delete(@ets_table, id)
end
def get(id) do
case :ets.lookup(@ets_table, id) do
[{_, rate_limiter}] -> rate_limiter
[] -> nil
end
end
def get!(id) do
get(id) || raise "Rate limiter #{inspect(id)} not found"
end
def hit(rate_limiter, hits \\ 1)
def hit(rate_limiter = %RateLimiter{ref: ref, scale: scale, limit: limit}, hits) do
if :atomics.add_get(ref, 2, hits) > limit do
now = :erlang.monotonic_time(:millisecond)
last_reset = :atomics.get(ref, 1)
if last_reset + scale < now do
if :ok == :atomics.compare_exchange(ref, 1, last_reset, now) do
:atomics.put(ref, 2, 0)
end
hit(rate_limiter, hits)
else
{:error, last_reset + scale - now}
end
else
:ok
end
end
def hit(id, hits) do
get!(id) |> hit(hits)
end
def hit(id, scale, limit, hits \\ 1) do
case get(id) do
rate_limiter = %RateLimiter{} -> rate_limiter
nil -> new(id, scale, limit)
end
|> hit(hits)
end
def wait(rate_limiter, hits \\ 1) do
case hit(rate_limiter, hits) do
:ok ->
:ok
{:error, eta} ->
Process.sleep(eta)
wait(rate_limiter, hits)
end
end
def wait(id, scale, limit, hits \\ 1) do
case hit(id, scale, limit, hits) do
:ok ->
:ok
{:error, eta} ->
Process.sleep(eta)
wait(id, hits)
end
end
def inspect_bucket(%RateLimiter{ref: ref}) do
%{
hits: :atomics.get(ref, 2),
created_at: :atomics.get(ref, 1)
}
end
def inspect_bucket(id) do
get!(id) |> inspect_bucket()
end
def reset(rate_limiter = %RateLimiter{ref: ref}) do
:atomics.put(ref, 1, :erlang.monotonic_time(:millisecond))
:atomics.put(ref, 2, 0)
rate_limiter
end
defp atomics do
ref = :atomics.new(2, signed: true)
:atomics.put(ref, 1, :erlang.monotonic_time(:millisecond))
ref
end
end
|
lib/rate_limiter.ex
| 0.748536
| 0.770465
|
rate_limiter.ex
|
starcoder
|
defmodule AWS.Comprehend do
@moduledoc """
Amazon Comprehend is an AWS service for gaining insight into the content of
documents. Use these actions to determine the topics contained in your
documents, the topics they discuss, the predominant sentiment expressed in
them, the predominant language used, and more.
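
  ## Example

  An illustrative call (assuming `client` is an `AWS.Client` struct
  configured with your credentials and region):

      AWS.Comprehend.detect_sentiment(client, %{
        "Text" => "I love Elixir!",
        "LanguageCode" => "en"
      })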
"""
@doc """
Determines the dominant language of the input text for a batch of
documents. For a list of languages that Amazon Comprehend can detect, see
[Amazon Comprehend Supported
Languages](https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html).
"""
def batch_detect_dominant_language(client, input, options \\ []) do
request(client, "BatchDetectDominantLanguage", input, options)
end
@doc """
Inspects the text of a batch of documents for named entities and returns
information about them. For more information about named entities, see
`how-entities`
"""
def batch_detect_entities(client, input, options \\ []) do
request(client, "BatchDetectEntities", input, options)
end
@doc """
Detects the key noun phrases found in a batch of documents.
"""
def batch_detect_key_phrases(client, input, options \\ []) do
request(client, "BatchDetectKeyPhrases", input, options)
end
@doc """
Inspects a batch of documents and returns an inference of the prevailing
sentiment, `POSITIVE`, `NEUTRAL`, `MIXED`, or `NEGATIVE`, in each one.
"""
def batch_detect_sentiment(client, input, options \\ []) do
request(client, "BatchDetectSentiment", input, options)
end
@doc """
Inspects the text of a batch of documents for the syntax and part of speech
of the words in the document and returns information about them. For more
information, see `how-syntax`.
"""
def batch_detect_syntax(client, input, options \\ []) do
request(client, "BatchDetectSyntax", input, options)
end
@doc """
Creates a new document classification request to analyze a single document
in real-time, using a previously created and trained custom model and an
endpoint.
"""
def classify_document(client, input, options \\ []) do
request(client, "ClassifyDocument", input, options)
end
@doc """
Creates a new document classifier that you can use to categorize documents.
  To create a classifier, you provide a set of training documents that are
  labeled with the categories that you want to use. After the classifier is
  trained, you can use it to categorize a set of documents into those
  categories. For more information, see `how-document-classification`.
"""
def create_document_classifier(client, input, options \\ []) do
request(client, "CreateDocumentClassifier", input, options)
end
@doc """
Creates a model-specific endpoint for synchronous inference for a
previously trained custom model
"""
def create_endpoint(client, input, options \\ []) do
request(client, "CreateEndpoint", input, options)
end
@doc """
Creates an entity recognizer using submitted files. After your
`CreateEntityRecognizer` request is submitted, you can check job status
  using the `DescribeEntityRecognizer` API.
"""
def create_entity_recognizer(client, input, options \\ []) do
request(client, "CreateEntityRecognizer", input, options)
end
@doc """
Deletes a previously created document classifier
Only those classifiers that are in terminated states (IN_ERROR, TRAINED)
will be deleted. If an active inference job is using the model, a
`ResourceInUseException` will be returned.
This is an asynchronous action that puts the classifier into a DELETING
state, and it is then removed by a background job. Once removed, the
classifier disappears from your account and is no longer available for use.
"""
def delete_document_classifier(client, input, options \\ []) do
request(client, "DeleteDocumentClassifier", input, options)
end
@doc """
Deletes a model-specific endpoint for a previously-trained custom model.
All endpoints must be deleted in order for the model to be deleted.
"""
def delete_endpoint(client, input, options \\ []) do
request(client, "DeleteEndpoint", input, options)
end
@doc """
Deletes an entity recognizer.
Only those recognizers that are in terminated states (IN_ERROR, TRAINED)
will be deleted. If an active inference job is using the model, a
`ResourceInUseException` will be returned.
This is an asynchronous action that puts the recognizer into a DELETING
state, and it is then removed by a background job. Once removed, the
recognizer disappears from your account and is no longer available for use.
"""
def delete_entity_recognizer(client, input, options \\ []) do
request(client, "DeleteEntityRecognizer", input, options)
end
@doc """
Gets the properties associated with a document classification job. Use this
operation to get the status of a classification job.
"""
def describe_document_classification_job(client, input, options \\ []) do
request(client, "DescribeDocumentClassificationJob", input, options)
end
@doc """
Gets the properties associated with a document classifier.
"""
def describe_document_classifier(client, input, options \\ []) do
request(client, "DescribeDocumentClassifier", input, options)
end
@doc """
Gets the properties associated with a dominant language detection job. Use
this operation to get the status of a detection job.
"""
def describe_dominant_language_detection_job(client, input, options \\ []) do
request(client, "DescribeDominantLanguageDetectionJob", input, options)
end
@doc """
Gets the properties associated with a specific endpoint. Use this operation
to get the status of an endpoint.
"""
def describe_endpoint(client, input, options \\ []) do
request(client, "DescribeEndpoint", input, options)
end
@doc """
Gets the properties associated with an entities detection job. Use this
operation to get the status of a detection job.
"""
def describe_entities_detection_job(client, input, options \\ []) do
request(client, "DescribeEntitiesDetectionJob", input, options)
end
@doc """
Provides details about an entity recognizer including status, S3 buckets
containing training data, recognizer metadata, metrics, and so on.
"""
def describe_entity_recognizer(client, input, options \\ []) do
request(client, "DescribeEntityRecognizer", input, options)
end
@doc """
Gets the properties associated with a key phrases detection job. Use this
operation to get the status of a detection job.
"""
def describe_key_phrases_detection_job(client, input, options \\ []) do
request(client, "DescribeKeyPhrasesDetectionJob", input, options)
end
@doc """
Gets the properties associated with a sentiment detection job. Use this
operation to get the status of a detection job.
"""
def describe_sentiment_detection_job(client, input, options \\ []) do
request(client, "DescribeSentimentDetectionJob", input, options)
end
@doc """
Gets the properties associated with a topic detection job. Use this
operation to get the status of a detection job.
"""
def describe_topics_detection_job(client, input, options \\ []) do
request(client, "DescribeTopicsDetectionJob", input, options)
end
@doc """
Determines the dominant language of the input text. For a list of languages
that Amazon Comprehend can detect, see [Amazon Comprehend Supported
Languages](https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html).
"""
def detect_dominant_language(client, input, options \\ []) do
request(client, "DetectDominantLanguage", input, options)
end
@doc """
Inspects text for named entities, and returns information about them. For
more information, about named entities, see `how-entities`.
"""
def detect_entities(client, input, options \\ []) do
request(client, "DetectEntities", input, options)
end
@doc """
Detects the key noun phrases found in the text.
"""
def detect_key_phrases(client, input, options \\ []) do
request(client, "DetectKeyPhrases", input, options)
end
@doc """
Inspects text and returns an inference of the prevailing sentiment
(`POSITIVE`, `NEUTRAL`, `MIXED`, or `NEGATIVE`).
"""
def detect_sentiment(client, input, options \\ []) do
request(client, "DetectSentiment", input, options)
end
@doc """
Inspects text for syntax and the part of speech of words in the document.
  For more information, see `how-syntax`.
"""
def detect_syntax(client, input, options \\ []) do
request(client, "DetectSyntax", input, options)
end
@doc """
  Gets a list of the document classification jobs that you have
submitted.
"""
def list_document_classification_jobs(client, input, options \\ []) do
request(client, "ListDocumentClassificationJobs", input, options)
end
@doc """
Gets a list of the document classifiers that you have created.
"""
def list_document_classifiers(client, input, options \\ []) do
request(client, "ListDocumentClassifiers", input, options)
end
@doc """
Gets a list of the dominant language detection jobs that you have
submitted.
"""
def list_dominant_language_detection_jobs(client, input, options \\ []) do
request(client, "ListDominantLanguageDetectionJobs", input, options)
end
@doc """
Gets a list of all existing endpoints that you've created.
"""
def list_endpoints(client, input, options \\ []) do
request(client, "ListEndpoints", input, options)
end
@doc """
Gets a list of the entity detection jobs that you have submitted.
"""
def list_entities_detection_jobs(client, input, options \\ []) do
request(client, "ListEntitiesDetectionJobs", input, options)
end
@doc """
Gets a list of the properties of all entity recognizers that you created,
including recognizers currently in training. Allows you to filter the list
of recognizers based on criteria such as status and submission time. This
call returns up to 500 entity recognizers in the list, with a default
number of 100 recognizers in the list.
The results of this list are not in any particular order. Please get the
list and sort locally if needed.
"""
def list_entity_recognizers(client, input, options \\ []) do
request(client, "ListEntityRecognizers", input, options)
end
@doc """
Get a list of key phrase detection jobs that you have submitted.
"""
def list_key_phrases_detection_jobs(client, input, options \\ []) do
request(client, "ListKeyPhrasesDetectionJobs", input, options)
end
@doc """
Gets a list of sentiment detection jobs that you have submitted.
"""
def list_sentiment_detection_jobs(client, input, options \\ []) do
request(client, "ListSentimentDetectionJobs", input, options)
end
@doc """
Lists all tags associated with a given Amazon Comprehend resource.
"""
def list_tags_for_resource(client, input, options \\ []) do
request(client, "ListTagsForResource", input, options)
end
@doc """
Gets a list of the topic detection jobs that you have submitted.
"""
def list_topics_detection_jobs(client, input, options \\ []) do
request(client, "ListTopicsDetectionJobs", input, options)
end
@doc """
  Starts an asynchronous document classification job. Use the
  `DescribeDocumentClassificationJob` operation to track the progress of the
  job.
"""
def start_document_classification_job(client, input, options \\ []) do
request(client, "StartDocumentClassificationJob", input, options)
end
@doc """
  Starts an asynchronous dominant language detection job for a collection of
  documents. Use the `DescribeDominantLanguageDetectionJob` operation to
  track the status of a job.
"""
def start_dominant_language_detection_job(client, input, options \\ []) do
request(client, "StartDominantLanguageDetectionJob", input, options)
end
@doc """
  Starts an asynchronous entity detection job for a collection of documents.
  Use the `DescribeEntitiesDetectionJob` operation to track the status of a
  job.
This API can be used for either standard entity detection or custom entity
recognition. In order to be used for custom entity recognition, the
optional `EntityRecognizerArn` must be used in order to provide access to
the recognizer being used to detect the custom entity.
"""
def start_entities_detection_job(client, input, options \\ []) do
request(client, "StartEntitiesDetectionJob", input, options)
end
@doc """
  Starts an asynchronous key phrase detection job for a collection of
  documents. Use the `DescribeKeyPhrasesDetectionJob` operation to track the
  status of a job.
"""
def start_key_phrases_detection_job(client, input, options \\ []) do
request(client, "StartKeyPhrasesDetectionJob", input, options)
end
@doc """
  Starts an asynchronous sentiment detection job for a collection of
  documents. Use the `DescribeSentimentDetectionJob` operation to track the
  status of a job.
"""
def start_sentiment_detection_job(client, input, options \\ []) do
request(client, "StartSentimentDetectionJob", input, options)
end
@doc """
Starts an asynchronous topic detection job. Use the
  `DescribeTopicsDetectionJob` operation to track the status of a job.
"""
def start_topics_detection_job(client, input, options \\ []) do
request(client, "StartTopicsDetectionJob", input, options)
end
@doc """
Stops a dominant language detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put
into the `STOP_REQUESTED` state. If the job completes before it can be
stopped, it is put into the `COMPLETED` state; otherwise the job is stopped
and put into the `STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
`StopDominantLanguageDetectionJob` operation, the operation returns a 400
Internal Request Exception.
When a job is stopped, any documents already processed are written to the
output location.
"""
def stop_dominant_language_detection_job(client, input, options \\ []) do
request(client, "StopDominantLanguageDetectionJob", input, options)
end
@doc """
Stops an entities detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put
into the `STOP_REQUESTED` state. If the job completes before it can be
stopped, it is put into the `COMPLETED` state; otherwise the job is stopped
and put into the `STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
  `StopEntitiesDetectionJob` operation, the operation returns a 400
Internal Request Exception.
When a job is stopped, any documents already processed are written to the
output location.
"""
def stop_entities_detection_job(client, input, options \\ []) do
request(client, "StopEntitiesDetectionJob", input, options)
end
@doc """
Stops a key phrases detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put
into the `STOP_REQUESTED` state. If the job completes before it can be
stopped, it is put into the `COMPLETED` state; otherwise the job is stopped
and put into the `STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
  `StopKeyPhrasesDetectionJob` operation, the operation returns a 400
Internal Request Exception.
When a job is stopped, any documents already processed are written to the
output location.
"""
def stop_key_phrases_detection_job(client, input, options \\ []) do
request(client, "StopKeyPhrasesDetectionJob", input, options)
end
@doc """
Stops a sentiment detection job in progress.
If the job state is `IN_PROGRESS` the job is marked for termination and put
into the `STOP_REQUESTED` state. If the job completes before it can be
  stopped, it is put into the `COMPLETED` state; otherwise the job is
stopped and put into the `STOPPED` state.
If the job is in the `COMPLETED` or `FAILED` state when you call the
  `StopSentimentDetectionJob` operation, the operation returns a 400
Internal Request Exception.
When a job is stopped, any documents already processed are written to the
output location.
"""
def stop_sentiment_detection_job(client, input, options \\ []) do
request(client, "StopSentimentDetectionJob", input, options)
end
@doc """
Stops a document classifier training job while in progress.
If the training job state is `TRAINING`, the job is marked for termination
and put into the `STOP_REQUESTED` state. If the training job completes
  before it can be stopped, it is put into the `TRAINED` state; otherwise the
training job is stopped and put into the `STOPPED` state and the service
sends back an HTTP 200 response with an empty HTTP body.
"""
def stop_training_document_classifier(client, input, options \\ []) do
request(client, "StopTrainingDocumentClassifier", input, options)
end
@doc """
Stops an entity recognizer training job while in progress.
If the training job state is `TRAINING`, the job is marked for termination
and put into the `STOP_REQUESTED` state. If the training job completes
  before it can be stopped, it is put into the `TRAINED` state; otherwise the
  training job is stopped and put into the `STOPPED` state and the service
sends back an HTTP 200 response with an empty HTTP body.
"""
def stop_training_entity_recognizer(client, input, options \\ []) do
request(client, "StopTrainingEntityRecognizer", input, options)
end
@doc """
Associates a specific tag with an Amazon Comprehend resource. A tag is a
  key-value pair that adds metadata to a resource used by Amazon
  Comprehend. For example, a tag with "Sales" as the key might be added to a
resource to indicate its use by the sales department.
"""
def tag_resource(client, input, options \\ []) do
request(client, "TagResource", input, options)
end
@doc """
Removes a specific tag associated with an Amazon Comprehend resource.
"""
def untag_resource(client, input, options \\ []) do
request(client, "UntagResource", input, options)
end
@doc """
Updates information about the specified endpoint.
"""
def update_endpoint(client, input, options \\ []) do
request(client, "UpdateEndpoint", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "comprehend"}
host = build_host("comprehend", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "Comprehend_20171127.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/comprehend.ex
| 0.910097
| 0.635632
|
comprehend.ex
|
starcoder
|
defmodule MeterToLengthConverter do
def convert(:feet, m) when is_number(m) and m >= 0, do: m * 3.28084
def convert(:inch, m) when is_number(m) and m >= 0, do: m * 39.3701
def convert(:yard, m) when is_number(m) and m >= 0, do: m * 1.09361
end
div(10, 3) # 3
rem(10, 3) # 1
"Strings are binaries" |> is_binary === true
"ohai" <> <<0>> === <<111, 104, 97, 105, 0>>
?o === 111
?h === 104
?a === 97
?i === 105
<<111, 104, 97, 105>> === "ohai"
'ohai' == "ohai" # false
elem({1, 2, 3}, 1) === 2
put_elem({1, 2, 3}, 0, 0) === {0, 2, 3}
programmers = Map.new # %{}
programmers = Map.put(programmers, :joe, "Erlang") # %{joe: "Erlang"}
programmers = Map.put(programmers, :matz, "Ruby") # %{joe: "Erlang", matz: "Ruby"}
programmers = Map.put(programmers, :rich, "Clojure") # %{joe: "Erlang", matz: "Ruby", rich: "Clojure"}
Map.fetch(programmers, :rich) # {:ok, "Clojure"}
Map.fetch(programmers, :rasmus) # :error
case Map.fetch(programmers, :rich) do
{:ok, language} ->
language
:error ->
"Wrong language"
end # "Clojure"
defmodule ID3Parser do
def parse(filename) do
case File.read(filename) do
{:ok, mp3} ->
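        # An ID3v1 tag occupies the last 128 bytes of the file,
        # so skip everything before it.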
mp3_byte_size = byte_size(mp3) - 128
<< _ :: binary-size(mp3_byte_size), id3_tag :: binary >> = mp3
<<
"TAG",
title :: binary-size(30),
artist :: binary-size(30),
album :: binary-size(30),
year :: binary-size(4),
_rest :: binary
>> = id3_tag
IO.puts "#{artist} - #{title} (#{album}, #{year})"
_ ->
IO.puts "Couldn't open #{filename}"
end
end
end
defmodule MyList do
def flatten([]), do: []
def flatten([head | tail]), do: flatten(head) ++ flatten(tail)
def flatten(head), do: [head]
end
MyList.flatten [1, [:two], ["three", []]] # [1, :two, "three"]
"/home/idfumg/Downloads"
|> Path.join("*.pdf")
|> Path.wildcard
|> Enum.filter(fn filename ->
filename |> Path.basename |> String.contains?("Linux")
end)
:rand.uniform(123) # :rand supersedes the removed :random module
# :inets.start
# :ssl.start
# {:ok, {status, headers, body}} = :httpc.request 'https://elixir-lang.org'
# :observer.start
:crypto.hash(:md5, "Tales from the Crypt") === <<79, 132, 235, 77, 3, 224, 121, 88, 98, 75, 61, 67, 62, 16, 233, 91>> # true
:crypto.hash(:sha, "Tales from the Crypt")
:crypto.hash(:sha256, "Tales from the Crypt")
:crypto.hash(:sha512, "Tales from the Crypt")
|
learn/LittleBook/1.ex
| 0.565299
| 0.578805
|
1.ex
|
starcoder
|
defmodule Ash.Type.Enum do
@moduledoc """
A type for abstracting enums into a single type.
For example, you might have:
```elixir
attribute :status, :atom, constraints: [:open, :closed]
```
But as that starts to spread around your system you may find that you want
to centralize that logic. To do that, use this module to define an Ash type
easily.
```elixir
defmodule MyApp.TicketStatus do
use Ash.Type.Enum, values: [:open, :closed]
end
```
Valid values are:
  * The atom itself, e.g. `:open`
  * A string that matches the atom, e.g. `"open"`
  * A string that matches the atom after being downcased, e.g. `"OPEN"` or `"oPeN"`
  * A string that matches the stringified, downcased atom, after itself being downcased.
    This allows for enum values like `:Open`, `:SomeState` and `:Some_State`
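
  For the example module above, all of the following normalize to the same
  value (a brief illustration of the matching rules):

      MyApp.TicketStatus.match("OPEN")    # => {:ok, :open}
      MyApp.TicketStatus.match(:open)     # => {:ok, :open}
      MyApp.TicketStatus.match?("closed") # => true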
"""
@doc "The list of valid values (not all input types that match them)"
@callback values() :: [atom]
@doc "true if a given term matches a value"
@callback match?(term) :: boolean
@doc "finds the valid value that matches a given input term"
@callback match(term) :: {:ok, atom} | :error
defmacro __using__(opts) do
quote location: :keep, generated: true do
use Ash.Type
@behaviour unquote(__MODULE__)
@values unquote(opts[:values]) ||
raise("Must provide `values` option for `use #{inspect(unquote(__MODULE__))}`")
@string_values @values |> Enum.map(&to_string/1)
@impl unquote(__MODULE__)
def values, do: @values
@impl Ash.Type
def storage_type, do: :string
@impl Ash.Type
def cast_input(value, _) do
match(value)
end
@impl Ash.Type
def cast_stored(value, _) do
match(value)
end
@impl Ash.Type
def dump_to_native(value, _) do
{:ok, to_string(value)}
end
@impl unquote(__MODULE__)
@spec match?(term) :: boolean
def match?(term) do
case match(term) do
{:ok, _} -> true
_ -> false
end
end
@impl unquote(__MODULE__)
@spec match(term) :: {:ok, term} | :error
def match(value) when value in @values, do: {:ok, value}
def match(value) when value in @string_values, do: {:ok, String.to_existing_atom(value)}
def match(value) do
value =
value
|> to_string()
|> String.downcase()
match =
Enum.find_value(@values, fn valid_value ->
sanitized_valid_value =
valid_value
|> to_string()
|> String.downcase()
if sanitized_valid_value == value do
valid_value
end
end)
if match do
{:ok, match}
else
:error
end
rescue
_ ->
:error
end
defoverridable storage_type: 0
end
end
end
|
lib/ash/type/enum.ex
| 0.893704
| 0.856632
|
enum.ex
|
starcoder
|
defmodule Arrow.Compute.Comparison do
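  @moduledoc """
  Comparison operations for `Arrow.Array` values, delegating to the native
  implementations in `Arrow.Native`.

  The plain variants compare two arrays element-wise, the `_utf8` variants
  operate on string arrays, and the `_scalar` variants compare an array
  against a single value (a summary inferred from the function names below;
  the exact return shape is defined by the `Arrow.Native` NIFs).
  """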
alias Arrow.Array
alias Arrow.Native
def eq(%Array{} = left, %Array{} = right), do: Native.array_compute_eq(left, right)
def neq(%Array{} = left, %Array{} = right), do: Native.array_compute_neq(left, right)
def gt(%Array{} = left, %Array{} = right), do: Native.array_compute_gt(left, right)
def gt_eq(%Array{} = left, %Array{} = right), do: Native.array_compute_gt_eq(left, right)
def lt(%Array{} = left, %Array{} = right), do: Native.array_compute_lt(left, right)
def lt_eq(%Array{} = left, %Array{} = right), do: Native.array_compute_lt_eq(left, right)
def eq_utf8(%Array{} = left, %Array{} = right), do: Native.array_compute_eq_utf8(left, right)
def neq_utf8(%Array{} = left, %Array{} = right), do: Native.array_compute_neq_utf8(left, right)
def gt_utf8(%Array{} = left, %Array{} = right), do: Native.array_compute_gt_utf8(left, right)
def gt_eq_utf8(%Array{} = left, %Array{} = right),
do: Native.array_compute_gt_eq_utf8(left, right)
def lt_utf8(%Array{} = left, %Array{} = right), do: Native.array_compute_lt_utf8(left, right)
def lt_eq_utf8(%Array{} = left, %Array{} = right),
do: Native.array_compute_lt_eq_utf8(left, right)
def like_utf8(%Array{} = left, %Array{} = right),
do: Native.array_compute_like_utf8(left, right)
def nlike_utf8(%Array{} = left, %Array{} = right),
do: Native.array_compute_nlike_utf8(left, right)
def eq_scalar(%Array{} = left, right), do: Native.array_compute_eq_scalar(left, right)
def neq_scalar(%Array{} = left, right), do: Native.array_compute_neq_scalar(left, right)
def gt_scalar(%Array{} = left, right), do: Native.array_compute_gt_scalar(left, right)
def gt_eq_scalar(%Array{} = left, right), do: Native.array_compute_gt_eq_scalar(left, right)
def lt_scalar(%Array{} = left, right), do: Native.array_compute_lt_scalar(left, right)
def lt_eq_scalar(%Array{} = left, right), do: Native.array_compute_lt_eq_scalar(left, right)
def eq_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_eq_utf8_scalar(left, right)
def neq_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_neq_utf8_scalar(left, right)
def gt_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_gt_utf8_scalar(left, right)
def gt_eq_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_gt_eq_utf8_scalar(left, right)
def lt_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_lt_utf8_scalar(left, right)
def lt_eq_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_lt_eq_utf8_scalar(left, right)
def like_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_like_utf8_scalar(left, right)
def nlike_utf8_scalar(%Array{} = left, right),
do: Native.array_compute_nlike_utf8_scalar(left, right)
end
|
lib/arrow/compute/comparison.ex
| 0.776411
| 0.432663
|
comparison.ex
|
starcoder
|
defmodule Bittrex.Order do
@moduledoc """
A Bittrex Order.
"""
alias StrawHat.Response
@typedoc """
- `id`: unique ID of this order.
- `market_name`: unique name of the market this order is being placed on.
- `direction`: order direction.
- `type`: order type.
- `quantity`: quantity.
- `limit`: limit price, if present.
- `ceiling`: ceiling, if present.
- `time_in_force`: time in force.
- `fill_quantity`: fill quantity.
- `commission`: commission.
- `proceeds`: proceeds.
- `client_order_id`: client-provided identifier for advanced order tracking.
- `status`: order status.
- `created_at`: timestamp (UTC) of order creation.
- `updated_at`: timestamp (UTC) of last order update.
  - `closed_at`: timestamp (UTC) when this order was closed.
"""
@type t :: %__MODULE__{
id: String.t(),
market_name: String.t(),
direction: String.t(),
type: String.t(),
quantity: number(),
limit: number(),
ceiling: number(),
time_in_force: String.t(),
fill_quantity: number(),
commission: number(),
proceeds: number(),
client_order_id: String.t(),
status: String.t(),
created_at: NaiveDateTime.t(),
updated_at: NaiveDateTime.t(),
closed_at: NaiveDateTime.t()
}
defstruct [
:id,
:market_name,
:direction,
:type,
:quantity,
:limit,
:ceiling,
:time_in_force,
:fill_quantity,
:commission,
:proceeds,
:client_order_id,
:status,
:created_at,
:updated_at,
:closed_at
]
@doc false
def new(data) do
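    # Map Bittrex's camelCase JSON keys onto the snake_case struct
    # fields, parsing the timestamp fields along the way.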
%__MODULE__{
id: data["id"],
market_name: data["marketName"],
direction: data["direction"],
type: data["type"],
quantity: data["quantity"],
limit: data["limit"],
ceiling: data["ceiling"],
time_in_force: data["timeInForce"],
fill_quantity: data["fillQuantity"],
commission: data["commission"],
proceeds: data["proceeds"],
client_order_id: data["clientOrderId"],
status: data["status"],
created_at: Bittrex.format_datetime(data["createdAt"]),
updated_at: Bittrex.format_datetime(data["updatedAt"]),
closed_at: Bittrex.format_datetime(data["closedAt"])
}
end
@doc false
def transform_response(data) when is_list(data) do
data
|> Enum.map(&new/1)
|> Response.ok()
end
@doc false
def transform_response(data) do
data
|> new()
|> Response.ok()
end
end
|
lib/bittrex/order.ex
| 0.864825
| 0.502014
|
order.ex
|
starcoder
|
defmodule ZipList do
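  @moduledoc """
  A zipper over a list: `previous` holds the elements before the cursor in
  reverse order, `current` is the element under the cursor, and `remaining`
  holds the elements after it.

  An illustrative walk (values chosen arbitrarily):

      {:ok, z} = ZipList.from_list([:a, :b, :c])
      z = ZipList.advance(z)
      z.current          # => :b
      ZipList.to_list(z) # => [:a, :b, :c]
  """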
defstruct previous: [], current: nil, remaining: []
def from_list(list, index \\ 0)
def from_list([], _), do: {:error, :empty_list}
def from_list(list, index) when length(list) < index, do: {:error, :index_out_of_bounds}
def from_list(list, index) when is_list(list) do
previous = list |> Enum.take(index) |> Enum.reverse
[current | remaining] = Enum.drop list, index
ziplist = %__MODULE__{previous: previous, current: current, remaining: remaining}
{:ok, ziplist}
end
def to_list(z = %__MODULE__{}) do
tail = [z.current | z.remaining]
Enum.reverse z.previous, tail
end
def previous?(%__MODULE__{previous: previous}), do: previous != []
def remaining?(%__MODULE__{remaining: remaining}), do: remaining != []
def advance(z = %__MODULE__{remaining: []}), do: z
def advance(z = %__MODULE__{remaining: [remaining | rest]}) do
%__MODULE__{previous: [z.current | z.previous], current: remaining, remaining: rest}
end
def advance(z = %__MODULE__{}, n) do
Enum.reduce(1..n, z, fn _, acc -> ZipList.advance(acc) end)
end
def retreat(z = %__MODULE__{previous: []}), do: z
def retreat(z = %__MODULE__{previous: [previous | rest]}) do
%__MODULE__{previous: rest, current: previous, remaining: [z.current | z.remaining]}
end
def retreat(z = %__MODULE__{}, n) do
Enum.reduce(1..n, z, fn _, acc -> ZipList.retreat(acc) end)
end
def current_index(z = %__MODULE__{}), do: Enum.count(z.previous) + 1
def set_current(ziplist, value) do
%__MODULE__{ziplist | current: value}
end
end
defimpl Enumerable, for: ZipList do
def count(%ZipList{remaining: remaining, previous: previous}) do
count = Enum.count(remaining) + Enum.count(previous) + 1
{:ok, count}
end
def member?(%ZipList{previous: previous, current: current, remaining: remaining}, value) do
result = current == value or Enum.member?(previous, value) or Enum.member?(remaining, value)
{:ok, result}
end
def reduce(ziplist, acc, fun) do
ziplist |> ZipList.to_list |> Enumerable.List.reduce(acc, fun)
end
end
defimpl Collectable, for: ZipList do
def into(ziplist) do
{{ziplist, []}, fn
{ziplist, values}, {:cont, item} -> {ziplist, [item | values]}
{ziplist, values}, :done -> %ZipList{ziplist | remaining: ziplist.remaining ++ Enum.reverse(values)}
_, :halt -> :ok
end}
end
end
|
lib/ziplist.ex
| 0.627495
| 0.419648
|
ziplist.ex
|
starcoder
|
defmodule DsWrapper.Value do
@moduledoc """
`GoogleApi.Datastore.V1.Model.Value` wrapper
"""
alias GoogleApi.Datastore.V1.Model.{ArrayValue, Entity, Key, LatLng, Value}
  @meta_fields [:excludeFromIndexes, :meaning]
@doc """
Convert to a `GoogleApi.Datastore.V1.Model.Value`.
## Examples
iex> DsWrapper.Value.from_native(123)
%GoogleApi.Datastore.V1.Model.Value{integerValue: "123"}
iex> DsWrapper.Value.from_native(123, true)
%GoogleApi.Datastore.V1.Model.Value{integerValue: "123", excludeFromIndexes: true}
"""
def from_native(value, exclude_from_index \\ nil)
def from_native(value, exclude_from_index) when is_integer(value) do
%Value{integerValue: Integer.to_string(value), excludeFromIndexes: exclude_from_index}
end
def from_native(value, exclude_from_index) when is_float(value) do
%Value{doubleValue: value, excludeFromIndexes: exclude_from_index}
end
def from_native(value, exclude_from_index) when is_bitstring(value) do
%Value{stringValue: value, excludeFromIndexes: exclude_from_index}
end
def from_native(value, exclude_from_index) when is_boolean(value) do
%Value{booleanValue: value, excludeFromIndexes: exclude_from_index}
end
def from_native(value, exclude_from_index) when is_nil(value) do
%Value{nullValue: "NULL_VALUE", excludeFromIndexes: exclude_from_index}
end
def from_native(value, exclude_from_index) when is_list(value) do
values = Enum.map(value, &from_native/1)
%Value{arrayValue: %ArrayValue{values: values}, excludeFromIndexes: exclude_from_index}
end
def from_native(%{latitude: lat, longitude: lon}, exclude_from_index) do
%Value{geoPointValue: %LatLng{latitude: lat, longitude: lon}, excludeFromIndexes: exclude_from_index}
end
def from_native(%Key{} = value, exclude_from_index) do
%Value{keyValue: value, excludeFromIndexes: exclude_from_index}
end
def from_native(%Entity{} = value, exclude_from_index) do
%Value{entityValue: value, excludeFromIndexes: exclude_from_index}
end
def from_native(%DateTime{} = value, exclude_from_index) do
%Value{timestampValue: value, excludeFromIndexes: exclude_from_index}
end
@doc """
Convert from a `GoogleApi.Datastore.V1.Model.Value`.
## Examples
iex> DsWrapper.Value.to_native(%GoogleApi.Datastore.V1.Model.Value{integerValue: "123"})
123
"""
def to_native(%Value{integerValue: value}) when not is_nil(value) do
value |> Integer.parse() |> elem(0)
end
def to_native(%Value{arrayValue: value}) when not is_nil(value) do
Enum.map(value.values, &to_native/1)
end
def to_native(%Value{entityValue: value}) when not is_nil(value) do
Enum.reduce(value.properties, %{}, fn {key, value}, acc ->
Map.merge(acc, %{key => to_native(value)})
end)
end
def to_native(%Value{geoPointValue: %LatLng{latitude: lat, longitude: lon}}) do
%{latitude: lat, longitude: lon}
end
def to_native(%Value{} = value) do
found =
Map.from_struct(value)
|> Enum.find(fn {k, v} -> k not in @meta_fields && v != nil end)
case found do
{_, v} -> v
_ -> nil
end
end
end
|
lib/ds_wrapper/value.ex
| 0.899905
| 0.41253
|
value.ex
|
starcoder
|
defmodule ClosedIntervals do
@moduledoc """
A ClosedIntervals data structure.
`ClosedIntervals` represents a set of closed intervals and provides functions to
retrieve the interval to which a given value belongs to. `ClosedIntervals` can
handle arbitrary data, as long as it can be ordered in a sensible way. Users
can either use the default term order `&<=/2` if that suits their needs, or
provide an explicit order function.
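## Example
A quick illustration (values outside the covered range map to the
`:"-inf"`/`:"+inf"` intervals, as described in `get_interval/2` below):
    iex> intervals = from([1, 2, 5])
    iex> get_interval(intervals, 3)
    {2, 5}
    iex> get_interval(intervals, 0)
    {:"-inf", 1}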
"""
alias ClosedIntervals.Tree
require Tree
@enforce_keys [:tree, :order, :eq]
defstruct @enforce_keys
@type t(data) :: %__MODULE__{
tree: Tree.t(data),
order: (data, data -> boolean()),
eq: (data, data -> boolean()) | nil
}
@type interval(data) :: {data, data} | {:"-inf", data} | {data, :"+inf"}
@doc """
Create a new `ClosedIntervals` from points.
This function creates a new `ClosedIntervals` from an `Enum` of points. The points
can be of any form, as long as they can be ordered sensibly. For types where the
term order does not order them in a way such that the resulting order represents
a linear ordering along the interval range, a custom order can be applied using the
`order` parameter. `order` defaults to `&<=/2`. Note that a custom order should return
true for equal points, if the resulting order has to be stable.
Additionally, an explicit equality function can be provided which is used in
`ClosedIntervals.get_interval/2` and `ClosedIntervals.get_all_intervals/2`.
## Errors
The function expects that the `enum` contains at least two points. If that is not the case,
an `ArgumentError` is raised.
iex> from([1])
** (ArgumentError) Need at least two points to construct a ClosedIntervals
## Examples
`from/1,2` can handle plain types:
iex> from([1, 2, 3]) |> leaf_intervals()
[{1, 2}, {2, 3}]
It can also handle nested types, if a suitable `order` is defined:
iex> points = [%{idx: 3}, %{idx: 7}, %{idx: 1}]
iex> points |> from(order: &(&1.idx <= &2.idx)) |> leaf_intervals()
[{%{idx: 1}, %{idx: 3}}, {%{idx: 3}, %{idx: 7}}]
## Arguments
* `:order`: A custom order defined on the points used to construct the `ClosedIntervals`
* `:eq`: A custom equality defined on the points used to construct the `ClosedIntervals`
"""
@spec from(Enum.t(), Keyword.t()) :: t(term())
def from(enum, args \\ []) do
{order, eq} = parse_args!(args)
case Enum.sort(enum, order) do
points = [_, _ | _] ->
%__MODULE__{
tree: Tree.construct(points),
order: order,
eq: eq
}
_ ->
raise ArgumentError, "Need at least two points to construct a ClosedIntervals"
end
end
defp parse_args!(args) do
order = Keyword.get(args, :order, &<=/2)
eq = Keyword.get(args, :eq)
if !is_function(order, 2) do
raise ArgumentError, "Expecting :order to be a function of arity 2"
end
if eq && !is_function(eq, 2) do
raise ArgumentError, "Expecting :eq to be a function of arity 2"
end
{order, eq}
end
@doc """
Reconstruct a `ClosedIntervals` from the output of `leaf_intervals/1`.
Note that the `args` must match the arguments used when originally constructing the
`ClosedIntervals` with `from/1,2`.
## Errors
If the list of leaf intervals is not the result of `leaf_intervals/1`, this can result
in an `ArgumentError`.
## Example
iex> closed_intervals = from([1, 2, 3])
iex> leaf_intervals = leaf_intervals(closed_intervals)
iex> from_leaf_intervals(leaf_intervals)
iex> closed_intervals == from_leaf_intervals(leaf_intervals)
true
"""
def from_leaf_intervals(leaf_intervals = [_ | _], args \\ []) do
tree =
leaf_intervals
|> Enum.map(&Tree.from_bounds/1)
|> Tree.from_leaf_intervals()
{order, eq} = parse_args!(args)
%__MODULE__{
tree: tree,
order: order,
eq: eq
}
end
@doc """
Retrieve a list of all leaf intervals.
A leaf interval is an interval which has been constructed from two adjacent
points. It does not expand to `:"-inf"` or `:"+inf"`.
See `from_leaf_intervals/1,2`. We can reconstruct the original `ClosedIntervals`
from a list of leaf intervals.
## Example
iex> from([1, 2, 3]) |> leaf_intervals()
[{1, 2}, {2, 3}]
"""
@spec leaf_intervals(t(data)) :: [{data, data}] when data: var
def leaf_intervals(%__MODULE__{tree: tree}) do
tree |> Tree.leaf_intervals()
end
@doc """
Get the interval to which a value belongs to.
## Example
iex> closed_intervals = from([1, 2, 5])
iex> get_interval(closed_intervals, 3)
{2, 5}
"""
@spec get_interval(t(data), data) :: interval(data)
when data: var
def get_interval(closed_intervals = %__MODULE__{}, value) do
case get_all_intervals(closed_intervals, value) do
[interval] ->
interval
[inf = {:"-inf", _} | _] ->
inf
[inf = {_, :"+inf"} | _] ->
inf
end
end
@doc """
Retrieve all intervals which cover `value`.
This function is useful if the index points used to define the `ClosedIntervals` are not
unique. For example, when defining a step-function, it might make sense to use the same
point multiple times but with different data in order to represent a sharp step. Values
which are placed right at the interval bounds can then belong to multiple closed intervals.
"""
@spec get_all_intervals(t(data), data) :: [interval(data)]
when data: var
def get_all_intervals(%__MODULE__{tree: tree, eq: eq, order: order}, value) do
eq = eq || fn _, _ -> false end
left_bound = Tree.tree(tree, :left_bound)
right_bound = Tree.tree(tree, :right_bound)
cond do
order.(value, left_bound) ->
neg_inf = [{:"-inf", Tree.tree(tree, :left_bound)}]
if eq.(value, left_bound) do
neg_inf ++ Tree.get_all_intervals(tree, value, eq, order)
else
neg_inf
end
order.(right_bound, value) ->
pos_inf = [{Tree.tree(tree, :right_bound), :"+inf"}]
if eq.(value, right_bound) do
pos_inf ++ Tree.get_all_intervals(tree, value, eq, order)
else
pos_inf
end
true ->
Tree.get_all_intervals(tree, value, eq, order)
end
|> List.flatten()
end
@doc """
Serialize `ClosedIntervals` into a list.
## Example
iex> closed_intervals = from([1, 2, 3])
iex> to_list(closed_intervals)
[1, 2, 3]
We can construct the original `ClosedIntervals` from a list
generated by `to_list/1`:
iex> closed_intervals = from([1, 2, 3])
iex> to_list(closed_intervals)
iex> closed_intervals == closed_intervals |> to_list() |> from()
true
"""
@spec to_list(t(data)) :: [data] when data: var
def to_list(closed_intervals = %__MODULE__{}) do
Tree.to_list(closed_intervals.tree)
end
@doc """
Map a function over all intervals.
## Example
iex> closed_intervals = from([1, 2, 3])
iex> map(closed_intervals, & &1 + 1) |> to_list()
[2, 3, 4]
"""
@spec map(t(data), (data -> data)) :: t(data) when data: var
def map(closed_intervals = %__MODULE__{}, mapper) when is_function(mapper, 1) do
%__MODULE__{closed_intervals | tree: Tree.map(closed_intervals.tree, mapper)}
end
@doc """
Retrieve the left bound of a `ClosedIntervals`.
## Example
iex> [1, 2, 3] |> from() |> left_bound()
1
"""
def left_bound(%__MODULE__{tree: tree}) do
Tree.left_bound(tree)
end
@doc """
Retrieve the right bound of a `ClosedIntervals`.
## Example
iex> [1, 2, 3] |> from() |> right_bound()
3
"""
def right_bound(%__MODULE__{tree: tree}) do
Tree.right_bound(tree)
end
defimpl Inspect, for: ClosedIntervals do
import Inspect.Algebra
def inspect(closed_intervals, opts) do
concat([
"#ClosedIntervals<",
to_doc(ClosedIntervals.leaf_intervals(closed_intervals), opts),
">"
])
end
end
end
|
lib/closed_intervals.ex
| 0.952717
| 0.90574
|
closed_intervals.ex
|
starcoder
|
defmodule Timex.Format.Time.Formatters.Humanized do
@moduledoc """
Handles formatting timestamp values as human readable strings.
For formatting timestamps as points in time rather than intervals,
use `Timex.format`
"""
use Timex.Format.Time.Formatter
alias Timex.Translator
@minute 60
@hour @minute * 60
@day @hour * 24
@week @day * 7
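# Note: months and years are approximated as 30 and 365 days, respectively.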
@month @day * 30
@year @day * 365
@doc """
Return a human readable string representing the time interval.
## Examples
iex> {1435, 180354, 590264} |> #{__MODULE__}.format
"45 years, 6 months, 5 days, 21 hours, 12 minutes, 34 seconds, 590.264 milliseconds"
iex> {0, 65, 0} |> #{__MODULE__}.format
"1 minute, 5 seconds"
"""
@spec format(Types.timestamp) :: String.t | {:error, term}
def format({_,_,_} = timestamp), do: lformat(timestamp, Translator.default_locale)
def format(_), do: {:error, :invalid_timestamp}
@doc """
Return a human readable string representing the time interval, translated to the given locale
## Examples
iex> {1435, 180354, 590264} |> #{__MODULE__}.lformat("ru")
"45 Π³ΠΎΠ΄Π° 6 ΠΌΠ΅ΡΡΡΠ° 5 Π΄Π½Π΅ΠΌ 21 ΡΠ°Ρ 12 ΠΌΠΈΠ½ΡΡΡ 34 ΡΠ΅ΠΊΡΠ½Π΄Ρ 590.264 ΠΌΠΈΠ»Π»ΠΈΡΠ΅ΠΊΡΠ½Π΄Ρ"
iex> {0, 65, 0} |> #{__MODULE__}.lformat("ru")
"1 ΠΌΠΈΠ½ΡΡΠ° 5 ΡΠ΅ΠΊΡΠ½Π΄Ρ"
"""
@spec lformat(Types.timestamp, String.t) :: String.t | {:error, term}
def lformat({_,_,_} = timestamp, locale) do
timestamp
|> deconstruct
|> do_format(locale)
end
def lformat(_, _locale), do: {:error, :invalid_timestamp}
defp do_format(components, locale),
do: do_format(components, <<>>, locale)
defp do_format([], str, _locale),
do: str
defp do_format([{unit, value}|rest], str, locale) do
unit = Atom.to_string(unit)
unit_with_value = Translator.translate_plural(locale, "units", "%{count} #{unit}", "%{count} #{unit}s", value)
separator = Translator.translate(locale, "symbols", ",")
case str do
<<>> -> do_format(rest, "#{unit_with_value}", locale)
_ -> do_format(rest, str <> "#{separator} #{unit_with_value}", locale)
end
end
defp deconstruct({_, _, micro} = ts), do: deconstruct({ts |> Time.to_seconds |> trunc, micro}, [])
defp deconstruct({0, 0}, components), do: components |> Enum.reverse
defp deconstruct({seconds, us}, components) when seconds > 0 do
cond do
seconds >= @year -> deconstruct({rem(seconds, @year), us}, [{:year, div(seconds, @year)} | components])
seconds >= @month -> deconstruct({rem(seconds, @month), us}, [{:month, div(seconds, @month)} | components])
seconds >= @week -> deconstruct({rem(seconds, @week), us}, [{:week, div(seconds, @week)} | components])
seconds >= @day -> deconstruct({rem(seconds, @day), us}, [{:day, div(seconds, @day)} | components])
seconds >= @hour -> deconstruct({rem(seconds, @hour), us}, [{:hour, div(seconds, @hour)} | components])
seconds >= @minute -> deconstruct({rem(seconds, @minute), us}, [{:minute, div(seconds, @minute)} | components])
true -> deconstruct({0, us}, [{:second, seconds} | components])
end
end
defp deconstruct({seconds, micro}, components) when seconds < 0, do: deconstruct({seconds * -1, micro}, components)
defp deconstruct({0, micro}, components) do
msecs = {0, 0, micro} |> Time.abs |> Time.to_milliseconds
cond do
msecs >= 1.0 -> deconstruct({0, 0}, [{:millisecond, msecs} | components])
true -> deconstruct({0, 0}, [{:microsecond, micro} | components])
end
end
end
|
lib/format/time/formatters/humanized.ex
| 0.924159
| 0.443781
|
humanized.ex
|
starcoder
|
defmodule EQRCode.Alphanumeric do
@doc """
Takes a string and encodes each pair of characters into an
11 bit binary. If the string has an odd number of characters
the last character is encoded as a 6 bit binary.
More info: https://www.thonky.com/qr-code-tutorial/alphanumeric-mode-encoding
## Examples
iex> EQRCode.Alphanumeric.from_binary("ABCD")
<<57, 168, 41::size(6)>>
iex> EQRCode.Alphanumeric.from_binary("ABC")
<<57, 166, 0::size(1)>>
iex> EQRCode.Alphanumeric.from_binary("AB")
<<57, 5::size(3)>>
iex> EQRCode.Alphanumeric.from_binary("A")
<<10::size(6)>>
iex> EQRCode.Alphanumeric.from_binary("")
""
Unsupported characters will raise an ArgumentError:
iex> EQRCode.Alphanumeric.from_binary("@")
** (ArgumentError) Alphanumeric encoding does not support '@' character
"""
@spec from_binary(binary()) :: binary()
def from_binary(<<one, two, rest::binary>>) do
value = (45 * encoding_for(one)) + encoding_for(two)
<<value::11, from_binary(rest)::bitstring>>
end
def from_binary(<<one>>), do: <<encoding_for(one)::6>>
def from_binary(<<>>), do: <<>>
# Encoding table sourced from: https://www.thonky.com/qr-code-tutorial/alphanumeric-table
defp encoding_for(codepoint) do
case codepoint do
?0 -> 0
?1 -> 1
?2 -> 2
?3 -> 3
?4 -> 4
?5 -> 5
?6 -> 6
?7 -> 7
?8 -> 8
?9 -> 9
?A -> 10
?B -> 11
?C -> 12
?D -> 13
?E -> 14
?F -> 15
?G -> 16
?H -> 17
?I -> 18
?J -> 19
?K -> 20
?L -> 21
?M -> 22
?N -> 23
?O -> 24
?P -> 25
?Q -> 26
?R -> 27
?S -> 28
?T -> 29
?U -> 30
?V -> 31
?W -> 32
?X -> 33
?Y -> 34
?Z -> 35
32 -> 36
?$ -> 37
?% -> 38
?* -> 39
?+ -> 40
?- -> 41
?. -> 42
?/ -> 43
?: -> 44
bad -> raise(ArgumentError, message: "Alphanumeric encoding does not support '#{<<bad>>}' character")
end
end
end
|
lib/eqrcode/alphanumeric.ex
| 0.78233
| 0.423458
|
alphanumeric.ex
|
starcoder
|
defmodule Day23.PacketQueue do
@moduledoc """
A packet queue for a single Intcode computer in a network.
The packet queue receives packet messages from a `Day23.Router` and queues them up
so they can be provided to an Intcode computer upon request.
The packet queue is the `t:Intcode.Computer.handler/0` for a computer in a network.
When the computer requests input, the packet queue will provide the `x` and `y`
values of the next packet in the queue if there is one. Otherwise, it will provide
the computer with `-1` to indicate there is no packet.
The packet queue also watches the output of the computer and sends the packets the
computer outputs to the `Day23.Router` so they can be sent to the appropriate queue.
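A usage sketch (`router` stands in for a `Day23.Router` pid obtained
elsewhere; only the functions defined below are used):
    task = Day23.PacketQueue.async()
    Day23.PacketQueue.assign_addr(task.pid, 0, router)
    Day23.PacketQueue.enqueue(task.pid, {10, 20})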
"""
@typedoc """
A packet queue PID.
"""
@type t() :: pid()
defstruct router: nil,
addr: nil,
queue: [],
awaiting_addr: nil,
partial_packet: [],
idle_count: 0
@doc """
Starts a new task running a packet queue.
Use the task's PID as the handler for an Intcode computer in the network.
"""
@spec async :: Task.t()
def async do
Task.async(__MODULE__, :run, [])
end
@doc """
Run the loop for the packet queue to process messages.
"""
@spec run :: none
def run do
loop(%Day23.PacketQueue{})
end
@doc """
Assign a network address to the queue.
This informs the queue which router the computer it manages belongs to.
It also causes the packet queue to inform the Intcode computer of its
address, which is required before the computer can begin sending traffic.
"""
@spec assign_addr(t, Day23.Router.addr(), Day23.Router.t()) :: any
def assign_addr(pid, addr, router) do
send(pid, {:assign_addr, addr, router})
end
@doc """
Add a new packet to the queue.
The packet will be delivered to the Intcode computer the next time it asks
for input.
"""
@spec enqueue(t, {number, number}) :: any
def enqueue(pid, point) do
send(pid, {:enqueue, point})
end
defp loop(pq) do
receive do
{:assign_addr, addr, router} ->
case pq.awaiting_addr do
nil ->
loop(%{pq | router: router, addr: addr, queue: [addr]})
pid ->
Intcode.send_input(pid, addr)
loop(%{pq | router: router, addr: addr, awaiting_addr: nil})
end
{:input, pid} ->
case pq do
%Day23.PacketQueue{addr: nil} ->
loop(%{pq | awaiting_addr: pid})
%Day23.PacketQueue{queue: []} ->
Intcode.send_input(pid, -1)
if pq.idle_count == 3 do
Day23.Router.report_idle(pq.router, pq.addr)
end
loop(%{pq | idle_count: pq.idle_count + 1})
%Day23.PacketQueue{queue: [hd | tl]} ->
Intcode.send_input(pid, hd)
loop(%{pq | queue: tl})
end
{:enqueue, {x, y}} ->
Day23.Router.report_active(pq.router, pq.addr)
loop(%{pq | queue: pq.queue ++ [x, y], idle_count: 0})
{:output, _, value} ->
Day23.Router.report_active(pq.router, pq.addr)
packet = pq.partial_packet ++ [value]
case Enum.count(packet) do
3 ->
Day23.Router.route(pq.router, List.to_tuple(packet))
loop(%{pq | partial_packet: [], idle_count: 0})
_ ->
loop(%{pq | partial_packet: packet, idle_count: 0})
end
end
end
end
|
aoc2019_elixir/apps/day23/lib/packet_queue.ex
| 0.845289
| 0.6991
|
packet_queue.ex
|
starcoder
|
defmodule Scenic.Primitive.Component do
@moduledoc """
Add a child component to a graph.
When a scene pushes a graph containing a Component to its ViewPort,
a new scene, containing the component, is created and added as a child
to the scene that created it.
Any events the new component creates are sent up to the parent. The parent
can use functions in the Scenic.Scene module to manage its children and
send them messages.
The standard components, such as button, slider, etc. have wrapper functions
making them very easy to add to a graph. However, if you have a custom
component you can add it to any graph manually using the add_to_graph
function.
You typically want to give components an :id. This will be used to identify
events coming from that component back to your scene.
```elixir
import Scenic.Components # contains the button helper
graph
|> button( "Press Me", id: :press_me )
|> MyComponent.add_to_graph( {"Some data", 123}, id: :my_component )
```
"""
use Scenic.Primitive
alias Scenic.Script
alias Scenic.Primitive
alias Scenic.Primitive.Style
@type t :: {mod :: module, param :: any, name :: atom | String.t()}
@type styles_t :: [:hidden | :scissor]
@styles [:hidden, :scissor]
# Longer names use more memory, but have a lower chance of collision.
# 16 bytes should still have an extremely low chance of collision:
# (16 * 8) = 128 bits of randomness
@name_length 16
@main_id Scenic.ViewPort.main_id()
# ============================================================================
# data verification and serialization
@impl Primitive
@spec validate(
{mod :: module, param :: any}
| {mod :: module, param :: any, name :: String.t()}
) ::
{:ok, {mod :: module, param :: any, name :: String.t()}}
| {:error, String.t()}
def validate({mod, param}) do
name =
@name_length
|> :crypto.strong_rand_bytes()
|> Base.url_encode64(padding: false)
validate({mod, param, name})
end
# special case the root
def validate({@main_id, nil, @main_id}), do: {:ok, {@main_id, nil, @main_id}}
def validate({mod, param, name})
when is_atom(mod) and mod != nil and
(is_pid(name) or is_atom(name) or is_bitstring(name)) and name != nil do
case mod.validate(param) do
{:ok, data} -> {:ok, {mod, data, name}}
err -> err
end
end
def validate(data) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Component specification
Received: #{inspect(data)}
#{IO.ANSI.yellow()}
The specification for a component is { module, param } or { module, param, name }
If you do not supply a name, a random string will be chosen for you.#{IO.ANSI.default_color()}
"""
}
end
# --------------------------------------------------------
# filter and gather styles
@doc """
Returns a list of styles recognized by this primitive.
"""
@impl Primitive
@spec valid_styles() :: styles :: styles_t()
def valid_styles(), do: @styles
# --------------------------------------------------------
# compiling a component is a special case and is handled in Scenic.Graph.Compiler
@doc false
@impl Primitive
@spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
def compile(%Primitive{module: __MODULE__}, _styles) do
raise "compiling a Component is a special case and is handled in Scenic.Graph.Compiler"
end
# --------------------------------------------------------
@doc false
def default_pin({@main_id, _data, @main_id}, _styles), do: {0, 0}
def default_pin({module, data, _name}, styles) when is_atom(module) do
case Kernel.function_exported?(module, :default_pin, 2) do
true -> module.default_pin(data, styles)
false -> {0, 0}
end
end
end
|
lib/scenic/primitive/component.ex
| 0.883129
| 0.85555
|
component.ex
|
starcoder
|
defmodule XtbClient.Messages.TradeInfo do
alias XtbClient.Messages.Operation
@moduledoc """
Info about the trade that has happened.
## Parameters
- `close_price` close price in base currency,
- `close_time` `null` if order is not closed,
- `closed` whether the trade is closed,
- `operation` operation code, see `XtbClient.Messages.Operation`,
- `comment` comment,
- `commission` commission in account currency, `null` if not applicable,
- `custom_comment` the value the customer may provide in order to retrieve it later,
- `digits` number of decimal places,
- `expiration` `null` if order is not closed,
- `margin_rate` margin rate,
- `nominal_value` nominal value, `null` if not applicable,
- `offset` trailing offset,
- `open_price` open price in base currency,
- `open_time` open time,
- `order_opened` order number for opened transaction,
- `order_closed` order number for closed transaction,
- `position` order number common both for opened and closed transaction,
- `profit` profit in account currency,
- `stop_loss` zero if stop loss is not set (in base currency),
- `spread` spread,
- `state` state,
- `storage` order swaps in account currency,
- `symbol` symbol name or `null` for deposit/withdrawal operations,
- `taxes` taxes,
- `timestamp` timestamp,
- `take_profit` zero if take profit is not set (in base currency),
- `type` type,
- `volume` volume in lots.
"""
@type t :: %__MODULE__{
close_price: float(),
close_time: DateTime.t() | nil,
closed: boolean(),
operation: integer(),
comment: binary(),
commission: float() | nil,
custom_comment: binary() | nil,
digits: integer(),
expiration: DateTime.t() | nil,
margin_rate: float(),
nominal_value: float() | nil,
offset: integer(),
open_price: float(),
open_time: DateTime.t(),
order_opened: integer(),
order_closed: integer(),
position: integer(),
profit: float(),
stop_loss: float(),
spread: float() | nil,
state: integer() | nil,
storage: float(),
symbol: binary() | nil,
taxes: float() | nil,
timestamp: DateTime.t() | nil,
take_profit: float(),
type: integer() | nil,
volume: float()
}
@enforce_keys [
:close_price,
:close_time,
:closed,
:operation,
:comment,
:commission,
:custom_comment,
:digits,
:expiration,
:margin_rate,
:nominal_value,
:offset,
:open_price,
:open_time,
:order_opened,
:order_closed,
:position,
:profit,
:stop_loss,
:storage,
:symbol,
:take_profit,
:volume
]
defstruct close_price: 0.0,
close_time: nil,
closed: nil,
operation: nil,
comment: "",
commission: nil,
custom_comment: "",
digits: 0,
expiration: nil,
margin_rate: 0.0,
nominal_value: nil,
offset: 0,
open_price: 0.0,
open_time: nil,
order_opened: 0,
order_closed: 0,
position: 0,
profit: nil,
stop_loss: 0.0,
spread: nil,
state: nil,
storage: 0.0,
symbol: "",
taxes: nil,
timestamp: nil,
take_profit: 0.0,
type: nil,
volume: 0.0
def new(
%{
"state" => state,
"type" => type
} = args
) do
value =
args
|> Map.delete("state")
|> Map.delete("type")
|> __MODULE__.new()
%{value | state: state, type: type}
end
def new(
%{
"spread" => spread,
"taxes" => taxes,
"timestamp" => timestamp_value
} = args
)
when is_number(spread) and
is_number(taxes) and
is_integer(timestamp_value) do
value =
args
|> Map.delete("spread")
|> Map.delete("taxes")
|> Map.delete("timestamp")
|> __MODULE__.new()
%{
value
| spread: spread,
taxes: taxes,
timestamp: DateTime.from_unix!(timestamp_value, :millisecond)
}
end
def new(%{
"close_price" => close_price,
"close_time" => close_time_value,
"closed" => closed,
"cmd" => operation,
"comment" => comment,
"commission" => commission,
"customComment" => custom_comment,
"digits" => digits,
"expiration" => expiration_value,
"margin_rate" => margin_rate,
"nominalValue" => nominal_value,
"offset" => offset,
"open_price" => open_price,
"open_time" => open_time_value,
"order" => order_opened,
"order2" => order_closed,
"position" => position,
"profit" => profit,
"sl" => stop_loss,
"storage" => storage,
"symbol" => symbol,
"tp" => take_profit,
"volume" => volume
})
when is_number(close_price) and
is_boolean(closed) and
is_integer(operation) and
is_number(commission) and
is_integer(digits) and
is_number(margin_rate) and
is_integer(offset) and
is_number(open_price) and is_integer(open_time_value) and
is_integer(order_opened) and is_integer(order_closed) and is_integer(position) and
is_number(stop_loss) and
is_number(storage) and
is_number(take_profit) and
is_number(volume) do
%__MODULE__{
close_price: close_price,
close_time:
(not is_nil(close_time_value) && DateTime.from_unix!(close_time_value, :millisecond)) ||
close_time_value,
closed: closed,
operation: Operation.parse(operation),
comment: comment,
commission: commission,
custom_comment: custom_comment,
digits: digits,
expiration:
(not is_nil(expiration_value) && DateTime.from_unix!(expiration_value, :millisecond)) ||
expiration_value,
margin_rate: margin_rate,
nominal_value: nominal_value,
offset: offset,
open_price: open_price,
open_time: DateTime.from_unix!(open_time_value, :millisecond),
order_opened: order_opened,
order_closed: order_closed,
position: position,
profit: profit,
stop_loss: stop_loss,
storage: storage,
symbol: symbol,
take_profit: take_profit,
volume: volume
}
end
end
|
lib/xtb_client/messages/trade_info.ex
| 0.842815
| 0.446736
|
trade_info.ex
|
starcoder
|
defmodule Infusionsoft.Caches.Companies do
@moduledoc false
# Manages the cache for companies.
# The update task runs every 15 minutes, and gets the most recent list of
# companies from Infusionsoft for every API token that's been used in
# the app and is still valid.
use GenServer
alias Infusionsoft.Endpoints.XML.Data
# Client API
def start_link(_) do
GenServer.start_link(__MODULE__, %{}, name: __MODULE__)
end
@doc "The first argument can be either the company Id or Name."
@spec lookup(String.t() | integer(), String.t(), nil | String.t()) ::
{:ok, map()} | {:error, binary()}
def lookup(identifier, token, app \\ nil) do
GenServer.call(__MODULE__, {:lookup, token, app, identifier})
end
# Server API
def init(state) do
if enabled?() do
schedule_initial_job()
{:ok, state}
else
:ignore
end
end
def handle_call({:lookup, token, app, identifier}, _from, state) do
case state[token] do
nil ->
with {:ok, companies} <- get_companies(token, app) do
state = Map.put(state, token, %{token: token, app: app, companies: companies})
{:reply, check_company(companies, identifier), state}
else
{:error, error} ->
{:reply, {:error, "Failed to get companies with error: #{error}"}, state}
_ ->
{:reply, {:error, "Unexpected error occurred getting companies"}, state}
end
group ->
{:reply, check_company(group.companies, identifier), state}
end
end
def handle_info(:refresh, state) do
state =
for {token, group} <- state do
with {:ok, companies} <- get_companies(token, group.app) do
%{token: token, app: group.app, companies: companies}
else
# With these named errors, don't remove the token from state
{:error, "timeout"} -> state[token]
{:error, "closed"} -> state[token]
_ -> nil
end
end
|> Enum.filter(& &1)
|> Enum.group_by(fn g -> g.token end)
|> Enum.into(%{}, fn {token, [group]} -> {token, group} end)
schedule_next_job()
{:noreply, state}
end
# Ignores when HTTP clients send info messages about connections closed
def handle_info(_, state) do
{:noreply, state}
end
defp schedule_initial_job() do
# In 5 seconds
Process.send_after(self(), :refresh, 5_000)
end
defp schedule_next_job() do
# In 15 minutes
Process.send_after(self(), :refresh, 15 * 60 * 1000)
end
defp enabled?() do
not (Application.get_env(:infusionsoft, __MODULE__)[:enabled] == false)
end
defp get_companies(token, app) do
query = %{"Id" => "%"}
return_fields = ["Id", "Company"]
response = Data.query_a_data_table("Company", query, return_fields, token, app)
with {:ok, companies} <- response do
# Duplicate the field list by Name and Id so that access is
# available translating from Common to XML / REST, and vice versa
companies_by_name = group_by_name(companies)
companies_by_id = group_by_id(companies)
{:ok, Map.merge(companies_by_name, companies_by_id)}
end
end
defp group_by_name(companies) do
Enum.group_by(companies, fn c -> String.downcase(c["Company"]) end)
end
defp group_by_id(companies) do
Enum.group_by(companies, fn c -> c["Id"] end)
end
defp check_company(custom_fields, identifier)
defp check_company(companies, id) when is_integer(id) do
case companies[id] do
nil -> {:error, ~s|No company with id #{id} exists|}
[company] -> {:ok, company}
_any -> {:error, ~s|Company with id #{id} exists more than once|}
end
end
defp check_company(companies, name) when is_binary(name) do
case companies[String.downcase(name)] do
nil -> {:error, ~s|No company "#{name}" exists|}
[company] -> {:ok, company}
_any -> {:error, ~s|Company "#{name}" exists more than once|}
end
end
defp check_company(_, name) do
{:error, "Invalid type: #{inspect(name)}"}
end
end
|
lib/infusionsoft/caches/companies.ex
| 0.727201
| 0.406096
|
companies.ex
|
starcoder
|
defmodule ExSync do
require Logger
alias ExSync.{Shadow, Edit, Storage}
@moduledoc """
This module implements the main parts of the server-side flow of the
Diff Sync algorithm.
The main function here is [sync cycle](#sync_cycle/5), which does a full
cycle given the object id, shadow, backup shadow, edits and the storage
adapter.
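A usage sketch (assuming `MyApp.Storage` implements the `ExSync.Storage`
behaviour and `doc_id` identifies a stored document; both names are
placeholders):
    {:ok, {new_shadow, edits_for_client}} =
      ExSync.sync_cycle(doc_id, shadow, backup_shadow, client_edits, MyApp.Storage)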
"""
@type id :: any
@type document :: map
@type error :: (atom | map)
@doc """
The Diff Cycle. Takes changes from the client (can be an empty list) and
applies them to the shadow.
Updates the doc using `ExSync.Storage.get_and_update/2` and returns the new
shadow as well as any edits between the shadow and the doc.
"""
@spec sync_cycle(any, Shadow.t, Shadow.t, [Edit.t], module) ::
{:ok, {Shadow.t, [Edit.t]}} | {:error, error}
def sync_cycle(id, shadow, backup_shadow, edits, storage) do
with {:ok, shadow} <- patch_shadows(shadow, backup_shadow, edits),
{:ok, doc} <- patch_server_doc(storage, id, edits),
{:ok, {shadow, edits}} <- get_server_doc_edits(shadow, doc),
do: {:ok, {put_in(shadow.doc, doc), edits}}
end
@doc """
This function patches the server_shadow or backup_shadow with edits.
Expects server_shadow and backup_shadow as type ExSync.Shadow,
edits as a List of ExSync.Edit.
"""
@spec patch_shadows(Shadow.t, Shadow.t, [Edit.t]) ::
{:ok, Shadow.t} | {:error, error}
def patch_shadows(server_shadow, backup_shadow, edits) do
case apply_shadow_edits(server_shadow, backup_shadow, edits) do
{:ok, {server_shadow, _backup_shadow}} ->
# TODO: Fix logic around backup shadows
{:ok, server_shadow}
{:error, reason} ->
Logger.error "Patching shadows failed: #{inspect reason}"
{:error, reason}
end
end
@doc """
This function patches the server doc.
It expects a storage adapter that implements the ExSync.Storage behaviour,
an id and a list of edits as ExSync.Edit.
It will use the `get_and_update/2` function of the storage, passing it a
function to apply the edits to the server doc. That way we can use locks
in the function to ensure data consistency between reading and writing.
"""
@spec patch_server_doc(atom, any, [Edit.t]) ::
{:ok, document} | {:error, error}
def patch_server_doc(storage_adapter, id, edits) do
storage_adapter.get_and_update(id, &apply_server_doc_edits(&1, edits))
end
@doc """
Calculates the difference between the server shadow and the server doc.
Returns the new edits as a list of one item as well as the server shadow with
an updated server_version number (if applicable).
"""
def get_server_doc_edits(server_shadow, doc) do
case diff_patch().diff(server_shadow.doc, doc) do
{:ok, diff} -> format_diff(diff, server_shadow)
error -> error
end
end
defp format_diff(nil, server_shadow), do: {:ok, {server_shadow, []}}
defp format_diff(diff, server_shadow) do
diff = List.wrap %{
diff: diff,
serverVersion: server_shadow.server_version,
localVersion: server_shadow.client_version
}
server_shadow = update_in server_shadow.server_version, &(&1 + 1)
{:ok, {server_shadow, diff}}
end
defp apply_server_doc_edits(doc, [edit | edits]) do
case diff_patch().patch(doc, edit["diff"]) do
# Patch successful
{:ok, new_doc} ->
apply_server_doc_edits new_doc, edits
# Patch failed, throwing away (Chapter 3 list step f)
{:error, _reason} ->
apply_server_doc_edits doc, edits
end
end
defp apply_server_doc_edits(doc, []), do: {:ok, doc}
defp apply_shadow_edits(server_shadow, backup_shadow, [edit | edits]) do
cond do
# Ideal, we are on the same page:
server_shadow.server_version == edit["serverVersion"] ->
do_apply_shadow_edits(server_shadow, backup_shadow, [edit | edits])
# Not so ideal, previous edits were lost but we still have a backup:
backup_shadow.server_version == edit["serverVersion"] ->
do_apply_shadow_edits(backup_shadow, backup_shadow, [edit | edits])
# Nope, we no longer have that server version you are talking about
true ->
{:error, %{
reason: :no_matching_server_version,
shadow_server_version: server_shadow.server_version,
backup_server_version: backup_shadow.server_version,
client_server_version: edit["serverVersion"],
edit: edit
}}
end
end
defp apply_shadow_edits(server_shadow, backup_shadow, []) do
{:ok, {server_shadow, backup_shadow}}
end
defp do_apply_shadow_edits(server_shadow, backup_shadow, [edit | edits]) do
cond do
# Ideal, we are on the same page:
server_shadow.client_version == edit["localVersion"] ->
patch_shadow_edits server_shadow, backup_shadow, edit, edits
# Not ideal, but we already saw this client version. Throw away edit.
server_shadow.client_version > edit["localVersion"] ->
apply_shadow_edits server_shadow, backup_shadow, edits
true ->
{:error, %{
reason: :no_matching_client_version,
shadow_client_version: server_shadow.client_version,
client_client_version: edit["localVersion"],
edit: edit
}}
end
end
defp patch_shadow_edits(server_shadow, backup_shadow, edit, edits) do
case diff_patch().patch(server_shadow.doc, edit["diff"]) do
{:ok, new_doc} ->
server_shadow =
server_shadow
|> Map.put(:doc, new_doc)
|> Map.update!(:client_version, &(&1 + 1))
apply_shadow_edits server_shadow, backup_shadow, edits
{:error, reason} -> {:error, reason}
end
end
@doc false
def diff_patch, do: Application.get_env(:ex_sync, :diff_patch)
end
|
lib/ex_sync.ex
| 0.681409
| 0.419618
|
ex_sync.ex
|
starcoder
|
defmodule Magnet.Encoder do
@moduledoc """
Encodes a `Magnet` struct to a Magnet URI.
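A sketch (struct fields are inferred from the `do_encode/2` clauses below;
the parameter order in the generated URI may vary):
    Magnet.Encoder.encode(%Magnet{name: "example", info_hash: ["urn:btih:abc"]})
    #=> {:ok, "magnet:?dn=example&xt=urn:btih:abc"}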
"""
@spec encode(Magnet.t()) :: {:ok, String.t()}
def encode(%Magnet{} = magnet) do
data =
magnet
|> Map.from_struct()
|> Map.to_list()
|> Enum.reduce(%{}, &do_encode/2)
|> Enum.map_join("&", &encode_kv_pair/1)
{:ok, "magnet:?#{data}"}
end
@spec encode_kv_pair({atom | String.t(), any}) :: String.t()
defp encode_kv_pair({k, v}) do
cond do
k in [:as, :xs, :tr] -> "#{k}=#{URI.encode(v)}"
is_binary(k) && String.starts_with?(k, "x.") -> "#{k}=#{URI.encode(v)}"
true -> "#{k}=#{v}"
end
end
@spec do_encode({atom, any}, map) :: map
defp do_encode({_, []}, acc), do: acc
defp do_encode({_, nil}, acc), do: acc
defp do_encode({:name, name}, acc) when is_binary(name),
do: Map.put(acc, :dn, name)
defp do_encode({:length, length}, acc) when is_number(length),
do: Map.put(acc, :xl, length)
defp do_encode({:announce, [announce]}, acc) when is_binary(announce),
do: Map.put(acc, :tr, announce)
defp do_encode({:announce, announces}, acc) when is_list(announces),
do: into_group(acc, :tr, announces)
defp do_encode({:fallback, fallback}, acc) when is_binary(fallback),
do: Map.put(acc, :as, fallback)
defp do_encode({:info_hash, [info_hash]}, acc) when is_binary(info_hash),
do: Map.put(acc, :xt, info_hash)
defp do_encode({:info_hash, info_hashes}, acc) when is_list(info_hashes),
do: into_group(acc, :xt, info_hashes)
defp do_encode({:keywords, [keyword]}, acc) when is_binary(keyword),
do: Map.put(acc, :kt, keyword)
defp do_encode({:keywords, keywords}, acc) when is_list(keywords),
do: into_group(acc, :kt, keywords)
defp do_encode({:manifest, manifest}, acc) when is_binary(manifest),
do: Map.put(acc, :mt, manifest)
defp do_encode({:source, [source]}, acc) when is_binary(source),
do: Map.put(acc, :xs, source)
defp do_encode({:source, sources}, acc) when is_list(sources),
do: into_group(acc, :xs, sources)
defp do_encode({:experimental, experimentals}, acc) when is_map(experimentals) do
case Enum.empty?(experimentals) do
false ->
Enum.reduce(experimentals, acc, fn {key, value}, exp_acc ->
Map.put(exp_acc, "x.#{key}", value)
end)
true ->
acc
end
end
# Group multiple values (lists) into keys of the form key-dot-n: key.1, key.2, etc.
@spec into_group(map, atom, [String.t()]) :: map
defp into_group(acc, key, values) do
values
|> Enum.with_index(1)
|> Enum.reduce(acc, fn {value, index}, acc ->
Map.put(acc, "#{key}.#{index}", value)
end)
end
end
|
lib/magnet/encoder.ex
| 0.798226
| 0.418786
|
encoder.ex
|
starcoder
|
defmodule YEnc do
@moduledoc ~S"""
The Erlang yEnc decoder and encoder.
yEnc is a binary-to-text encoding scheme for transferring binary files in
messages on Usenet or via e-mail. It reduces the overhead over previous
US-ASCII-based encoding methods by using an 8-bit encoding method. yEnc's
overhead is often (if each byte value appears approximately with the same
frequency on average) as little as 1β2%, compared to 33%β40% overhead
for 6-bit encoding methods like uuencode and Base64. yEnc was initially
developed by <NAME> and its first release was early 2001. By 2003
yEnc became the de facto standard encoding system for binary files on Usenet.
The name yEncode is a wordplay on "Why encode?", since the idea is to only
encode characters if it is absolutely required to adhere to the message
format standard.
## Examples
iex> YEnc.encode(<<"ERLANG">>)
"o|vkxq"
iex> YEnc.decode("o|vkxq")
"ERLANG"
iex> YEnc.encode(<<0, 0, 0, 224, 0, 0, 0, 224, 0>>, line_size: 4)
"***=J\r\n***=J\r\n*"
iex> YEnc.decode("***=J\r\n***=J\r\n*")
<<0, 0, 0, 224, 0, 0, 0, 224, 0>>
iex> YEnc.post("0b.bin", <<>>)
"=ybegin line=128 size=0 name=0b.bin\r\n\r\n=yend size=0 crc32=00000000"
iex> YEnc.post("Erlang.txt", <<"ERLANG">>)
"=ybegin line=128 size=6 name=Erlang.txt\r\no|vkxq\r\n=yend size=6 crc32=8a5c101d"
## Encoding Principle
The encoding process represents each octet of input data with a single
corresponding encoded output character. The ASCII value of each output
character is derived by the following simple formula:
O = (I+42) % 256
That is, the output value is equal to the ASCII value of each input
character plus 42, all modulo 256. This reduces overhead by reducing the
number of NULL characters (ASCII 00) that would otherwise have had needed
to be escaped, since many binaries contain a disproportionately large
number of NULLs).
Under special circumstances, a single escape character (ASCII 3Dh, "=") is
used to indicate that the following output character is "critical", and
requires special handling.
Critical characters include the following:
ASCII 00h (NULL)
ASCII 0Ah (LF)
ASCII 0Dh (CR)
ASCII 3Dh (=)
ASCII 09h (TAB)
These characters should always be escaped. Additionally, technique used to
encode critical characters (described in the next section) provides for any
character to be escaped; yDecoder implementations should be capable of
decoding any character following an escape sequence.
The probability of occurrence of these critical characters in binary input
data is approximately 0.4%. On average, escape sequences cause approximately
1.6% overhead when only these characters are escaped.
The carriage return/linefeed overhead for every line depends on the
developer-defined line length. Header and trailer lines are relatively
small, and cause negligible impact on output size.
## Encoding Technique
A typical encoding process might look something like this:
1. Fetch a character from the input stream.
2. Increment the character's ASCII value by 42, modulo 256
3. If the result is a critical character (as defined in the previous
section), write the escape character to the output stream and increment
the character's ASCII value by 64, modulo 256.
4. Output the character to the output stream.
5. Repeat from start.
To facilitate transmission via existing standard protocols (most
notably NNTP), carriage return/linefeed pairs should be written to
the output stream after every n characters, where n is the desired
line length.
The default value for n is 128.
If a critical character appears in the nth position of a line, both
the escape character and the encoded critical character must be
written to the same line, before the carriage return/linefeed. In
this event, the actual number of characters in the line is equal to
n+1. Effectively, this means that a line cannot end with an escape
character, and that a line with n+1 characters must end with an
encoded critical character.
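A minimal sketch of the per-byte rule above (escaping only, without the
line wrapping or the =ybegin/=yend framing):
    for <<byte <- data>>, into: <<>> do
      case rem(byte + 42, 256) do
        c when c in [0x00, 0x0A, 0x0D, 0x3D, 0x09] -> <<?=, rem(c + 64, 256)>>
        c -> <<c>>
      end
    end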
"""
@moduledoc authors: ["<NAME>"]
@doc ~S"""
Performs raw yEnc encoding on data returning the result.
## Options
* `:line_size` - (non-negative integer) sets the length of the line
before inserting CRLF pair.
## Examples
iex> YEnc.encode("")
""
iex> YEnc.encode(<<>>)
""
iex> YEnc.encode("ERLANG")
"o|vkxq"
iex> YEnc.encode(<<69, 82, 76, 65, 78, 71>>)
"o|vkxq"
iex> YEnc.encode(<<0, 0, 0, 224, 0, 0, 0, 224, 0>>, line_size: 4)
"***=J\r\n***=J\r\n*"
"""
@spec encode(binary(), keyword()) :: binary()
defdelegate encode(data, options \\ []), to: :yEnc
@doc ~S"""
Performs raw yEnc decoding on data returning the result.
All CRLF pairs are also removed.
## Examples
iex> YEnc.decode("")
""
iex> YEnc.decode("o|vkxq")
"ERLANG"
iex> YEnc.decode(<<111, 124, 118, 107, 120, 113>>)
"ERLANG"
iex> YEnc.decode("***=J\r\n***=J\r\n*")
<<0, 0, 0, 224, 0, 0, 0, 224, 0>>
"""
@spec decode(binary()) :: binary()
defdelegate decode(text), to: :yEnc
@doc ~S"""
Returns a single yEnc encoded post, suitable for posting.
## Examples
iex> YEnc.post("0b.bin", "")
"=ybegin line=128 size=0 name=0b.bin\r\n\r\n=yend size=0 crc32=00000000"
"""
@spec post(Path.t(), binary()) :: binary()
defdelegate post(filename, data), to: :yEnc
@doc ~S"""
Computes and returns the hexadecimal crc32 checksum for `data`.
## Examples
iex> YEnc.crc32("")
"00000000"
iex> YEnc.crc32("ERLANG")
"8a5c101d"
"""
@spec crc32(binary) :: binary()
defdelegate crc32(data), to: :yEnc
end
|
lib/yEnc.ex
| 0.848847
| 0.528108
|
yEnc.ex
|
starcoder
|
defmodule Astarte.DataUpdaterPlant.DataUpdater.PayloadsDecoder do
alias Astarte.Core.Interface
@max_uncompressed_payload_size 10_485_760
@doc """
Decode a BSON payload and return a tuple containing the decoded value, the timestamp and metadata.
reception_timestamp is used if no timestamp has been sent with the payload.
"""
@spec decode_bson_payload(binary, integer) ::
{term | nil, integer | nil, map | nil} | {:error, :undecodable_bson_payload}
def decode_bson_payload(payload, reception_timestamp) do
if byte_size(payload) != 0 do
case Cyanide.decode(payload) do
{:ok, %{"v" => bson_value, "t" => %DateTime{} = timestamp, "m" => %{} = metadata}} ->
bson_timestamp = DateTime.to_unix(timestamp, :millisecond)
{bson_value, bson_timestamp, metadata}
{:ok, %{"v" => bson_value, "m" => %{} = metadata}} ->
{bson_value, div(reception_timestamp, 10000), metadata}
{:ok, %{"v" => bson_value, "t" => %DateTime{} = timestamp}} ->
bson_timestamp = DateTime.to_unix(timestamp, :millisecond)
{bson_value, bson_timestamp, %{}}
{:ok, %{"v" => {0 = _subtype, <<>> = _bin}}} ->
{nil, nil, nil}
{:ok, %{"v" => bson_value}} ->
{bson_value, div(reception_timestamp, 10000), %{}}
{:ok, %{} = bson_value} ->
# Handling old format object aggregation
{bson_value, div(reception_timestamp, 10000), %{}}
{:error, _reason} ->
{:error, :undecodable_bson_payload}
_ ->
{:error, :undecodable_bson_payload}
end
else
{nil, nil, nil}
end
end
@doc """
Safely decodes a zlib deflated binary and inflates it.
This function avoids zip bomb vulnerabilities, and it decodes up to 10_485_760 bytes.
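For example (a sketch; the input must be a complete zlib stream):
    deflated = :zlib.compress("some payload")
    safe_inflate(deflated)
    #=> "some payload"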
"""
@spec safe_inflate(binary) :: binary | :error
def safe_inflate(zlib_payload) do
z = :zlib.open()
:ok = :zlib.inflateInit(z)
{continue_flag, output_list} = :zlib.safeInflate(z, zlib_payload)
uncompressed_size =
List.foldl(output_list, 0, fn output_block, acc ->
acc + byte_size(output_block)
end)
inflated_payload =
if uncompressed_size < @max_uncompressed_payload_size do
output_acc =
List.foldl(output_list, <<>>, fn output_block, acc ->
acc <> output_block
end)
safe_inflate_loop(z, output_acc, uncompressed_size, continue_flag)
else
:error
end
:zlib.inflateEnd(z)
:zlib.close(z)
inflated_payload
end
defp safe_inflate_loop(z, output_acc, size_acc, :continue) do
{continue_flag, output_list} = :zlib.safeInflate(z, [])
uncompressed_size =
List.foldl(output_list, size_acc, fn output_block, acc ->
acc + byte_size(output_block)
end)
if uncompressed_size < @max_uncompressed_payload_size do
output_acc =
List.foldl(output_list, output_acc, fn output_block, acc ->
acc <> output_block
end)
safe_inflate_loop(z, output_acc, uncompressed_size, continue_flag)
else
:error
end
end
defp safe_inflate_loop(_z, output_acc, _size_acc, :finished) do
output_acc
end
@doc """
Decodes a device properties payload, returning a MapSet of `{interface, path}` tuples.
"""
@spec parse_device_properties_payload(String.t(), map) ::
{:ok, MapSet.t(String.t())} | {:error, :invalid_properties}
def parse_device_properties_payload("", _introspection) do
{:ok, MapSet.new()}
end
def parse_device_properties_payload(decoded_payload, introspection) do
if String.valid?(decoded_payload) do
parse_device_properties_string(decoded_payload, introspection)
else
{:error, :invalid_properties}
end
end
def parse_device_properties_string(decoded_payload, introspection) do
paths_list =
decoded_payload
|> String.split(";")
|> List.foldl(MapSet.new(), fn property_full_path, paths_acc ->
with [interface, path] <- String.split(property_full_path, "/", parts: 2) do
if Map.has_key?(introspection, interface) do
MapSet.put(paths_acc, {interface, "/" <> path})
else
paths_acc
end
else
_ ->
# TODO: we should print a warning, or return a :issues_found status
paths_acc
end
end)
{:ok, paths_list}
end
@doc """
Decodes an introspection string into a list of `{interface_name, major, minor}` tuples.
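For example (a sketch; the interface name must match
`Interface.interface_name_regex/0`):
    parse_introspection("com.example.Sensors:1:0")
    #=> {:ok, [{"com.example.Sensors", 1, 0}]}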
"""
@spec parse_introspection(String.t()) ::
{:ok, list({String.t(), integer, integer})} | {:error, :invalid_introspection}
def parse_introspection("") do
{:ok, []}
end
def parse_introspection(introspection_payload) do
if String.valid?(introspection_payload) do
parse_introspection_string(introspection_payload)
else
{:error, :invalid_introspection}
end
end
defp parse_introspection_string(introspection_payload) do
introspection_tokens = String.split(introspection_payload, ";")
all_tokens_are_good =
Enum.all?(introspection_tokens, fn token ->
with [interface_name, major_version_string, minor_version_string] <-
String.split(token, ":"),
{major_version, ""} <- Integer.parse(major_version_string),
{minor_version, ""} <- Integer.parse(minor_version_string) do
cond do
String.match?(interface_name, Interface.interface_name_regex()) == false ->
false
major_version < 0 ->
false
minor_version < 0 ->
false
true ->
true
end
else
_not_expected ->
false
end
end)
if all_tokens_are_good do
parsed_introspection =
for token <- introspection_tokens do
[interface_name, major_version_string, minor_version_string] = String.split(token, ":")
{major_version, ""} = Integer.parse(major_version_string)
{minor_version, ""} = Integer.parse(minor_version_string)
{interface_name, major_version, minor_version}
end
{:ok, parsed_introspection}
else
{:error, :invalid_introspection}
end
end
end
|
lib/astarte_data_updater_plant/data_updater/payloads_decoder.ex
| 0.561936
| 0.409339
|
payloads_decoder.ex
|
starcoder
|
defimpl String.Chars, for: ExPcap.MagicNumber do
@doc """
Returns a human readable representation of the magic number.
"""
@spec to_string(ExPcap.MagicNumber.t) :: String.t
def to_string(magic_number) do
"""
magic number: 0x#{magic_number.magic |> Integer.to_string(16) |> String.downcase}
nanoseconds? #{magic_number.nanos}
reverse bytes? #{magic_number.reverse_bytes}
""" |> String.trim
end
end
defmodule ExPcap.MagicNumber do
@moduledoc """
This module represents a 'magic number' from a pcap header. The magic number
not only contains a known value, but the value indicates the order in which
bytes should be read AND whether or not datetimes use milliseconds or
nanoseconds.
"""
defstruct reverse_bytes: false,
nanos: false,
magic: 0x00000000
@type t :: %ExPcap.MagicNumber{
reverse_bytes: boolean,
nanos: boolean,
magic: non_neg_integer
}
@bytes_in_magic 4
@doc """
Returns the number of bytes contained in the magic number.
"""
@spec bytes_in_magic() :: non_neg_integer
def bytes_in_magic() do
@bytes_in_magic
end
@doc """
Returns a magic number that indicates that the bytes need to be reversed when
read and that datetimes are in milliseconds.
"""
@spec magic_number(0xd4, 0xc3, 0xb2, 0xa1) :: ExPcap.MagicNumber.t
def magic_number(0xd4, 0xc3, 0xb2, 0xa1) do
%ExPcap.MagicNumber{
reverse_bytes: true,
nanos: false,
magic: 0xd4c3b2a1
}
end
@doc """
Returns a magic number that indicates that the bytes do not need to be
reversed when read and that datetimes are in milliseconds.
"""
@spec magic_number(0xa1, 0xb2, 0xc3, 0xd4) :: ExPcap.MagicNumber.t
def magic_number(0xa1, 0xb2, 0xc3, 0xd4) do
%ExPcap.MagicNumber{
reverse_bytes: false,
nanos: false,
magic: 0xa1b2c3d4
}
end
@doc """
Returns a magic number that indicates that the bytes do not need to be
reversed when read and that datetimes are in nanoseconds.
"""
@spec magic_number(0xa1, 0xb2, 0x3c, 0x4d) :: ExPcap.MagicNumber.t
def magic_number(0xa1, 0xb2, 0x3c, 0x4d) do
%ExPcap.MagicNumber{
reverse_bytes: false,
nanos: true,
magic: 0xa1b23c4d
}
end
@doc """
Returns a magic number that indicates that the bytes need to be reversed when
read and that datetimes are in nanoseconds.
"""
@spec magic_number(0x4d, 0x3c, 0xb2, 0xa1) :: ExPcap.MagicNumber.t
def magic_number(0x4d, 0x3c, 0xb2, 0xa1) do
%ExPcap.MagicNumber{
reverse_bytes: true,
nanos: true,
magic: 0x4d3cb2a1
}
end
@doc """
This reads the bytes of the magic number and matches them with the appropriate
interpretation of the magic number.
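For example (magic value shown in hex):
    read_magic(<<0xa1, 0xb2, 0xc3, 0xd4>>)
    #=> %ExPcap.MagicNumber{reverse_bytes: false, nanos: false, magic: 0xa1b2c3d4}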
"""
@spec read_magic(binary) :: ExPcap.MagicNumber.t
def read_magic(data) do
<<
magic1 :: unsigned-integer-size(8),
magic2 :: unsigned-integer-size(8),
magic3 :: unsigned-integer-size(8),
magic4 :: unsigned-integer-size(8)
>> = data
magic_number(magic1, magic2, magic3, magic4)
end
@doc """
Reads the magic number from the file passed in.
"""
@spec from_file(IO.device) :: ExPcap.MagicNumber.t
def from_file(f) do
f |> IO.binread(@bytes_in_magic) |> read_magic
end
end
|
lib/expcap/magic_number.ex
| 0.850329
| 0.604428
|
magic_number.ex
|
starcoder
|
defmodule FloUI.Dropdown do
@moduledoc """
## Usage in SnapFramework
Dropdown component that scrolls. You can pass two separate themes as options. one for the dropdown, and one for the scroll bar.
Options
``` elixir
theme: theme,
scroll_bar: %{
show: true,
show_buttons: true,
theme: Scenic.Primitive.Style.Theme.preset(:dark),
thickness: 15
}
```
data is a tuple in the form of
``` elixir
{
[
{{"option 1", :option_1}, :option_1},
{{"option 2", :option_2}, :option_2},
{{"option 3", :option_3}, :option_3}
],
:option_1
}
```
Events emitted
`{:value_changed, id, value}`
``` elixir
<%= component FloUI.Dropdown,
{@items, @selected},
id: :dropdown,
theme: @theme,
scroll_bar: %{
show: true,
show_buttons: true,
theme: Scenic.Primitive.Style.Theme.preset(:dark),
thickness: 15
}
%>
```
"""
@default_height 50
@default_frame_height 300
@default_theme FloUI.Theme.preset(:base)
@default_scroll_bar %{
show: true,
show_buttons: true,
theme: Scenic.Primitive.Style.Theme.preset(:dark),
thickness: 15
}
alias FloUI.Dropdown.Items
use SnapFramework.Component,
name: :dropdown,
template: "lib/dropdown/dropdown.eex",
controller: FloUI.DropdownController,
assigns: [],
opts: []
defcomponent(:dropdown, :tuple)
use_effect [assigns: [open?: :any]], [
run: [:on_open_change]
]
use_effect [assigns: [selected_label: :any]], [
run: [:on_selected_change]
]
@impl true
def setup(%{assigns: %{data: {items, selected} = data, opts: opts}} = scene) do
width = get_width(data, opts)
frame_height = get_frame_height(data, opts)
content_height = get_content_height(items)
scroll_bar = opts[:scroll_bar] || @default_scroll_bar
show_vertical_scroll = content_height > frame_height and scroll_bar.show
assign(scene,
items: items,
selected_label: "",
selected_key: nil,
selected: selected,
open?: false,
button_width: if(show_vertical_scroll, do: width + 20, else: width),
button_height: opts[:height] || @default_height,
background_height: frame_height + 20,
frame_width: width,
frame_height: frame_height,
content_height: content_height,
scroll_bar: scroll_bar,
show_vertical_scroll: show_vertical_scroll,
theme: get_theme(opts)
)
end
@impl true
def bounds(data, opts) do
{0.0, 0.0, get_width(data, opts), opts[:height] || @default_height}
end
@impl true
def process_event({:value_changed, {{label, value}, key}}, _, scene) do
{:cont, {:value_changed, scene.assigns.opts[:id], value}, assign(scene, selected_label: label, selected_key: key, open?: false)}
end
def process_event(_, _, scene) do
{:noreply, scene}
end
@impl true
def process_input({:cursor_button, {:btn_left, 0, _, _}}, :bg, %{assigns: %{open?: open?}} = scene) do
{:noreply, assign(scene, open?: not open?)}
end
def process_input({:cursor_button, {:btn_left, 1, _, _}}, :clickout, %{assigns: %{open?: open?}} = scene) do
{:noreply, assign(scene, open?: not open?)}
end
def process_input(_, _, scene) do
{:noreply, scene}
end
defp get_width(data, opts) do
{_, _, w, _h} = Items.bounds(data, opts)
w
end
defp get_frame_height(data, opts) do
{_, _, _w, h} = Items.bounds(data, opts)
frame_height = opts[:frame_height] || @default_frame_height
if(h > frame_height, do: frame_height, else: h)
end
defp get_content_height(items) do
Items.get_height(items)
end
defp get_theme(opts) do
case opts[:theme] do
nil -> @default_theme
:dark -> @default_theme
:light -> @default_theme
theme -> theme
end
|> FloUI.Theme.normalize()
end
end
|
lib/dropdown/dropdown.ex
| 0.785432
| 0.699088
|
dropdown.ex
|
starcoder
|
defmodule Blazer do
@moduledoc """
Blazer is a case parser for JSON keys.
Available case options:
* `:camel` example: `camelCase`
* `:pascal` example: `PascalCase`
* `:snake` example: `snake_case`
* `:upper` example: `UPPERCASE`
* `:kebab` example: `kebab-case`
* `:title` example: `Title Case`
"""
alias Blazer.Native
alias Blazer.Structs.Opts
@doc"""
Parses a map or a string to the desired case
```elixir
iex(1)> Blazer.parse(%{"firstKey" => "data", "secondKey" => "data"}, case: :snake, keys: :atoms)
{:ok, %{first_key: "data", second_key: "data"}}
iex(2)> Blazer.parse("john_doe", case: :title)
{:ok, "<NAME>"}
```
"""
@type opts :: [ case: :camel|:pascal|:snake|:upper|:kebab|:title, keys: :strings|:atoms|:atoms!]
@spec parse(String.t() | map(), Opts.t()) :: {:ok, String.t() | map()} | {:error, String.t()}
def parse(term, opts), do: transform(term, opts)
@spec parse!(String.t() | map(), Opts.t()) :: String.t() | map()
def parse!(term, opts), do: force!(fn -> parse(term, opts) end)
@spec encode_to_iodata!(map(), opts) :: iodata()
def encode_to_iodata!(term, opts \\ []) do
{:ok, opts} = get_out_opts(opts)
term
|> parse!(opts)
|> Jason.encode_to_iodata!(opts)
end
@doc"""
encode a map into JSON after parsing its keys
`opts` is passed to Jason, so all its options can be used
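For example (a sketch; the `case`/`keys` options are handled by Blazer and
any remaining options are forwarded to Jason):
    Blazer.encode(%{"first_key" => 1}, case: :camel, keys: :strings)
    #=> {:ok, ~s({"firstKey":1})}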
"""
@spec encode(map(), opts) :: {:ok, String.t()} | {:error, String.t()}
def encode(term, opts \\ []) do
with {:ok, opts} <- get_out_opts(opts),
{:ok, parsed} <- transform(term, opts),
{:ok, encoded} <- Jason.encode(parsed, opts) do
{:ok, encoded}
else
{:error, reason} -> {:error, reason}
end
end
@spec encode!(map(), opts) :: String.t()
def encode!(term, opts \\ []), do: force!(fn -> encode(term, opts) end)
@doc"""
Decode a JSON into a map and parse its keys
`opts` is passed to Jason, so all its options can be used
"""
@spec decode(String.t(), opts) :: {:ok, map()} | {:error, String.t()}
def decode(json, opts \\ []) do
with {:ok, opts} <- get_in_opts(opts),
{:ok, decoded} <- Jason.decode(json, opts),
{:ok, parsed} <- transform(decoded, opts) do
{:ok, parsed}
else
{:error, reason} -> {:error, reason}
end
end
@spec decode!(String.t(), opts) :: map()
def decode!(json, opts \\ []) do
force!(fn -> decode(json, opts) end)
end
defp transform(term, opts) when is_binary(term),
do: Native.convert_binary(term, opts)
defp transform(term, opts) when is_map(term), do: Native.convert_map(term, opts)
defp transform(_term, _target_opts), do: raise("only strings and maps are accepted.")
defp force!(fun) do
case fun.() do
{:ok, result} -> result
{:error, reason} -> raise reason
end
end
defp get_in_opts(opts), do: get_opts(opts, :inner_case)
defp get_out_opts(opts), do: get_opts(opts, :outer_case)
defp get_opts(opts, direction) do
cond do
length(opts) > 0 -> {:ok, opts}
Application.get_env(:blazer, direction) ->
{:ok, [keys: (Application.get_env(:blazer, :keys) || :strings), case: Application.get_env(:blazer, direction)]}
true ->
{:error, "Target case not provided, either pass an case in the options or set in the configs."}
end
end
end
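# Illustrative round trip (not part of the original module). Shown commented
# because it assumes the Blazer.Native NIF is loaded; options are passed
# inline rather than read from application config:
#
#   {:ok, json} = Blazer.encode(%{first_key: 1}, case: :camel)
#   # json == ~s({"firstKey":1})
#   {:ok, map} = Blazer.decode(json, case: :snake, keys: :atoms)
#   # map == %{first_key: 1}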
|
lib/blazer.ex
| 0.899673
| 0.915091
|
blazer.ex
|
starcoder
|
defmodule AdventOfCode.Day13 do
@moduledoc ~S"""
[Advent Of Code day 13](https://adventofcode.com/2018/day/13).
"""
@type point :: {non_neg_integer, non_neg_integer}
@type direction :: :up | :down | :left | :right
@type cart :: {position :: point(), direction :: direction(), turns_sequence :: [:left | :straight | :right]}
@type track_point_type :: :left_turn | :right_turn | :crossing
@type track_point :: {point(), track_point_type()}
def solve("1", input) do
parse(input) |> find_first_crash_position()
end
def solve("2", input) do
parse(input) |> find_last_cart_intact()
end
defp find_last_cart_intact({carts, track}), do: find_last_cart_intact({carts_in_movement_order(carts), track}, [])
defp find_last_cart_intact({[last_cart], track}, []) do
{point, _, _} = move_cart(last_cart, track)
point
end
defp find_last_cart_intact({[], track}, moved_carts) do
find_last_cart_intact({moved_carts, track})
end
defp find_last_cart_intact({[cart | rest_carts], track}, moved_carts) do
moved_current_cart = move_cart(cart, track)
case find_crash([moved_current_cart | rest_carts] ++ moved_carts) do
nil ->
find_last_cart_intact({rest_carts, track}, [moved_current_cart | moved_carts])
{_point, crashed_carts} ->
find_last_cart_intact({remove_crashed(rest_carts, crashed_carts), track}, remove_crashed(moved_carts, crashed_carts))
end
end
def remove_crashed(carts, crashed_carts) do
Enum.filter(carts, fn c -> c not in crashed_carts end)
end
defp find_first_crash_position({carts, track}), do: find_first_crash_position({carts_in_movement_order(carts), track}, [])
defp find_first_crash_position({[], track}, moved_carts) do
find_first_crash_position({moved_carts, track})
end
defp find_first_crash_position({[cart | rest_carts], track}, moved_carts) do
moved_current_cart = move_cart(cart, track)
case find_crash([moved_current_cart | rest_carts] ++ moved_carts) do
nil ->
find_first_crash_position({rest_carts, track}, [moved_current_cart | moved_carts])
{point, _carts} ->
point
end
end
def parse(input) do
{carts, track} =
String.split(input, "\n")
|> Enum.with_index()
|> Enum.reduce({[], %{}}, fn {row, y}, acc ->
row
|> String.trim_trailing()
|> String.codepoints()
|> Enum.with_index()
|> Enum.reduce(acc, fn {value, x}, acc ->
do_parse_cell({x, y}, value, acc)
end)
end)
{carts, Enum.into(%{}, track)}
end
defp do_parse_cell(point, value, {carts, grid}) do
case value do
"v" ->
{[{point, :down, new_turns_sequence()} | carts], grid}
"^" ->
{[{point, :up, new_turns_sequence()} | carts], grid}
">" ->
{[{point, :right, new_turns_sequence()} | carts], grid}
"<" ->
{[{point, :left, new_turns_sequence()} | carts], grid}
"-" ->
{carts, grid}
"|" ->
{carts, grid}
" " ->
{carts, grid}
"+" ->
{carts, Map.put(grid, point, :crossing)}
"/" ->
{carts, Map.put(grid, point, :right_turn)}
"\\" ->
{carts, Map.put(grid, point, :left_turn)}
end
end
defp new_turns_sequence, do: [:left, :straight, :right]
defp find_crash(carts) do
Enum.group_by(carts, fn {point, _, _} -> point end) |> Enum.find(fn {_point, carts} -> Enum.count(carts) > 1 end)
end
defp carts_in_movement_order(carts) do
Enum.sort_by(carts, fn {{x, y}, _, _} -> {y, x} end)
end
defp move_cart({{x, y}, direction, turns_seq}, track) do
next_point = next_point({x, y}, direction)
case Map.get(track, next_point) do
nil ->
{next_point, direction, turns_seq}
:crossing ->
{turn_direction, turns_seq} = cycle_turn_seq(turns_seq)
{next_point, turn_cart(direction, turn_direction), turns_seq}
turn_direction ->
{next_point, turn_cart(direction, turn_direction(direction, turn_direction)), turns_seq}
end
end
defp turn_direction(:up, :left_turn), do: :left
defp turn_direction(:down, :left_turn), do: :left
defp turn_direction(_, :left_turn), do: :right
defp turn_direction(:up, :right_turn), do: :right
defp turn_direction(:down, :right_turn), do: :right
defp turn_direction(_, :right_turn), do: :left
defp next_point({x, y}, direction) do
case direction do
:up ->
{x, y - 1}
:down ->
{x, y + 1}
:left ->
{x - 1, y}
:right ->
{x + 1, y}
end
end
defp turn_cart(cart_direction, turn_direction) do
case {cart_direction, turn_direction} do
{:left, :left} -> :down
{:left, :right} -> :up
{:right, :left} -> :up
{:right, :right} -> :down
{:down, :left} -> :right
{:down, :right} -> :left
{any, :straight} -> any
{:up, any} -> any
end
end
defp cycle_turn_seq([turn | rest_seq]) do
{turn, rest_seq ++ [turn]}
end
end
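# Illustrative sanity check (not from the original source): two carts on a
# straight track move toward each other and collide in the middle cell.
{2, 0} = AdventOfCode.Day13.solve("1", "->-<-")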
|
lib/advent_of_code/day_13.ex
| 0.74826
| 0.526586
|
day_13.ex
|
starcoder
|
import Kernel, except: [inspect: 2]
defmodule Logger.Formatter do
@moduledoc ~S"""
Conveniences for formatting data for logs.
This module allows developers to specify a string that
serves as template for log messages, for example:
$time $metadata[$level] $message\n
Will print error messages as:
18:43:12.439 user_id=13 [error] Hello\n
The valid parameters you can use are:
* `$time` - time the log message was sent
* `$date` - date the log message was sent
* `$message` - the log message
* `$level` - the log level
* `$node` - the node that prints the message
* `$metadata` - user controlled data presented in "key=val key2=val2" format
Backends typically allow developers to supply such control
strings via configuration files. This module provides `compile/1`,
which compiles the string into a format for fast operations at
runtime and `format/5` to format the compiled pattern into an
actual IO data.
## Metadata
Metadata to be sent to the Logger can be read and written with
the `Logger.metadata/0` and `Logger.metadata/1` functions. For example,
you can set `Logger.metadata([user_id: 13])` to add user_id metadata
to the current process. The user can configure the backend to choose
which metadata it wants to print, and it will replace the $metadata
value.
"""
@valid_patterns [:time, :date, :message, :level, :node, :metadata]
@default_pattern "$time $metadata[$level] $message\n"
@doc ~S"""
Compiles a format string into an array that the `format/5` can handle.
The valid parameters you can use are:
* $time
* $date
* $message
* $level
* $node
* $metadata - metadata is presented in key=val key2=val2 format.
If you pass nil into compile, it will use the default
format of `$time $metadata[$level] $message\n`
If you would like to make your own custom formatter simply pass
`{module, function}` to compile and the rest is handled.
iex> Logger.Formatter.compile("$time $metadata [$level] $message\n")
[:time, " ", :metadata, " [", :level, "] ", :message, "\n"]
"""
@spec compile(binary | nil) :: list()
@spec compile({atom, atom}) :: {atom, atom}
def compile(nil), do: compile(@default_pattern)
def compile({mod, fun}) when is_atom(mod) and is_atom(fun), do: {mod, fun}
def compile(str) do
for part <- Regex.split(~r/(?<head>)\$[a-z]+(?<tail>)/, str, on: [:head, :tail], trim: true) do
case part do
"$" <> code -> compile_code(String.to_atom(code))
_ -> part
end
end
end
defp compile_code(key) when key in @valid_patterns, do: key
defp compile_code(key) when is_atom(key) do
raise(ArgumentError, message: "$#{key} is an invalid format pattern.")
end
@doc """
Takes a compiled format and injects the level, timestamp, message and
metadata keyword list, returning properly formatted IO data.
"""
def format({mod, fun}, level, msg, ts, md) do
apply(mod, fun, [level, msg, ts, md])
end
def format(config, level, msg, ts, md) do
for c <- config do
output(c, level, msg, ts, md)
end
end
defp output(:message, _, msg, _, _), do: msg
defp output(:date, _, _, {date, _time}, _), do: Logger.Utils.format_date(date)
defp output(:time, _, _, {_date, time}, _), do: Logger.Utils.format_time(time)
defp output(:level, level, _, _, _), do: Atom.to_string(level)
defp output(:node, _, _, _, _), do: Atom.to_string(node())
defp output(:metadata, _, _, _, []), do: ""
defp output(:metadata, _, _, _, meta) do
Enum.map(meta, fn {key, val} ->
[to_string(key), ?=, to_string(val), ?\s]
end)
end
defp output(other, _, _, _, _), do: other
end
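# Illustrative use of a compiled pattern (hypothetical values; assumes
# Logger.Utils.format_time/1 renders hh:mm:ss.mmm, as used by output/5 above):
compiled = Logger.Formatter.compile("$time [$level] $message\n")
iodata = Logger.Formatter.format(compiled, :info, "Hello", {{2015, 1, 1}, {12, 30, 5, 0}}, [])
IO.iodata_to_binary(iodata)
# => "12:30:05.000 [info] Hello\n"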
|
lib/logger/lib/logger/formatter.ex
| 0.810291
| 0.50354
|
formatter.ex
|
starcoder
|
defmodule Mock do
@moduledoc """
Mock modules for testing purposes. Usually inside a unit test.
Please see the README file on github for a tutorial
## Example
defmodule MyTest do
use ExUnit.Case
import Mock
test "get" do
with_mock HTTPotion,
[get: fn("http://example.com", _headers) ->
HTTPotion.Response.new(status_code: 200,
body: "hello") end] do
# Code which calls HTTPotion.get
# Check that the call was made as we expected
assert called HTTPotion.get("http://example.com", :_)
end
end
end
"""
@doc """
Mock up `mock_module` with functions specified as a keyword
list of function_name:implementation `mocks` for the duration
of `test`.
`opts` List of optional arguments passed to meck. `:passthrough` will
passthrough arguments to the original module.
## Example
with_mock HTTPotion, [get: fn("http://example.com") ->
"<html></html>" end] do
# Tests that make the expected call
assert called HTTPotion.get("http://example.com")
end
"""
defmacro with_mock(mock_module, opts \\ [], mocks, do: test) do
quote do
unquote(__MODULE__).with_mocks(
[{unquote(mock_module), unquote(opts), unquote(mocks)}], do: unquote(test))
end
end
@doc """
Mock up multiple modules for the duration of `test`.
## Example
with_mocks([{HTTPotion, [], [get: fn("http://example.com") -> "<html></html>" end]}]) do
# Tests that make the expected call
assert called HTTPotion.get("http://example.com")
end
"""
defmacro with_mocks(mocks, do: test) do
quote do
mock_modules = mock_modules(unquote(mocks))
try do
unquote(test)
after
for m <- mock_modules, do: :meck.unload(m)
end
end
end
@doc """
Shortcut to avoid multiple blocks when a test requires a single
mock.
For full description see `with_mock`.
## Example
test_with_mock "test_name", HTTPotion,
[get: fn(_url) -> "<html></html>" end] do
HTTPotion.get("http://example.com")
assert called HTTPotion.get("http://example.com")
end
"""
defmacro test_with_mock(test_name, mock_module, opts \\ [], mocks, test_block) do
quote do
test unquote(test_name) do
unquote(__MODULE__).with_mock(
unquote(mock_module), unquote(opts), unquote(mocks), unquote(test_block))
end
end
end
@doc """
Shortcut to avoid multiple blocks when a test requires a single
mock. Accepts a context argument enabling information to be shared
between callbacks and the test.
For full description see `with_mock`.
## Example
setup do
doc = "<html></html>"
{:ok, doc: doc}
end
test_with_mock "test_with_mock with context", %{doc: doc}, HTTPotion, [],
[get: fn(_url) -> doc end] do
HTTPotion.get("http://example.com")
assert called HTTPotion.get("http://example.com")
end
"""
defmacro test_with_mock(test_name, context, mock_module, opts, mocks, test_block) do
quote do
test unquote(test_name), unquote(context) do
unquote(__MODULE__).with_mock(
unquote(mock_module), unquote(opts), unquote(mocks), unquote(test_block))
end
end
end
@doc """
Call original function inside mock anonymous function.
Allows overriding only a certain behavior of a function.
Compatible with passthrough option.
## Example
with_mock String, [:passthrough], [reverse: fn(str) ->
passthrough([str]) <> "!" end] do
assert String.reverse("xyz") == "zyx!"
end
"""
defmacro passthrough(args) do
quote do
:meck.passthrough(unquote(args))
end
end
@doc """
Use inside a `with_mock` block to determine whether
a mocked function was called as expected.
Pass `:_` as a function argument for wildcard matches.
## Example
assert called HTTPotion.get("http://example.com")
# Matches any invocation
assert called HTTPotion.get(:_)
"""
defmacro called({{:., _, [module, f]}, _, args}) do
quote do
:meck.called unquote(module), unquote(f), unquote(args)
end
end
@doc """
Use inside a `with_mock` block to determine whether
a mocked function was called as expected. If the assertion fails,
the calls that were received are displayed in the assertion message.
Pass `:_` as a function argument for wildcard matches.
## Example
assert_called HTTPotion.get("http://example.com")
# Matches any invocation
assert_called HTTPotion.get(:_)
"""
defmacro assert_called({{:., _, [module, f]}, _, args}) do
quote do
unquoted_module = unquote(module)
value = :meck.called(unquoted_module, unquote(f), unquote(args))
unless value do
calls = unquoted_module
|> :meck.history()
|> Enum.with_index()
|> Enum.map(fn {{_, {m, f, a}, ret}, i} ->
"#{i}. #{m}.#{f}(#{a |> Enum.map(&Kernel.inspect/1) |> Enum.join(",")}) (returned #{inspect ret})"
end)
|> Enum.join("\n")
raise ExUnit.AssertionError,
message: "Expected call but did not receive it. Calls which were received:\n\n#{calls}"
end
end
end
@doc """
Mocks up multiple modules prior to the execution of each test in a case and
execute the callback specified.
For full description of mocking, see `with_mocks`.
For a full description of ExUnit setup, see
https://hexdocs.pm/ex_unit/ExUnit.Callbacks.html
## Example
setup_with_mocks([
{Map, [], [get: fn(%{}, "http://example.com") -> "<html></html>" end]}
]) do
foo = "bar"
{:ok, foo: foo}
end
test "setup_all_with_mocks base case" do
assert Map.get(%{}, "http://example.com") == "<html></html>"
end
"""
defmacro setup_with_mocks(mocks, do: setup_block) do
quote do
setup do
mock_modules(unquote(mocks))
on_exit(fn ->
:meck.unload()
end)
unquote(setup_block)
end
end
end
@doc """
Mocks up multiple modules prior to the execution of each test in a case and
execute the callback specified with a context specified
See `setup_with_mocks` for more details
## Example
setup_with_mocks([
{Map, [], [get: fn(%{}, "http://example.com") -> "<html></html>" end]}
], context) do
{:ok, test_string: Atom.to_string(context.test)}
end
test "setup_all_with_mocks with context", %{test_string: test_string} do
assert Map.get(%{}, "http://example.com") == "<html></html>"
assert test_string == "test setup_all_with_mocks with context"
end
"""
defmacro setup_with_mocks(mocks, context, do: setup_block) do
quote do
setup unquote(context) do
mock_modules(unquote(mocks))
on_exit(fn ->
:meck.unload()
end)
unquote(setup_block)
end
end
end
# Helper macro to mock modules. Intended to be called only within this module
# but not defined as `defmacrop` due to the scope within which it's used.
defmacro mock_modules(mocks) do
quote do
Enum.reduce(unquote(mocks), [], fn({m, opts, mock_fns}, ms) ->
unless m in ms do
# :meck.validate will throw an error if trying to validate
# a module that was not mocked
try do
if :meck.validate(m), do: :meck.unload(m)
rescue
_e in ErlangError -> :ok
end
:meck.new(m, opts)
end
unquote(__MODULE__)._install_mock(m, mock_fns)
true = :meck.validate(m)
[ m | ms] |> Enum.uniq
end)
end
end
@doc false
def _install_mock(_, []), do: :ok
def _install_mock(mock_module, [ {fn_name, value} | tail ]) do
:meck.expect(mock_module, fn_name, value)
_install_mock(mock_module, tail)
end
end
|
lib/mock.ex
| 0.889628
| 0.68448
|
mock.ex
|
starcoder
|
defmodule Typo.PDF do
@moduledoc """
PDF server public API.
"""
import Typo.Utils.Colour, only: [colour: 1, from_hex: 1]
import Typo.Utils.Fmt, only: [n2s: 1]
import Typo.Utils.Guards
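# Cubic Bézier control-point constant for approximating a quarter circle:
# 4 * (sqrt(2) - 1) / 3 ≈ 0.5523. Used by ellipse/4 below.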
@k 4.0 * ((:math.sqrt(2) - 1.0) / 3.0)
# appends binary directly onto the current PDF page stream.
# should NOT be used unless you know exactly what you are doing!
@doc false
@spec append(Typo.handle(), binary()) :: :ok
def append(pdf, <<data::binary>>), do: GenServer.cast(pdf, {:append, data})
@doc """
Moves to `p1` and appends a Bézier curve onto the current path.
Uses `p2`, `p3` and `p4` as the curve control points.
"""
@spec bezier(Typo.handle(), Typo.xy(), Typo.xy(), Typo.xy(), Typo.xy()) :: :ok
def bezier(pdf, p1, p2, p3, p4)
when is_handle(pdf) and is_xy(p1) and is_xy(p2) and is_xy(p3) and is_xy(p4),
do: append(pdf, n2s([p1, "m", p2, p3, p4, "c"]))
@doc """
Appends a Bézier curve from the current graphics position onto the current
path using `p1`, `p2` and `p3` as the control points.
"""
@spec bezier_to(Typo.handle(), Typo.xy(), Typo.xy(), Typo.xy()) :: :ok
def bezier_to(pdf, p1, p2, p3) when is_handle(pdf) and is_xy(p1) and is_xy(p2) and is_xy(p3),
do: append(pdf, n2s([p1, p2, p3, "c"]))
@doc """
Appends a circle centred on `p` with radius `r` onto the current path.
"""
@spec circle(Typo.handle(), Typo.xy(), number()) :: :ok
def circle(pdf, p, r) when is_handle(pdf) and is_xy(p) and is_number(r),
do: ellipse(pdf, p, r, r)
@doc """
Closes the current path by appending a straight line from the current graphics
position to the path start position.
"""
@spec close_path(Typo.handle()) :: :ok
def close_path(pdf) when is_handle(pdf), do: append(pdf, "h")
# restricts colour range to between 0.0 and 1.0 inclusive.
@spec colour_range(number()) :: float()
defp colour_range(v) when is_number(v) do
cond do
v < 0 -> 0.0
v > 1.0 -> 1.0
true -> v + 0.0
end
end
@doc """
Appends an ellipse centred on `p` with x radius `rx` and y radius `ry` onto the
current path.
"""
@spec ellipse(Typo.handle(), Typo.xy(), number(), number()) :: :ok
def ellipse(pdf, {x, y} = p, rx, ry)
when is_handle(pdf) and is_xy(p) and is_number(rx) and is_number(ry) do
:ok = move(pdf, {x + rx, y})
:ok = bezier_to(pdf, {x + rx, y + ry * @k}, {x + rx * @k, y + ry}, {x, y + ry})
:ok = bezier_to(pdf, {x - rx * @k, y + ry}, {x - rx, y + ry * @k}, {x - rx, y})
:ok = bezier_to(pdf, {x - rx, y - ry * @k}, {x - rx * @k, y - ry}, {x, y - ry})
:ok = bezier_to(pdf, {x + rx * @k, y - ry}, {x + rx, y - ry * @k}, {x + rx, y})
end
@doc """
Ends the current path without filling or stroking.
"""
@spec end_path(Typo.handle()) :: :ok
def end_path(pdf) when is_handle(pdf), do: append(pdf, "n")
@doc """
Fills the current path using the given `winding` rule.
* `:non_zero` - non-zero winding rule (default).
* `:even_odd` - even-odd winding rule.
"""
@spec fill(Typo.handle(), Typo.winding_rule()) :: :ok
def fill(_pdf, winding \\ :non_zero)
def fill(pdf, :non_zero) when is_handle(pdf), do: append(pdf, "f")
def fill(pdf, :even_odd) when is_handle(pdf), do: append(pdf, "f*")
@doc """
Fills the current path using the given `winding` rule.
* `:non_zero` - non-zero winding rule (default).
* `:even_odd` - even-odd winding rule.
Once filled, the path is then stroked.
"""
@spec fill_stroke(Typo.handle(), Typo.winding_rule()) :: :ok
def fill_stroke(_pdf, winding \\ :non_zero)
def fill_stroke(pdf, :non_zero) when is_handle(pdf), do: append(pdf, "b")
def fill_stroke(pdf, :even_odd) when is_handle(pdf), do: append(pdf, "b*")
@doc """
Returns the current page number.
"""
@spec get_page_number(Typo.handle()) :: integer()
def get_page_number(pdf) when is_handle(pdf), do: GenServer.call(pdf, :get_page_number)
# gets server state (for debugging).
@doc false
@spec get_state(Typo.handle()) :: Typo.PDF.Server.t()
def get_state(pdf) when is_handle(pdf), do: GenServer.call(pdf, :get_state)
@doc """
Appends a line segment onto the current path from the current graphics position
to point `p`.
"""
@spec line_to(Typo.handle(), Typo.xy()) :: :ok
def line_to(pdf, p) when is_handle(pdf) and is_xy(p), do: append(pdf, n2s([p, "l"]))
@doc """
Appends a joined set of line segments onto the current path.
"""
@spec lines(Typo.handle(), [Typo.xy()]) :: :ok
def lines(pdf, coords) when is_handle(pdf) and is_list(coords) do
op = lines_acc(coords, "")
append(pdf, op)
end
defp lines_acc([], result), do: result
defp lines_acc([p | t], result) when is_xy(p),
do: lines_acc(t, <<result::binary, n2s([p, "l "])::binary>>)
@doc """
Moves the current graphics position to `p`, which also begins a new subpath.
"""
@spec move(Typo.handle(), Typo.xy()) :: :ok
def move(pdf, p) when is_handle(pdf) and is_xy(p), do: append(pdf, n2s([p, "m"]))
@doc """
Appends a rectangle with lower left corner `p`, dimensions `width` and `height`
onto the current path.
"""
@spec rectangle(Typo.handle(), Typo.xy(), number(), number()) :: :ok
def rectangle(pdf, p, width, height)
when is_handle(pdf) and is_xy(p) and is_number(width) and is_number(height),
do: append(pdf, n2s([p, width, height, "re"]))
@doc """
Sets fill colour to Greyscale/RGB/CMYK/Hex/Name value `v`.
For Greyscale `v` should be in the range 0.0..1.0; for RGB/CMYK, each component
of the colour tuple should be in the range 0.0..1.0.
For Hex colours, the colour should be specified as `#xxx` or `#xxxxxx` where
`x` represents a single hex digit.
Named colours should be one of the named standard HTML colours.
"""
@spec set_fill_color(Typo.handle(), Typo.colour()) :: :ok | Typo.error()
defdelegate set_fill_color(pdf, v), to: Typo.PDF, as: :set_fill_colour
@doc """
Sets fill colour to Greyscale/RGB/CMYK/Hex/Name value `v`.
For Greyscale `v` should be in the range 0.0..1.0; for RGB/CMYK, each component
of the colour tuple should be in the range 0.0..1.0.
For Hex colours, the colour should be specified as `#xxx` or `#xxxxxx` where
`x` represents a single hex digit.
Named colours should be one of the named standard HTML colours.
"""
@spec set_fill_colour(Typo.handle(), Typo.colour()) :: :ok | Typo.error()
def set_fill_colour(pdf, v) when is_handle(pdf) and is_number(v) do
append(pdf, n2s([colour_range(v), "g"]))
end
def set_fill_colour(pdf, {r, g, b} = _v)
when is_handle(pdf) and is_number(r) and is_number(g) and is_number(b) do
append(pdf, n2s([colour_range(r), colour_range(g), colour_range(b), "rg"]))
end
def set_fill_colour(pdf, {c, m, y, k} = _v)
when is_handle(pdf) and is_number(c) and is_number(m) and is_number(y) and is_number(k) do
append(pdf, n2s([colour_range(c), colour_range(m), colour_range(y), colour_range(k), "k"]))
end
def set_fill_colour(pdf, <<?#::8, colour::binary-size(3)>>) do
with {_r, _g, _b} = c <- from_hex(colour) do
set_fill_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
def set_fill_colour(pdf, <<?#::8, colour::binary-size(6)>>) do
with {_r, _g, _b} = c <- from_hex(colour) do
set_fill_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
def set_fill_colour(pdf, <<cn::binary>>) do
with {_r, _g, _b} = c <- colour(String.downcase(cn)) do
set_fill_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
@doc """
Sets the line cap (end) style to one of:
* `:cap_butt` - line stroke is squared off at the line-segment endpoints.
* `:cap_round` - filled semicircular arc with half line width diameter is drawn
around segment endpoints.
* `:cap_square` - line stroke continues half the line width past the endpoints and is
squared off.
"""
@spec set_line_cap(Typo.handle(), Typo.line_cap()) :: :ok
def set_line_cap(pdf, :cap_butt) when is_handle(pdf), do: append(pdf, "0 J")
def set_line_cap(pdf, :cap_round) when is_handle(pdf), do: append(pdf, "1 J")
def set_line_cap(pdf, :cap_square) when is_handle(pdf), do: append(pdf, "2 J")
@doc """
Sets the line dash style. The pattern is on for `on` points, and off for `off`
points. The optional `phase` argument adjusts the phasing of the output pattern.
To turn off dashing, use `set_line_solid/1`.
"""
@spec set_line_dash(Typo.handle(), number(), number(), number()) :: :ok
def set_line_dash(pdf, on, off, phase \\ 0)
when is_handle(pdf) and is_number(on) and is_number(off) and is_number(phase),
do: append(pdf, n2s(["[", on, off, "]", phase, "d"]))
@doc """
Sets the line join style to one of:
* `:join_bevel` - the two line segments are squared off at the join points and
the resulting notch between the two ends is filled with a triangle.
* `:join_mitre` - the outer edges of the stroke are extended until they meet
at an angle (may alternatively be specified as `:join_miter`).
* `:join_round` - a filled arc of a circle with diameter equal to the line
width is drawn around the point where the two line segments meet connecting
the outer edges of the strokes, producing a rounded join.
"""
@spec set_line_join(Typo.handle(), Typo.line_join()) :: :ok
def set_line_join(pdf, :join_bevel) when is_handle(pdf), do: append(pdf, "2 j")
def set_line_join(pdf, :join_mitre) when is_handle(pdf), do: append(pdf, "0 j")
def set_line_join(pdf, :join_miter) when is_handle(pdf), do: append(pdf, "0 j")
def set_line_join(pdf, :join_round) when is_handle(pdf), do: append(pdf, "1 j")
@doc """
Sets the line style to solid (instead of dashed).
"""
def set_line_solid(pdf) when is_handle(pdf), do: append(pdf, "[] 0 d")
@doc """
Sets the line width to `width` points.
"""
@spec set_line_width(Typo.handle(), number()) :: :ok
def set_line_width(pdf, width) when is_handle(pdf) and is_number(width),
do: append(pdf, n2s([width, "w"]))
@doc """
Sets the miter limit to `limit`, which controls the point at which mitered
joins are turned into bevels.
"""
@spec set_miter_limit(Typo.handle(), number()) :: :ok
defdelegate set_miter_limit(pdf, limit), to: Typo.PDF, as: :set_mitre_limit
@doc """
Sets the mitre limit to `limit`, which controls the point at which mitred
joins are turned into bevels.
"""
@spec set_mitre_limit(Typo.handle(), number()) :: :ok
def set_mitre_limit(pdf, limit) when is_handle(pdf) and is_number(limit),
do: append(pdf, n2s([limit, "M"]))
@doc """
Sets stroke colour to Greyscale/RGB/CMYK/Hex/Name value `v`.
For Greyscale `v` should be in the range 0.0..1.0; for RGB/CMYK, each component
of the colour tuple should be in the range 0.0..1.0.
For Hex colours, the colour should be specified as `#xxx` or `#xxxxxx` where
`x` represents a single hex digit.
Named colours should be one of the named standard HTML colours.
"""
@spec set_stroke_color(Typo.handle(), Typo.colour()) :: :ok | Typo.error()
defdelegate set_stroke_color(pdf, v), to: Typo.PDF, as: :set_stroke_colour
@doc """
Sets stroke colour to Greyscale/RGB/CMYK/Hex/Name value `v`.
For Greyscale `v` should be in the range 0.0..1.0; for RGB/CMYK, each component
of the colour tuple should be in the range 0.0..1.0.
For Hex colours, the colour should be specified as `#xxx` or `#xxxxxx` where
`x` represents a single hex digit.
Named colours should be one of the named standard HTML colours.
"""
@spec set_stroke_colour(Typo.handle(), Typo.colour()) :: :ok | Typo.error()
def set_stroke_colour(pdf, v) when is_handle(pdf) and is_number(v) do
append(pdf, n2s([colour_range(v), "G"]))
end
def set_stroke_colour(pdf, {r, g, b} = _v)
when is_handle(pdf) and is_number(r) and is_number(g) and is_number(b) do
append(pdf, n2s([colour_range(r), colour_range(g), colour_range(b), "RG"]))
end
def set_stroke_colour(pdf, {c, m, y, k} = _v)
when is_handle(pdf) and is_number(c) and is_number(m) and is_number(y) and is_number(k) do
append(pdf, n2s([colour_range(c), colour_range(m), colour_range(y), colour_range(k), "K"]))
end
def set_stroke_colour(pdf, <<?#::8, colour::binary-size(3)>>) do
with {_r, _g, _b} = c <- from_hex(colour) do
set_stroke_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
def set_stroke_colour(pdf, <<?#::8, colour::binary-size(6)>>) do
with {_r, _g, _b} = c <- from_hex(colour) do
set_stroke_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
def set_stroke_colour(pdf, <<cn::binary>>) do
with {_r, _g, _b} = c <- colour(String.downcase(cn)) do
set_stroke_colour(pdf, c)
else
:error -> {:error, :invalid_colour}
end
end
@doc """
Strokes the current path, with option `close` value:
* `:close` - path is closed before stroking (default).
* `:no_close` - path is stroked without closing.
"""
@spec stroke(Typo.handle(), :close | :no_close) :: :ok
def stroke(_pdf, close \\ :close)
def stroke(pdf, :close) when is_handle(pdf), do: append(pdf, "s")
def stroke(pdf, :no_close) when is_handle(pdf), do: append(pdf, "S")
@doc """
Applies a transformation matrix.
"""
@spec transform(Typo.handle(), Typo.matrix()) :: :ok
def transform(pdf, matrix) when is_handle(pdf) and is_matrix(matrix),
do: append(pdf, n2s([matrix, "cm"]))
@doc """
Appends a triangle with corners `p1`, `p2` and `p3` onto the current path.
"""
@spec triangle(Typo.handle(), Typo.xy(), Typo.xy(), Typo.xy()) :: :ok
def triangle(pdf, p1, p2, p3) when is_handle(pdf) and is_xy(p1) and is_xy(p2) and is_xy(p3),
do: append(pdf, n2s([p1, "m", p2, "l", p3, "l", p1, "l"]))
end
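# Illustrative drawing sketch (not part of the original module). It assumes a
# server handle `pdf` obtained from the library's start function, which lives
# outside this module, so the calls are shown commented out:
#
#   :ok = Typo.PDF.set_line_width(pdf, 2)
#   :ok = Typo.PDF.set_stroke_colour(pdf, "#f00")
#   :ok = Typo.PDF.rectangle(pdf, {100, 100}, 200, 50)
#   :ok = Typo.PDF.stroke(pdf)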
|
lib/typo/pdf.ex
| 0.88127
| 0.595434
|
pdf.ex
|
starcoder
|
defmodule Phoenix.HTML.Tag do
@moduledoc ~S"""
Helpers related to producing HTML tags within templates.
Note the examples in this module use `safe_to_string/1`
imported from `Phoenix.HTML` for readability.
"""
import Phoenix.HTML
@special_attributes ["data", "aria", "class"]
@csrf_param "_csrf_token"
@method_param "_method"
@doc ~S"""
Creates an HTML tag with the given name and options.
iex> safe_to_string tag(:br)
"<br>"
iex> safe_to_string tag(:input, type: "text", name: "user_id")
"<input name=\"user_id\" type=\"text\">"
## Data attributes
In order to add custom data attributes you need to pass
a tuple containing :data atom and a keyword list
with data attributes' names and values as the first element
in the tag's attributes keyword list:
iex> safe_to_string tag(:input, [data: [foo: "bar"], id: "some_id"])
"<input data-foo=\"bar\" id=\"some_id\">"
## Boolean values
In case an attribute contains a boolean value, its key
is repeated when it is true, as expected in HTML, or
the attribute is completely removed if it is false:
iex> safe_to_string tag(:audio, autoplay: "autoplay")
"<audio autoplay=\"autoplay\">"
iex> safe_to_string tag(:audio, autoplay: true)
"<audio autoplay>"
iex> safe_to_string tag(:audio, autoplay: false)
"<audio>"
If you want the boolean attribute to be sent as is,
you can explicitly convert it to a string before.
"""
def tag(name), do: tag(name, [])
def tag(name, attrs) when is_list(attrs) do
{:safe, [?<, to_string(name), build_attrs(attrs) |> Enum.sort() |> tag_attrs(), ?>]}
end
@doc ~S"""
Creates an HTML tag with given name, content, and attributes.
See `Phoenix.HTML.Tag.tag/2` for more information and examples.
iex> safe_to_string content_tag(:p, "Hello")
"<p>Hello</p>"
iex> safe_to_string content_tag(:p, "<Hello>", class: "test")
"<p class=\"test\"><Hello></p>"
iex> safe_to_string(content_tag :p, class: "test" do
...> "Hello"
...> end)
"<p class=\"test\">Hello</p>"
iex> safe_to_string content_tag(:option, "Display Value", [{:data, [foo: "bar"]}, value: "value"])
"<option data-foo=\"bar\" value=\"value\">Display Value</option>"
"""
def content_tag(name, do: block) do
content_tag(name, block, [])
end
def content_tag(name, content) do
content_tag(name, content, [])
end
def content_tag(name, attrs, do: block) when is_list(attrs) do
content_tag(name, block, attrs)
end
def content_tag(name, content, attrs) when is_list(attrs) do
name = to_string(name)
{:safe, escaped} = html_escape(content)
{:safe,
[?<, name, build_attrs(attrs) |> Enum.sort() |> tag_attrs(), ?>, escaped, ?<, ?/, name, ?>]}
end
@doc """
Escapes a list of attributes, returning iodata.
Pay attention that, unlike `tag/2` and `content_tag/2`, this
function does not sort the attributes.
iex> attributes_escape(title: "the title", id: "the id", selected: true)
{:safe,
[
[32, "title", 61, 34, "the title", 34],
[32, "id", 61, 34, "the id", 34],
[32, "selected"]
]}
"""
def attributes_escape(attrs) do
{:safe, attrs |> build_attrs() |> Enum.reverse() |> tag_attrs()}
end
defp build_attrs([]), do: []
defp build_attrs(attrs), do: build_attrs(attrs, [])
defp build_attrs([], acc), do: acc
defp build_attrs([{k, v} | t], acc) when k in @special_attributes do
build_attrs([{String.to_atom(k), v} | t], acc)
end
defp build_attrs([{:data, v} | t], acc) when is_list(v) do
build_attrs(t, nested_attrs("data", v, acc))
end
defp build_attrs([{:aria, v} | t], acc) when is_list(v) do
build_attrs(t, nested_attrs("aria", v, acc))
end
defp build_attrs([{:class, v} | t], acc) when is_list(v) do
build_attrs(t, [{"class", class_value(v)} | acc])
end
defp build_attrs([{k, true} | t], acc) do
build_attrs(t, [key_escape(k) | acc])
end
defp build_attrs([{_, false} | t], acc) do
build_attrs(t, acc)
end
defp build_attrs([{_, nil} | t], acc) do
build_attrs(t, acc)
end
defp build_attrs([{k, v} | t], acc) do
build_attrs(t, [{key_escape(k), v} | acc])
end
defp tag_attrs([]), do: []
defp tag_attrs(attrs) do
for a <- attrs do
case a do
{k, v} -> [?\s, k, ?=, ?", attr_escape(v), ?"]
k -> [?\s, k]
end
end
end
defp nested_attrs(attr, dict, acc) do
Enum.reduce(dict, acc, fn {k, v}, acc ->
attr_name = "#{attr}-#{key_escape(k)}"
case is_list(v) do
true -> nested_attrs(attr_name, v, acc)
false -> [{attr_name, v} | acc]
end
end)
end
defp class_value(value) when is_list(value) do
value
|> Enum.filter(& &1)
|> Enum.join(" ")
end
defp class_value(value) do
value
end
defp key_escape(value) when is_atom(value), do: String.replace(Atom.to_string(value), "_", "-")
defp key_escape(value), do: attr_escape(value)
defp attr_escape({:safe, data}), do: data
defp attr_escape(nil), do: []
defp attr_escape(other) when is_binary(other), do: Phoenix.HTML.Engine.encode_to_iodata!(other)
defp attr_escape(other), do: Phoenix.HTML.Safe.to_iodata(other)
@doc ~S"""
Generates a form tag.
This function generates the `<form>` tag without its
closing part. Check `form_tag/3` for generating an
enclosing tag.
## Examples
form_tag("/hello")
<form action="/hello" method="post">
form_tag("/hello", method: :get)
<form action="/hello" method="get">
## Options
* `:method` - the HTTP method. If the method is neither "get" nor "post",
an input tag with name `_method` is generated alongside the form tag.
Defaults to "post".
* `:multipart` - when true, sets enctype to "multipart/form-data".
Required when uploading files
* `:csrf_token` - for "post" requests, the form tag will automatically
include an input tag with name `_csrf_token`. When set to false, this
is disabled
All other options are passed to the underlying HTML tag.
## CSRF Protection
By default, CSRF tokens are generated through `Plug.CSRFProtection`.
"""
def form_tag(action, opts \\ [])
def form_tag(action, do: block) do
form_tag(action, [], do: block)
end
def form_tag(action, opts) when is_list(opts) do
{:safe, method} = html_escape(Keyword.get(opts, :method, "post"))
{extra, opts} =
case method do
"get" ->
{"", opts}
"post" ->
csrf_token_tag(
action,
Keyword.put(opts, :method, "post"),
""
)
_ ->
csrf_token_tag(
action,
Keyword.put(opts, :method, "post"),
~s'<input name="#{@method_param}" type="hidden" value="#{method}">'
)
end
opts =
case Keyword.pop(opts, :multipart, false) do
{false, opts} -> opts
{true, opts} -> Keyword.put(opts, :enctype, "multipart/form-data")
end
html_escape([tag(:form, [action: action] ++ opts), raw(extra)])
end
@doc """
Generates a form tag with the given contents.
## Examples
form_tag("/hello", method: "get") do
"Hello"
end
<form action="/hello" method="get">...Hello...</form>
"""
def form_tag(action, options, do: block) do
html_escape([form_tag(action, options), block, raw("</form>")])
end
defp csrf_token_tag(to, opts, extra) do
case Keyword.pop(opts, :csrf_token, true) do
{csrf_token, opts} when is_binary(csrf_token) ->
{extra <> ~s'<input name="#{@csrf_param}" type="hidden" value="#{csrf_token}">', opts}
{true, opts} ->
csrf_token = csrf_token(to)
{extra <> ~s'<input name="#{@csrf_param}" type="hidden" value="#{csrf_token}">', opts}
{false, opts} ->
{extra, opts}
end
end
defp csrf_token(to) do
{mod, fun, args} = Application.fetch_env!(:phoenix_html, :csrf_token_reader)
apply(mod, fun, [to | args])
end
@doc """
Generates a meta tag with CSRF information.
## Tag attributes
* `content` - a valid csrf token
* `csrf-param` - a request parameter where expected csrf token
* `method-param` - a request parameter where expected a custom HTTP method
"""
def csrf_meta_tag do
tag(
:meta,
charset: "UTF-8",
name: "csrf-token",
content: csrf_token(%URI{host: nil}),
"csrf-param": @csrf_param,
"method-param": @method_param
)
end
@doc """
Generates an img tag with a src.
## Examples
img_tag(user.photo_path)
<img src="/photo.png">
img_tag(user.photo, class: "image")
<img src="/smile.png" class="image">
To generate a path to an image hosted in your application "priv/static",
with the `@conn` endpoint, use `static_path/2` to get a URL with
cache control parameters:
img_tag(Routes.static_path(@conn, "/logo.png"))
<img src="/logo-123456.png?vsn=d">
For responsive images, pass a map, list or string through `:srcset`.
img_tag("/logo.png", srcset: %{"/logo.png" => "1x", "/logo-2x.png" => "2x"})
<img src="/logo.png" srcset="/logo.png 1x, /logo-2x.png 2x">
img_tag("/logo.png", srcset: ["/logo.png", {"/logo-2x.png", "2x"}])
<img src="/logo.png" srcset="/logo.png, /logo-2x.png 2x">
"""
def img_tag(src, opts \\ []) do
opts =
case Keyword.pop(opts, :srcset) do
{nil, opts} -> opts
{srcset, opts} -> [srcset: stringify_srcset(srcset)] ++ opts
end
tag(:img, Keyword.put_new(opts, :src, src))
end
defp stringify_srcset(srcset) when is_map(srcset) or is_list(srcset) do
Enum.map_join(srcset, ", ", fn
{src, descriptor} -> "#{src} #{descriptor}"
default -> default
end)
end
defp stringify_srcset(srcset) when is_binary(srcset),
do: srcset
end
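# Illustrative form_tag output (commented because csrf_token/1 needs the
# :csrf_token_reader application env configured at runtime; the token value
# below is a placeholder):
#
#   safe_to_string(form_tag("/users", method: :put))
#   #=> "<form action=\"/users\" method=\"post\">" <>
#   #   "<input name=\"_method\" type=\"hidden\" value=\"put\">" <>
#   #   "<input name=\"_csrf_token\" type=\"hidden\" value=\"...\">"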
|
lib/phoenix_html/tag.ex
| 0.833833
| 0.427875
|
tag.ex
|
starcoder
|
defmodule Gim.Repo do
@moduledoc """
Defines a repository.
A repository maps to an underlying data store hold in-memory.
When used, the repository expects the `:types` as option.
The `:types` is a list of schema types to register.
For example, the repository:
defmodule Repo do
use Gim.Repo,
types: [Author, Post]
end
"""
# https://github.com/elixir-ecto/ecto/blob/v3.2.5/lib/ecto/repo.ex
@doc false
defmacro __using__(opts) do
types = Macro.expand(Keyword.get(opts, :types, []), __CALLER__)
quote bind_quoted: [types: types] do
alias Gim.Query
@after_compile Gim.Repo
defstruct types
conditions =
types
|> Enum.map(fn type ->
quote do
type == unquote(type)
end
end)
|> Enum.reduce(fn guard, acc ->
quote do
unquote(guard) or unquote(acc)
end
end)
defguard is_type(type) when unquote(conditions)
@default_args [
name: __MODULE__,
module: __MODULE__,
table: Gim.Repo.Table.Ets,
types: types
]
@configurable [:name, :types, :table]
def start_link(args \\ []) do
args = Keyword.merge(@default_args, Keyword.take(args, @configurable))
GenServer.start_link(Gim.Repo.Server, args,
name: args[:name],
spawn_opt: [fullsweep_after: 50]
)
rescue
# It's needed since the spawn opts confuse dialyzer
_e in ArgumentError ->
{:error, :unexpected}
end
def child_spec(opts) do
%{
id: __MODULE__,
start: {__MODULE__, :start_link, [opts]},
type: :worker,
restart: :permanent,
shutdown: 500
}
end
# API
def types do
unquote(types)
end
def dump do
GenServer.call(__MODULE__, {:all})
end
def resolve(%Gim.Query{type: type, expand: expand} = query)
when is_type(type) and length(expand) > 0 do
case GenServer.call(__MODULE__, {:resolve, query}) do
{:ok, nodes} ->
Gim.Repo.__expand__(nodes, expand)
error ->
error
end
end
def resolve(%Gim.Query{type: type} = query) when is_type(type) do
GenServer.call(__MODULE__, {:resolve, query})
end
def resolve!(query) do
case resolve(query) do
{:ok, nodes} -> nodes
{:error, error} -> raise error
end
end
def all(type) when is_type(type) do
resolve(%Query{type: type})
end
def all!(type) when is_type(type) do
case all(type) do
{:ok, nodes} -> nodes
{:error, exception} -> raise exception
end
end
@doc """
Get all nodes of a given type by a given index value.
Always returns a list.
"""
def get(type, key, value) when is_type(type) do
case resolve(%Query{type: type, filter: [{key, value}]}) do
{:ok, nodes} -> nodes
{:error, error} -> raise error
end
end
@doc """
Fetch a node of a given type by its internal id.
"""
def fetch!(type, id) when is_type(type) do
fetch!(type, :__id__, id)
end
@doc """
Fetch a node of a given type by a given unique index value.
Returns `{:ok, node}` or `{:error, _}`.
"""
def fetch(type, key, value) do
case get(type, key, value) do
[node] -> {:ok, node}
[] -> {:error, Gim.NoNodeError}
_ -> {:error, Gim.MultipleNodesError}
end
end
@doc """
Fetch a node of a given type by a given unique index value.
Returns the node or raises.
"""
def fetch!(type, key, value) do
case get(type, key, value) do
[node] -> node
[] -> raise Gim.NoNodeError, "No such Node"
_ -> raise Gim.MultipleNodesError, "Multiple Nodes found"
end
end
@doc """
Inserts a fresh node in the repo without the edges.
"""
def create(%{__struct__: struct} = node) when is_type(struct) do
# remove all edges
naked = Gim.Query.clear_edges(node)
case insert(naked) do
{:ok, %{__id__: id, __repo__: repo}} ->
{:ok, %{node | __id__: id, __repo__: repo}}
error ->
error
end
end
def create!(node) do
case create(node) do
{:ok, node} -> node
{:error, error} -> raise error
end
end
@doc """
Insert a fresh node into the repo. The nodes must not have and id.
"""
def insert(%{__struct__: struct, __id__: nil} = node) when is_type(struct) do
GenServer.call(__MODULE__, {:insert, node})
end
def insert!(node) do
case insert(node) do
{:ok, node} -> node
{:error, error} -> raise error
end
end
@doc """
Update a node by replacing the attributes and edges.
"""
def update(node) do
GenServer.call(__MODULE__, {:update, node})
end
def update!(node) do
case update(node) do
{:ok, node} -> node
{:error, error} -> raise error
end
end
@doc """
Update a node by replacing the attributes and merging the edges.
"""
def merge(node) do
GenServer.call(__MODULE__, {:merge, node})
end
def merge!(node) do
case merge(node) do
{:ok, node} -> node
{:error, error} -> raise error
end
end
@doc """
Deletes a node without consistency checks.
"""
def delete(node) do
GenServer.call(__MODULE__, {:delete, node})
end
# Import helper
# Opts are errors: :raise|:warn|:ignore
def import(nodes, opts \\ []) do
require Logger
errors = Keyword.get(opts, :errors, :raise)
# 1st pass: Create (insert)
nodes =
nodes
|> Enum.map(fn {k, node} -> {k, create!(node)} end)
|> Enum.into(%{})
# 2nd pass: Resolve (merge)
nodes
|> Enum.map(fn {_k, node} -> Gim.Repo.__put_assocs__(node, nodes, errors) end)
|> Enum.map(fn node -> merge(node) end)
end
end
end
def __after_compile__(caller, _byte_code) do
types = caller.module.types()
for type <- types do
for {_name, _cardinality, assoc_type, _reflect, _stacktrace} <- type.__schema__(:gim_assocs) do
unless assoc_type in types do
message = ~s'''
#{inspect(type)} has an edge targeting #{inspect(assoc_type)} which is not part of the Repository
'''
reraise Gim.NoSuchTypeError, message, Macro.Env.stacktrace(caller)
end
end
end
end
def __expand__(nodes, []) do
{:ok, nodes}
end
def __expand__(nodes, path) when is_list(nodes) do
Enum.reduce_while(Enum.reverse(nodes), {:ok, []}, fn node, {:ok, acc} ->
case __expand__(node, path) do
{:ok, node} -> {:cont, {:ok, [node | acc]}}
error -> {:halt, error}
end
end)
end
def __expand__(%{__repo__: repo} = node, [{edge, nested} | path]) when is_map(node) do
with {:ok, nodes} <- repo.resolve(Gim.Query.query(node, edge)),
{:ok, nodes} <- __expand__(nodes, nested) do
Map.update!(node, edge, fn assoc ->
if is_list(assoc) do
nodes
else
List.first(nodes)
end
end)
|> __expand__(path)
else
error -> error
end
end
def __put_assocs__(%struct{} = node, nodes, errors) do
assocs = struct.__schema__(:associations)
Enum.reduce(assocs, node, fn assoc, node ->
__put_assoc__(node, assoc, nodes, errors)
end)
end
def __put_assoc__(node, assoc, nodes, _errors) do
import Gim.Query, only: [add_edge: 3, clear_edge: 2]
node
|> Map.fetch!(assoc)
|> List.wrap()
|> Enum.reduce(clear_edge(node, assoc), fn link, node ->
case Map.fetch(nodes, link) do
{:ok, link_node} ->
add_edge(node, assoc, link_node)
end
end)
end
end
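# Illustrative repo definition and use (commented sketch; assumes Author is a
# Gim schema module registered in the repo's :types, with an index on :name):
#
#   defmodule MyApp.Repo do
#     use Gim.Repo, types: [Author]
#   end
#
#   {:ok, _pid} = MyApp.Repo.start_link()
#   author = MyApp.Repo.insert!(%Author{name: "Tolkien"})
#   [^author] = MyApp.Repo.get(Author, :name, "Tolkien")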
|
lib/gim/repo.ex
| 0.954063
| 0.58166
|
repo.ex
|
starcoder
|
defmodule Scenic.Primitive.Triangle do
@moduledoc """
Draw a triangle on the screen.
## Data
`{point_a, point_b, point_c}`
The data for a triangle is a tuple containing three points.
* `point_a` - position to start drawing from
* `point_b` - position to draw to
* `point_c` - position to draw to
## Styles
This primitive recognizes the following styles
* [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
* [`fill`](Scenic.Primitive.Style.Fill.html) - fill in the area of the primitive
* [`stroke`](Scenic.Primitive.Style.Stroke.html) - stroke the outline of the primitive.
* [`join`](Scenic.Primitive.Style.Join.html) - control how segments are joined.
* [`miter_limit`](Scenic.Primitive.Style.MiterLimit.html) - control how segments are joined.
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#text/3)
"""
use Scenic.Primitive
alias Scenic.Math
@styles [:hidden, :fill, :stroke, :join, :miter_limit]
# ===========================================================================
# data verification and serialization
# --------------------------------------------------------
@doc false
def info(data),
do: """
#{IO.ANSI.red()}#{__MODULE__} data must be three points: {{x0,y0}, {x1,y1}, {x2,y2}}
#{IO.ANSI.yellow()}Received: #{inspect(data)}
#{IO.ANSI.default_color()}
"""
@doc false
def verify({{x0, y0}, {x1, y1}, {x2, y2}} = data)
when is_number(x0) and is_number(y0) and is_number(x1) and is_number(y1) and is_number(x2) and
is_number(y2),
do: {:ok, data}
def verify(_), do: :invalid_data
# ============================================================================
@doc """
Returns a list of styles recognized by this primitive.
"""
@spec valid_styles() :: [:hidden | :fill | :stroke | :join | :miter_limit, ...]
def valid_styles(), do: @styles
# --------------------------------------------------------
def default_pin(data), do: centroid(data)
# --------------------------------------------------------
@doc """
Returns the centroid of the triangle. This is used as the default pin when applying
rotate or scale transforms.
"""
def centroid(data)
def centroid({{x0, y0}, {x1, y1}, {x2, y2}}) do
{
(x0 + x1 + x2) / 3,
(y0 + y1 + y2) / 3
}
end
# http://blackpawn.com/texts/pointinpoly/
# --------------------------------------------------------
@degenerate 0.0001
def contains_point?({{x0, y0} = p0, {x1, y1} = p1, {x2, y2} = p2}, px) do
# make sure the points are not collinear, if so the abs(area) will be very small
area = abs(x0 * (y1 - y2) + x1 * (y2 - y0) + x2 * (y0 - y1))
if area < @degenerate do
false
else
# compute vectors
v0 = Math.Vector2.sub(p2, p0)
v1 = Math.Vector2.sub(p1, p0)
v2 = Math.Vector2.sub(px, p0)
# compute dot products
dot00 = Math.Vector2.dot(v0, v0)
dot01 = Math.Vector2.dot(v0, v1)
dot02 = Math.Vector2.dot(v0, v2)
dot11 = Math.Vector2.dot(v1, v1)
dot12 = Math.Vector2.dot(v1, v2)
# Compute barycentric coordinates
inv_denom = 1.0 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inv_denom
v = (dot00 * dot12 - dot01 * dot02) * inv_denom
# Check if point is in triangle
u >= 0 && v >= 0 && u + v < 1
end
end
end
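# Illustrative property check (not part of the original module; assumes the
# scenic_math dependency providing Scenic.Math.Vector2 is available): the
# centroid of a non-degenerate triangle always lies inside it.
triangle = {{0, 0}, {10, 0}, {0, 10}}
center = Scenic.Primitive.Triangle.centroid(triangle)
true = Scenic.Primitive.Triangle.contains_point?(triangle, center)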
|
lib/scenic/primitive/triangle.ex
| 0.924373
| 0.73841
|
triangle.ex
|
starcoder
|
defmodule Shadowsocks do
@moduledoc """
The Shadowsocks.
This module defines common apis to start,update,stop shadowsocks listeners.
### start a listener
Shadowsocks.start(args)
the `args` is a keyword list, fields:
* `type` required `atom` - the connection type, `:client` or `:server` or custom module name
* `port` required `integer` - listen port
* `ip` optional `tuple` - listen ip, example: `{127,0,0,1}`
* `method` optional `string` - encode method, default: `"aes-256-cfb"`
* `password` required `string` - encode password
* `ota` optional `bool` - is force open one time auth, default: `false`
* `server` optional `tuple` - required if `type` is `:client`, example: `{"la.ss.org", 8388}`
### stop a listener
Shadowsocks.stop(port)
stops the listener on the given port; always returns `:ok`
### update listener args
Shadowsocks.update(port, args)
the `args` is a keyword list, *see `Shadowsocks.start/1` method*
"""
@doc """
start a listener
the `args` is a keyword list, fields:
* `type` required `atom` - the connection type, `:client` or `:server` or custom module name
There are currently five built-in `type` values:
1. `Shadowsocks.Conn.Client` - general client, alias is `:client`
2. `Shadowsocks.Conn.Server` - general server, alias is `:server`
3. `Shadowsocks.Conn.TransparentClient` - transparent client, perfect with iptables
4. `Shadowsocks.Conn.HTTP302` - redirect any http get request to `:redirect_url`, otherwise drop connections
5. `Shadowsocks.Conn.ObfsServer` - simple http obfs server (can both accept http obfs client and original shadowsocks client)
* `port` required `integer` - listen port
* `ip` optional `tuple` - listen ip, example: `{127,0,0,1}`
* `method` optional `string` - encode method, default: `"aes-256-cfb"`
* `password` required `string` - encode password
* `ota` optional `bool` - is force open one time auth, default: `false`
* `server` optional `tuple` - required if `type` is `:client`, example: `{"la.ss.org", 8388}`
"""
def start(args) do
Shadowsocks.ListenerSup.start_child(args)
end
@doc """
update listener args
the `args` is a keyword list, *see `Shadowsocks.start/1` method*
"""
def update(port, args) do
case find_listener(port) do
[pid] ->
Shadowsocks.Listener.update(pid, args)
_ ->
{:error, :not_running}
end
end
@doc """
stop a listener
stops the listener on the given port; always returns `:ok`
"""
def stop(port) do
find_listener(port)
|> Enum.each(fn p -> Supervisor.terminate_child(Shadowsocks.ListenerSup, p) end)
:ok
end
@doc """
check port is running
"""
def running?(port) do
case find_listener(port) do
[] -> false
_ -> true
end
end
@doc """
get listener `pid`
"""
def get(port) do
case find_listener(port) do
[pid | _] -> pid
_ -> nil
end
end
defp find_listener(port) do
children = Supervisor.which_children(Shadowsocks.ListenerSup)
for {_, p, _, _} <- children, Shadowsocks.Listener.port(p) == port, do: p
end
end
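# Illustrative lifecycle (commented sketch; requires the Shadowsocks
# application with its ListenerSup supervisor to be running; port and
# password are placeholder values):
#
#   {:ok, _pid} = Shadowsocks.start(type: :server, port: 8388, password: "secret")
#   true = Shadowsocks.running?(8388)
#   :ok = Shadowsocks.stop(8388)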
|
lib/shadowsocks.ex
| 0.897983
| 0.529993
|
shadowsocks.ex
|
starcoder
|
defmodule Identicon do
@moduledoc """
Documentation for Identicon.
The first three numbers from the sequence of characters become the RGB color.
If the number in a grid cell is odd, the cell is left white.
If the number is even, the cell is filled.
"""
def main(input) do
input
|> hash_input
|> pick_color
|> build_grid
|> filter_odd_squares
|> build_pixel_map
|> draw_image
|> save_image(input)
end
@doc """
Hashing a string to a unique sequence of characters using MD5.
It takes a string and returns a list of numbers (the same each time we run it).
## Examples
iex> Identicon.hash_input('my_string')
%Identicon.Image{ hex: [61, 33, 43, 33, 250, 215, 190, 214, 60, 31, 181, 96, 198, 165, 197, 208],
color: nil,
grid: nil }
iex> Identicon.hash_input('my_string')
%Identicon.Image{ hex: [61, 33, 43, 33, 250, 215, 190, 214, 60, 31, 181, 96, 198, 165, 197, 208],
color: nil,
grid: nil }
"""
def hash_input(input) do
hex = :crypto.hash(:md5, input)
|> :binary.bin_to_list
%Identicon.Image{hex: hex}
end
@doc """
Picking the color as the first 3 numbers from the 'image' struct.
The numbers correspond to (R)ed, (G)reen and (B)lue channels.
## Examples
iex> image = Identicon.hash_input('hash_me')
iex> Identicon.pick_color(image)
%Identicon.Image{ color: {202, 80, 91},
hex: [202, 80, 91, 154, 26, 69, 237, 0, 38, 137, 34, 139, 223, 37, 34, 93],
grid: nil }
"""
def pick_color(image) do
# Accessing first three values using pattern matching
%Identicon.Image{hex: hex_list} = image
# Pattern matching and tossing away the tail
[r, g, b | _tail] = hex_list
# Updating the value for a given key using the 'pipe' syntax
# %Identicon.Image{image | color: {r, g, b}}
# Update the value for a given key using Map.put
Map.put(image, :color, {r, g, b})
end
@doc """
Building the grid.
We are using chunk size = 3 because we mirror each row to get 5 values.
Using pattern matching directly from the parameter.
## Examples
iex> image = Identicon.hash_input('hash_me')
%Identicon.Image{
color: nil,
hex: [202, 80, 91, 154, 26, 69, 237, 0, 38, 137, 34, 139, 223, 37, 34, 93],
grid: nil
}
iex> Identicon.build_grid(image)
%Identicon.Image{
color: nil,
grid: [
{202, 0},
{80, 1},
{91, 2},
{80, 3},
{202, 4},
{154, 5},
{26, 6},
{69, 7},
{26, 8},
{154, 9},
{237, 10},
{0, 11},
{38, 12},
{0, 13},
{237, 14},
{137, 15},
{34, 16},
{139, 17},
{34, 18},
{137, 19},
{223, 20},
{37, 21},
{34, 22},
{37, 23},
{223, 24}
],
hex: [202, 80, 91, 154, 26, 69, 237, 0, 38, 137, 34, 139, 223, 37, 34, 93]
}
"""
def build_grid(%Identicon.Image{hex: hex_list} = image) do
# Enum.chunk creates list of lists
grid =
hex_list
# Chunking the list by 3 with step 3 and discarding the rest
|> Enum.chunk_every(3, 3, :discard)
# Passing reference to a function mirror_row that takes 1 argument.
|> Enum.map(&mirror_row/1)
# Flattening the list for operations simplicity
|> List.flatten()
# Adding indexes
|> Enum.with_index
# Updating the grid property with the "pipe" syntax
%Identicon.Image{image | grid: grid}
end
@doc """
Mirroring a 3 elements row [a, b, c].
Returning a 5 elements row [a, b, c, d, e]
## Examples
iex> Identicon.mirror_row([1, 2, 3])
[1, 2, 3, 2, 1]
"""
def mirror_row(row) do
# [ 1, 2, 3] => [1, 2, 3, 2, 1]
[first, second, _tail] = row
## Joining lists with '++'
row ++ [second, first]
end
@doc """
Filtering out squares with odd color code number.
## Examples
iex> image = Identicon.hash_input('hash_me') |> Identicon.build_grid
iex> Identicon.filter_odd_squares(image)
%Identicon.Image{
color: nil,
grid: [
{202, 0},
{80, 1},
{80, 3},
{202, 4},
{154, 5},
{26, 6},
{26, 8},
{154, 9},
{0, 11},
{38, 12},
{0, 13},
{34, 16},
{34, 18},
{34, 22}
],
hex: [202, 80, 91, 154, 26, 69, 237, 0, 38, 137, 34, 139, 223, 37, 34, 93]
}
"""
def filter_odd_squares(%Identicon.Image{grid: grid} = image) do
grid = Enum.filter(grid, fn(cell) ->
{color_code, _} = cell
# Calculating the remainder to see if the number is odd or even
rem(color_code, 2) == 0
end )
%Identicon.Image{image| grid: grid}
end
@doc """
Building a pixel map.
Each cell must be defined as {{x_top_left, y_top_left}, {x_bottom_right, y_bottom_right}}
## Examples
iex> fake_img = %Identicon.Image{grid: [{202, 0}, {80, 1}, {90, 3}, {98, 5}]}
iex> Identicon.build_pixel_map(fake_img)
%Identicon.Image{
color: nil,
grid: [{202, 0}, {80, 1}, {90, 3}, {98, 5}],
hex: nil,
pixel_map: [
{{0, 0}, {50, 50}},
{{50, 0}, {100, 50}},
{{150, 0}, {200, 50}},
{{0, 50}, {50, 100}}
]
}
"""
def build_pixel_map(%Identicon.Image{grid: grid} = image) do
pixel_map = Enum.map(grid, fn({_, index}) ->
horizontal = rem(index, 5) * 50
vertical = div(index, 5) * 50
top_left_corner = {horizontal, vertical}
bottom_right_corner = {horizontal + 50, vertical + 50}
{top_left_corner, bottom_right_corner}
end)
%Identicon.Image{image | pixel_map: pixel_map}
end
@doc """
Drawing the image with EGD.
"""
def draw_image(%Identicon.Image{color: color, pixel_map: pixel_map}) do
image = :egd.create(250, 250)
fill = :egd.color(color)
# EGD weirdness - we are editing an existing image
# instead of returning a new one each time we draw something.
Enum.each(pixel_map, fn({start, stop}) ->
:egd.filledRectangle(image, start, stop, fill)
end)
:egd.render(image)
end
@doc """
Saving image to the disk.
"""
def save_image(image, filename) do
File.write("#{filename}.png", image)
end
end
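# Illustrative end-to-end run (commented because it writes "elixir.png" to the
# current directory and requires the :egd dependency used by draw_image/1):
#
#   Identicon.main("elixir")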
|
19-02-02-The-Complete-Elixir-And-Phoenix-Bootcamp/04-IdenticonProject/identicon/lib/identicon.ex
| 0.936619
| 0.511595
|
identicon.ex
|
starcoder
|
defmodule Mongo.Session.ServerSession do
@moduledoc """
This module represents the server-side session. There are three fields:
* `last_use` - The timestamp for the last use of this server session
* `txn_num` - The current transaction number
* `session_id` - The session id of this server session
When a transaction is active, all operations in that transaction
use the same transaction number.
Transaction number is also used outside of transactions for
retryable writes. In this case, each write operation has its own
transaction number, but retries of a write operation use the same
transaction number as the first write (which is how the server
knows that subsequent writes are retries and should be ignored if
the first write succeeded on the server but was not read by the
client, for example).
"""
alias Mongo.Session.ServerSession
@type t :: %__MODULE__{
last_use: integer,
txn_num: non_neg_integer,
session_id: BSON.Binary.t
}
defstruct last_use: 0, txn_num: 0, session_id: nil
@doc """
Create a new server session.
"""
@spec new() :: ServerSession.t
def new() do
%ServerSession{session_id: Mongo.uuid(), last_use: System.monotonic_time(:second)}
end
@doc """
Update the last_use attribute of the server session to now.
"""
@spec set_last_use(ServerSession.t) :: ServerSession.t
def set_last_use(%ServerSession{} = session) do
%ServerSession{session | last_use: System.monotonic_time(:second)}
end
@doc """
Increment the current transaction number and return the new value.
"""
@spec next_txn_num(ServerSession.t) :: ServerSession.t
def next_txn_num(%ServerSession{:txn_num => txn_num} = session) do
%ServerSession{session | txn_num: txn_num + 1}
end
@doc """
Return true, if the server session will time out. In this case the session
can be removed from the queue.
"""
@spec about_to_expire?(ServerSession.t, integer) :: boolean
@compile {:inline, about_to_expire?: 2}
def about_to_expire?(%ServerSession{:last_use => last_use}, logical_session_timeout) do
(System.monotonic_time(:second) - last_use) >= logical_session_timeout
end
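# Brief usage sketch; the 30-minute timeout below is only an assumption
# (the real value comes from the server's logicalSessionTimeoutMinutes):
#
#     session = ServerSession.new()
#     session = ServerSession.next_txn_num(session)
#     ServerSession.about_to_expire?(session, 30 * 60)
#     #=> false (immediately after creation)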
defimpl Inspect, for: ServerSession do
def inspect(%ServerSession{last_use: last_use, txn_num: txn, session_id: session_id}, _opts) do
"#ServerSession(" <> inspect(DateTime.from_unix(last_use)) <> ", " <> to_string(txn) <> ", session_id: " <> (inspect session_id) <> ")"
end
end
end
|
lib/session/server_session.ex
| 0.82734
| 0.429818
|
server_session.ex
|
starcoder
|
defmodule Noray.Tetrad do
@moduledoc """
Basic data structure for points and vectors.
A tetrad consists of four values: x, y, z, and w. w should be 1.0 (if it represents a point) or 0.0 (if it represents
a vector). See `Noray.Point` and `Noray.Vector` for helpers and specific operations.
"""
@opaque t :: {:tetrad, float(), float(), float(), float()}
require Record
Record.defrecord(:tetrad, x: 0.0, y: 0.0, z: 0.0, w: 0.0)
@doc """
Create a zero tetrad.
"""
@spec new :: t()
def new, do: tetrad()
@doc """
Create a new tetrad with the specified values.
You should use `Noray.Point.new/3` and `Noray.Vector.new/3` instead of this function.
"""
@spec new(float(), float(), float(), float()) :: t()
def new(x, y, z, w), do: tetrad(x: x, y: y, z: z, w: w)
@doc """
Returns the x component of a tetrad.
"""
@spec x(t()) :: float()
def x(tetrad), do: tetrad(tetrad, :x)
@doc """
Returns the y component of a tetrad.
"""
@spec y(t()) :: float()
def y(tetrad), do: tetrad(tetrad, :y)
@doc """
Returns the z component of a tetrad.
"""
@spec z(t()) :: float()
def z(tetrad), do: tetrad(tetrad, :z)
@doc """
Returns the w component of a tetrad. Don't use this. Use `point?/1` and `vector?/1` instead.
"""
@spec w(t()) :: float()
def w(tetrad), do: tetrad(tetrad, :w)
@doc """
Returns `true` if the tetrad represents a point, `false` otherwise.
"""
@spec point?(t()) :: boolean()
def point?(tetrad), do: tetrad(tetrad, :w) == 1.0
@doc """
Returns `true` if the tetrad represents a vector, `false` otherwise.
"""
@spec vector?(t()) :: boolean()
def vector?(tetrad), do: tetrad(tetrad, :w) == 0.0
@doc """
Adds two tetrads.
The tetrads can be points or vectors, even if the math doesn't necessarily make sense.
"""
@spec add(t(), t()) :: t()
def add(tetrad1, tetrad2) do
tetrad(
x: tetrad(tetrad1, :x) + tetrad(tetrad2, :x),
y: tetrad(tetrad1, :y) + tetrad(tetrad2, :y),
z: tetrad(tetrad1, :z) + tetrad(tetrad2, :z),
w: tetrad(tetrad1, :w) + tetrad(tetrad2, :w)
)
end
@doc """
Subtracts two tetrads.
The tetrads can be points or vectors, even if the math doesn't necessarily make sense.
"""
@spec subtract(t(), t()) :: t()
def subtract(tetrad1, tetrad2) do
tetrad(
x: tetrad(tetrad1, :x) - tetrad(tetrad2, :x),
y: tetrad(tetrad1, :y) - tetrad(tetrad2, :y),
z: tetrad(tetrad1, :z) - tetrad(tetrad2, :z),
w: tetrad(tetrad1, :w) - tetrad(tetrad2, :w)
)
end
@doc """
Negates a tetrad.
"""
@spec negate(t()) :: t()
def negate(tetrad) do
tetrad(
x: -tetrad(tetrad, :x),
y: -tetrad(tetrad, :y),
z: -tetrad(tetrad, :z),
w: -tetrad(tetrad, :w)
)
end
@doc """
Scales a tetrad by a factor.
"""
@spec scale(t(), float()) :: t()
def scale(tetrad, factor) do
tetrad(
x: tetrad(tetrad, :x) * factor,
y: tetrad(tetrad, :y) * factor,
z: tetrad(tetrad, :z) * factor,
w: tetrad(tetrad, :w) * factor
)
end
@doc """
Scales a tetrad by the inverse of a factor; that is, _divide_ the tetrad by the factor.
"""
@spec scale_inverse(t(), float()) :: t()
def scale_inverse(tetrad, factor_inverse) do
tetrad(
x: tetrad(tetrad, :x) / factor_inverse,
y: tetrad(tetrad, :y) / factor_inverse,
z: tetrad(tetrad, :z) / factor_inverse,
w: tetrad(tetrad, :w) / factor_inverse
)
end
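# Illustrative arithmetic with arbitrary values: adding a vector (w: 0.0)
# to a point (w: 1.0) yields another point, because the w components sum
# to 1.0.
#
#     p = Noray.Tetrad.new(1.0, 2.0, 3.0, 1.0)
#     v = Noray.Tetrad.new(0.5, 0.0, -1.0, 0.0)
#     p |> Noray.Tetrad.add(v) |> Noray.Tetrad.point?()
#     #=> true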
end
|
lib/noray/tetrad.ex
| 0.926429
| 0.956431
|
tetrad.ex
|
starcoder
|
defmodule Chex.Game do
@moduledoc false
alias Chex.{Board, Color, Game, Move, Parser.FEN, Piece}
defstruct board: %{},
active_color: :white,
castling: [:K, :Q, :k, :q],
en_passant: nil,
moves: [],
halfmove_clock: 0,
fullmove_clock: 1,
captures: [],
check: nil,
result: nil,
pgn: nil
@doc """
Creates a new game, optionally from a FEN string.
Returns a %Game{} initialized with fen or the default starting positions.
## Examples
iex> Chex.Game.new()
{:ok, %Chex.Game{}}
iex> Chex.Game.new("rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1")
{:ok, %Chex.Game{}}
"""
@spec new :: {:ok, Chex.game()}
def new do
{:ok,
%Game{
board: Board.starting_position()
}}
end
@spec new(String.t()) :: {:ok, Chex.game()} | {:error, atom()}
def new(fen), do: FEN.parse(fen)
@doc """
Makes a move within the chess game.
Returns `{:ok, %Game{}}` modified by the move, or an error tuple.
## Examples
iex> {:ok, game} = Chex.Game.new()
iex> Chex.Game.move(game, "e4e5")
{:error, :no_piece_at_square}
iex> Chex.Game.move(game, "e2e4")
{:ok, %Chex.Game{}}
"""
@spec move(Chex.game(), Chex.move()) ::
{:ok, Chex.game()} | {:error, atom()}
def move(game, move) when is_binary(move) do
move = Move.parse(move, game)
move(game, move)
end
def move(game, {from, to, promote}), do: move(game, {from, to}, promote)
def move(game, move), do: move(game, move, :queen)
@spec move(Chex.game(), {Chex.square(), Chex.square()}, Chex.name()) ::
{:ok, Chex.game()} | {:error, atom()}
def move(game, {from, to} = move, promote_to) do
with {:ok, _} <- validate_move(game, move),
{:ok, {piece, capture, game}} <- Board.move(game, from, to),
{:ok, game} <- castle(game, move) do
piece = Piece.trim(piece)
capture = if capture != nil, do: Piece.trim(capture)
game =
game
|> add_move(move)
|> capture_piece(capture)
|> maybe_promote_pawn(promote_to)
|> switch_active_color()
|> update_check()
|> update_castling(piece)
|> update_en_passant(piece)
|> update_halfmove_clock(piece, capture)
|> maybe_increment_fullmove_clock(piece)
|> maybe_update_result()
{:ok, game}
end
end
@doc """
Makes a series of `moves` within the chess game.
Returns `{:ok, %Game{}}` with every move applied, or an error tuple.
## Examples
iex> {:ok, game} = Chex.Game.new()
iex> Chex.Game.moves(game, ["e2e4", "e7e5"])
{:ok, %Chex.Game{}}
"""
@spec moves(Chex.game(), [Chex.move()]) ::
{:ok, Chex.game()} | {:error, atom()}
def moves(game, moves) do
Enum.reduce_while(moves, game, fn san, game ->
case Chex.Move.parse(san, game) do
{:error, _} = error ->
{:halt, error}
move ->
{:ok, game} = move(game, move)
{:cont, game}
end
end)
|> case do
{:error, _} = error -> error
game -> {:ok, game}
end
end
defdelegate in_check?(game, color), to: Game.Checking
defdelegate checkmate?(game), to: Game.Checking
defdelegate stalemate?(game), to: Game.Checking
@spec result(Chex.game()) :: Chex.result()
def result(game), do: game.result
@spec add_move(Chex.game(), {Chex.square(), Chex.square()}) :: Chex.game()
defp add_move(%Game{moves: moves} = game, move) do
%{game | moves: [move | moves]}
end
@spec maybe_increment_fullmove_clock(Chex.game(), Chex.piece()) :: Chex.game()
defp maybe_increment_fullmove_clock(game, {_name, :black}) do
%{game | fullmove_clock: game.fullmove_clock + 1}
end
defp maybe_increment_fullmove_clock(game, _piece), do: game
defp update_check(game) do
check = if in_check?(game, game.active_color), do: game.active_color, else: nil
%{game | check: check}
end
@spec update_castling(Chex.game(), Chex.piece()) :: Chex.game()
defp update_castling(game, {:king, :black}) do
delete_castling_rights(game, [:k, :q])
end
defp update_castling(game, {:king, :white}) do
delete_castling_rights(game, [:K, :Q])
end
defp update_castling(%{moves: [{{:a, _r}, _to} | _tl]} = game, {:rook, color}) do
right =
{:queen, color}
|> Piece.to_string()
|> String.to_existing_atom()
delete_castling_rights(game, [right])
end
defp update_castling(%{moves: [{{:h, _r}, _to} | _tl]} = game, {:rook, color}) do
right =
{:king, color}
|> Piece.to_string()
|> String.to_existing_atom()
delete_castling_rights(game, [right])
end
defp update_castling(game, _piece), do: game
defp delete_castling_rights(game, rights) when is_list(rights) do
{_old, game} =
game
|> Map.get_and_update(:castling, fn current_rights ->
{current_rights, current_rights -- rights}
end)
game
end
@spec update_en_passant(Chex.game(), Chex.piece()) :: Chex.game()
defp update_en_passant(
%Game{moves: [{{file, 2}, {file, 4}} | _prev_moves]} = game,
{:pawn, :white}
) do
%{game | en_passant: {file, 3}}
end
defp update_en_passant(
%Game{moves: [{{file, 7}, {file, 5}} | _prev_moves]} = game,
{:pawn, :black}
) do
%{game | en_passant: {file, 6}}
end
defp update_en_passant(%Game{en_passant: nil} = game, _move), do: game
defp update_en_passant(game, _move), do: %{game | en_passant: nil}
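# Worked example: after white's double pawn push {{:e, 2}, {:e, 4}}, the
# first clause above sets the en passant target to {:e, 3}; any move that
# does not match a double push clears the square.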
@spec update_halfmove_clock(Chex.game(), Chex.piece(), Chex.piece() | nil) :: Chex.game()
defp update_halfmove_clock(game, {_, _}, {_, _}), do: %{game | halfmove_clock: 0}
defp update_halfmove_clock(game, {:pawn, _color}, _), do: %{game | halfmove_clock: 0}
defp update_halfmove_clock(game, _piece, _capture) do
%{game | halfmove_clock: game.halfmove_clock + 1}
end
@spec validate_move(Chex.game(), Chex.move()) ::
{:ok, Chex.game()} | {:error, reason :: atom}
defp validate_move(%Game{} = game, {from, to}) do
with {:ok, :noop} <-
Board.occupied?(game, from)
|> maybe_error(:no_piece_at_square),
color <- Board.get_piece_color(game, from),
{:ok, :noop} <-
(game.active_color == color)
|> maybe_error(:out_of_turn),
{:ok, :noop} <-
!Board.occupied_by_color?(game, color, to)
|> maybe_error(:occupied_by_own_color),
{:ok, :noop} <-
(to in Piece.possible_moves(game, from))
|> maybe_error(:invalid_move),
do: {:ok, game}
end
# Queenside castle
defp castle(game, {{:e, r}, {:c, r}}) when r in [1, 8] do
{:ok,
case Board.get_piece_name(game, {:c, r}) do
:king -> castle_queenside(game, r)
_ -> game
end}
end
# Kingside castle
defp castle(game, {{:e, r}, {:g, r}}) when r in [1, 8] do
{:ok,
case Board.get_piece_name(game, {:g, r}) do
:king -> castle_kingside(game, r)
_ -> game
end}
end
defp castle(game, _move), do: {:ok, game}
defp castle_kingside(game, rank) do
{:ok, {_p, _c, game}} = Board.move(game, {:h, rank}, {:f, rank})
game
end
defp castle_queenside(game, rank) do
{:ok, {_p, _c, game}} = Board.move(game, {:a, rank}, {:d, rank})
game
end
@spec switch_active_color(Chex.game()) :: Chex.game()
defp switch_active_color(%{active_color: color} = game) do
%{game | active_color: Color.flip(color)}
end
defp maybe_error(true, _reason), do: {:ok, :noop}
defp maybe_error(false, reason), do: {:error, reason}
@spec capture_piece(Chex.game(), Chex.piece() | nil) :: Chex.game()
defp capture_piece(game, nil), do: game
defp capture_piece(%Game{captures: captures} = game, piece) do
%{game | captures: [piece | captures]}
end
@spec maybe_promote_pawn(Chex.game(), Chex.name()) :: Chex.game()
defp maybe_promote_pawn(%{moves: [{_from, {_, d_rank} = sq} | _mvs]} = game, new_piece)
when d_rank in [1, 8] do
{_old, board} =
game.board
|> Map.get_and_update(sq, fn
{:pawn, color, start} = cur -> {cur, {new_piece, color, start}}
cur -> {cur, cur}
end)
%{game | board: board}
end
defp maybe_promote_pawn(game, _new_piece), do: game
defp maybe_update_result(%{check: nil} = game) do
case stalemate?(game) do
true -> %{game | result: :draw}
_ -> game
end
end
defp maybe_update_result(%{check: color} = game) do
case checkmate?(game) do
true -> %{game | result: Color.flip(color)}
_ -> game
end
end
end
|
lib/chex/game.ex
| 0.918982
| 0.526891
|
game.ex
|
starcoder
|
defmodule Acquirex.Space do
alias Acquirex.Corporation
@type status :: Empty | Full | {Incorporated, Corporation.t}
def start_link(coord) do
Agent.start_link(fn -> Empty end, name: {:via, :gproc, space_name(coord)})
end
def status(coord) do
Agent.get({:via, :gproc, space_name(coord)}, fn s -> s end)
end
@spec move_outcome(Tiles.t) :: Nothing | Incorporate | {Merger, [Corporation.t]}
def move_outcome(coord) do
case neighbour_status(coord) do
[_, _, _, Full] ->
Incorporate
[_, _, {Incorporated, _}, {Incorporated,_}] = ns ->
corps = (for {Incorporated, c} <- ns, do: c) |> Enum.uniq
{Merger, corps}
_ ->
Nothing
end
end
def neighbour_status(coord) do
(for n <- neighbours(coord), do: status(n))
|> Enum.sort
end
def fill(coord) do
Agent.cast({:via, :gproc, space_name(coord)}, fn _s -> handle_fill(coord) end)
end
def incorporate(coord, corp) do
Agent.cast({:via, :gproc, space_name(coord)}, fn _s -> handle_incorporate(coord, corp) end)
end
def join(coord, corp) do
Agent.cast({:via, :gproc, space_name(coord)}, fn s -> handle_join(s, coord, corp) end)
end
# fill will only happen if the move outcome is Nothing
# could consider having a Join outcome at some point
defp handle_fill(coord) do
case neighbour_status(coord) do
[_, _, _, {Incorporated, c}] ->
for n <- neighbours(coord), do: join(n, c)
Corporation.join(c, coord)
{Incorporated, c}
_ ->
Full
end
end
defp handle_incorporate(coord, corp) do
ns = neighbours(coord)
for n <- ns, do: join(n, corp)
Corporation.join(corp, coord)
{Incorporated, corp}
end
defp handle_join(Full, coord, corp) do
ns = neighbours(coord)
for n <- ns, do: join(n, corp)
Corporation.join(corp, coord)
{Incorporated, corp}
end
defp handle_join(s, _, _), do: s
defp space_name(coord) do
{:n, :l, {__MODULE__, coord}}
end
def neighbours({column, [c]=row}) when column in 1..12 and c in ?a..?i do
[{column-1, row},
{column+1, row},
{column, row_above row},
{column, row_below row}]
end
def neighbours({_,_}), do: []
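# Example: neighbours({5, 'c'}) returns
# [{4, 'c'}, {6, 'c'}, {5, 'b'}, {5, 'd'}]. Rows are single-character
# charlists, so the row above 'c' is 'b' and the row below is 'd'.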
defp row_above([row]), do: [row-1]
defp row_below([row]), do: [row+1]
end
|
lib/space.ex
| 0.707708
| 0.60871
|
space.ex
|
starcoder
|
defmodule Gealts.Crossover do
@moduledoc """
Randomly selects a position in a chromosome, then
exchanges sub-chromosomes.
Chromosomes fit for "mating" are randomly selected,
the number of parent chromosomes is controlled by the
@cr (crossover rate) parameter.
"""
alias Gealts.MathUtils
@cr 0.25
@type ind_chrome :: {non_neg_integer, Gealts.Chromosome.t}
@type mates :: {ind_chrome, ind_chrome}
@doc """
Select chromosomes fit for mating.
Pair chromosomes together and merge their
sub-chromosome populations based on a randomly selected cutoff point.
Update original chromosome population.
"""
@spec mate([Gealts.Chromosome.t]) :: [Gealts.Chromosome.t]
def mate(chromes) do
chromes
|> select
|> link
|> merge
|> update(chromes)
end
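# Hedged usage sketch; assumes %Gealts.Chromosome{} carries a `values`
# list (the only field touched here), and results vary because both the
# mate selection and the cutoff points are random:
#
#     chromes = [
#       %Gealts.Chromosome{values: [1, 2, 3, 4, 5]},
#       %Gealts.Chromosome{values: [6, 7, 8, 9, 10]}
#     ]
#     Gealts.Crossover.mate(chromes)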
@spec select([Gealts.Chromosome.t]) :: [ind_chrome]
defp select(chromes) do
select(chromes, MathUtils.random_list(length(chromes)), 0, [])
end
defp select(_chromes, [], _i, acc) do
acc |> Enum.reverse
end
defp select(chromes, [r | rest], i, acc) when r > @cr do
select(chromes, rest, i + 1, acc)
end
defp select(chromes, [_r | rest], i, acc) do
select(chromes, rest, i + 1, [{i, Enum.at(chromes, i)} | acc])
end
@spec link([ind_chrome]) :: [mates]
defp link([]) do
[]
end
defp link(chromes) do
link(chromes, Enum.at(chromes, 0), [])
end
defp link([], _first, acc) do
acc |> Enum.reverse
end
defp link([a, b | chromes], first, acc) do
link([b | chromes], first, [{a, b} | acc])
end
defp link([a | chromes], first, acc) do
link(chromes, first, [{a, first} | acc])
end
@spec merge([mates]) :: [ind_chrome]
defp merge([]) do
[]
end
defp merge(chromes) do
vals = for _ <- 1..length(chromes), do: MathUtils.random_int(1, 4)
merge(chromes, vals, [])
end
defp merge([], _vals, acc) do
Enum.reverse(acc)
end
defp merge([{{pos, chrome_a}, {_pos, chrome_b}} | rest ], [val | vals], acc) do
merged = Enum.slice(chrome_a.values, 0, val) ++ Enum.slice(chrome_b.values, val, length(chrome_b.values))
merge(rest, vals, [{pos, %{chrome_a | values: merged}} | acc])
end
@spec update([ind_chrome], [Gealts.Chromosome.t]) :: [Gealts.Chromosome.t]
defp update([], chromes) do
chromes
end
defp update([{n, chrome} | rest], chromes) do
update(rest, List.replace_at(chromes, n, chrome))
end
end
|
lib/gealts/crossover.ex
| 0.750736
| 0.60964
|
crossover.ex
|
starcoder
|
defmodule ExDoc.HTMLFormatter.Autolink do
@moduledoc """
Conveniences for autolinking locals, types and more.
"""
@elixir_docs "http://elixir-lang.org/docs/master/"
@doc """
Escape `'`, `"`, `&`, `<` and `>` in the string using HTML entities.
This is only intended for use by the HTML formatter.
"""
def escape_html(binary) do
escape_map = [{ %r(&), "\\&amp;" }, { %r(<), "\\&lt;" }, { %r(>), "\\&gt;" }, { %r("), "\\&quot;" }]
Enum.reduce escape_map, binary, fn({ re, escape }, acc) -> Regex.replace(re, acc, escape) end
end
@doc """
Receives a list of module nodes and autolink all docs and typespecs.
"""
def all(modules) do
aliases = Enum.map modules, &(&1.module)
project_funs = lc m inlist modules, d inlist m.docs, do: m.id <> "." <> d.id
Enum.map modules, &(&1 |> all_docs(project_funs) |> all_typespecs(aliases))
end
defp all_docs(ExDoc.ModuleNode[] = module, project_funs) do
locals = Enum.map module.docs, &(&1.id)
moduledoc = module.moduledoc &&
module.moduledoc |> local_doc(locals) |> project_doc(project_funs)
docs = lc node inlist module.docs do
node.update_doc fn(doc) ->
doc && doc |> local_doc(locals) |> project_doc(project_funs)
end
end
module.moduledoc(moduledoc).docs(docs)
end
defp all_typespecs(ExDoc.ModuleNode[] = module, aliases) do
locals = Enum.map module.typespecs, fn
ExDoc.TypeNode[name: name, arity: arity] -> { name, arity }
end
typespecs = lc ExDoc.TypeNode[] = typespec inlist module.typespecs do
typespec.update_spec &typespec(&1, locals, aliases)
end
docs = lc node inlist module.docs do
node.update_specs fn(specs) ->
Enum.map(specs, &typespec(&1, locals, aliases))
end
end
module.typespecs(typespecs).docs(docs)
end
@doc """
Converts the given `ast` to string while linking the locals
given by `typespecs` as HTML.
"""
def typespec(ast, typespecs, aliases) do
Macro.to_string(ast, fn
{ name, _, args }, string when is_atom(name) and is_list(args) ->
string = strip_parens(string, args)
arity = length(args)
if { name, arity } in typespecs do
%s[<a href="#t:#{name}/#{arity}">#{string}</a>]
else
string
end
{ { :., _, [alias, name] }, _, args }, string when is_atom(name) and is_list(args) ->
string = strip_parens(string, args)
alias = expand_alias(alias)
if source = get_source(alias, aliases) do
%s[<a href="#{source}#{inspect alias}.html#t:#{name}/#{length(args)}">#{string}</a>]
else
string
end
_, string ->
string
end)
end
defp strip_parens(string, []) do
if :binary.last(string) == ?) do
:binary.part(string, 0, size(string)-2)
else
string
end
end
defp strip_parens(string, _), do: string
defp expand_alias({ :__aliases__, _, [h|t] }) when is_atom(h), do: Module.concat([h|t])
defp expand_alias(atom) when is_atom(atom), do: atom
defp expand_alias(_), do: nil
defp get_source(alias, aliases) do
cond do
nil?(alias) -> nil
alias in aliases -> ""
from_elixir?(alias) -> @elixir_docs
true -> nil
end
end
defp from_elixir?(alias) do
:lists.prefix(elixir_ebin, alias_ebin(alias))
end
defp alias_ebin(alias) do
case :code.where_is_file('#{alias}.beam') do
:non_existing -> ''
path -> path
end
end
defp elixir_ebin do
case :code.where_is_file('Elixir.Kernel.beam') do
:non_existing -> [0]
path -> path |> Path.dirname |> Path.dirname |> Path.dirname
end
end
@doc """
Create links to locally defined functions, specified in `locals`
as a list of `fun/arity` strings.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[test/1](url)`. In case the function doesn't touch the leading
or trailing `]`, e.g. `[my link link/1 is here](url)`, the fun/arity
will get translated to the new href of the function.
"""
def local_doc(bin, locals) when is_binary(bin) do
Regex.scan(%r{(?<!\[)`\s*([a-z_!\\?]+/\d+)\s*`(?!\])}, bin)
|> Enum.uniq
|> List.flatten
|> Enum.filter(&(&1 in locals))
|> Enum.reduce(bin, fn (x, acc) ->
escaped = Regex.escape(x)
Regex.replace(%r/(?<!\[)`(\s*(#{escaped})\s*)`(?!\])/, acc, "[`\\1`](#\\2)")
end)
end
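# For example, with locals ["sum/1"], the markdown `sum/1` is rewritten to
# [`sum/1`](#sum/1), while occurrences already wrapped in markdown url
# syntax are skipped by the lookaround assertions.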
@doc """
Create links to functions defined in the project, specified in `project_funs`
as a list of `Module.fun/arity` tuples.
Ignores functions which are already wrapped in markdown url syntax,
e.g. `[Module.test/1](url)`. In case the function doesn't touch the leading
or trailing `]`, e.g. `[my link Module.link/1 is here](url)`, the Module.fun/arity
will get translated to the new href of the function.
"""
def project_doc(bin, project_funs) when is_binary(bin) do
Regex.scan(%r{(?<!\[)`\s*((([A-Z][A-Za-z]+)\.)+[a-z_!\\?]+/\d+)\s*`(?!\])}, bin)
|> Enum.uniq
|> List.flatten
|> Enum.filter(&(&1 in project_funs))
|> Enum.reduce(bin, fn (x, acc) ->
{ mod_str, function_name, arity } = split_function(x)
escaped = Regex.escape(x)
Regex.replace(%r/(?<!\[)`(\s*#{escaped}\s*)`(?!\])/, acc,
"[`\\1`](#{mod_str}.html##{function_name}/#{arity})")
end)
end
defp split_function(bin) do
[modules, arity] = String.split(bin, "/")
{ mod, name } = modules |> String.split(".") |> Enum.split(-1)
{ Enum.join(mod, "."), hd(name), arity }
end
end
|
lib/ex_doc/html_formatter/autolink.ex
| 0.688887
| 0.565479
|
autolink.ex
|
starcoder
|
defmodule Nebulex do
@moduledoc ~S"""
Nebulex is split into 2 main components:
* `Nebulex.Cache` - caches are wrappers around the in-memory data store.
Via the cache, we can put, get, update, delete and query existing entries.
A cache needs an adapter to communicate to the in-memory data store.
* `Nebulex.Caching` - decorators provide an elegant way of annotating
functions to be cached or evicted. By means of these decorators, it is
possible the implementation of cache usage patterns like **Read-through**,
**Write-through**, **Cache-as-SoR**, etc.
In the following sections, we will provide an overview of those components and
how they interact with each other. Feel free to access their respective module
documentation for more specific examples, options and configuration.
If you want to quickly check a sample application using Nebulex, please check
the [getting started guide](http://hexdocs.pm/nebulex/getting-started.html).
## Caches
`Nebulex.Cache` is the wrapper around the Cache. We can define a
cache as follows:
defmodule MyApp.MyCache do
use Nebulex.Cache,
otp_app: :my_app,
adapter: Nebulex.Adapters.Local
end
Where the configuration for the Cache must be in your application
environment, usually defined in your `config/config.exs`:
config :my_app, MyApp.MyCache,
gc_interval: 3_600_000, #=> 1 hr
backend: :shards,
partitions: 2
Each cache in Nebulex defines a `start_link/1` function that needs to be
invoked before using the cache. In general, this function is not called
directly, but used as part of your application supervision tree.
If your application was generated with a supervisor (by passing `--sup`
to `mix new`) you will have a `lib/my_app/application.ex` file containing
the application start callback that defines and starts your supervisor.
You just need to edit the `start/2` function to start the cache as a
supervisor on your application's supervisor:
def start(_type, _args) do
children = [
{MyApp.MyCache, []}
]
opts = [strategy: :one_for_one, name: MyApp.Supervisor]
Supervisor.start_link(children, opts)
end
## Declarative annotation-based caching
See [Nebulex.Caching](http://hexdocs.pm/nebulex/Nebulex.Caching.html).
"""
end
|
lib/nebulex.ex
| 0.83924
| 0.664248
|
nebulex.ex
|
starcoder
|
defmodule Contex.Sparkline do
@moduledoc """
Generates a simple sparkline from an array of numbers.
Note that this does not follow the pattern for other types of plot. It is not designed
to be embedded within a `Contex.Plot` and, because it only relies on a single list
of numbers, does not use data wrapped in a `Contex.Dataset`.
Usage is exceptionally simple:
```
data = [0, 5, 10, 15, 12, 12, 15, 14, 20, 14, 10, 15, 15]
Sparkline.new(data) |> Sparkline.draw() # Emits svg sparkline
```
The colour defaults to a green line with a faded green fill, but can be overridden
with `colours/3`. Unlike other colours in Contex, these colours are how you would
specify them in CSS - e.g.
```
Sparkline.new(data)
|> Sparkline.colours("#fad48e", "#ff9838")
|> Sparkline.draw()
```
The size defaults to 20 pixels high and 100 wide. You can override by updating
`:height` and `:width` directly in the `Sparkline` struct before calling `draw/1`.
"""
alias __MODULE__
alias Contex.{ContinuousLinearScale, Scale}
defstruct [
:data,
:extents,
:length,
:spot_radius,
:spot_colour,
:line_width,
:line_colour,
:fill_colour,
:y_transform,
:height,
:width
]
@type t() :: %__MODULE__{}
@doc """
Create a new sparkline struct from some data.
"""
@spec new([number()]) :: Contex.Sparkline.t()
def new(data) when is_list(data) do
%Sparkline{data: data, extents: ContinuousLinearScale.extents(data), length: length(data)}
|> set_default_style
end
@doc """
Override line and fill colours for the sparkline.
Note that colours should be specified as you would in CSS - they are passed through
directly into the SVG. For example:
```
Sparkline.new(data)
|> Sparkline.colours("#fad48e", "#ff9838")
|> Sparkline.draw()
```
"""
@spec colours(Contex.Sparkline.t(), String.t(), String.t()) :: Contex.Sparkline.t()
def colours(%Sparkline{} = sparkline, fill, line) do
# TODO: Really need some validation...
%{sparkline | fill_colour: fill, line_colour: line}
end
defp set_default_style(%Sparkline{} = sparkline) do
%{
sparkline
| spot_radius: 2,
spot_colour: "red",
line_width: 1,
line_colour: "rgba(0, 200, 50, 0.7)",
fill_colour: "rgba(0, 200, 50, 0.2)",
height: 20,
width: 100
}
end
@doc """
Renders the sparkline to svg, including the svg wrapper, as a string or improper string list that
is marked safe.
"""
def draw(%Sparkline{height: height, width: width, line_width: line_width} = sparkline) do
vb_width = sparkline.length + 1
vb_height = height - 2 * line_width
scale =
ContinuousLinearScale.new()
|> ContinuousLinearScale.domain(sparkline.data)
|> Scale.set_range(vb_height, 0)
sparkline = %{sparkline | y_transform: Scale.domain_to_range_fn(scale)}
output = ~s"""
<svg height="#{height}" width="#{width}" viewBox="0 0 #{vb_width} #{vb_height}" preserveAspectRatio="none" role="img">
<path d="#{get_closed_path(sparkline, vb_height)}" #{get_fill_style(sparkline)}></path>
<path d="#{get_path(sparkline)}" #{get_line_style(sparkline)}></path>
</svg>
"""
{:safe, [output]}
end
defp get_line_style(%Sparkline{line_colour: line_colour, line_width: line_width}) do
~s|stroke="#{line_colour}" stroke-width="#{line_width}" fill="none" vector-effect="non-scaling-stroke"|
end
defp get_fill_style(%Sparkline{fill_colour: fill_colour}) do
~s|stroke="none" fill="#{fill_colour}"|
end
defp get_closed_path(%Sparkline{} = sparkline, vb_height) do
# Same as the open path, except we drop down, run back to height,height (aka 0,0) and close it...
open_path = get_path(sparkline)
[open_path, "V #{vb_height} L 0 #{vb_height} Z"]
end
# This is the IO List approach
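# For example, three data points whose transformed values come out as
# [2, 0, 1] produce iodata like ["M", ["0 2 L ", "1 0 L ", "2 1"]],
# which renders as the SVG path "M0 2 L 1 0 L 2 1".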
defp get_path(%Sparkline{y_transform: transform_func} = sparkline) do
last_item = Enum.count(sparkline.data) - 1
[
"M",
sparkline.data
|> Enum.map(transform_func)
|> Enum.with_index()
|> Enum.map(fn {value, i} ->
case i < last_item do
true -> "#{i} #{value} L "
_ -> "#{i} #{value}"
end
end)
]
end
end
|
lib/chart/sparkline.ex
| 0.824285
| 0.907148
|
sparkline.ex
|
starcoder
|
defmodule Bouncer.Session do
@moduledoc """
A library of functions used to work with session data.
"""
alias Plug.Conn
alias Bouncer.Token
alias Bouncer.Utility
def adapter, do: Application.get_env(:bouncer, :adapter)
@doc """
Generates a session token. The ttl (time-to-live) defaults to 2 weeks.
See `Bouncer.Token.generate/4`.
"""
def generate(conn, user, ttl) do
Token.generate(conn, "user", user, ttl)
end
@doc """
Verifies a session token is valid and matches the given user. See
`Bouncer.Token.verify/4`.
"""
def verify(conn, token), do: Token.verify(conn, "user", token)
@doc """
Saves session data given a key and optional ttl (time-to-live).
"""
def save(data, key, ttl), do: adapter().save(data, key, ttl)
@doc """
Retrieves session data given an authorization token and puts it into the
connection.
"""
def put_current_user(conn) do
if Map.has_key? conn.private, :auth_token do
conn
|> verify(conn.private.auth_token)
|> Utility.debug_piped("Auth token verification: ")
|> put_current_user(conn)
else
conn
end
end
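# Typical use is inside a plug pipeline after token extraction; the
# pipeline below is hypothetical and only illustrates the expected
# conn shape:
#
#     conn
#     |> Plug.Conn.put_private(:auth_token, token)
#     |> Bouncer.Session.put_current_user()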
@doc """
Puts the user session data into the connection.
"""
def put_current_user({status, u}, conn) do
if status === :ok, do: Conn.put_private(conn, :current_user, u), else: conn
end
@doc """
Destroys a session given a token and a user ID.
"""
def destroy(token, id), do: Token.delete(token, id)
@doc """
Destroys all sessions associated with a given user ID.
"""
def destroy_all(conn, id), do: Token.delete_all(conn, "user", id)
@doc """
Convenience function to determine if the ID from the current_user in the
request matches the given User ID.
## examples
iex> Bouncer.Session.user_request? %{private: %{current_user: %{"id" => 1}}},
...> 1
true
iex> Bouncer.Session.user_request? %{private: %{current_user: %{"id" => 1}}},
...> "1"
true
iex> Bouncer.Session.user_request? %{private: %{current_user: %{"id" => 1}}},
...> "2"
false
iex> Bouncer.Session.user_request? %{private: %{}}, 1
false
"""
def user_request?(conn, id) do
has_current_user = Map.has_key?(conn.private, :current_user) &&
Map.has_key?(conn.private.current_user, "id")
if is_bitstring(id) do
{id, _} = Integer.parse(id)
has_current_user && conn.private.current_user["id"] == id
else
has_current_user && conn.private.current_user["id"] == id
end
end
def is_valid(conn) do
Map.has_key?(conn.private, :current_user) &&
Map.has_key?(conn.private.current_user, "id")
end
end
|
lib/bouncer/session.ex
| 0.609757
| 0.438485
|
session.ex
|
starcoder
|
defmodule Aoc2021.Day15 do
@moduledoc """
See https://adventofcode.com/2021/day/15
"""
@type pos() :: {non_neg_integer(), non_neg_integer()}
@type riskmap() :: %{pos() => non_neg_integer()}
defmodule Reader do
@moduledoc false
@spec read_map(Path.t()) :: {Aoc2021.Day15.riskmap(), Aoc2021.Day15.pos()}
def read_map(path) do
path
|> File.stream!()
|> read_map_from_stream()
end
@spec read_map_from_stream(Enum.t()) :: {Aoc2021.Day15.riskmap(), Aoc2021.Day15.pos()}
def read_map_from_stream(stream) do
{map, _} =
stream
|> Stream.map(&String.trim/1)
|> Stream.map(&String.codepoints/1)
|> Enum.reduce({%{}, 0}, &parse_line/2)
{{max_x, _}, _} = Enum.max_by(map, fn {{x, _}, _} -> x end)
{{_, max_y}, _} = Enum.max_by(map, fn {{_, y}, _} -> y end)
{map, {max_x, max_y}}
end
defp parse_line(line, {map, y}) do
{map, _} =
Enum.reduce(line, {map, 0}, fn c, {map, x} ->
v = String.to_integer(c)
{Map.put(map, {x, y}, v), x + 1}
end)
{map, y + 1}
end
end
defmodule AStar do
@moduledoc """
A* path search.
See https://en.wikipedia.org/wiki/A*_search_algorithm
"""
@infinity 1_000_000
@spec a_star(
Aoc2021.Day15.riskmap(),
Aoc2021.Day15.pos(),
Aoc2021.Day15.pos(),
(Aoc2021.Day15.pos() -> non_neg_integer()),
(Aoc2021.Day15.pos() -> [Aoc2021.Day15.pos()])
) ::
{:ok, [Aoc2021.Day15.pos()]} | {:error, :path_not_found}
def a_star(map, start, goal, h, neighbours) do
open_set = Heap.new(fn {_, a}, {_, b} -> a < b end)
open_set = Heap.push(open_set, {start, 0})
came_from = %{}
# Default value infinity
g_score = Map.new([{start, 0}])
f_score = Map.new([{start, h.(start)}])
recurse(map, goal, h, neighbours, open_set, came_from, g_score, f_score)
end
defp recurse(map, goal, h, neighbours, open_set, came_from, g_score, f_score) do
# Guard the empty frontier: without it, Heap.split/1 on an exhausted open
# set would crash, and the {:error, :path_not_found} promised by the
# a_star/5 spec could never be returned.
if Heap.empty?(open_set) do
{:error, :path_not_found}
else
{{current, _}, open_set} = Heap.split(open_set)
case current == goal do
true ->
{:ok, reconstruct_path(came_from, current)}
false ->
ns = neighbours.(current)
{came_from, g_score, f_score, open_set} =
Enum.reduce(
ns,
{came_from, g_score, f_score, open_set},
&step_neighbour(&1, &2, current, map, h)
)
recurse(map, goal, h, neighbours, open_set, came_from, g_score, f_score)
end
end
end
defp step_neighbour(neighbour, {came_from, g_score, f_score, open_set}, current, map, h) do
tentative_g_score = get_score(g_score, current) + d(map, current, neighbour)
if tentative_g_score < get_score(g_score, neighbour) do
came_from = Map.put(came_from, neighbour, current)
g_score = Map.put(g_score, neighbour, tentative_g_score)
f_score = Map.put(f_score, neighbour, tentative_g_score + h.(neighbour))
open_set = Heap.push(open_set, {neighbour, tentative_g_score})
{came_from, g_score, f_score, open_set}
else
{came_from, g_score, f_score, open_set}
end
end
defp d(map, _current, neighbour) do
Map.get(map, neighbour)
end
defp get_score(scores, pos) do
Map.get(scores, pos, @infinity)
end
defp reconstruct_path(came_from, current) do
reconstruct_path(came_from, Map.get(came_from, current), [current])
end
defp reconstruct_path(_, nil, path), do: path
defp reconstruct_path(came_from, current, path) do
reconstruct_path(came_from, Map.get(came_from, current), [current | path])
end
end
@spec solve_part1() :: non_neg_integer()
@spec solve_part1(Path.t()) :: non_neg_integer()
def solve_part1(path \\ "priv/day15/input.txt") do
{map, {max_x, max_y} = goal} = Reader.read_map(path)
start = {0, 0}
h = make_h(goal)
n = make_neighbours(max_x, max_y)
{:ok, path} = AStar.a_star(map, start, goal, h, n)
path
|> tl()
|> Enum.map(fn p -> Map.get(map, p) end)
|> Enum.sum()
end
defp make_neighbours(max_x, max_y) do
fn {x, y} ->
[{x - 1, y}, {x + 1, y}, {x, y - 1}, {x, y + 1}]
|> Enum.filter(fn {x, y} -> x >= 0 and y >= 0 and x <= max_x and y <= max_y end)
end
end
defp make_h({goal_x, goal_y}) do
fn {x, y} ->
abs(goal_x - x) + abs(goal_y - y)
end
end
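# Note: Manhattan distance is admissible here because every step costs at
# least 1 (puzzle risk levels are 1..9), so A* finds a lowest-risk path.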
@spec solve_part2() :: non_neg_integer()
@spec solve_part2(Path.t()) :: non_neg_integer()
def solve_part2(path \\ "priv/day15/input.txt") do
{map, {max_x, max_y} = goal} =
path
|> Reader.read_map()
|> build_map()
start = {0, 0}
h = make_h(goal)
n = make_neighbours(max_x, max_y)
{:ok, path} = AStar.a_star(map, start, goal, h, n)
path
|> tl()
|> Enum.map(fn p -> Map.get(map, p) end)
|> Enum.sum()
end
defp increase_cell_value(x) when x > 9, do: rem(x, 10) + 1
defp increase_cell_value(x), do: x
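# Worked example: a cell of 8 shifted by n = 3 gives 11, and
# rem(11, 10) + 1 = 2, matching the puzzle rule that values above 9 wrap
# around to 1.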
defp build_map({initial, {max_x, max_y}}) do
row =
for n <- 0..4 do
Map.new(initial, fn {{x, y}, v} ->
{{x + n * (max_x + 1), y}, increase_cell_value(v + n)}
end)
end
|> Enum.reduce(%{}, &Map.merge/2)
map =
for n <- 0..4 do
Map.new(row, fn {{x, y}, v} ->
{{x, y + n * (max_y + 1)}, increase_cell_value(v + n)}
end)
end
|> Enum.reduce(%{}, &Map.merge/2)
{map, {(max_x + 1) * 5 - 1, (max_y + 1) * 5 - 1}}
end
end
|
lib/aoc2021/day15.ex
| 0.784402
| 0.569344
|
day15.ex
|
starcoder
|
defmodule AWS.ResourceGroupsTaggingAPI do
@moduledoc """
Resource Groups Tagging API
This guide describes the API operations for resource groups tagging.
A tag is a label that you assign to an AWS resource. A tag consists of a
key and a value, both of which you define. For example, if you have two
Amazon EC2 instances, you might assign both a tag key of "Stack." But the
value of "Stack" might be "Testing" for one and "Production" for the other.
<important> Do not store personally identifiable information (PII) or other
confidential or sensitive information in tags. We use tags to provide you
with billing and administration services. Tags are not intended to be used
for private or sensitive data.
</important> Tagging can help you organize your resources and enables you
to simplify resource management, access management and cost allocation.
You can use the resource groups tagging API operations to complete the
following tasks:
<ul> <li> Tag and untag supported resources located in the specified Region
for the AWS account.
</li> <li> Use tag-based filters to search for resources located in the
specified Region for the AWS account.
</li> <li> List all existing tag keys in the specified Region for the AWS
account.
</li> <li> List all existing values for the specified key in the specified
Region for the AWS account.
</li> </ul> To use resource groups tagging API operations, you must add the
following permissions to your IAM policy:
<ul> <li> `tag:GetResources`
</li> <li> `tag:TagResources`
</li> <li> `tag:UntagResources`
</li> <li> `tag:GetTagKeys`
</li> <li> `tag:GetTagValues`
</li> </ul> You'll also need permissions to access the resources of
individual services so that you can tag and untag those resources.
For more information on IAM policies, see [Managing IAM
Policies](http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage.html)
in the *IAM User Guide*.
* **Services that support the Resource Groups Tagging API** *
You can use the Resource Groups Tagging API to tag resources for the
following AWS services.
<ul> <li> [Alexa for Business (a4b)](https://docs.aws.amazon.com/a4b)
</li> <li> [API Gateway](https://docs.aws.amazon.com/apigateway)
</li> <li> [Amazon AppStream](https://docs.aws.amazon.com/appstream2)
</li> <li> [AWS AppSync](https://docs.aws.amazon.com/appsync)
</li> <li> [AWS App Mesh](https://docs.aws.amazon.com/app-mesh)
</li> <li> [Amazon Athena](https://docs.aws.amazon.com/athena)
</li> <li> [Amazon
Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide)
</li> <li> [AWS Backup](https://docs.aws.amazon.com/aws-backup)
</li> <li> [AWS Certificate Manager](https://docs.aws.amazon.com/acm)
</li> <li> [AWS Certificate Manager Private
CA](https://docs.aws.amazon.com/acm)
</li> <li> [Amazon Cloud
Directory](https://docs.aws.amazon.com/clouddirectory)
</li> <li> [AWS Cloud Map](https://docs.aws.amazon.com/cloud-map)
</li> <li> [AWS CloudFormation](https://docs.aws.amazon.com/cloudformation)
</li> <li> [Amazon CloudFront](https://docs.aws.amazon.com/cloudfront)
</li> <li> [AWS CloudHSM](https://docs.aws.amazon.com/cloudhsm)
</li> <li> [AWS CloudTrail](https://docs.aws.amazon.com/cloudtrail)
</li> <li> [Amazon CloudWatch (alarms
only)](https://docs.aws.amazon.com/cloudwatch)
</li> <li> [Amazon CloudWatch
Events](https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-events)
</li> <li> [Amazon CloudWatch
Logs](https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-logs)
</li> <li> [Amazon Cloudwatch
Synthetics](https://docs.aws.amazon.com/cloudwatch)
</li> <li> [AWS CodeBuild](https://docs.aws.amazon.com/codebuild)
</li> <li> [AWS CodeCommit](https://docs.aws.amazon.com/codecommit)
</li> <li> [AWS CodePipeline](https://docs.aws.amazon.com/codepipeline)
</li> <li> [AWS CodeStar](https://docs.aws.amazon.com/codestar)
</li> <li> [AWS CodeStar
Connections](https://docs.aws.amazon.com/codestar-connections/latest/APIReference/)
</li> <li> [Amazon Cognito Identity](https://docs.aws.amazon.com/cognito)
</li> <li> [Amazon Cognito User Pools](https://docs.aws.amazon.com/cognito)
</li> <li> [Amazon Comprehend](https://docs.aws.amazon.com/comprehend)
</li> <li> [AWS Config](https://docs.aws.amazon.com/config)
</li> <li> [Amazon
Connect](http://aws.amazon.com/connect/resources/?whats-new-cards#Documentation)
</li> <li> [AWS Data Exchange](https://docs.aws.amazon.com/data-exchange)
</li> <li> [AWS Data Pipeline](https://docs.aws.amazon.com/data-pipeline)
</li> <li> [AWS Database Migration
Service](https://docs.aws.amazon.com/dms)
</li> <li> [AWS DataSync](https://docs.aws.amazon.com/datasync)
</li> <li> [AWS Device Farm](https://docs.aws.amazon.com/devicefarm)
</li> <li> [AWS Direct Connect](https://docs.aws.amazon.com/directconnect)
</li> <li> [AWS Directory
Service](https://docs.aws.amazon.com/directory-service)
</li> <li> [Amazon DynamoDB](https://docs.aws.amazon.com/dynamodb)
</li> <li> [Amazon EBS](https://docs.aws.amazon.com/ebs)
</li> <li> [Amazon EC2](https://docs.aws.amazon.com/ec2)
</li> <li> [EC2 Image Builder](https://docs.aws.amazon.com/imagebuilder)
</li> <li> [Amazon ECR](https://docs.aws.amazon.com/ecr)
</li> <li> [Amazon ECS](https://docs.aws.amazon.com/ecs)
</li> <li> [Amazon EKS](https://docs.aws.amazon.com/eks)
</li> <li> [AWS Elastic
Beanstalk](https://docs.aws.amazon.com/elastic-beanstalk)
</li> <li> [Amazon Elastic File System](https://docs.aws.amazon.com/efs)
</li> <li> [Elastic Load
Balancing](https://docs.aws.amazon.com/elasticloadbalancing)
</li> <li> [Amazon ElastiCache](https://docs.aws.amazon.com/elasticache)
</li> <li> [Amazon Elasticsearch
Service](https://docs.aws.amazon.com/elasticsearch-service)
</li> <li> [AWS Elemental MediaLive](https://docs.aws.amazon.com/medialive)
</li> <li> [AWS Elemental
MediaPackage](https://docs.aws.amazon.com/mediapackage)
</li> <li> [AWS Elemental MediaPackage
VoD](https://docs.aws.amazon.com/mediapackage)
</li> <li> [AWS Elemental
MediaTailor](https://docs.aws.amazon.com/mediatailor)
</li> <li> [Amazon EMR](https://docs.aws.amazon.com/emr)
</li> <li> [Amazon EventBridge
Schema](https://docs.aws.amazon.com/eventbridge)
</li> <li> [AWS Firewall
Manager](https://docs.aws.amazon.com/firewall-manager)
</li> <li> [Amazon Fraud
Detector](https://docs.aws.amazon.com/frauddetector)
</li> <li> [Amazon FSx](https://docs.aws.amazon.com/fsx)
</li> <li> [Amazon S3
Glacier](https://docs.aws.amazon.com/s3/?id=docs_gateway#amazon-s3-glacier)
</li> <li> [AWS Global
Accelerator](https://docs.aws.amazon.com/global-accelerator)
</li> <li> [AWS Ground Station](https://docs.aws.amazon.com/ground-station)
</li> <li> [AWS Glue](https://docs.aws.amazon.com/glue)
</li> <li> [Amazon GuardDuty](https://docs.aws.amazon.com/guardduty)
</li> <li> [Amazon Inspector](https://docs.aws.amazon.com/inspector)
</li> <li> [AWS IoT Analytics](https://docs.aws.amazon.com/iotanalytics)
</li> <li> [AWS IoT Core](https://docs.aws.amazon.com/iot)
</li> <li> [AWS IoT Device
Defender](https://docs.aws.amazon.com/iot-device-defender)
</li> <li> [AWS IoT Device
Management](https://docs.aws.amazon.com/iot-device-management)
</li> <li> [AWS IoT Events](https://docs.aws.amazon.com/iotevents)
</li> <li> [AWS IoT Greengrass](https://docs.aws.amazon.com/greengrass)
</li> <li> [AWS IoT 1-Click](https://docs.aws.amazon.com/iot-1-click)
</li> <li> [AWS IoT Sitewise](https://docs.aws.amazon.com/iot-sitewise)
</li> <li> [AWS IoT Things Graph](https://docs.aws.amazon.com/thingsgraph)
</li> <li> [Amazon Kendra](https://docs.aws.amazon.com/kendra)
</li> <li> [AWS Key Management Service](https://docs.aws.amazon.com/kms)
</li> <li> [Amazon Kinesis](https://docs.aws.amazon.com/kinesis)
</li> <li> [Amazon Kinesis Data
Analytics](https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-analytics)
</li> <li> [Amazon Kinesis Data
Firehose](https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-firehose)
</li> <li> [AWS Lambda](https://docs.aws.amazon.com/lambda)
</li> <li> [Amazon Lex](https://docs.aws.amazon.com/lex)
</li> <li> [AWS License
Manager](https://docs.aws.amazon.com/license-manager)
</li> <li> [Amazon Macie](https://docs.aws.amazon.com/macie)
</li> <li> [Amazon Machine
Learning](https://docs.aws.amazon.com/machine-learning)
</li> <li> [Amazon MQ](https://docs.aws.amazon.com/amazon-mq)
</li> <li> [Amazon MSK](https://docs.aws.amazon.com/msk)
</li> <li> [Amazon Neptune](https://docs.aws.amazon.com/neptune)
</li> <li> [AWS OpsWorks](https://docs.aws.amazon.com/opsworks)
</li> <li> [AWS OpsWorks CM](https://docs.aws.amazon.com/opsworks)
</li> <li> [AWS Organizations](https://docs.aws.amazon.com/organizations)
</li> <li> [Amazon Pinpoint](https://docs.aws.amazon.com/pinpoint)
</li> <li> [Amazon Quantum Ledger Database
(QLDB)](https://docs.aws.amazon.com/qldb)
</li> <li> [Amazon RDS](https://docs.aws.amazon.com/rds)
</li> <li> [Amazon Redshift](https://docs.aws.amazon.com/redshift)
</li> <li> [AWS Resource Access Manager](https://docs.aws.amazon.com/ram)
</li> <li> [AWS Resource Groups](https://docs.aws.amazon.com/ARG)
</li> <li> [AWS RoboMaker](https://docs.aws.amazon.com/robomaker)
</li> <li> [Amazon Route 53](https://docs.aws.amazon.com/route53)
</li> <li> [Amazon Route 53 Resolver](https://docs.aws.amazon.com/route53)
</li> <li> [Amazon S3 (buckets only)](https://docs.aws.amazon.com/s3)
</li> <li> [Amazon SageMaker](https://docs.aws.amazon.com/sagemaker)
</li> <li> [Savings Plans](https://docs.aws.amazon.com/savingsplans)
</li> <li> [AWS Secrets
Manager](https://docs.aws.amazon.com/secretsmanager)
</li> <li> [AWS Security Hub](https://docs.aws.amazon.com/securityhub)
</li> <li> [AWS Service
Catalog](https://docs.aws.amazon.com/servicecatalog)
</li> <li> [Amazon Simple Email Service
(SES)](https://docs.aws.amazon.com/ses)
</li> <li> [Amazon Simple Notification Service
(SNS)](https://docs.aws.amazon.com/sns)
</li> <li> [Amazon Simple Queue Service
(SQS)](https://docs.aws.amazon.com/sqs)
</li> <li> [Amazon Simple Workflow
Service](https://docs.aws.amazon.com/swf)
</li> <li> [AWS Step Functions](https://docs.aws.amazon.com/step-functions)
</li> <li> [AWS Storage
Gateway](https://docs.aws.amazon.com/storagegateway)
</li> <li> [AWS Systems
Manager](https://docs.aws.amazon.com/systems-manager)
</li> <li> [AWS Transfer for SFTP](https://docs.aws.amazon.com/transfer)
</li> <li> [Amazon VPC](https://docs.aws.amazon.com/vpc)
</li> <li> [AWS WAFv2](https://docs.aws.amazon.com/waf)
</li> <li> [AWS WAF Regional](https://docs.aws.amazon.com/waf)
</li> <li> [Amazon WorkLink](https://docs.aws.amazon.com/worklink)
</li> <li> [Amazon WorkSpaces](https://docs.aws.amazon.com/workspaces)
</li> </ul>
"""
@doc """
Describes the status of the `StartReportCreation` operation.
You can call this operation only from the organization's master account and
from the us-east-1 Region.
"""
def describe_report_creation(client, input, options \\ []) do
request(client, "DescribeReportCreation", input, options)
end
@doc """
Returns a table that shows counts of resources that are noncompliant with
their tag policies.
For more information on tag policies, see [Tag
Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
in the *AWS Organizations User Guide.*
You can call this operation only from the organization's master account and
from the us-east-1 Region.
"""
def get_compliance_summary(client, input, options \\ []) do
request(client, "GetComplianceSummary", input, options)
end
@doc """
Returns all the tagged or previously tagged resources that are located in
the specified Region for the AWS account.
Depending on what information you want returned, you can also specify the
following:
<ul> <li> *Filters* that specify what tags and resource types you want
returned. The response includes all tags that are associated with the
requested resources.
</li> <li> Information about compliance with the account's effective tag
policy. For more information on tag policies, see [Tag
Policies](http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
in the *AWS Organizations User Guide.*
</li> </ul> <note> You can check the `PaginationToken` response parameter
to determine if a query is complete. Queries occasionally return fewer
results on a page than allowed. The `PaginationToken` response parameter
value is `null` *only* when there are no more results to display.
</note>
"""
def get_resources(client, input, options \\ []) do
request(client, "GetResources", input, options)
end
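# Hedged usage sketch; the filter shape follows the AWS GetResources API,
# but exact client construction depends on the aws-elixir version in use:
#
#     AWS.ResourceGroupsTaggingAPI.get_resources(client, %{
#       "TagFilters" => [%{"Key" => "Stack", "Values" => ["Production"]}]
#     })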
@doc """
Returns all tag keys in the specified Region for the AWS account.
"""
def get_tag_keys(client, input, options \\ []) do
request(client, "GetTagKeys", input, options)
end
@doc """
Returns all tag values for the specified key in the specified Region for
the AWS account.
"""
def get_tag_values(client, input, options \\ []) do
request(client, "GetTagValues", input, options)
end
@doc """
Generates a report that lists all tagged resources in accounts across your
organization and tells whether each resource is compliant with the
effective tag policy. Compliance data is refreshed daily.
The generated report is saved to the following location:
`s3://example-bucket/AwsTagPolicies/o-exampleorgid/YYYY-MM-ddTHH:mm:ssZ/report.csv`
You can call this operation only from the organization's master account and
from the us-east-1 Region.
"""
def start_report_creation(client, input, options \\ []) do
request(client, "StartReportCreation", input, options)
end
@doc """
Applies one or more tags to the specified resources. Note the following:
<ul> <li> Not all resources can have tags. For a list of services that
support tagging, see [this
list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
</li> <li> Each resource can have up to 50 tags. For other limits, see [Tag
Naming and Usage
Conventions](http://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions)
in the *AWS General Reference.*
</li> <li> You can only tag resources that are located in the specified
Region for the AWS account.
</li> <li> To add tags to a resource, you need the necessary permissions
for the service that the resource belongs to as well as permissions for
adding tags. For more information, see [this
list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
</li> </ul> <important> Do not store personally identifiable information
(PII) or other confidential or sensitive information in tags. We use tags
to provide you with billing and administration services. Tags are not
intended to be used for private or sensitive data.
</important>
"""
def tag_resources(client, input, options \\ []) do
request(client, "TagResources", input, options)
end
@doc """
Removes the specified tags from the specified resources. When you specify a
tag key, the action removes both that key and its associated value. The
operation succeeds even if you attempt to remove tags from a resource that
were already removed. Note the following:
<ul> <li> To remove tags from a resource, you need the necessary
permissions for the service that the resource belongs to as well as
permissions for removing tags. For more information, see [this
list](http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html).
</li> <li> You can only tag resources that are located in the specified
Region for the AWS account.
</li> </ul>
"""
def untag_resources(client, input, options \\ []) do
request(client, "UntagResources", input, options)
end
@spec request(AWS.Client.t(), binary(), map(), list()) ::
{:ok, Poison.Parser.t() | nil, Poison.Response.t()}
| {:error, Poison.Parser.t()}
| {:error, HTTPoison.Error.t()}
defp request(client, action, input, options) do
client = %{client | service: "tagging"}
host = build_host("tagging", client)
url = build_url(host, client)
headers = [
{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "ResourceGroupsTaggingAPI_20170126.#{action}"}
]
payload = Poison.Encoder.encode(input, %{})
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, %HTTPoison.Response{status_code: 200, body: ""} = response} ->
{:ok, nil, response}
{:ok, %HTTPoison.Response{status_code: 200, body: body} = response} ->
{:ok, Poison.Parser.parse!(body, %{}), response}
{:ok, %HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body, %{})
{:error, error}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp build_host(_endpoint_prefix, %{region: "local"}) do
"localhost"
end
defp build_host(endpoint_prefix, %{region: region, endpoint: endpoint}) do
"#{endpoint_prefix}.#{region}.#{endpoint}"
end
defp build_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
|
lib/aws/resource_groups_tagging_api.ex
| 0.867191
| 0.558809
|
resource_groups_tagging_api.ex
|
starcoder
|
defmodule JokenJwks do
@moduledoc """
`Joken.Hooks` implementation for fetching `Joken.Signer`s from public JWKS URLs.
This hook is intended to be used when you are _verifying_ a token is signed with
a well known public key. It only overrides the `before_verify/2` callback providing a
`Joken.Signer` for the given token. It is important to notice this is not meant for
use when **GENERATING** a token. So, using this hook with `Joken.encode_and_sign`
function **WILL NOT WORK!!!**
To use it, pass this hook to Joken either with the `add_hook/2` macro or directly
to each `Joken` function. Example:
defmodule MyToken do
use Joken.Config
add_hook(JokenJwks, strategy: MyFetchingStrategy)
# rest of your token config
end
Or:
Joken.verify_and_validate(config, token, nil, context, [{Joken.Jwks, strategy: MyStrategy}])
## Fetching strategy
Very rarely, your authentication server might rotate or block its keys. Key rotation is the
process of issuing a new key that in time will replace the older key. This is security hygiene
and should/might be a regular process.
Sometimes it is important to block keys because they got leaked or for any other reason.
Other times you simply don't control the authentication server and can't ensure the keys won't
change. This is the most common scenario for this hook.
In these cases (and some others) it is important to have a cache invalidation strategy: all your
cached keys should be refreshed. Since the best strategy might differ for each use case, there
is a behaviour that can be customized as the "fetching strategy", that is: when to fetch and re-fetch
keys. `JokenJwks` has a default strategy that tries to be smart and cover most use cases by default.
It uses a time-based state machine to avoid overwhelming the system with key re-fetching. If that
is not a good option for your use case, it can still be configured. Please, see
`JokenJwks.SignerMatchStrategy` or `JokenJwks.DefaultStrategyTemplate` docs for more information.
"""
require Logger
use Joken.Hooks
@impl true
def before_verify(hook_options, {token, _signer}) do
with strategy <- hook_options[:strategy] || raise("No strategy provided"),
{:ok, kid} <- get_token_kid(token),
{:ok, signer} <- strategy.match_signer_for_kid(kid, hook_options) do
{:cont, {token, signer}}
else
err -> {:halt, err}
end
end
defp get_token_kid(token) do
with {:ok, headers} <- Joken.peek_header(token),
{:kid, kid} when not is_nil(kid) <- {:kid, headers["kid"]} do
{:ok, kid}
else
{:kid, nil} -> {:error, :no_kid_in_token_header}
err -> err
end
end
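# For a token whose protected header decodes to
# %{"alg" => "RS256", "kid" => "key-1"}, this returns {:ok, "key-1"};
# a header without "kid" yields {:error, :no_kid_in_token_header}.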
def log(_, :none, _), do: :ok
def log(:debug, log_level, msg) do
unless Logger.compare_levels(:debug, log_level) == :lt, do: Logger.debug(fn -> msg end)
end
def log(:info, log_level, msg) do
unless Logger.compare_levels(:info, log_level) == :lt, do: Logger.info(fn -> msg end)
end
def log(:warn, log_level, msg) do
unless Logger.compare_levels(:warn, log_level) == :lt, do: Logger.warn(fn -> msg end)
end
def log(:error, _, msg), do: Logger.error(msg)
end
|
lib/joken_jwks.ex
| 0.831964
| 0.496826
|
joken_jwks.ex
|
starcoder
|
defrecord File.Stat, Record.extract(:file_info, from_lib: "kernel/include/file.hrl"), moduledoc: """
A record responsible to hold file information. Its fields are:
* `size` - Size of file in bytes.
* `type` - `:device`, `:directory`, `:regular`, `:other`. The type of the file.
* `access` - `:read`, `:write`, `:read_write`, `:none`. The current system access to
the file.
* `atime` - The last time the file was read.
* `mtime` - The last time the file was written.
* `ctime` - The interpretation of this time field depends on the operating
system. On Unix, it is the last time the file or the inode was
changed. In Windows, it is the create time.
* `mode` - The file permissions.
* `links` - The number of links to this file. This is always 1 for file
systems which have no concept of links.
* `major_device` - Identifies the file system where the file is located.
On Windows, the number indicates a drive as follows:
0 means A:, 1 means B:, and so on.
* `minor_device` - Only valid for character devices on Unix. In all other
cases, this field is zero.
* `inode` - Gives the inode number. On non-Unix file systems, this field
will be zero.
* `uid` - Indicates the owner of the file.
* `gid` - Gives the group that the owner of the file belongs to. Will be
zero for non-Unix file systems.
The time type returned in `atime`, `mtime`, and `ctime` is dependent on the
time type set in options. `{:time, type}` where type can be `:local`,
`:universal`, or `:posix`. Default is `:local`.
"""
defexception File.Error, [reason: nil, action: "", path: nil] do
def message(exception) do
formatted = list_to_binary(:file.format_error(reason exception))
"could not #{action exception} #{path exception}: #{formatted}"
end
end
defexception File.CopyError, [reason: nil, action: "", source: nil, destination: nil] do
def message(exception) do
formatted = list_to_binary(:file.format_error(reason exception))
"could not #{action exception} from #{source exception} to #{destination exception}: #{formatted}"
end
end
defexception File.IteratorError, reason: nil do
def message(exception) do
formatted = list_to_binary(:file.format_error(reason exception))
"error during file iteration: #{formatted}"
end
end
defmodule File do
@moduledoc """
This module contains functions to manipulate files,
filenames and the filesystem. Many of the functions
that interact with the filesystem have their naming
based on their UNIX equivalents. For example, deleting a
file is done with `File.rm`. Getting its stats with
`File.stat`. If you want to read or write to a file
in chunks, check the IO module.
Most of the functions in this module return `:ok`
or `{ :ok, result }` in case of success, `{ :error, reason }`
otherwise. Those functions are also followed by
a variant that ends with `!` which returns the
result (without the `{ :ok, result }` tuple) in
case of success or raises an exception in case it
fails. For example:
File.read("hello.txt")
#=> { :ok, "World" }
File.read("invalid.txt")
#=> { :error, :enoent }
File.read!("hello.txt")
#=> "World"
File.read!("invalid.txt")
#=> raises File.Error
In general, a developer should use the former when
they want to react if the file does not exist. The latter
should be used when the developer expects their software
to fail in case the file cannot be read (i.e. it is
literally an exception).
Finally, the functions in this module accept either
a char list or a binary. When manipulating paths, a char
list is returned if one is given as argument. However,
when reading files, binaries are always returned.
"""
alias Erlang.file, as: F
alias Erlang.filename, as: FN
alias Erlang.filelib, as: FL
@doc """
Expands the path by returning its absolute name and expanding
any `.` and `..` characters.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.expand_path("/foo/bar/../bar") == "/foo/bar"
"""
def expand_path(path) do
normalize FN.absname(path)
end
@doc """
Expands the path relative to the given location, expanding
any `.` and `..` characters. If the path is already an
absolute path, the relative location is ignored.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.expand_path("foo/bar/../bar", "/baz") == "/baz/foo/bar"
File.expand_path("/foo/bar/../bar", "/baz") == "/foo/bar"
"""
def expand_path(path, relative_to) do
normalize FN.absname(FN.absname(path, relative_to))
end
@doc """
Returns true if the path is a regular file.
## Examples
File.regular? __FILE__ #=> true
"""
def regular?(path) do
FL.is_regular(path)
end
@doc """
Returns true if the path is a directory.
"""
def dir?(path) do
FL.is_dir(path)
end
@doc """
Returns true if the given argument exists.
It can be regular file, directory, socket,
symbolic link, named pipe or device file.
## Examples
File.exists?("test/")
#=> true
File.exists?("missing.txt")
#=> false
File.exists?("/dev/null")
#=> true
"""
def exists?(path) do
match?({ :ok, _ }, F.read_file_info(path))
end
@doc """
Returns the last component of the path or the path
itself if it does not contain any directory separators.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.basename("foo")
#=> "foo"
File.basename("foo/bar")
#=> "bar"
File.basename("/")
#=> ""
"""
def basename(path) do
FN.basename(path)
end
@doc """
Returns the last component of `path` with the `extension`
stripped. This function should be used to remove a specific
extension which might, or might not, be there.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.basename("~/foo/bar.ex", ".ex")
#=> "bar"
File.basename("~/foo/bar.exs", ".ex")
#=> "bar.exs"
File.basename("~/foo/bar.old.ex", ".ex")
#=> "bar.old"
"""
def basename(path, extension) do
FN.basename(path, extension)
end
@doc """
Return the `directory` component of `path`.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.dirname("/foo/bar.ex")
#=> "foo"
"""
def dirname(path) do
FN.dirname(path)
end
@doc """
Return the `extension` of the last component of `path`.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.extname("foo.erl")
#=> ".erl"
File.extname("~/foo/bar")
#=> ""
"""
def extname(path) do
FN.extension(path)
end
@doc """
Returns the `path` with the `extension` stripped.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.rootname("/foo/bar")
#=> "/foo/bar"
File.rootname("/foo/bar.ex")
#=> "/foo/bar"
"""
def rootname(path) do
FN.rootname(path)
end
@doc """
Returns the `path` with the `extension` stripped. This function should be used to
remove a specific extension which might, or might not, be there.
If the given `path` is a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.rootname("/foo/bar.erl", ".erl")
#=> "/foo/bar"
File.rootname("/foo/bar.erl", ".ex")
#=> "/foo/bar.erl"
"""
def rootname(path, extension) do
FN.rootname(path, extension)
end
@doc """
Returns a string with one or more path components joined by the path separator.
This function should be used to convert a list of strings into a path.
If the given `paths` are a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.join(["~", "foo"])
#=> "~/foo"
File.join(["foo"])
#=> "foo"
File.join(["/", "foo", "bar"])
#=> "/foo/bar"
"""
def join(paths) do
FN.join(paths)
end
@doc """
Join two paths.
If the given paths are a char list, returns a char list.
Otherwise returns a binary.
## Examples
File.join("foo", "bar")
#=> "foo/bar"
"""
def join(left, right) do
FN.join(left, right)
end
@doc """
Tries to create the directory `path`. Missing parent directories are not created.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
Typical error reasons are:
* :eacces - Missing search or write permissions for the parent directories of `path`.
* :eexist - There is already a file or directory named `path`.
* :enoent - A component of `path` does not exist.
* :enospc - There is no space left on the device.
* :enotdir - A component of `path` is not a directory.
On some platforms, `:enoent` is returned instead.
"""
def mkdir(path) do
F.make_dir(path)
end
@doc """
Same as `mkdir`, but raises an exception in case of failure. Otherwise `:ok`.
"""
def mkdir!(path) do
case mkdir(path) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "make directory", path: to_binary(path)
end
end
@doc """
Tries to create the directory `path`. Missing parent directories are created.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
Typical error reasons are:
* :eacces - Missing search or write permissions for the parent directories of `path`.
* :enospc - There is no space left on the device.
* :enotdir - A component of `path` is not a directory.
"""
def mkdir_p(path) do
FL.ensure_dir(join(path, "."))
end
@doc """
Same as `mkdir_p`, but raises an exception in case of failure. Otherwise `:ok`.
"""
def mkdir_p!(path) do
case mkdir_p(path) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "make directory (with -p)", path: to_binary(path)
end
end
@doc """
Returns `{:ok, binary}`, where `binary` is a binary data object that contains the contents
of `path`, or `{:error, reason}` if an error occurs.
Typical error reasons:
* :enoent - The file does not exist.
* :eacces - Missing permission for reading the file,
or for searching one of the parent directories.
* :eisdir - The named file is a directory.
* :enotdir - A component of the file name is not a directory.
On some platforms, `:enoent` is returned instead.
* :enomem - There is not enough memory for the contents of the file.
You can use `Erlang.file.format_error(reason)` to get a descriptive string of the error.
"""
def read(path) do
F.read_file(path)
end
@doc """
Returns a binary with the contents of the given filename or raises
File.Error if an error occurs.
"""
def read!(path) do
case read(path) do
{ :ok, binary } ->
binary
{ :error, reason } ->
raise File.Error, reason: reason, action: "read file", path: to_binary(path)
end
end
@doc """
Returns a list with the path split on the path separator.
If an empty string is given, then it returns the root path.
## Examples
File.split("")
#=> ["/"]
File.split("foo")
#=> ["foo"]
File.split("/foo/bar")
#=> ["/", "foo", "bar"]
"""
def split(path) do
FN.split(path)
end
@doc """
Traverses files and directories according to the given `glob` expression.
The wildcard string looks like an ordinary filename, except that certain
"wildcard characters" are interpreted in a special way. The following
characters are special:
* `?` - Matches one character.
* `*` - Matches any number of characters up to the end of
the filename, the next dot, or the next slash.
* `**` - Two adjacent `*`'s used as a single pattern will
match all files and zero or more directories and subdirectories.
* `[char1,char2,...]` - Matches any of the characters listed. Two characters
separated by a hyphen will match a range of characters.
* `{item1,item2,...}` - Matches one of the alternatives.
Other characters represent themselves. Only filenames that have exactly
the same character in the same position will match. Note that matching
is case-sensitive; i.e. "a" will not match "A".
## Examples
Imagine you have a directory called `projects` with three Elixir projects
inside of it: `elixir`, `exdoc` and `dynamo`. You can find all `.beam` files
inside the `ebin` directories of all projects as follows:
File.wildcard("projects/*/ebin/**/*.beam")
If you want to search for both `.beam` and `.app` files, you could do:
File.wildcard("projects/*/ebin/**/*.{beam,app}")
"""
def wildcard(glob) when is_binary(glob) do
paths = Erlang.elixir_glob.wildcard binary_to_list(glob)
Enum.map paths, list_to_binary(&1)
end
def wildcard(glob) when is_list(glob) do
Erlang.elixir_glob.wildcard(glob)
end
@doc """
Returns information about the `path`. If it exists, it
returns a `{ :ok, info }` tuple, where info is a
`File.Stat` record. Returns `{ :error, reason }` with
the same reasons as `File.read` if a failure occurs.
## Options
The accepted options are:
* `:time` - whether the time should be `:local`, `:universal` or `:posix`.
Defaults to `:local`.
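## Examples
An illustrative example (field access relies on the `File.Stat` record accessors):
{ :ok, stat } = File.stat("hello.txt")
stat.size #=> the size of "hello.txt" in bytes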
"""
def stat(path, opts // []) do
case F.read_file_info(path, opts) do
{:ok, fileinfo} ->
{:ok, File.Stat.new fileinfo}
error ->
error
end
end
@doc """
Same as `stat` but returns the `File.Stat` directly and
raises `File.Error` if an error is returned.
"""
def stat!(path, opts // []) do
case stat(path, opts) do
{:ok, info} -> info
{:error, reason} ->
raise File.Error, reason: reason, action: "read file stats", path: to_binary(path)
end
end
@doc """
Writes the given `File.Stat` back to the filesystem at the given
path. Returns `:ok` or `{ :error, reason }`.
"""
def write_stat(path, File.Stat[] = stat, opts // []) do
F.write_file_info(path, setelem(stat, 1, :file_info), opts)
end
@doc """
Same as `write_stat/3` but raises an exception if it fails.
Returns `:ok` otherwise.
"""
def write_stat!(path, File.Stat[] = stat, opts // []) do
case write_stat(path, stat, opts) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "write file stats", path: to_binary(path)
end
end
@doc """
Updates modification time (mtime) and access time (atime) of
the given file. The file is created if it doesn't exist.
"""
def touch(path, time // :calendar.local_time) do
case F.change_time(path, time) do
{ :error, :enoent } -> write(path, "")
other -> other
end
end
@doc """
Same as `touch/1` but raises an exception if it fails.
Returns `:ok` otherwise.
"""
def touch!(path, time // :calendar.local_time) do
case touch(path, time) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "touch", path: to_binary(path)
end
end
@doc """
Copies the contents of `source` to `destination`. Both
parameters can be a filename or an io device opened with `File.open`.
`bytes_count` specifies the number of bytes to copy, the default
being `:infinity`.
If the file `destination` already exists, it is overridden
by the contents in `source`.
Returns `{ :ok, bytes_copied }` if successful,
`{ :error, reason }` otherwise.
Typical error reasons are the same as in `open/2`,
`read/1` and `write/2`.
"""
def copy(source, destination, bytes_count // :infinity) do
F.copy(source, destination, bytes_count)
end
@doc """
The same as `copy/3` but raises a `File.CopyError` if it fails.
Returns the `bytes_copied` otherwise.
"""
def copy!(source, destination, bytes_count // :infinity) do
case copy(source, destination, bytes_count) do
{ :ok, bytes_count } -> bytes_count
{ :error, reason } ->
raise File.CopyError, reason: reason, action: "copy",
source: to_binary(source), destination: to_binary(destination)
end
end
@doc """
Copies the contents in `source` to `destination`.
Similar to the command `cp -r` in Unix systems,
this function behaves differently depending
if `source` and `destination` are a file or a directory.
If both are files, it simply copies `source` to
`destination`. However, if `destination` is a directory,
it copies the contents of `source` to `destination/source`
recursively.
If a file already exists in the destination,
it invokes a callback which should return
true if the existing file should be overridden,
false otherwise. The callback defaults to always returning true.
It returns `:ok` in case of success, returns
`{ :error, reason }` otherwise.
"""
def cp(source, destination, callback // fn(_, _) -> true end) do
if dir?(source) do
{ :error, :eisdir }
else
output =
if dir?(destination) do
mkdir(destination)
join(destination, basename(source))
else
destination
end
case do_cp_file(source, output, callback, []) do
{ :error, _ } = error -> error
_ -> :ok
end
end
end
@doc """
The same as `cp/3`, but raises File.CopyError if it fails.
Returns `:ok` otherwise.
"""
def cp!(source, destination, callback // fn(_, _) -> true end) do
case cp(source, destination, callback) do
:ok -> :ok
{ :error, reason } ->
raise File.CopyError, reason: reason, action: "copy recursively",
source: to_binary(source), destination: to_binary(destination)
end
end
@doc %B"""
Copies the contents in source to destination.
Similar to the command `cp -r` in Unix systems,
this function behaves differently depending
if `source` and `destination` are a file or a directory.
If both are files, it simply copies `source` to
`destination`. However, if `destination` is a directory,
it copies the contents of `source` to `destination/source`
recursively.
If a file already exists in the destination,
it invokes a callback which should return
true if the existing file should be overridden,
false otherwise. The callback defaults to always returning true.
If a directory already exists in the destination
where a file is meant to be (or otherwise), this
function will fail.
This function may fail while copying files,
in such cases, it will leave the destination
directory in a dirty state, where already
copied files won't be removed.
It returns `{ :ok, files_and_directories }` in case of
success with all files and directories copied in no
specific order, `{ :error, reason }` otherwise.
## Examples
# Copies "a.txt" to "tmp/a.txt"
File.cp_r "a.txt", "tmp"
# Copies all files in "samples" to "tmp/samples"
File.cp_r "samples", "tmp"
# Copies all files in "samples" to "tmp"
File.cp_r "samples/.", "tmp"
# Same as before, but asks the user how to proceed in case of conflicts
File.cp_r "samples/.", "tmp", fn(source, destination) ->
IO.gets("Overriding #{destination} by #{source}. Type y to confirm.") == "y"
end
"""
def cp_r(source, destination, callback // fn(_, _) -> true end) when is_function(callback) do
output =
if dir?(destination) || dir?(source) do
mkdir(destination)
join(destination, basename(source))
else
destination
end
case do_cp_r(source, output, callback, []) do
{ :error, _ } = error -> error
res -> { :ok, res }
end
end
@doc """
The same as `cp_r/3`, but raises File.CopyError if it fails.
Returns the list of copied files otherwise.
"""
def cp_r!(source, destination, callback // fn(_, _) -> true end) do
case cp_r(source, destination, callback) do
{ :ok, files } -> files
{ :error, reason } ->
raise File.CopyError, reason: reason, action: "copy recursively",
source: to_binary(source), destination: to_binary(destination)
end
end
# src may be a file or a directory, dest is definitely
# a directory. Returns the accumulated list of copied entries,
# or an error tuple if a failure is found.
defp do_cp_r(src, dest, callback, acc) when is_list(acc) do
case F.read_link(src) do
{ :ok, link } ->
do_cp_link(link, src, dest, callback, acc)
_ ->
case F.list_dir(src) do
{ :ok, files } ->
case mkdir(dest) do
success in [:ok, { :error, :eexist }] ->
Enum.reduce(files, [dest|acc], fn(x, acc) ->
do_cp_r(join(src, x), join(dest, x), callback, acc)
end)
reason -> reason
end
{ :error, :enotdir } ->
do_cp_file(src, dest, callback, acc)
reason -> reason
end
end
end
# If we reach this clause, there was an error while
# processing a file.
defp do_cp_r(_, _, _, acc) do
acc
end
# Both src and dest are files.
defp do_cp_file(src, dest, callback, acc) do
case copy(src, { dest, [:exclusive] }) do
{ :ok, _ } ->
[dest|acc]
{ :error, :eexist } ->
if callback.(src, dest) do
rm(dest)
case copy(src, dest) do
{ :ok, _ } -> [dest|acc]
reason -> reason
end
else
acc
end
reason -> reason
end
end
# src is a symlink; recreate it at dest.
defp do_cp_link(link, src, dest, callback, acc) do
case F.make_symlink(link, dest) do
:ok ->
[dest|acc]
{ :error, :eexist } ->
if callback.(src, dest) do
rm(dest)
case F.make_symlink(link, dest) do
:ok -> [dest|acc]
reason -> reason
end
else
acc
end
reason -> reason
end
end
@doc """
Writes `content` to the file `path`. The file is created if it
does not exist. If it exists, the previous contents are overwritten.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
Typical error reasons are:
* :enoent - A component of the file name does not exist.
* :enotdir - A component of the file name is not a directory.
On some platforms, enoent is returned instead.
* :enospc - There is no space left on the device.
* :eacces - Missing permission for writing the file or searching one of the parent directories.
* :eisdir - The named file is a directory.
"""
def write(path, content, modes // []) do
F.write_file(path, content, modes)
end
@doc """
Same as `write/3` but raises an exception if it fails, returns `:ok` otherwise.
"""
def write!(path, content, modes // []) do
case F.write_file(path, content, modes) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "write to file", path: to_binary(path)
end
end
@doc """
Tries to delete the file `path`.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
Typical error reasons are:
* :enoent - The file does not exist.
* :eacces - Missing permission for the file or one of its parents.
* :eperm - The file is a directory and user is not super-user.
* :enotdir - A component of the file name is not a directory.
On some platforms, enoent is returned instead.
* :einval - Filename had an improper type, such as tuple.
## Examples
File.rm('foo.txt')
#=> :ok
File.rm('tmp_dir/')
#=> {:error, :eperm}
"""
def rm(path) do
F.delete(path)
end
@doc """
Same as `rm`, but raises an exception in case of failure. Otherwise `:ok`.
"""
def rm!(path) do
case rm(path) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "remove file", path: to_binary(path)
end
end
@doc """
Tries to delete the dir at `path`.
Returns `:ok` if successful, or `{:error, reason}` if an error occurs.
## Examples
File.rmdir('tmp_dir')
#=> :ok
File.rmdir('foo.txt')
#=> {:error, :enotdir}
"""
def rmdir(path) do
F.del_dir(path)
end
@doc """
Same as `rmdir/1`, but raises an exception in case of failure. Otherwise `:ok`.
"""
def rmdir!(path) do
case rmdir(path) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "remove directory", path: to_binary(path)
end
end
@doc """
Remove files and directories recursively at the given `path`.
Symlinks are not followed but simply removed; non-existing
files are simply ignored (i.e. they do not make this function fail).
Returns `{ :ok, files_and_directories }` with all files and
directories removed in no specific order, `{ :error, reason }`
otherwise.
## Examples
File.rm_rf "samples"
#=> { :ok, ["samples", "samples/1.txt"] }
File.rm_rf "unknown"
#=> { :ok, [] }
"""
def rm_rf(path) do
do_rm_rf(path, { :ok, [] })
end
defp do_rm_rf(path, { :ok, acc } = entry) do
case safe_list_dir(path) do
{ :ok, files } ->
res =
Enum.reduce files, entry, fn(file, tuple) ->
do_rm_rf(join(path, file), tuple)
end
case res do
{ :ok, acc } ->
case rmdir(path) do
:ok -> { :ok, [path|acc] }
{ :error, :enoent } -> res
reason -> reason
end
reason -> reason
end
{ :error, :enotdir } ->
case rm(path) do
:ok -> { :ok, [path|acc] }
{ :error, :enoent } -> entry
reason -> reason
end
{ :error, :enoent } -> entry
reason -> reason
end
end
defp do_rm_rf(_, reason) do
reason
end
defp safe_list_dir(path) do
case F.read_link(path) do
{ :ok, _ } -> { :error, :enotdir }
_ -> F.list_dir(path)
end
end
@doc """
Same as `rm_rf/1` but raises `File.Error` in case of failures,
otherwise returns the list of files and directories removed.
"""
def rm_rf!(path) do
case rm_rf(path) do
{ :ok, files } -> files
{ :error, reason } ->
raise File.Error, reason: reason, action: "remove files and directories recursively from", path: to_binary(path)
end
end
@doc """
Opens the given `path` according to the given list of modes.
By default, the file is opened in read mode, as a binary with utf8 encoding.
The allowed modes:
* `:read` - The file, which must exist, is opened for reading.
* `:write` - The file is opened for writing. It is created if it does not exist.
If the file exists, and if write is not combined with read, the file will be truncated.
* `:append` - The file will be opened for writing, and it will be created if it does not exist.
Every write operation to a file opened with append will take place at the end of the file.
* `:exclusive` - The file, when opened for writing, is created if it does not exist.
If the file exists, open will return { :error, :eexist }.
* `:charlist` - When this term is given, read operations on the file will return char lists rather than binaries;
* `:compressed` - Makes it possible to read or write gzip compressed files.
The compressed option must be combined with either read or write, but not both.
Note that the file size obtained with `stat/1` will most probably not
match the number of bytes that can be read from a compressed file.
If a function is given to modes (instead of a list), it dispatches to `open/3`.
Check `http://www.erlang.org/doc/man/file.html#open-2` for more information about
other options such as `:read_ahead` and `:delayed_write`.
This function returns:
* { :ok, io_device } - The file has been opened in the requested mode.
`io_device` is actually the pid of the process which handles the file.
This process is linked to the process which originally opened the file.
If any process to which the io_device is linked terminates, the file will
be closed and the process itself will be terminated. An io_device returned
from this call can be used as an argument to the `IO` module functions.
* { :error, reason } - The file could not be opened.
## Examples
{ :ok, file } = File.open("foo.tar.gz", [:read, :compressed])
IO.readline(file)
File.close(file)
"""
def open(path, modes // [])
def open(path, modes) when is_list(modes) do
F.open(path, open_defaults(modes, true, true))
end
def open(path, function) when is_function(function) do
open(path, [], function)
end
@doc """
Similar to `open/2` but expects a function as last argument.
The file is opened, given to the function as argument and
automatically closed after the function returns, regardless
if there was an error or not.
It returns `{ :ok, function_result }` in case of success,
`{ :error, reason }` otherwise.
Do not use this function with :delayed_write option
since automatically closing the file may fail
(as writes are delayed).
## Examples
File.open!("foo.txt", [:read, :write], fn(file) ->
IO.readline(file)
end)
"""
def open(path, modes, function) do
case open(path, modes) do
{ :ok, device } ->
try do
{ :ok, function.(device) }
after
:ok = close(device)
end
other -> other
end
end
@doc """
Same as `open/2` but raises an error if file could not be opened.
Returns the `io_device` otherwise.
"""
def open!(path, modes // []) do
case open(path, modes) do
{ :ok, device } -> device
{ :error, reason } ->
raise File.Error, reason: reason, action: "open", path: to_binary(path)
end
end
@doc """
Same as `open/3` but raises an error if file could not be opened.
Returns the function result otherwise.
"""
def open!(path, modes, function) do
case open(path, modes, function) do
{ :ok, result } -> result
{ :error, reason } ->
raise File.Error, reason: reason, action: "open", path: to_binary(path)
end
end
@doc """
Gets the current working directory. In rare circumstances, this function can
fail on Unix. It may happen if read permission does not exist for the parent
directories of the current directory. For this reason, returns `{ :ok, cwd }`
in case of success, `{ :error, reason }` otherwise.
"""
def cwd() do
case F.get_cwd do
{ :ok, cwd } -> { :ok, list_to_binary(cwd) }
{ :error, _ } = error -> error
end
end
@doc """
The same as `cwd/0`, but raises an exception if it fails.
"""
def cwd!() do
case F.get_cwd do
{ :ok, cwd } -> list_to_binary(cwd)
{ :error, reason } ->
raise File.Error, reason: reason, action: "get current working directory"
end
end
@doc """
Sets the current working directory. Returns `:ok` if successful,
`{ :error, reason }` otherwise.
"""
def cd(path) do
F.set_cwd(path)
end
@doc """
The same as `cd/1`, but raises an exception if it fails.
"""
def cd!(path) do
case F.set_cwd(path) do
:ok -> :ok
{ :error, reason } ->
raise File.Error, reason: reason, action: "set current working directory to", path: to_binary(path)
end
end
@doc """
Changes the current directory to the given `path`,
executes the given function and then reverts back
to the previous path, regardless of whether there is an exception.
Raises an error if retrieving or changing the current
directory fails.
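## Examples
An illustrative example; the previous working directory is restored afterwards:
File.cd! "apps", fn ->
File.regular?("mix.exs")
end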
"""
def cd!(path, function) do
old = cwd!
cd!(path)
try do
function.()
after
cd!(old)
end
end
@doc """
Closes the file referenced by `io_device`. It mostly returns `:ok`, except
for some severe errors such as out of memory.
Note that if the option `:delayed_write` was used when opening the file,
`close/1` might return an old write error and not even try to close the file.
See `open/2`.
"""
def close(io_device) do
F.close(io_device)
end
@doc """
Converts the file device into an iterator that can be
passed into `Enum`. The device is iterated line
by line, lazily; at the end of iteration the file is
closed.
## Examples
An example that lazily iterates a file, replacing all double
quotes with single quotes, and writes each line to a target file
is shown below:
source = File.iterator("README.md")
File.open "NEWREADME.md", [:write], fn(target) ->
Enum.each source, fn(line) ->
IO.write target, Regex.replace_all(%r/"/, line, "'")
end
end
"""
def iterator(device)
def iterator(file) when is_binary(file) or is_list(file) do
iterator(file, [])
end
def iterator(device) do
fn(_) ->
case :io.get_line(device, "") do
:eof ->
close(device)
:stop
{ :error, reason } ->
raise File.IteratorError, reason: reason
data ->
{ data, :ok }
end
end
end
@doc """
Opens the given `file` with the given `mode` and
returns its iterator. Fails for the same reasons
as `File.open`.
"""
def iterator(file, mode) do
case open(file, mode) do
{ :ok, device } -> { :ok, iterator(device) }
error -> error
end
end
@doc """
Same as `iterator/2` but raises if the file
cannot be opened.
"""
def iterator!(file, mode // []) do
open!(file, mode) /> iterator
end
## Helpers
# Normalize the given path by removing "..".
defp normalize(path), do: normalize(split(path), [])
defp normalize([top|t], [_|acc]) when top in ["..", '..'] do
normalize t, acc
end
defp normalize([top|t], acc) when top in [".", '.'] do
normalize t, acc
end
defp normalize([h|t], acc) do
normalize t, [h|acc]
end
defp normalize([], acc) do
join List.reverse(acc)
end
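# Applies the default open options: adds `{:encoding, :unicode}` and
# `:binary` unless the caller passed `:charlist` or an explicit
# `:encoding` option.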
defp open_defaults([:charlist|t], add_encoding, _add_binary) do
open_defaults(t, add_encoding, false)
end
defp open_defaults([{:encoding, _} = h|t], _add_encoding, add_binary) do
[h|open_defaults(t, false, add_binary)]
end
defp open_defaults([h|t], add_encoding, add_binary) do
[h|open_defaults(t, add_encoding, add_binary)]
end
defp open_defaults([], add_encoding, add_binary) do
options = []
if add_encoding, do: options = [{:encoding, :unicode}|options]
if add_binary, do: options = [:binary|options]
options
end
end
|
lib/elixir/lib/file.ex
| 0.823612
| 0.568176
|
file.ex
|
starcoder
|
defmodule Quantonex.Indicators do
@moduledoc """
Contains technical indicators.
"""
alias Quantonex.DataPoint
@dataset_min_size_error "There must be at least 1 element in the dataset."
@period_min_value_error "Period must be at least 1."
@period_max_value_error "Period can't be greater than the length of the dataset."
@zero Decimal.new(0)
@typedoc """
Represents a smoothing method.
* `:ema` - exponential moving average
* `:sma` - simple moving average
"""
@type smoothing_method :: :ema | :sma
@doc """
Calculates a list of exponential moving averages (EMAs) for a given dataset and period.
The first `n` elements (`n == period`) are used to calculate the initial EMA using a SMA.
Each successive value is calculated using an EMA.
Possible return values are:
* `{:error, reason}`
* `{:ok, values}`
The returned list of EMA values has the same length as the input dataset, so they can
be joined again.
## Examples
```
dataset = 1..11 |> Enum.map(fn x -> x end)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
{:ok, emas} = Quantonex.Indicators.ema(dataset, 10)
{:ok,
[#Decimal<0>, #Decimal<0>, #Decimal<0>, #Decimal<0>, #Decimal<0>, #Decimal<0>,
#Decimal<0>, #Decimal<0>, #Decimal<0>, #Decimal<5.5>,
#Decimal<6.500000000000000000000000000>]}
Enum.zip(dataset, emas)
[
{1, #Decimal<0>},
{2, #Decimal<0>},
{3, #Decimal<0>},
{4, #Decimal<0>},
{5, #Decimal<0>},
{6, #Decimal<0>},
{7, #Decimal<0>},
{8, #Decimal<0>},
{9, #Decimal<0>},
{10, #Decimal<5.5>},
{11, #Decimal<6.500000000000000000000000000>}
]
```
"""
@spec ema(dataset :: nonempty_list(price :: String.t() | number()), period :: pos_integer()) ::
{:error, reason :: String.t()} | {:ok, nonempty_list(values :: Decimal.t())}
def ema([], _period), do: {:error, @dataset_min_size_error}
def ema(dataset, period) when is_list(dataset) and period < 1,
do: {:error, @period_min_value_error}
def ema(dataset, period) when is_list(dataset) and period > length(dataset),
do: {:error, @period_max_value_error}
def ema(dataset, period) when is_list(dataset) do
try do
# every ema value with an index < period is set to 0
initial_emas = 1..(period - 1) |> Enum.map(fn _ -> @zero end)
# the first EMA is based on a SMA
{:ok, seed} =
dataset
|> Enum.take(period)
|> Enum.map(&to_decimal/1)
|> sma()
values =
dataset
|> Enum.slice(period..(length(dataset) - 1))
|> Enum.map(&to_decimal/1)
|> Enum.reduce_while([seed | initial_emas], fn current_price, acc ->
[previous_ema | _tail] = acc
case ema(current_price, period, previous_ema) do
{:ok, value} -> {:cont, [value | acc]}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
# reverse to match the order of the input dataset
|> Enum.reverse()
{:ok, values}
rescue
e in Decimal.Error ->
{:error, "An error occured while calculating the EMA value: " <> e.message}
end
end
@doc """
Calculates a single exponential moving average (EMA) for a given price, period and another EMA.
This is a convenience function to avoid calculation using a complete dataset.
See `Quantonex.Indicators.ema/2` for how to calculate an initial EMA.
Possible return values are:
* `{:error, reason}`
* `{:ok, value}`
## Examples
```
previous_ema = Decimal.from_float(5.5)
#Decimal<5.5>
{:ok, value} = Quantonex.Indicators.ema(11, 10, previous_ema)
{:ok, #Decimal<6.500000000000000000000000000>}
```
"""
@spec ema(price :: String.t() | number(), period :: pos_integer(), previous_ema :: Decimal.t()) ::
{:error, reason :: String.t()} | {:ok, value :: Decimal.t()}
def ema(price, period, previous_ema) do
try do
# 2 / (period + 1)
multiplier = weighted_multiplier(period)
# (price - previous_ema) * multiplier + previous_ema
value =
previous_ema
|> to_decimal()
|> Decimal.mult(Decimal.sub(Decimal.new(1), multiplier))
|> Decimal.add(Decimal.mult(price, multiplier))
{:ok, value}
rescue
e in Decimal.Error ->
{:error, "An error occured while calculating the EMA value: " <> e.message}
end
end
@doc """
Calculates a list of relative strength indexes (RSIs) for a given dataset and period.
Possible return values are:
* `{:error, reason}`
* `{:ok, values}`
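## Examples
```
# Illustrative sketch: entries at indexes below `period` are zero
# placeholders, mirroring the behaviour of `ema/2`.
dataset = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
{:ok, values} = Quantonex.Indicators.rsi(dataset, 5)
```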
"""
@spec rsi(
dataset :: nonempty_list(price :: String.t() | number()),
period :: non_neg_integer()
) ::
{:error, reason :: String.t()}
| {:ok, values :: nonempty_list(Decimal.t())}
def rsi([], _period), do: {:error, @dataset_min_size_error}
def rsi(dataset, period) do
try do
init_map = %{
current_price: @zero,
previous_price: @zero,
up_movement: @zero,
up_sum: @zero,
down_movement: @zero,
down_sum: @zero,
up_average: @zero,
down_average: @zero,
relative_strength: @zero,
value: @zero
}
results =
dataset
|> Enum.map(&to_decimal/1)
|> Enum.with_index()
|> Enum.reduce(fn price_with_index, acc ->
{current_price, index} = price_with_index
{previous_rsi, previous_items} =
case acc do
# the first iteration and initialization of our result rsi dataset
{first_price, _index} ->
previous_rsi = %{init_map | current_price: first_price}
{
previous_rsi,
[previous_rsi]
}
[previous_rsi | _tail] ->
{previous_rsi, acc}
end
current_rsi =
init_map
|> Map.put(:current_price, current_price)
|> Map.put(:previous_price, previous_rsi.current_price)
|> rsi_up_movement()
|> rsi_down_movement()
|> rsi_up_sum(previous_rsi)
|> rsi_down_sum(previous_rsi)
|> rsi_up_average(previous_rsi, index, period)
|> rsi_down_average(previous_rsi, index, period)
|> rsi_strength(index, period)
|> rsi_index(index, period)
[current_rsi | previous_items]
end)
|> Enum.map(fn x -> x.value end)
|> Enum.reverse()
{:ok, results}
rescue
e in Decimal.Error ->
{:error, "An error occured while calculating the RSI value: " <> e.message}
end
end
@doc """
Calculates a simple moving average (SMA) for a given dataset.
The period of the SMA is fixed and equal to the length of the dataset.
Possible return values are:
* `{:error, reason}`
* `{:ok, value}`
## Examples
```
iex> Quantonex.Indicators.sma([1, 2, 3])
{:ok, Decimal.new(2)}
```
"""
@spec sma(dataset :: nonempty_list(price :: String.t() | number())) ::
{:error, reason :: String.t()} | {:ok, value :: Decimal.t()}
def sma([]), do: {:error, @dataset_min_size_error}
def sma(dataset) when is_list(dataset) do
try do
value = calculate_simple_moving_average(dataset, length(dataset))
{:ok, value}
rescue
e in Decimal.Error ->
{:error, "An error occured while calculating the SMA value: " <> e.message}
end
end
@typedoc """
Represents a volume weighted average price.
* `value` - the volume weighted average price
* `cumulative_volume` - the previous volume plus the current volume
* `cumulative_volume_price` - the previous volume price plus the current volume price
"""
@type volume_weighted_average_price :: %{
cumulative_volume: non_neg_integer(),
cumulative_volume_price: Decimal.t(),
value: Decimal.t()
}
@doc """
Calculates a list of volume weighted average prices (VWAPs) for a given dataset.
The following data point properties are used to calculate a VWAP value.
* `high`
* `low`
* `close`
* `volume`
Possible return values are:
* `{:error, reason}`
* `{:ok, values}`
The returned list of VWAP values has the same length as the input dataset, so they can
be joined again.
## Examples
```
dataset = [%Quantonex.DataPoint{
close: #Decimal<127.28>,
high: #Decimal<127.36>,
low: #Decimal<126.99>,
volume: 89329
},...]
{:ok, vwaps} = Quantonex.Indicators.vwap(dataset)
{:ok,
[%{
cumulative_volume: 89329,
cumulative_volume_price: #Decimal<11363542.09>,
value: #Decimal<127.21>
}, ...]
}
Enum.zip(dataset, vwaps)
[
...
]
```
"""
@spec vwap(dataset :: nonempty_list(DataPoint.t())) ::
{:error, reason :: String.t()}
| {:ok, values :: nonempty_list(volume_weighted_average_price())}
def vwap([]), do: {:error, @dataset_min_size_error}
def vwap(dataset) when is_list(dataset) do
values =
dataset
|> Enum.reduce_while([], fn data_point, acc ->
{cumulative_volume, cumulative_volume_price} =
case acc do
[] ->
{0, @zero}
[previous_vwap | _tail] ->
{previous_vwap.cumulative_volume, previous_vwap.cumulative_volume_price}
end
case vwap(data_point, cumulative_volume, cumulative_volume_price) do
{:ok, value} -> {:cont, [value | acc]}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
case values do
{:error, reason} -> {:error, reason}
_ -> {:ok, values |> Enum.reverse()}
end
end
@doc """
Calculates a single volume weighted average price (VWAP).
The following data point properties are used to calculate a VWAP value.
* `high`
* `low`
* `close`
* `volume`
Possible return values are:
* `{:error, reason}`
* `{:ok, values}`
## Examples
```
data_point = %Quantonex.DataPoint{
close: Decimal.new(6),
high: Decimal.new(8),
low: Decimal.new(4),
volume: 10
}
{:ok,
%{
"cumulative_volume": cumulative_volume,
"cumulative_volume_price": cumulative_volume_price,
"value": value
}
} = Quantonex.Indicators.vwap(data_point)
```
Successive calculations can be done by passing previously calculated cumulative values to
the function.
```
next_data_point = %Quantonex.DataPoint{
close: Decimal.new(8),
high: Decimal.new(10),
low: Decimal.new(6),
volume: 20
}
Quantonex.Indicators.vwap(next_data_point, cumulative_volume, cumulative_volume_price)
```
"""
@spec vwap(
data_point :: DataPoint.t(),
cumulative_volume :: non_neg_integer(),
cumulative_volume_price :: number() | Decimal.t()
) ::
{:error, reason :: String.t()} | {:ok, value :: volume_weighted_average_price()}
def vwap(data_point, cumulative_volume \\ 0, cumulative_volume_price \\ 0)
def vwap(%DataPoint{:volume => volume}, cumulative_volume, _cumulative_volume_price)
when volume == 0 and cumulative_volume == 0,
do: {:error, "The data point volume and cumulative volume can't both be zero."}
def vwap(%DataPoint{}, cumulative_volume, _cumulative_volume_price)
when is_integer(cumulative_volume) and cumulative_volume < 0,
do: {:error, "The cumulative volume can't be negative."}
def vwap(%DataPoint{}, _cumulative_volume, cumulative_volume_price)
when cumulative_volume_price < 0,
do: {:error, "The cumulative volume price can't be negative."}
def vwap(%DataPoint{} = data_point, cumulative_volume, cumulative_volume_price) do
try do
# ensure cumulative volume price can be converted to a decimal
cumulative_volume_price = to_decimal(cumulative_volume_price)
average_price =
data_point.high
|> Decimal.add(data_point.low)
|> Decimal.add(data_point.close)
|> Decimal.div(3)
volume_price = Decimal.mult(average_price, data_point.volume)
new_cumulative_volume = cumulative_volume + data_point.volume
new_cumulative_volume_price = Decimal.add(cumulative_volume_price, volume_price)
value = Decimal.div(new_cumulative_volume_price, new_cumulative_volume)
{
:ok,
%{
cumulative_volume: new_cumulative_volume,
cumulative_volume_price: new_cumulative_volume_price,
value: value
}
}
rescue
e in Decimal.Error ->
{:error, "An error occured while calculating the VWAP value: " <> e.message}
end
end
## Helpers
defp calculate_simple_moving_average(dataset, period) do
dataset
|> Enum.map(&to_decimal/1)
|> Enum.reduce(fn x, acc -> Decimal.add(x, acc) end)
|> calculate_average(period)
end
defp calculate_average(sum, divisor), do: Decimal.div(sum, Decimal.new(divisor))
defp rsi_down_average(current_rsi, _previous_rsi, index, period) when index < period,
do: current_rsi
defp rsi_down_average(%{:down_sum => down_sum} = current_rsi, _previous_rsi, index, period)
when index == period do
%{current_rsi | down_average: calculate_simple_moving_average([down_sum], period)}
end
defp rsi_down_average(
%{:down_movement => current_down_movement} = current_rsi,
%{
:down_average => previous_down_average
} = _previous_rsi,
_index,
period
) do
down_average =
Decimal.mult(previous_down_average, Decimal.new(period - 1))
|> Decimal.add(current_down_movement)
|> Decimal.div(Decimal.new(period))
%{current_rsi | down_average: down_average}
end
defp rsi_index(rsi_map, index, period) when index < period, do: rsi_map
defp rsi_index(%{:relative_strength => relative_strength} = rsi_map, _index, _period) do
max_rsi = Decimal.new(100)
# RSI: 100 - 100 / (1 + relative_strength)
first_calc = Decimal.new(1) |> Decimal.add(relative_strength)
second_calc = Decimal.div(max_rsi, first_calc)
relative_strength_index = Decimal.sub(max_rsi, second_calc)
%{rsi_map | value: relative_strength_index}
end
defp rsi_strength(%{:down_average => @zero} = rsi_map, index, period) when index < period,
do: rsi_map
defp rsi_strength(
%{
:up_average => up_average,
:down_average => down_average
} = rsi_map,
_index,
_period
) do
%{rsi_map | relative_strength: Decimal.div(up_average, down_average)}
end
defp rsi_up_sum(
%{:up_movement => up_movement} = current_rsi,
%{:up_sum => up_sum} = _previous_rsi
) do
up_sum = Decimal.add(up_movement, up_sum)
%{current_rsi | up_sum: up_sum}
end
defp rsi_down_sum(
%{:down_movement => down_movement} = current_rsi,
%{:down_sum => down_sum} = _previous_rsi
) do
down_sum = Decimal.add(down_movement, down_sum)
%{current_rsi | down_sum: down_sum}
end
defp rsi_down_movement(
%{:previous_price => previous_price, :current_price => current_price} = rsi_map
) do
diff = Decimal.sub(current_price, previous_price)
case Decimal.lt?(diff, 0) do
true -> %{rsi_map | down_movement: Decimal.abs(diff)}
false -> %{rsi_map | down_movement: @zero}
end
end
defp rsi_up_average(current_rsi, _previous_rsi, index, period) when index < period,
do: current_rsi
defp rsi_up_average(%{:up_sum => up_sum} = current_rsi, _previous_rsi, index, period)
when index == period do
%{current_rsi | up_average: calculate_simple_moving_average([up_sum], period)}
end
defp rsi_up_average(
%{:up_movement => current_up_movement} = current_rsi,
%{
:up_average => previous_up_average
} = _previous_rsi,
_index,
period
) do
up_average =
Decimal.mult(previous_up_average, Decimal.new(period - 1))
|> Decimal.add(current_up_movement)
|> Decimal.div(Decimal.new(period))
%{current_rsi | up_average: up_average}
end
defp rsi_up_movement(
%{:previous_price => previous_price, :current_price => current_price} = rsi_map
) do
diff = Decimal.sub(current_price, previous_price)
case Decimal.gt?(diff, 0) do
true -> %{rsi_map | up_movement: diff}
false -> %{rsi_map | up_movement: @zero}
end
end
defp to_decimal(value) when is_float(value), do: Decimal.from_float(value)
# create decimals from either strings, integers or decimals
defp to_decimal(value), do: Decimal.new(value)
defp weighted_multiplier(period) do
period_increment = Decimal.new(period + 1)
Decimal.new(2)
|> Decimal.div(period_increment)
end
end
|
lib/indicators.ex
| 0.939143
| 0.902309
|
indicators.ex
|
starcoder
|
defmodule Hui.Encode do
@moduledoc """
Utilities for encoding Solr query and update data structures.
"""
@type options :: __MODULE__.Options.t()
@url_delimiters {?=, ?&}
@json_delimiters {?:, ?,}
defmodule Options do
@moduledoc false
defstruct [:per_field, :prefix, type: :url]
@type t :: %__MODULE__{
type: :url | :json,
per_field: binary,
prefix: binary
}
end
@doc """
Encodes a keyword list into IO data.
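An illustrative example (URL encoding is the default):
iodata = Hui.Encode.encode(q: "loch", rows: 10)
IO.iodata_to_binary(iodata)
#=> "q=loch&rows=10"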
"""
@spec encode(keyword() | map()) :: iodata()
def encode([]), do: []
def encode(query) when is_list(query), do: encode(query, %Options{})
def encode(query) when is_map(query), do: encode(query |> Map.to_list(), %Options{})
@doc """
Encodes keywords of Solr query structs that require special handling into IO data.
"""
@spec encode(keyword(), options) :: iodata()
def encode(query, options)
def encode([h | t], %{type: :url} = opts), do: transform({h, t}, opts, @url_delimiters)
def encode([h | t], %{type: :json} = opts), do: transform({h, t}, opts, @json_delimiters)
@doc false
def encode_json([], %{type: :json}), do: [?{, ?}]
def encode_json(query, %{type: :json} = opts), do: [?{, encode(query, opts), ?}]
# expands and transforms fq: [x, y, z] => "fq=x&fq=y&fq=z"
defp transform({{k, v}, t}, %{type: :url} = opts, _delimiters) when is_list(v) do
encode(Enum.map(v, &{k, &1}) ++ t, opts)
end
defp transform({{_k, %{:__struct__ => _} = v}, t}, opts, {_eql, delimiter}) do
case t do
[] -> Hui.Encoder.encode(v)
_ -> [Hui.Encoder.encode(v), delimiter | [encode(t, opts)]]
end
end
defp transform({h, []}, opts, {eql, _delimiter}), do: [key(h, opts), eql, value(h, opts)]
defp transform({h, t}, opts, {eql, delimiter}) do
[key(h, opts), eql, value(h, opts), delimiter | [encode(t, opts)]]
end
defp key({k, _v}, %{prefix: nil, type: :url}), do: to_string(k)
defp key({k, _v}, %{prefix: nil, type: :json}), do: [?", to_string(k), ?"]
defp key({k, _v}, %{prefix: prefix, per_field: field}) do
key = to_string(k)
cond do
k in [:facet, :mlt, :spellcheck, :suggest] -> key
String.ends_with?(prefix, key) -> prefix
field != nil -> ["f", ".", field, ".", prefix, ".", key] |> to_string()
field == nil -> [prefix, ".", key] |> to_string()
end
end
defp value({_k, v}, %{type: :url}), do: URI.encode_www_form(to_string(v))
defp value({_k, v}, %{type: :json}), do: Jason.encode_to_iodata!(v)
@doc false
@spec sanitise(list()) :: list()
def sanitise(query) do
query
|> Enum.reject(fn {k, v} ->
v in ["", nil, []] or k == :__struct__ or k == :per_field
end)
end
end
|
lib/hui/encode.ex
| 0.800809
| 0.466603
|
encode.ex
|
starcoder
|
defmodule OSC do
alias OSC.Encoder
alias OSC.Parser
@doc """
Encode a value to OSC.
iex> OSC.encode(%OSC.Message{address: "/foo", arguments: ["hello"]})
{:ok, <<47, 102, 111, 111, 0, 0, 0, 0, 44, 115, 0, 0, 104, 101, 108, 108, 111, 0, 0, 0>>}
"""
@spec encode(Encoder.t, Keyword.t) :: {:ok, iodata} | {:ok, String.t}
| {:error, {:invalid, any}}
def encode(value, options \\ []) do
{:ok, encode!(value, options)}
rescue
exception in [OSC.EncodeError] ->
{:error, {:invalid, exception.value}}
end
@doc """
Encode a value to OSC as iodata.
iex> OSC.encode_to_iodata(%OSC.Message{address: "/foo", arguments: ["hello"]})
{:ok, [["/foo", 0, 0, 0, 0], [',s', 0, 0], [["hello", 0, 0, 0]]]}
"""
@spec encode_to_iodata(Encoder.t, Keyword.t) :: {:ok, iodata}
| {:error, {:invalid, any}}
def encode_to_iodata(value, options \\ []) do
encode(value, [iodata: true] ++ options)
end
@doc """
Encode a value to OSC, raises an exception on error.
iex> OSC.encode!(%OSC.Message{address: "/foo", arguments: ["hello"]})
<<47, 102, 111, 111, 0, 0, 0, 0, 44, 115, 0, 0, 104, 101, 108, 108, 111, 0, 0, 0>>
"""
@spec encode!(Encoder.t, Keyword.t) :: iodata | no_return
def encode!(value, options \\ []) do
iodata = Encoder.encode(value, options)
unless options[:iodata] do
iodata |> IO.iodata_to_binary
else
iodata
end
end
@doc """
Encode a value to OSC as iodata, raises an exception on error.
iex> OSC.encode_to_iodata!(%OSC.Message{address: "/foo", arguments: ["hello"]})
[["/foo", 0, 0, 0, 0], [',s', 0, 0], [["hello", 0, 0, 0]]]
"""
@spec encode_to_iodata!(Encoder.t, Keyword.t) :: iodata | no_return
def encode_to_iodata!(value, options \\ []) do
encode!(value, [iodata: true] ++ options)
end
@doc """
Decode OSC to a value.
iex> OSC.decode(<<47, 102, 111, 111, 0, 0, 0, 0, 44, 115, 0, 0, 104, 101, 108, 108, 111, 0, 0, 0>>)
{:ok, %OSC.Message{address: "/foo", arguments: ["hello"]}}
"""
@spec decode(iodata, Keyword.t) :: {:ok, Parser.t} | {:error, :invalid}
| {:error, {:invalid, String.t}}
def decode(iodata, options \\ []) do
Parser.parse(iodata, options)
end
@doc """
Decode OSC to a value, raises an exception on error.
iex> OSC.decode!(<<47, 102, 111, 111, 0, 0, 0, 0, 44, 115, 0, 0, 104, 101, 108, 108, 111, 0, 0, 0>>)
%OSC.Message{address: "/foo", arguments: ["hello"]}
"""
@spec decode!(iodata, Keyword.t) :: Parser.t | no_return
def decode!(iodata, options \\ []) do
Parser.parse!(iodata, options)
end
end
|
lib/osc.ex
| 0.83104
| 0.483892
|
osc.ex
|
starcoder
|
defmodule Bluetooth.HCI.Commands do
@moduledoc """
This module holds conversion functions for HCI commands and their results,
between a logical format and the binary wire representation.
This module does not attempt to be complete but grows as needed.
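A sketch of the intended flow (transport handling is up to the caller, and
`params` stands for the return parameters read from the controller):
cmd = Bluetooth.HCI.Commands.read_local_name()
# ... send `cmd` to the controller, collect the return parameters ...
{:ok, name} = Bluetooth.HCI.Commands.receive_local_name(params)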
"""
alias Bluetooth.HCI
alias Bluetooth.AssignedNumbers
def read_local_name() do
HCI.create_command(0x03, 0x0014, <<>>)
end
def receive_local_name(<<0 :: size(8), long_name :: binary>>) do
# the local name is 0-terminated or a full 248 bytes long UTF8 string
[name, _] = String.split(long_name, <<0>>, parts: 2)
{:ok, name}
end
def receive_local_name(<<code :: integer-size(8), _ :: binary>>), do: {:error, code}
def write_local_name(name)
when is_binary(name) and :erlang.byte_size(name) == 248 do
HCI.create_command(0x03, 0x0015, name)
end
def write_local_name(name)
when is_binary(name) and :erlang.byte_size(name) < 248 do
HCI.create_command(0x03, 0x0015, name <> <<0>>)
end
def read_local_version_info() do
HCI.create_command(0x04, 0x01, <<>>)
end
def receive_local_version_info(params) do
<<code :: integer-size(8),
hci_version :: integer-size(8),
hci_revision :: integer-little-size(16),
pal_version :: integer-size(8),
manufacturer :: integer-little-size(16),
pal_subversion :: integer-little-size(16)
>> = params
if (code != 0) do
{:error, code}
else
{:ok, %{hci_version_code: hci_version,
hci_version: AssignedNumbers.version(hci_version),
hci_revision: hci_revision,
pal_version_code: pal_version,
pal_version: AssignedNumbers.version(pal_version),
manufacturer_uuid: manufacturer,
manufacturer: AssignedNumbers.company_name(manufacturer),
pal_subversion: pal_subversion
}}
end
end
@doc """
Reads the BD address or the LE public address from the controller
"""
def read_bd_address() do
HCI.create_command(0x04, 0x0009, <<>>)
end
def receive_bd_address(<<0x00, addr :: binary-size(6)>>), do: {:ok, addr}
def receive_bd_address(<<code :: unsigned-integer-size(8), _rest::binary>>), do: {:error, code}
end
|
lib/bluetooth/hci/command.ex
| 0.603231
| 0.456834
|
command.ex
|
starcoder
|
alias Cuda.Graph
alias Cuda.Graph.Node
alias Cuda.Graph.Pin
defprotocol Cuda.Graph.NodeProto do
@doc """
Returns pin by its id
"""
@spec pin(node :: Node.t, id :: Graph.id) :: Pin.t | nil
def pin(node, id)
@doc """
Returns a list of pins of specified type
"""
@spec pins(node :: Node.t, type :: Pin.type | [Pin.type]) :: [Pin.t]
def pins(node, type \\ nil)
@spec assign(node :: struct, key :: atom, value :: any) :: struct
def assign(node, key, value)
@spec assign(node :: struct, key :: map | keyword) :: struct
def assign(node, assigns)
end
defprotocol Cuda.Graph.GraphProto do
@spec add(graph :: Graph.t, node :: Node.t) :: Graph.t
def add(graph, node)
@doc """
Replaces node in the graph.
If the replacement node has the same id as the node being replaced, you can
call this function with two arguments - the graph and the replacement node.
If the id of the node to replace differs from the replacement node's id, pass
the id of the node to replace as the second argument and the replacement node
as the third argument.
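For example (the graph and node values are illustrative):
# replaces the node whose id matches `node.id`
replace(graph, node)
# replaces the node with id `:a` by `node`
replace(graph, :a, node)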
"""
@spec replace(graph :: Graph.t, node :: Node.t) :: Graph.t
@spec replace(graph :: Graph.t, id :: Graph.id | [Graph.id], node :: Node.t) :: Graph.t
def replace(graph, node)
def replace(graph, id, node)
@doc """
Returns node in the graph by its name or path (a list of names)
"""
@spec node(graph :: Graph.t, id :: Graph.id | [Graph.id]) :: Node.t
def node(graph, id)
@doc """
Returns pin of link specification. It can be a pin of graph itself or a pin
of child node
"""
@spec link_spec_pin(graph :: Graph.t, link_spec :: Graph.link_spec) :: Pin.t
def link_spec_pin(graph, link_spec)
@doc """
Returns a node of link specification. It can be a graph itself or child node
"""
@spec link_spec_node(graph :: Graph.t, link_spec :: Graph.link_spec) :: Node.t | Graph.t
def link_spec_node(graph, link_spec)
end
defprotocol Cuda.Graph.Factory do
@doc """
Creates a new evaluation node
"""
@spec new(node :: struct, id :: Graph.id, module :: atom, opts :: keyword, env :: Cuda.Env.t) :: struct
def new(node, id, module, opts \\ [], env \\ [])
end
defimpl Cuda.Graph.NodeProto, for: Any do
def pin(%{pins: pins}, id) do
pins |> Enum.find(fn
%Pin{id: ^id} -> true
_ -> false
end)
end
def pin(_, _), do: nil
def pins(%{pins: pins}, nil), do: pins
def pins(node, types) when is_list(types) do
Enum.reduce(types, [], &(&2 ++ pins(node, &1)))
end
def pins(%{pins: pins}, type) do
pins |> Enum.filter(fn
%Pin{type: ^type} -> true
_ -> false
end)
end
def assign(%{assigns: assigns} = node, key, value) do
%{node | assigns: Map.put(assigns, key, value)}
end
def assign(%{assigns: assigns} = node, data) do
data = data |> Enum.into(%{})
%{node | assigns: Map.merge(assigns, data)}
end
end
defimpl Cuda.Graph.GraphProto, for: Any do
require Cuda
import Cuda, only: [compile_error: 1]
def add(%{nodes: nodes} = graph, %{id: id} = node) do
with nil <- node(graph, id) do
%{graph | nodes: [node | nodes]}
else
_ -> compile_error("Node with id `#{id}` is already in the graph")
end
end
def replace(%{nodes: nodes} = graph, %{id: id} = node) do
nodes = nodes |> Enum.map(fn
%{id: ^id} -> node
x -> x
end)
%{graph | nodes: nodes}
end
def replace(%{nodes: _} = graph, [], node), do: replace(graph, node)
def replace(%{id: src}, [], %{id: dst} = node) when src == dst, do: node
def replace(graph, [id | path], node) do
with %{} = child <- Cuda.Graph.GraphProto.node(graph, id) do
replace(graph, replace(child, path, node))
end
end
def replace(%{nodes: nodes} = graph, id, node) do
nodes = nodes |> Enum.map(fn
%{id: ^id} -> node
x -> x
end)
%{graph | nodes: nodes}
end
def node(_, []), do: nil
def node(%{nodes: _} = graph, [id]), do: node(graph, id)
def node(%{nodes: _} = graph, [id | path]) do
with %{} = child <- Cuda.Graph.GraphProto.node(graph, id) do
Cuda.Graph.GraphProto.node(child, path)
end
end
def node(%{nodes: nodes}, id) do
nodes |> Enum.find(fn
%{id: ^id} -> true
_ -> false
end)
end
def node(_, _), do: nil
def link_spec_pin(graph, {:__self__, pin}) do
Cuda.Graph.NodeProto.pin(graph, pin)
end
def link_spec_pin(graph, {node, pin}) do
with %{} = node <- node(graph, node) do
Cuda.Graph.NodeProto.pin(node, pin)
end
end
def link_spec_node(graph, {:__self__, _}) do
graph
end
def link_spec_node(graph, {node, _}) do
node(graph, node)
end
end
|
lib/cuda/graph/protocols.ex
| 0.86997
| 0.472501
|
protocols.ex
|
starcoder
|
defmodule Cldr.Calendar.ISOWeek do
import Cldr.Calendar,
only: [iso_days_from_date: 1, date_from_iso_days: 2, add: 2, day_of_year: 1]
@doc """
Returns the date of the first day of the first week of the year that includes
the provided `date`.
This conforms with the ISO standard definition of when the first week of the year
begins:
* If 1 January is on a Monday, Tuesday, Wednesday or Thursday, it is in week 01.
* If 1 January is on a Friday, it is part of week 53 of the previous year;
* If on a Saturday, it is part of week 52 (or 53 if the previous Gregorian year was a leap year)
* If on a Sunday, it is part of week 52 of the previous year.
If a date is provided (as opposed to just a year), we also need to make sure that the
first week starts on or before the supplied date.
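For example, week 01 of the ISO year 2019 starts on Monday, 31 December 2018
(the return value shape below is illustrative):
first_week_of_year(2019)
#=> 2018-12-31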
"""
def first_week_of_year(%{year: year, calendar: calendar} = date) do
estimate = first_week_of_year(year, calendar)
if Date.compare(estimate, date) in [:lt, :eq] do
estimate
else
first_week_of_year(year - 1, calendar)
end
end
def first_week_of_year(year, calendar \\ Calendar.ISO) when is_integer(year) do
new_year = %{year: year, month: 1, day: 1, calendar: calendar}
{days, _fraction} = iso_days_from_date(new_year)
case Date.day_of_week(new_year) do
day when day in 1..4 ->
date_from_iso_days({days - day + 1, {0, 1}}, calendar)
day when day in 5..7 ->
date_from_iso_days({days - day + 1 + 7, {0, 1}}, calendar)
end
end
@doc """
The last week of the ISO week-numbering year, i.e. the 52nd or 53rd one, is
the week before week 01. This week's properties are:
* It has the year's last Thursday in it.
* It is the last week with a majority (4 or more) of its days in December.
* Its middle day, Thursday, falls in the ending year.
* Its last day is the Sunday nearest to 31 December.
* It has 28 December in it. Hence the earliest possible last week
extends from Monday 22 December to Sunday 28 December, the latest possible
last week extends from Monday 28 December to Sunday 3 January (next gregorian year).
* If 31 December is on a Monday, Tuesday or Wednesday, it is in week 01 of the
next year. If it is on a Thursday, it is in week 53 of the year just ending;
if on a Friday it is in week 52 (or 53 if the year just ending is a leap year);
if on a Saturday or Sunday, it is in week 52 of the year just ending.
If a date is provided (as opposed to just a year), we also need to make sure that the
last week accommodates the supplied date.
"""
def last_week_of_year(%{year: year, calendar: calendar} = date) do
estimate = last_week_of_year(year - 1, calendar)
if Date.compare(add(estimate, 6), date) in [:gt, :eq] do
estimate
else
last_week_of_year(year + 1, calendar)
end
end
def last_week_of_year(year, calendar \\ Calendar.ISO) when is_integer(year) do
end_of_year = %{year: year, month: 12, day: 31, calendar: calendar}
{days, _fraction} = iso_days_from_date(end_of_year)
case Date.day_of_week(end_of_year) do
day when day in 1..3 ->
date_from_iso_days({days - day - 6, {0, 1}}, calendar)
day when day in 4..7 ->
date_from_iso_days({days - day + 1, {0, 1}}, calendar)
end
end
def first_day_of_year(year) do
first_week_of_year(year)
end
def last_day_of_year(year) do
year
|> last_week_of_year
|> add(6)
end
def year_number(%{year: year, month: _month, day: _day} = date) do
first_day = first_day_of_year(date)
last_day = last_day_of_year(date)
cond do
first_day.month == 12 -> last_day.year
last_day.month == 1 -> first_day.year
true -> year
end
end
def year_range(date) do
%Date.Range{first: first_day_of_year(date), last: last_day_of_year(date)}
end
@doc """
Returns the week of the year for the given date.
Note that for some calendars (like `Calendar.ISO`), the first week
  of the year may not be the week that includes January 1st. Therefore,
  for some dates near the start or end of the year, the week number
may refer to a date in the following or previous Gregorian year.
## Examples
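
  Illustrative (assuming `Cldr.Calendar.day_of_year/1` accepts a `Date`
  struct):

      iex> Cldr.Calendar.ISOWeek.week_of_year(~D[2017-01-01])
      52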
"""
def week_of_year(%{year: year, month: _month, day: _day, calendar: _calendar} = date) do
week = div(day_of_year(date) - Date.day_of_week(date) + 10, 7)
    cond do
      week < 1 -> weeks_in_year(year - 1)
      week > weeks_in_year(year) -> 1
      true -> week
    end
end
@doc """
Returns the number of weeks in a year
## Examples
      iex> Cldr.Calendar.ISOWeek.weeks_in_year 2008
      52

      iex> Cldr.Calendar.ISOWeek.weeks_in_year 2009
      53

      iex> Cldr.Calendar.ISOWeek.weeks_in_year 2017
      52
"""
def weeks_in_year(%{year: year}) do
if leap_mod(year) == 4 or leap_mod(year - 1) == 3, do: 53, else: 52
end
  def weeks_in_year(year) when is_integer(year) do
    weeks_in_year(year, Calendar.ISO)
  end

  def weeks_in_year(year, calendar) when is_integer(year) do
    weeks_in_year(%{year: year, month: 1, day: 1, calendar: calendar})
  end
defp leap_mod(year) do
rem(year + div(year, 4) - div(year, 100) + div(year, 400), 7)
end
end
|
lib/cldr/calendar/iso_week.ex
| 0.813387
| 0.797004
|
iso_week.ex
|
starcoder
|
defmodule QRCode.ErrorCorrection do
@moduledoc """
Error correction code words and block information.
"""
alias QRCode.{QR, Polynom}
alias QRCode.GeneratorPolynomial, as: GP
import QRCode.QR, only: [level: 1, version: 1]
  @type groups() :: {[[byte()], ...], [[byte()]]}
@type codewords() :: groups()
@type t() :: %__MODULE__{
          ec_codewords_per_block: ExMaybe.t(integer()),
blocks_in_group1: ExMaybe.t(integer()),
codewords_per_block_in_group1: ExMaybe.t(integer()),
blocks_in_group2: ExMaybe.t(integer()),
codewords_per_block_in_group2: ExMaybe.t(integer()),
groups: ExMaybe.t(groups()),
codewords: ExMaybe.t(codewords())
}
  defstruct ec_codewords_per_block: nil,
blocks_in_group1: nil,
codewords_per_block_in_group1: nil,
blocks_in_group2: nil,
codewords_per_block_in_group2: nil,
groups: nil,
codewords: nil
@ecc_table [
[
low: {7, 1, 19, 0, 0},
medium: {10, 1, 16, 0, 0},
quartile: {13, 1, 13, 0, 0},
high: {17, 1, 9, 0, 0}
],
[
low: {10, 1, 34, 0, 0},
medium: {16, 1, 28, 0, 0},
quartile: {22, 1, 22, 0, 0},
high: {28, 1, 16, 0, 0}
],
[
low: {15, 1, 55, 0, 0},
medium: {26, 1, 44, 0, 0},
quartile: {18, 2, 17, 0, 0},
high: {22, 2, 13, 0, 0}
],
[
low: {20, 1, 80, 0, 0},
medium: {18, 2, 32, 0, 0},
quartile: {26, 2, 24, 0, 0},
high: {16, 4, 9, 0, 0}
],
[
low: {26, 1, 108, 0, 0},
medium: {24, 2, 43, 0, 0},
quartile: {18, 2, 15, 2, 16},
high: {22, 2, 11, 2, 12}
],
[
low: {18, 2, 68, 0, 0},
medium: {16, 4, 27, 0, 0},
quartile: {24, 4, 19, 0, 0},
high: {28, 4, 15, 0, 0}
],
[
low: {20, 2, 78, 0, 0},
medium: {18, 4, 31, 0, 0},
quartile: {18, 2, 14, 4, 15},
high: {26, 4, 13, 1, 14}
],
[
low: {24, 2, 97, 0, 0},
medium: {22, 2, 38, 2, 39},
quartile: {22, 4, 18, 2, 19},
high: {26, 4, 14, 2, 15}
],
[
low: {30, 2, 116, 0, 0},
medium: {22, 3, 36, 2, 37},
quartile: {20, 4, 16, 4, 17},
high: {24, 4, 12, 4, 13}
],
[
low: {18, 2, 68, 2, 69},
medium: {26, 4, 43, 1, 44},
quartile: {24, 6, 19, 2, 20},
high: {28, 6, 15, 2, 16}
],
[
low: {20, 4, 81, 0, 0},
medium: {30, 1, 50, 4, 51},
quartile: {28, 4, 22, 4, 23},
high: {24, 3, 12, 8, 13}
],
[
low: {24, 2, 92, 2, 93},
medium: {22, 6, 36, 2, 37},
quartile: {26, 4, 20, 6, 21},
high: {28, 7, 14, 4, 15}
],
[
low: {26, 4, 107, 0, 0},
medium: {22, 8, 37, 1, 38},
quartile: {24, 8, 20, 4, 21},
high: {22, 12, 11, 4, 12}
],
[
low: {30, 3, 115, 1, 116},
medium: {24, 4, 40, 5, 41},
quartile: {20, 11, 16, 5, 17},
high: {24, 11, 12, 5, 13}
],
[
low: {22, 5, 87, 1, 88},
medium: {24, 5, 41, 5, 42},
quartile: {30, 5, 24, 7, 25},
high: {24, 11, 12, 7, 13}
],
[
low: {24, 5, 98, 1, 99},
medium: {28, 7, 45, 3, 46},
quartile: {24, 15, 19, 2, 20},
high: {30, 3, 15, 13, 16}
],
[
low: {28, 1, 107, 5, 108},
medium: {28, 10, 46, 1, 47},
quartile: {28, 1, 22, 15, 23},
high: {28, 2, 14, 17, 15}
],
[
low: {30, 5, 120, 1, 121},
medium: {26, 9, 43, 4, 44},
quartile: {28, 17, 22, 1, 23},
high: {28, 2, 14, 19, 15}
],
[
low: {28, 3, 113, 4, 114},
medium: {26, 3, 44, 11, 45},
quartile: {26, 17, 21, 4, 22},
high: {26, 9, 13, 16, 14}
],
[
low: {28, 3, 107, 5, 108},
medium: {26, 3, 41, 13, 42},
quartile: {30, 15, 24, 5, 25},
high: {28, 15, 15, 10, 16}
],
[
low: {28, 4, 116, 4, 117},
medium: {26, 17, 42, 0, 0},
quartile: {28, 17, 22, 6, 23},
high: {30, 19, 16, 6, 17}
],
[
low: {28, 2, 111, 7, 112},
medium: {28, 17, 46, 0, 0},
quartile: {30, 7, 24, 16, 25},
high: {24, 34, 13, 0, 0}
],
[
low: {30, 4, 121, 5, 122},
medium: {28, 4, 47, 14, 48},
quartile: {30, 11, 24, 14, 25},
high: {30, 16, 15, 14, 16}
],
[
low: {30, 6, 117, 4, 118},
medium: {28, 6, 45, 14, 46},
quartile: {30, 11, 24, 16, 25},
high: {30, 30, 16, 2, 17}
],
[
low: {26, 8, 106, 4, 107},
medium: {28, 8, 47, 13, 48},
quartile: {30, 7, 24, 22, 25},
high: {30, 22, 15, 13, 16}
],
[
low: {28, 10, 114, 2, 115},
medium: {28, 19, 46, 4, 47},
quartile: {28, 28, 22, 6, 23},
high: {30, 33, 16, 4, 17}
],
[
low: {30, 8, 122, 4, 123},
medium: {28, 22, 45, 3, 46},
quartile: {30, 8, 23, 26, 24},
high: {30, 12, 15, 28, 16}
],
[
low: {30, 3, 117, 10, 118},
medium: {28, 3, 45, 23, 46},
quartile: {30, 4, 24, 31, 25},
high: {30, 11, 15, 31, 16}
],
[
low: {30, 7, 116, 7, 117},
medium: {28, 21, 45, 7, 46},
quartile: {30, 1, 23, 37, 24},
high: {30, 19, 15, 26, 16}
],
[
low: {30, 5, 115, 10, 116},
medium: {28, 19, 47, 10, 48},
quartile: {30, 15, 24, 25, 25},
high: {30, 23, 15, 25, 16}
],
[
low: {30, 13, 115, 3, 116},
medium: {28, 2, 46, 29, 47},
quartile: {30, 42, 24, 1, 25},
high: {30, 23, 15, 28, 16}
],
[
low: {30, 17, 115, 0, 0},
medium: {28, 10, 46, 23, 47},
quartile: {30, 10, 24, 35, 25},
high: {30, 19, 15, 35, 16}
],
[
low: {30, 17, 115, 1, 116},
medium: {28, 14, 46, 21, 47},
quartile: {30, 29, 24, 19, 25},
high: {30, 11, 15, 46, 16}
],
[
low: {30, 13, 115, 6, 116},
medium: {28, 14, 46, 23, 47},
quartile: {30, 44, 24, 7, 25},
high: {30, 59, 16, 1, 17}
],
[
low: {30, 12, 121, 7, 122},
medium: {28, 12, 47, 26, 48},
quartile: {30, 39, 24, 14, 25},
high: {30, 22, 15, 41, 16}
],
[
low: {30, 6, 121, 14, 122},
medium: {28, 6, 47, 34, 48},
quartile: {30, 46, 24, 10, 25},
high: {30, 2, 15, 64, 16}
],
[
low: {30, 17, 122, 4, 123},
medium: {28, 29, 46, 14, 47},
quartile: {30, 49, 24, 10, 25},
high: {30, 24, 15, 46, 16}
],
[
low: {30, 4, 122, 18, 123},
medium: {28, 13, 46, 32, 47},
quartile: {30, 48, 24, 14, 25},
high: {30, 42, 15, 32, 16}
],
[
low: {30, 20, 117, 4, 118},
medium: {28, 40, 47, 7, 48},
quartile: {30, 43, 24, 22, 25},
high: {30, 10, 15, 67, 16}
],
[
low: {30, 19, 118, 6, 119},
medium: {28, 18, 47, 31, 48},
quartile: {30, 34, 24, 34, 25},
high: {30, 20, 15, 61, 16}
]
]
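
  @doc """
  Returns the total number of data codewords for the QR code's version and
  error correction level, as given by the characteristics table above.

  For example, version 1 at level `:low` uses one block of 19 data codewords,
  so this returns `19` (illustrative; assumes a `%QR{}` with `version: 1` and
  `ecc_level: :low`).
  """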
@spec total_data_codewords(QR.t()) :: integer()
def total_data_codewords(%QR{version: version, ecc_level: level})
when version(version) and level(level) do
version
|> get_ecc_row(level)
|> compute_total_data_codewords()
end
@spec put(QR.t()) :: QR.t()
def put(%QR{encoded: data, version: version, ecc_level: level} = qr)
when version(version) and level(level) do
%{
qr
| ecc:
%__MODULE__{}
|> put_info(version, level)
|> put_groups(data)
|> put_codewords()
}
end
defp put_info(%__MODULE__{} = ecc, version, level) do
    {ec_codewords_per_block, blocks_in_group1, codewords_in_group1, blocks_in_group2,
     codewords_in_group2} = get_ecc_row(version, level)
%{
ecc
      | ec_codewords_per_block: ec_codewords_per_block,
blocks_in_group1: blocks_in_group1,
codewords_per_block_in_group1: codewords_in_group1,
blocks_in_group2: blocks_in_group2,
codewords_per_block_in_group2: codewords_in_group2
}
end
defp put_groups(%__MODULE__{} = ecc, data) do
bytes_in_group1 = ecc.blocks_in_group1 * ecc.codewords_per_block_in_group1
bytes_in_group2 = ecc.blocks_in_group2 * ecc.codewords_per_block_in_group2
<<data_group1::binary-size(bytes_in_group1), data_group2::binary-size(bytes_in_group2)>> =
data
%{
ecc
| groups:
{group(data_group1, ecc.blocks_in_group1, ecc.codewords_per_block_in_group1),
group(data_group2, ecc.blocks_in_group2, ecc.codewords_per_block_in_group2)}
}
end
  defp put_codewords(%__MODULE__{groups: {g1, g2}, ec_codewords_per_block: codewords} = ecc) do
%{ecc | codewords: {compute_codewords(g1, codewords), compute_codewords(g2, codewords)}}
end
defp get_ecc_row(version, level) do
@ecc_table
|> Enum.at(version - 1)
|> Keyword.get(level)
end
defp compute_codewords(group, codewords) do
divisor = GP.create(codewords)
Enum.map(group, &Polynom.div(&1, divisor))
end
defp compute_total_data_codewords(
{_, blocks_in_group1, codewords_in_group1, blocks_in_group2, codewords_in_group2}
) do
blocks_in_group1 * codewords_in_group1 + blocks_in_group2 * codewords_in_group2
end
defp group("", 0, _codewords) do
[]
end
defp group(data, blocks, codewords) do
<<block_data::binary-size(codewords), rest::binary>> = data
[block(block_data, codewords) | group(rest, blocks - 1, codewords)]
end
defp block("", 0) do
[]
end
defp block(<<codeword::size(8), rest::binary>>, codewords) do
[codeword | block(rest, codewords - 1)]
end
end
|
lib/qr_code/error_correction.ex
| 0.755366
| 0.647645
|
error_correction.ex
|
starcoder
|
defmodule ExRut do
@moduledoc """
An Elixir library to validate and format chilean ID/TAX number ('RUN/RUT')
"""
@regex_rut ~r/^((?'num'\d{1,3}(?:([\.\,]?)\d{1,3}){2})(-?)(?'dv'[\dkK]))$/
@defaults [
delimiter: ".",
show_dv: true
]
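
  @doc """
  Returns `true` if the given RUT (as a string or an integer) carries a valid
  check digit, `false` otherwise.

  ## Examples

      iex> ExRut.valid?("11.111.111-1")
      true

      iex> ExRut.valid?("11.111.111-2")
      false
  """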
def valid?(rut) when is_binary(rut) do
case get_rut_values(rut) do
{:ok, number, dv} ->
calculated_dv = calculate_dv_value(number)
calculated_dv == dv
{:error, :invalid_value} ->
false
end
end
def valid?(rut) when is_integer(rut) do
rut
|> Integer.to_string
|> valid?
end
def valid?(rut) when is_nil(rut), do: false
def valid?(_), do: false
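
  @doc """
  Calculates the check digit ("dígito verificador") for the given RUT number
  using the modulo 11 algorithm.

  ## Examples

      iex> ExRut.calculate_dv(11111111)
      {:ok, "1"}
  """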
def calculate_dv(number) when is_integer(number) do
{:ok, calculate_dv_value(number)}
end
def calculate_dv(number) when is_binary(number) do
number = clean_rut(number)
case Integer.parse(number) do
{number_as_int, ""} ->
{:ok, calculate_dv_value(number_as_int)}
{_num_as_int, _} ->
{:error, :invalid_value}
:error ->
{:error, :invalid_value}
end
end
def calculate_dv!(number) do
case calculate_dv(number) do
{:ok, dv} ->
dv
{:error, :invalid_value} ->
raise "Invalid Value"
end
end
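
  @doc """
  Formats a valid RUT, grouping the number with the configured `:delimiter`
  and appending the check digit unless `show_dv: false` is given.

  ## Examples

      iex> ExRut.format("11111111-1")
      {:ok, "11.111.111-1"}

      iex> ExRut.format("11111111-1", delimiter: ",")
      {:ok, "11,111,111-1"}
  """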
def format(rut, options \\ [])
def format(rut, options) when is_binary(rut) do
if valid?(rut) do
options = Keyword.merge(@defaults, options)
delimiter = Keyword.get(options, :delimiter)
show_dv = Keyword.get(options, :show_dv)
{:ok, num, dv} = get_rut_values(rut)
dv = if show_dv, do: dv, else: nil
dash = if show_dv, do: "-", else: ""
formatted_rut =
[delimit_integer(num, delimiter), dv]
|> Enum.reject(&is_nil/1)
|> Enum.join(dash)
{:ok, formatted_rut}
else
{:error, :invalid_value}
end
end
def format(rut, options) when is_integer(rut) do
format(Integer.to_string(rut), options)
end
def format!(number, options \\ []) do
case format(number, options) do
{:ok, formatted_rut} ->
formatted_rut
{:error, :invalid_value} ->
raise "Invalid Value"
end
end
defp calculate_dv_value(number) when is_integer(number) do
calculated_dv =
number
|> split_integer
|> Enum.reverse
|> Enum.with_index(2)
|> Enum.map(fn {n, index} ->
if index > 7 do
{n, (index-6)}
else
{n, index}
end
end)
|> Enum.reduce(0, fn {n, index}, acc ->
acc + (n * index)
end)
|> Kernel.rem(11)
|> Kernel.-(11)
|> Kernel.abs
cond do
calculated_dv == 11 -> "0"
calculated_dv == 10 -> "k"
true -> Integer.to_string(calculated_dv)
end
end
defp get_rut_values(rut) do
rut = clean_rut(rut)
cond do
# valid format
Regex.match?(@regex_rut, rut) ->
get_clean_rut_values(@regex_rut, rut)
# invalid format
true ->
{:error, :invalid_value}
end
end
defp get_clean_rut_values(regex, rut) do
rut = clean_rut(rut)
%{"num" => number, "dv" => dv} = Regex.named_captures(regex, rut)
dv = String.downcase(dv)
case Integer.parse(number) do
{number_as_int, ""} ->
{:ok, number_as_int, dv}
:error ->
        {:error, :invalid_value}
end
end
defp clean_rut(rut) do
rut
|> String.replace(".", "")
|> String.replace(",", "")
end
defp split_integer(number) when is_integer(number) do
number
|> Kernel.to_string()
|> String.split("", trim: true)
|> Enum.map(fn int_string ->
Integer.parse(int_string)
|> case do
{int, _} ->
int
_ ->
0
end
end)
end
defp delimit_integer(number, delimiter) when is_binary(number) do
case Integer.parse(number) do
{number_as_int, ""} -> delimit_integer(number_as_int, delimiter)
:error -> number
end
end
defp delimit_integer(number, delimiter) when is_integer(number) do
integer =
abs(number)
|> Integer.to_charlist()
|> :lists.reverse()
|> delimit_integer(delimiter, [])
Enum.join([integer])
end
defp delimit_integer([a, b, c, d | tail], delimiter, acc) do
delimit_integer([d | tail], delimiter, [delimiter, c, b, a | acc])
end
defp delimit_integer(list, _, acc) do
:lists.reverse(list) ++ acc
end
end
|
lib/ex_rut.ex
| 0.55941
| 0.585694
|
ex_rut.ex
|
starcoder
|
defmodule Day25 do
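  @moduledoc """
  Advent of Code 2017, day 25: parses a Turing machine blueprint from the
  puzzle input, runs the machine for the requested number of steps, and
  computes the diagnostic checksum (the number of ones left on the tape).
  """
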
def read_file(path) do
{:ok, input} = File.read(path)
input
|> parse_input
end
def parse_input(input) do
[init_state] = Regex.run(~r{Begin in state (\w+)\.}, input, capture: :all_but_first)
[steps] = Regex.run(~r{Perform a diagnostic checksum after (\d+) steps\.}, input, capture: :all_but_first)
rules = Regex.scan(
~r{In state (\w+):
If the current value is 0:
- Write the value (\d)\.
- Move one slot to the (\w+)\.
- Continue with state (\w+)\.
If the current value is 1:
- Write the value (\d)\.
- Move one slot to the (\w+)\.
- Continue with state (\w+)\.},
input,
capture: :all_but_first)
%{
:initial_state => init_state,
:steps => steps
|> String.to_integer,
:rules => rules
|> Enum.map(
fn [state, if_0_write, if_0_move, if_0_next, if_1_write, if_1_move, if_1_next] ->
%{
:state => state,
0 => %{
:write => String.to_integer(if_0_write),
:move => move(if_0_move),
:next_state => if_0_next
},
1 => %{
:write => String.to_integer(if_1_write),
:move => move(if_1_move),
:next_state => if_1_next
}
}
end)
|> Enum.reduce(%{}, fn rule, map -> map |> Map.put(rule[:state], rule) end)
}
end
def move("left"), do: -1
def move("right"), do: 1
def step(acc, input) do
current_value = Map.get(acc, acc[:index], 0)
acc
|> Map.put(acc[:index], input[:rules][acc[:state]][current_value][:write])
|> Map.update(:index, 0, &(&1 + input[:rules][acc[:state]][current_value][:move]))
|> Map.put(:state, input[:rules][acc[:state]][current_value][:next_state])
end
def checksum(input) do
    [state] =
      %{index: 0, state: input[:initial_state]}
      |> Stream.unfold(fn acc ->
        val = step(acc, input)
        {val, val}
      end)
      |> Stream.drop(input[:steps] - 1)
      |> Stream.take(1)
      |> Enum.to_list()
Map.drop(state, [:index, :state])
|> Map.values
|> Enum.reduce(0, &(&1 + &2))
end
end
|
lib/day25.ex
| 0.539226
| 0.576244
|
day25.ex
|
starcoder
|
defmodule Geometry.PointZ do
@moduledoc """
A point struct, representing a 3D point.
"""
import Geometry.Guards
alias Geometry.{GeoJson, Hex, PointZ, WKB, WKT}
defstruct [:coordinate]
@blank " "
@empty %{
{:ndr, :hex} => "000000000000F87F000000000000F87F000000000000F87F",
{:xdr, :hex} => "7FF80000000000007FF80000000000007FF8000000000000",
{:ndr, :binary} => Hex.to_binary("000000000000F87F000000000000F87F000000000000F87F"),
{:xdr, :binary} => Hex.to_binary("7FF80000000000007FF80000000000007FF8000000000000")
}
@type t :: %PointZ{coordinate: Geometry.coordinate() | nil}
@doc """
Creates an empty `PointZ`.
## Examples
iex> PointZ.new()
%PointZ{coordinate: nil}
"""
@spec new :: t()
def new, do: %PointZ{}
@doc """
Creates a `PointZ` from the given `coordinate`.
## Examples
iex> PointZ.new([1.5, -2.1, 3])
%PointZ{coordinate: [1.5, -2.1, 3]}
"""
@spec new(Geometry.coordinate()) :: t()
def new([x, y, z] = coordinate) when is_coordinate(x, y, z) do
%PointZ{coordinate: coordinate}
end
@doc """
Creates a `PointZ` from the given `x`, `y`, and `z`.
## Examples
iex> PointZ.new(-1.1, 2.2, 3)
%PointZ{coordinate: [-1.1, 2.2, 3]}
"""
@spec new(number(), number(), number()) :: t()
def new(x, y, z) when is_coordinate(x, y, z) do
%PointZ{coordinate: [x, y, z]}
end
@doc """
Returns `true` if the given `PointZ` is empty.
## Examples
iex> PointZ.empty?(PointZ.new())
true
iex> PointZ.empty?(PointZ.new(1, 2, 3))
false
"""
@spec empty?(t()) :: boolean
def empty?(%PointZ{coordinate: coordinate}), do: is_nil(coordinate)
@doc """
Creates a `PointZ` from the given coordinate.
## Examples
iex> PointZ.from_coordinates([[-1, 1, 1]])
%PointZ{coordinate: [-1, 1, 1]}
"""
@spec from_coordinates(Geometry.coordinate() | [nil, ...]) :: t()
def from_coordinates([[x, y, z] = coordinate]) when is_coordinate(x, y, z) do
%PointZ{coordinate: coordinate}
end
def from_coordinates([x, y, z] = coordinate) when is_coordinate(x, y, z) do
%PointZ{coordinate: coordinate}
end
def from_coordinates([nil, nil, nil]) do
%PointZ{}
end
@doc """
Returns the WKT representation for a `PointZ`. With option `:srid` an EWKT
representation with the SRID is returned.
## Examples
iex> PointZ.to_wkt(PointZ.new())
"Point Z EMPTY"
iex> PointZ.to_wkt(PointZ.new(1.1, 2.2, 3.3))
"Point Z (1.1 2.2 3.3)"
iex> PointZ.to_wkt(PointZ.new(1.1, 2.2, 3.3), srid: 4711)
"SRID=4711;Point Z (1.1 2.2 3.3)"
"""
@spec to_wkt(t(), opts) :: Geometry.wkt()
when opts: [srid: Geometry.srid()]
def to_wkt(%PointZ{coordinate: coordinate}, opts \\ []) do
WKT.to_ewkt(<<"Point Z ", to_wkt_point(coordinate)::binary()>>, opts)
end
@doc """
Returns an `:ok` tuple with the `PointZ` from the given WKT string. Otherwise
returns an `:error` tuple.
If the geometry contains an SRID the id is added to the tuple.
## Examples
iex> PointZ.from_wkt("Point Z (-5.1 7.8 9.9)")
{:ok, %PointZ{coordinate: [-5.1, 7.8, 9.9]}}
iex> PointZ.from_wkt("SRID=7219;Point Z (-5.1 7.8 9.9)")
{:ok, {%PointZ{coordinate: [-5.1, 7.8, 9.9]}, 7219}}
iex> PointZ.from_wkt("Point Z EMPTY")
{:ok, %PointZ{}}
"""
  @spec from_wkt(Geometry.wkt()) ::
          {:ok, t() | {t(), Geometry.srid()}} | Geometry.wkt_error()
def from_wkt(wkt), do: WKT.to_geometry(wkt, PointZ)
@doc """
The same as `from_wkt/1`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkt!(Geometry.wkt()) :: t() | {t(), Geometry.srid()}
def from_wkt!(wkt) do
case WKT.to_geometry(wkt, PointZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the GeoJSON term of a `PointZ`.
## Examples
iex> PointZ.to_geo_json(PointZ.new(1, 2, 3))
%{"type" => "Point", "coordinates" => [1, 2, 3]}
"""
@spec to_geo_json(t()) :: Geometry.geo_json_term()
def to_geo_json(%PointZ{coordinate: coordinate}) when not is_nil(coordinate) do
%{
"type" => "Point",
"coordinates" => coordinate
}
end
@doc """
Returns an `:ok` tuple with the `PointZ` from the given GeoJSON term.
Otherwise returns an `:error` tuple.
## Examples
iex> ~s({"type": "Point", "coordinates": [1.1, 2.2, 3.3]})
iex> |> Jason.decode!()
iex> |> PointZ.from_geo_json()
{:ok, %PointZ{coordinate: [1.1, 2.2, 3.3]}}
"""
@spec from_geo_json(Geometry.geo_json_term()) :: {:ok, t()} | Geometry.geo_json_error()
def from_geo_json(json), do: GeoJson.to_point(json, PointZ)
@doc """
The same as `from_geo_json/1`, but raises a `Geometry.Error` exception if it
fails.
"""
@spec from_geo_json!(Geometry.geo_json_term()) :: t()
def from_geo_json!(json) do
case GeoJson.to_point(json, PointZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc """
Returns the WKB representation for a `PointZ`.
With option `:srid` an EWKB representation with the SRID is returned.
The option `:endian` indicates whether `:xdr` big endian or `:ndr` little
endian is returned. The default is `:xdr`.
The `:mode` determines whether a hex-string or binary is returned. The default
is `:binary`.
## Examples
iex> PointZ.to_wkb(PointZ.new(), mode: :hex)
"00800000017FF80000000000007FF80000000000007FF8000000000000"
iex> PointZ.to_wkb(PointZ.new(), endian: :ndr, mode: :hex)
"0101000080000000000000F87F000000000000F87F000000000000F87F"
iex> PointZ.to_wkb(PointZ.new(1.1, 2.2, 3.3), endian: :xdr, mode: :hex)
"00800000013FF199999999999A400199999999999A400A666666666666"
iex> PointZ.to_wkb(PointZ.new(1.1, 2.2, 3.3), endian: :ndr, mode: :hex)
"01010000809A9999999999F13F9A999999999901406666666666660A40"
iex> PointZ.to_wkb(PointZ.new(1.1, 2.2, 3.3), srid: 4711, endian: :xdr, mode: :hex)
"00A0000001000012673FF199999999999A400199999999999A400A666666666666"
"""
@spec to_wkb(t(), opts) :: Geometry.wkb()
when opts: [endian: Geometry.endian(), srid: Geometry.srid(), mode: Geometry.mode()]
def to_wkb(%PointZ{coordinate: coordinate}, opts \\ []) do
endian = Keyword.get(opts, :endian, Geometry.default_endian())
srid = Keyword.get(opts, :srid)
mode = Keyword.get(opts, :mode, Geometry.default_mode())
to_wkb(coordinate, srid, endian, mode)
end
@doc """
Returns an `:ok` tuple with the `PointZ` from the given WKB string. Otherwise
returns an `:error` tuple.
If the geometry contains a SRID the id is added to the tuple.
The optional second argument determines if a `:hex`-string or a `:binary`
input is expected. The default is `:binary`.
## Examples
iex> PointZ.from_wkb(
...> "00800000017FF80000000000007FF80000000000007FF8000000000000",
...> :hex
...> )
{:ok, %PointZ{coordinate: nil}}
iex> PointZ.from_wkb(
...> "00800000013FF199999999999A400199999999999A400A666666666666",
...> :hex
...> )
{:ok, %PointZ{coordinate: [1.1, 2.2, 3.3]}}
iex> PointZ.from_wkb(
...> "01010000809A9999999999F13F9A999999999901406666666666660A40",
...> :hex
...> )
{:ok, %PointZ{coordinate: [1.1, 2.2, 3.3]}}
iex> PointZ.from_wkb(
...> "00A0000001000012673FF199999999999A400199999999999A400A666666666666",
...> :hex
...> )
{:ok, {%PointZ{coordinate: [1.1, 2.2, 3.3]}, 4711}}
"""
@spec from_wkb(Geometry.wkb(), Geometry.mode()) ::
{:ok, t() | {t(), Geometry.srid()}}
| Geometry.wkb_error()
def from_wkb(wkb, mode \\ :binary), do: WKB.to_geometry(wkb, mode, PointZ)
@doc """
The same as `from_wkb/2`, but raises a `Geometry.Error` exception if it fails.
"""
@spec from_wkb!(Geometry.wkb(), Geometry.mode()) :: t() | {t(), Geometry.srid()}
def from_wkb!(wkb, mode \\ :binary) do
case WKB.to_geometry(wkb, mode, PointZ) do
{:ok, geometry} -> geometry
error -> raise Geometry.Error, error
end
end
@doc false
@compile {:inline, to_wkt_coordinate: 1}
@spec to_wkt_coordinate(Geometry.coordinate()) :: String.t()
def to_wkt_coordinate([x, y, z]) do
<<
to_wkt_number(x)::binary(),
@blank,
to_wkt_number(y)::binary(),
@blank,
to_wkt_number(z)::binary()
>>
end
@compile {:inline, to_wkt_point: 1}
defp to_wkt_point(nil), do: "EMPTY"
defp to_wkt_point(coordinate), do: <<"(", to_wkt_coordinate(coordinate)::binary(), ")">>
@compile {:inline, to_wkt_number: 1}
defp to_wkt_number(num) when is_integer(num), do: Integer.to_string(num)
defp to_wkt_number(num) when is_float(num), do: Float.to_string(num)
@doc false
@compile {:inline, to_wkb: 4}
@spec to_wkb(
Geometry.coordinate() | nil,
Geometry.srid() | nil,
Geometry.endian(),
Geometry.mode()
) ::
binary()
def to_wkb(coordinate, srid, endian, mode) do
<<
WKB.byte_order(endian, mode)::binary(),
wkb_code(endian, not is_nil(srid), mode)::binary,
WKB.srid(srid, endian, mode)::binary(),
to_wkb_coordinate(coordinate, endian, mode)::binary
>>
end
@doc false
@compile {:inline, to_wkb_coordinate: 3}
@spec to_wkb_coordinate(coordinate, endian, mode) :: wkb
when coordinate: Geometry.coordinate() | nil,
endian: Geometry.endian(),
mode: Geometry.mode(),
wkb: Geometry.wkb()
def to_wkb_coordinate(nil, endian, mode), do: Map.fetch!(@empty, {endian, mode})
def to_wkb_coordinate([x, y, z], endian, mode) do
<<
to_wkb_number(x, endian, mode)::binary(),
to_wkb_number(y, endian, mode)::binary(),
to_wkb_number(z, endian, mode)::binary()
>>
end
@compile {:inline, to_wkb_number: 3}
defp to_wkb_number(num, endian, :hex), do: Hex.to_float_string(num, endian)
defp to_wkb_number(num, :xdr, :binary), do: <<num::big-float-size(64)>>
defp to_wkb_number(num, :ndr, :binary), do: <<num::little-float-size(64)>>
@compile {:inline, wkb_code: 3}
defp wkb_code(endian, srid?, :hex) do
case {endian, srid?} do
{:xdr, false} -> "80000001"
{:ndr, false} -> "01000080"
{:xdr, true} -> "A0000001"
{:ndr, true} -> "010000A0"
end
end
defp wkb_code(endian, srid?, :binary) do
case {endian, srid?} do
{:xdr, false} -> <<0x80000001::big-integer-size(32)>>
{:ndr, false} -> <<0x80000001::little-integer-size(32)>>
{:xdr, true} -> <<0xA0000001::big-integer-size(32)>>
{:ndr, true} -> <<0xA0000001::little-integer-size(32)>>
end
end
end
|
lib/geometry/point_z.ex
| 0.964996
| 0.777511
|
point_z.ex
|
starcoder
|
defmodule Square.BankAccounts do
@moduledoc """
Documentation for `Square.BankAccounts`.
"""
@doc """
Returns a list of `BankAccount` maps linked to a Square account.
For more information, see
[Bank Accounts API](https://developer.squareup.com/docs/docs/bank-accounts-api).
```
def list_bank_accounts(client, [
cursor: nil,
limit: nil,
location_id: nil
])
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `cursor` | `String` | Query, Optional | The pagination cursor returned by a previous call to this endpoint.<br>Use it in the next `ListBankAccounts` request to retrieve the next set <br>of results.<br><br>See the [Pagination](https://developer.squareup.com/docs/docs/working-with-apis/pagination) guide for more information. |
| `limit` | `Integer` | Query, Optional | Upper limit on the number of bank accounts to return in the response. <br>Currently, 1000 is the largest supported limit. You can specify a limit <br>of up to 1000 bank accounts. This is also the default limit. |
| `location_id` | `String` | Query, Optional | Location ID. You can specify this optional filter <br>to retrieve only the linked bank accounts belonging to a specific location. |
### Response Type
[`List Bank Accounts Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/list-bank-accounts-response.md)
### Example Usage
iex> Square.client |> Square.BankAccounts.list_bank_accounts()
"""
@spec list_bank_accounts(Tesla.Client.t(), list) :: {:error, any} | {:ok, Tesla.Env.t()}
def list_bank_accounts(client, params \\ []),
do: Tesla.get(client, "bank-accounts", query: params)
@doc """
Returns details of a [BankAccount](#type-bankaccount) identified by V1 bank account ID.
For more information, see
[Retrieve a bank account by using an ID issued by V1 Bank Accounts API](https://developer.squareup.com/docs/docs/bank-accounts-api#retrieve-a-bank-account-by-using-an-id-issued-by-the-v1-bank-accounts-api).
```
def get_bank_account_by_v1_id(client, v1_bank_account_id)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `v1_bank_account_id` | `String` | Template, Required | Connect V1 ID of the desired `BankAccount`. For more information, see <br>[Retrieve a bank account by using an ID issued by V1 Bank Accounts API](https://developer.squareup.com/docs/docs/bank-accounts-api#retrieve-a-bank-account-by-using-an-id-issued-by-v1-bank-accounts-api). |
### Response Type
[`Get Bank Account by V1 Id Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/get-bank-account-by-v1-id-response.md)
### Example Usage
iex> v1_bank_account_id = "v1_bank_account_id8"
iex> Square.client |> Square.BankAccounts.get_bank_account_by_v1_id(v1_bank_account_id)
"""
@spec get_bank_account_by_v1_id(Tesla.Client.t(), binary) ::
{:error, any} | {:ok, Tesla.Env.t()}
def get_bank_account_by_v1_id(client, v1_bank_account_id),
do: Tesla.get(client, "bank-accounts/by-v1-id/#{v1_bank_account_id}")
@doc """
Returns details of a [BankAccount](#type-bankaccount)
linked to a Square account. For more information, see
[Bank Accounts API](https://developer.squareup.com/docs/docs/bank-accounts-api).
```
def get_bank_account(client, bank_account_id)
```
### Parameters
| Parameter | Type | Tags | Description |
| --- | --- | --- | --- |
| `bank_account_id` | `String` | Template, Required | Square-issued ID of the desired `BankAccount`. |
### Response Type
[`Get Bank Account Response Map`](https://github.com/square/square-ruby-sdk/blob/master/doc/models/get-bank-account-response.md)
### Example Usage
iex> bank_account_id = "bank_account_id0"
      iex> Square.client |> Square.BankAccounts.get_bank_account(bank_account_id)
"""
  @spec get_bank_account(Tesla.Client.t(), binary) ::
          {:error, any} | {:ok, Tesla.Env.t()}
  def get_bank_account(client, bank_account_id),
do: Tesla.get(client, "bank-accounts/#{bank_account_id}")
end
|
lib/api/bank_accounts_api.ex
| 0.90697
| 0.889096
|
bank_accounts_api.ex
|
starcoder
|
defmodule Apoc.RSA.PublicKey do
@moduledoc """
Struct and set of functions for working with an RSA public key
For information on key formats in PKI see [PKI PEM overview](https://gist.github.com/awood/9338235)
or [RFC5912](https://tools.ietf.org/html/rfc5912)
See also [Erlang Public Key Records](http://erlang.org/doc/apps/public_key/public_key_records.html#rsa)
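
  ## Example

  A minimal usage sketch (assumes `public.pem` holds a PEM-encoded RSA
  public key):

      {:ok, key} = Apoc.RSA.PublicKey.load_pem(File.read!("public.pem"))
      {:ok, ciphertext} = Apoc.RSA.PublicKey.encrypt(key, "attack at dawn")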
"""
defstruct [:modulus, :public_exponent]
@type t :: %__MODULE__{
modulus: integer(),
public_exponent: integer()
}
@doc """
Encrypts a message with the given public key
(uses PKCS1-OAEP padding).
See `Apoc.RSA.encrypt/2`
"""
@spec encrypt(__MODULE__.t, binary()) :: {:ok, binary()} | :error
def encrypt(%__MODULE__{} = key, message) do
try do
ciphertext =
:rsa
|> :crypto.public_encrypt(message, to_erlang_type(key), :rsa_pkcs1_oaep_padding)
|> Apoc.encode()
{:ok, ciphertext}
rescue
_ -> :error
end
end
@doc """
Decrypts a message with the given public key
(uses standard PKCS1 padding as decryption using the public key is not sensitive).
See `Apoc.RSA.decrypt/2`
"""
@spec decrypt(__MODULE__.t, binary()) :: {:ok, binary()} | :error
def decrypt(%__MODULE__{} = key, ciphertext) do
try do
with {:ok, ctb} <- Apoc.decode(ciphertext) do
{:ok, :crypto.public_decrypt(:rsa, ctb, to_erlang_type(key), :rsa_pkcs1_padding)}
end
rescue
_ -> :error
end
end
@doc """
Loads a pem encoded public key certificate string.
"""
@spec load_pem(String.t) :: {:ok, __MODULE__.t} | {:error, String.t}
def load_pem(pem_str) do
with [enc_pkey] <- :public_key.pem_decode(pem_str),
         {:RSAPublicKey, n, e} <- :public_key.pem_entry_decode(enc_pkey) do
      {:ok, %__MODULE__{modulus: n, public_exponent: e}}
else
_ ->
{:error, "Not a public key"}
end
end
@doc """
Dumps a key into PEM format
"""
@spec dump_pem(__MODULE__.t) :: String.t
def dump_pem(%__MODULE__{modulus: n, public_exponent: e}) do
:SubjectPublicKeyInfo
|> :public_key.pem_entry_encode({:RSAPublicKey, n, e})
|> List.wrap
|> :public_key.pem_encode
end
def to_erlang_type(%__MODULE__{modulus: n, public_exponent: e}) do
[e, n]
end
defimpl Inspect do
import Inspect.Algebra
def inspect(key, opts) do
concat(["#Apoc.RSA.PublicKey<", to_doc(key.public_exponent, opts), ">"])
end
end
end
|
lib/apoc/rsa/public_key.ex
| 0.867176
| 0.601652
|
public_key.ex
|
starcoder
|
defmodule AppOptex.Client do
require Logger
@moduledoc """
Module responsible for comunication with AppOptics API.
"""
@doc """
Send an HTTP request to [AppOptics create API](https://docs.appoptics.com/api/?shell#create-a-measurement) with a list of measurements and tags. Returns the response from AppOptics API.
* appoptics_url - AppOptics API endpoint.
* token - AppOptics auth token.
* measurements - List of measurements to send. Each measurements is a map with a `name` and a `value` key.
* tags - A map of tags to send for the current measurement.
## Examples
iex> AppOptex.Client.send_measurements("https://api.appoptics.com/v1/measurements", "MY_TOKEN", [%{name: "my.mertric.name", value: 1}], %{"name" => "value"})
{:ok, %HTTPoison.Response{}}
"""
def send_measurements(appoptics_url, token, measurements, tags)
when is_list(measurements) and is_map(tags) do
payload = %{
tags: tags,
measurements: measurements
}
HTTPoison.post(appoptics_url, Poison.encode!(payload), [{"Content-Type", "application/json"}],
hackney: [basic_auth: {token, ""}]
)
end
@doc """
Read from [AppOptics retrieve measurement API](https://docs.appoptics.com/api/?shell#retrieve-a-measurement) given a query. Returns the AppOptex API response.
* `appoptics_url` - AppOptics API endpoint.
* `token` - AppOptics auth token.
* `metric_name` - Name of the metric to search.
* `resolution` - Defines the resolution to return the data to in seconds.
* `query` - map of query params. **Must** include either `duration` or `start_time`. Params include:
* `start_time` - Unix Time of where to start the time search from. This parameter is optional if duration is specified.
* `end_time` - Unix Time of where to end the search. This parameter is optional and defaults to current wall time.
    * `duration` - How far back to look in time, measured in seconds. This parameter can be used in combination with `end_time` to set a `start_time` N seconds back in time. It is an error to set `start_time`, `end_time`, and `duration` together.
## Examples
iex> AppOptex.Client.read_measurements("https://api.appoptics.com/v1/measurements", "MY_TOKEN", "my.other_metric", 60, %{duration: 999999})
%{
"attributes" => %{"created_by_ua" => "hackney/1.15.1"},
"links" => [],
"name" => "my.other_metric",
"resolution" => 60,
"series" => [
%{
"measurements" => [%{"time" => 1554667320, "value" => 5.0}],
"tags" => %{"my_tag" => "value"}
}
]
}
"""
  def read_measurements(
        appoptics_url,
        token,
        metric_name,
        resolution,
        %{duration: _, start_time: _} = params
      ),
      do: do_read_measurements(appoptics_url, token, metric_name, resolution, params)

  def read_measurements(appoptics_url, token, metric_name, resolution, %{start_time: _} = params),
    do: do_read_measurements(appoptics_url, token, metric_name, resolution, params)

  def read_measurements(appoptics_url, token, metric_name, resolution, %{duration: _} = params),
    do: do_read_measurements(appoptics_url, token, metric_name, resolution, params)
def read_measurements(_appoptics_url, _token, _metric_name, _resolution, _),
do: raise("Must provide either :duration or :start_time")
  defp do_read_measurements(appoptics_url, token, metric_name, resolution, params)
       when is_map(params) do
query =
params
|> Map.put(:resolution, resolution)
|> Stream.map(fn {k, v} -> "#{k}=#{v}" end)
|> Enum.join("&")
HTTPoison.get!(
"#{appoptics_url}/#{metric_name}?#{query}",
[{"Content-Type", "application/json"}],
hackney: [basic_auth: {token, ""}]
)
|> extract_body()
end
defp extract_body(%HTTPoison.Response{body: body, status_code: 200}),
do: body |> Poison.decode!()
defp extract_body(%HTTPoison.Response{body: body}), do: body
defp extract_body(_), do: "Unable to extract body"
end
|
lib/app_optex/client.ex
| 0.897153
| 0.452838
|
client.ex
|
starcoder
|
defmodule NervesHubLink.Client do
@moduledoc """
A behaviour module for customizing if and when firmware updates get applied.
By default NervesHubLink applies updates as soon as it knows about them from the
  NervesHubLink server and doesn't give any warning before rebooting. This lets
  devices hook into the decision-making process and monitor the update's
progress.
# Example
```elixir
defmodule MyApp.NervesHubLinkClient do
@behaviour NervesHubLink.Client
# May return:
# * `:apply` - apply the action immediately
# * `:ignore` - don't apply the action, don't ask again.
# * `{:reschedule, timeout_in_milliseconds}` - call this function again later.
@impl NervesHubLink.Client
def update_available(data) do
if SomeInternalAPI.is_now_a_good_time_to_update?(data) do
:apply
else
{:reschedule, 60_000}
end
end
end
```
To have NervesHubLink invoke it, add the following to your `config.exs`:
```elixir
  config :nerves_hub_link, client: MyApp.NervesHubLinkClient
```
"""
require Logger
@typedoc "Update that comes over a socket."
@type update_data :: map()
@typedoc "Supported responses from `update_available/1`"
@type update_response :: :apply | :ignore | {:reschedule, pos_integer()}
@typedoc "Firmware update progress, completion or error report"
@type fwup_message ::
{:ok, non_neg_integer(), String.t()}
| {:warning, non_neg_integer(), String.t()}
| {:error, non_neg_integer(), String.t()}
| {:progress, 0..100}
@doc """
Called to find out what to do when a firmware update is available.
May return one of:
  * `:apply` - Download and apply the update right now.
  * `:ignore` - Don't download and apply this update.
* `{:reschedule, timeout}` - Defer making a decision. Call this function again in `timeout` milliseconds.
"""
@callback update_available(update_data()) :: update_response()
@doc """
Called on firmware update reports.
The return value of this function is not checked.
"""
@callback handle_fwup_message(fwup_message()) :: :ok
@doc """
Called when downloading a firmware update fails.
The return value of this function is not checked.
"""
@callback handle_error(any()) :: :ok
@doc """
This function is called internally by NervesHubLink to notify clients.
"""
@spec update_available(update_data()) :: update_response()
def update_available(data) do
case apply_wrap(mod(), :update_available, [data]) do
:apply ->
:apply
:ignore ->
:ignore
{:reschedule, timeout} when timeout > 0 ->
{:reschedule, timeout}
wrong ->
Logger.error(
"[NervesHubLink] Client: #{inspect(mod())}.update_available/1 bad return value: #{
inspect(wrong)
} Applying update."
)
:apply
end
end
@doc """
This function is called internally by NervesHubLink to notify clients of fwup progress.
"""
@spec handle_fwup_message(fwup_message()) :: :ok
def handle_fwup_message(data) do
_ = apply_wrap(mod(), :handle_fwup_message, [data])
:ok
end
@doc """
This function is called internally by NervesHubLink to notify clients of fwup errors.
"""
@spec handle_error(any()) :: :ok
  def handle_error(data) do
    _ = apply_wrap(mod(), :handle_error, [data])
    :ok
  end
  # Catches exceptions, exits and throws from the client module
  defp apply_wrap(mod, function, args) do
    apply(mod, function, args)
  catch
    :error, reason -> {:error, reason}
    :exit, reason -> {:exit, reason}
    :throw, value -> value
  end
defp mod() do
Application.get_env(:nerves_hub_link, :client, NervesHubLink.Client.Default)
end
end
|
lib/nerves_hub_link/client.ex
| 0.807612
| 0.728676
|
client.ex
|
starcoder
|
defmodule Ash.Notifier.PubSub do
@moduledoc "A pubsub notifier extension"
@publish %Ash.Dsl.Entity{
name: :publish,
target: Ash.Notifier.PubSub.Publication,
describe: """
Configure a given action to publish its results over a given topic.
If you have multiple actions with the same name (only possible if they have different types),
use the `type` option, to specify which type you are referring to. Otherwise the message will
be broadcast for all actions with that name.
To include attribute values of the resource in the message, pass a list
of strings and attribute names. They will ultimately be joined with `:`.
For example:
```elixir
prefix "user"
publish :create, ["created", :user_id]
    publish :update, ["updated", :user_id]
```
    This might publish a message to \"user:created:1\" for example.
For updates, if the field in the template is being changed, a message is sent
to *both* values. So if you change `user 1` to `user 2`, the same message would
be published to `user:updated:1` and `user:updated:2`. If there are multiple
attributes in the template, and they are all being changed, a message is sent for
every combination of substitutions.
""",
examples: [
"publish :create, \"created\"",
"""
publish :assign, "assigned" do
type :create
end
"""
],
schema: Ash.Notifier.PubSub.Publication.schema(),
args: [:action, :topic]
}
@publish_all %Ash.Dsl.Entity{
name: :publish_all,
target: Ash.Notifier.PubSub.Publication,
describe: """
Works just like `publish`, except that it takes a type
and publishes all actions of that type
""",
examples: [
"publish_all :create, \"created\""
],
schema: Ash.Notifier.PubSub.Publication.publish_all_schema(),
args: [:type, :topic]
}
@pub_sub %Ash.Dsl.Section{
name: :pub_sub,
describe: """
A section for configuring how resource actions are published over pubsub
""",
examples: [
"""
pub_sub do
module MyEndpoint
prefix "post"
publish :destroy, ["foo", :id]
publish :default, ["foo", :id], type: :update
publish :default, ["bar", :name], type: :update, event: "name_change"
end
"""
],
entities: [
@publish,
@publish_all
],
modules: [:module],
schema: [
module: [
type: :atom,
doc: "The module to call `broadcast/3` on e.g module.broadcast(topic, event, message).",
required: true
],
prefix: [
type: :string,
doc:
"A prefix for all pubsub messages, e.g `users`. A message with `created` would be published as `users:created`"
]
]
}
use Ash.Dsl.Extension, sections: [@pub_sub]
def publications(resource) do
Ash.Dsl.Extension.get_entities(resource, [:pub_sub])
end
def module(resource) do
Ash.Dsl.Extension.get_opt(resource, [:pub_sub], :module, nil)
end
def prefix(resource) do
Ash.Dsl.Extension.get_opt(resource, [:pub_sub], :prefix, nil)
end
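  @doc """
  Publishes the given notification over every configured topic whose action
  (or action type) matches the notification's action.
  """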
def notify(%Ash.Notifier.Notification{resource: resource} = notification) do
resource
|> publications()
|> Enum.filter(&matches?(&1, notification))
|> Enum.each(&publish_notification(&1, notification))
end
defp publish_notification(publish, notification) do
publish.topic
|> fill_template(notification)
|> Enum.each(fn topic ->
event = publish.event || to_string(notification.action.name)
      prefixed_topic =
        case prefix(notification.resource) do
          nil -> topic
          prefix -> prefix <> ":" <> topic
        end
module(notification.resource).broadcast(
prefixed_topic,
event,
notification
)
end)
end
defp fill_template(topic, _) when is_binary(topic), do: [topic]
defp fill_template(topic, %{action: %{type: type}, data: data})
when type in [:create, :destroy] do
topic
|> Enum.map(fn item ->
if is_binary(item) do
item
else
data
|> Map.get(item)
|> to_string()
end
end)
|> Enum.join(":")
|> List.wrap()
end
defp fill_template(topic, notification) do
topic
|> all_combinations_of_values(notification)
|> Enum.map(&List.flatten/1)
|> Enum.map(&Enum.join(&1, ":"))
end
defp all_combinations_of_values(items, notification, trail \\ [])
defp all_combinations_of_values([], _, trail), do: [Enum.reverse(trail)]
defp all_combinations_of_values([item | rest], notification, trail) when is_binary(item) do
all_combinations_of_values(rest, notification, [item | trail])
end
defp all_combinations_of_values([item | rest], notification, trail) when is_atom(item) do
value_before_change = Map.get(notification.changeset.data, item)
value_after_change = Map.get(notification.data, item)
[value_before_change, value_after_change]
|> Enum.reject(&is_nil/1)
|> Enum.uniq()
|> Enum.flat_map(fn possible_value ->
all_combinations_of_values(rest, notification, [possible_value | trail])
end)
end
defp matches?(%{action: action, type: nil}, %{action: %{name: action}}), do: true
defp matches?(%{action: nil, type: type}, %{action: %{type: type}}), do: true
defp matches?(%{action: action, type: type}, %{action: %{name: action, type: type}}), do: true
defp matches?(_, _), do: false
end
|
lib/ash/notifier/pub_sub/pub_sub.ex
| 0.878796
| 0.844409
|
pub_sub.ex
|
starcoder
|
defmodule FnExpr do
@moduledoc """
Creates immediately invoked function expressions specifically
[IIFE](http://benalman.com/news/2010/11/immediately-invoked-function-expression/)
to be used with the pipe operator.
  The motivation for this library came from <NAME>' short talk
  [Put This in Your Pipe](https://vimeo.com/216107561), which
  was awesome. However, I found the additional boilerplate
  syntax to *immediately invoke* the function a little *funky*.
Here is a (contrived) example, using the pipe operator and anonymous
functions directly
iex> :apples
...> |> (fn atom -> Atom.to_string(atom) <> "__post" end).()
...> |> (fn (str) -> String.to_atom("pre__" <> str) end).()
:pre__apples__post
The same example can be represented with the `&` capture operator.
iex> :apples
...> |> (&(Atom.to_string(&1) <> "__post")).()
...> |> (&(String.to_atom("pre__" <> &1))).()
:pre__apples__post
  Neither looks that great, so we are introducing a `&&` capture-and-run
  operator, as well as an `invoke` macro which supports
  both capture expressions (`&1 + 1`) and anonymous
  functions (`fn x -> x + 1 end`).
That same example above, would look like
iex> :apples
...> |> FnExpr.&&(Atom.to_string(&1) <> "__post")
...> |> FnExpr.&&(String.to_atom("pre__" <> &1))
:pre__apples__post
If you prefer, you can `use FnExpr` like the following:
defmodule FnExpr.ExampleInvoke do
use FnExpr
def piece_count(board) do
board
|> Enum.reduce({0, 0}, fn(piece, {black, white}) ->
case piece do
:black -> {black+1, white}
:white -> {black, white+1}
_ -> {black, white}
end
end)
end
def total_pieces_on(board) do
board
|> piece_count
|> invoke(fn {black, white} -> black + white end)
|> invoke("Total pieces: \#{&1}")
end
end
And a sample output would look like:
iex> [nil, :black, nil, :black, :white, nil, :black]
...> |> FnExpr.ExampleInvoke.total_pieces_on
"Total pieces: 4"
  In the example above, we are using the `invoke` macro: it
  takes the preceding output from the pipe, as well as a
  function expression (or a capture expression).

  If you pass in a capture expression, then you only have
  `&1` available (the output from the previous pipe).
  Or, if you want greater pattern matching, you can
  use the `fn` function expression notation.
Here's a full example showing how to use the `&&` macro.
Note that && is not directly supported in Elixir as a
capture and immediately execute operator, so you will need
to include the module name `FnExpr.&&`.
defmodule FnExpr.ExampleAndAnd do
require FnExpr
def combine(atom) do
atom
|> FnExpr.&&(Atom.to_string(&1) <> "__post")
|> FnExpr.&&(String.to_atom("pre__" <> &1))
end
end
"""
defmacro __using__(_) do
quote do
defmacro invoke(piped_in_argument, expr) do
fun = is_tuple(expr) && elem(expr, 0)
case fun do
:fn ->
quote do
unquote(expr).(unquote(piped_in_argument))
end
_ ->
quote do
(&unquote(expr)).(unquote(piped_in_argument))
end
end
end
end
end
@doc """
Sometimes when working with piped operations, you might end up with a
null value, and that's OK, but you would love to provde a defaulted value
before continuing.
For example,
raw_input
|> SomeOtherModule.process_all
|> List.first
|> default(%{id: "test_id"})
|> Map.get(id)
In the above, there is a chance that there were no widgets to process,
but that's OK, you will just default it to a *near* empty map to allow
it to flow through to `&Map.get/2` without throwing an exception
"""
def default(piped_in_argument, default_value) do
case piped_in_argument do
nil -> default_value
_ -> piped_in_argument
end
end
@doc """
Creates function expression specifically to be used with the pipe operator.
Here is a (contrived) example showing how it can be used
iex> :apples
...> |> FnExpr.&&(Atom.to_string(&1) <> "__post")
...> |> FnExpr.&&(String.to_atom("pre__" <> &1))
:pre__apples__post
"""
defmacro unquote(:&&)(piped_in_argument, expr) do
quote do
(&unquote(expr)).(unquote(piped_in_argument))
end
end
end
|
lib/fn_expr.ex
| 0.878053
| 0.590661
|
fn_expr.ex
|
starcoder
|
defmodule ExDebugger.Tokenizer do
@moduledoc false
use CommendableComments
@modulecomment """
Ideally, as a regular user one should not need to know about this. However, as leaky abstractions tend to bite us by
surprise; it may be important to be aware of this.
The `AST` that we have access to compile time has a certain amount of loss of information that we need in order to
pinpoint a correct line. These general pertain to `end` identifiers which make it very hard to pinpoint the correct
line location that is relevant to annotate; such as:
```elixir
case a do # 1.
:a -> case b do # 2.
:b -> :ok # 3.
:c -> :error # 4.
end # 5.
# 6.
:b -> case c do # 7.
:b -> :error # 8.
:c -> :ok # 9.
end # 10.
end # 11.
```
`ExDebugger` wants to auto-annotate any polyfurcation point. The lines needed to be annotated in this case are only the
nested ones:
* 3, 4 and
* 8, 9
However, from an algorithmic perspective it is rather difficult to determine whether or not a case expression is nested
and that accordingly its parent can be excluded. This leads to the oversimplification of blindly applying the
annotation for each and every branch in each and every case expression. As such, we also need to annotate branches `:a`
and `:b` for the parent case expression above and the appropriate lines for that would thus constitute lines: 5 and 10
respectively.
However, the AST as received compile time excludes the various `end` identifiers making it difficult to distinguish
between the above and for instance:
```elixir
case a do # 1.
:a -> case b do # 2.
:b -> :ok # 3.
:c -> :error # 4.
# 5.
# 6.
# 7.
# 8.
end # 9.
# 10.
:b -> case c do # 11.
:b -> :error # 12.
:c -> :ok # 13.
end # 14.
end # 15.
```
In order to make things easier, this module tokenizes the respective file in which the module resides and scans for
the various `end` identifiers accordingly.
  The downside of this solution is that at compile time we are effectively tokenizing everything twice: once when Elixir
  starts compilation, and a second time when we hijack the def-macro and tokenize again to correctly annotate the AST.
  Of course, it is not entirely impossible to rely solely on the raw `AST` as provided. Conceptually speaking, it is
  rather easy to infer that a current branch's ending is one less than the starting line of the next branch. I may
explore this in the future; in this first iteration I went with a brute force solution instead.
"""
defstruct file_name: "",
defs: [],
def_line: 0,
def_name: "",
module: :none,
meta_debug: nil
alias ExDebugger.Meta
alias __MODULE__.{
Definition,
NestedModule,
Repo
}
@doc false
def new(caller, def_heading_ast) do
meta_debug = Meta.new(caller.module)
{def_name, def_line} = Definition.name_and_line(def_heading_ast)
file_name = caller.file
if Repo.is_uninitialized?(file_name) do
file_name
|> file
|> Meta.debug(meta_debug, "", :show_module_tokens)
|> nested_modules
|> groupify_defs
|> Repo.insert(file_name)
end
defs = Repo.lookup(file_name)
struct!(__MODULE__, %{
file_name: file_name,
def_line: def_line,
def_name: def_name,
defs: defs,
# def_lines: defs.def_lines,
module: caller.module,
meta_debug: meta_debug
})
# |> IO.inspect
end
# Nested functions as they appear in code are abbreviated whereas the
# module obtained from `__CALLER__.module` is a fully qualified `module`
# name which includes the names of all its parents.
# The current solution implemented is a bit naive and does not cover
# all the cases but should suffice for the time being.
@doc false
def module_has_use_ex_debugger?(t = %__MODULE__{}, module) do
nested_modules = t.defs.nested_modules
ground_state = module in nested_modules
module
|> Module.split()
|> Enum.reverse()
|> Enum.reduce_while({ground_state, []}, fn
_, {true, _} ->
{:halt, {true}}
e, {_, module_name_portions} ->
module_name_portions = Module.concat([e | module_name_portions])
result = module_name_portions in nested_modules
{:cont, {result, [module_name_portions]}}
end)
|> elem(0)
end
@doc false
def last_line(%__MODULE__{} = t) do
t.defs
|> Map.fetch!(t.def_line)
|> Map.fetch!(:last_line)
end
@doc false
def bifurcates?(%__MODULE__{} = t) do
try do
t.defs
|> Map.fetch!(t.def_line)
|> Map.fetch!(:polyfurcation_expressions)
|> Kernel.!=(%{})
rescue
_ ->
ExDebugger.Anomaly.raise(
"Entry not found. Only known occurrence for this is when you try to `use ExDebugger` with a `defmacro __using__`.",
:entry_not_found
)
end
end
@doc false
def file(file) do
file
|> File.open!([:charlist])
|> IO.read(:all)
|> :elixir_tokenizer.tokenize(1, [])
|> case do
{:ok, _, _, _, ls} -> ls
end
end
@doc false
def nested_modules(tokens = [{:identifier, _, :defmodule} | _]) do
{tokens, NestedModule.usage_ex_debugger(tokens)}
end
@doc false
def groupify_defs({tokens, nested_modules}), do: groupify_defs(tokens, %{}, nested_modules)
@doc false
def groupify_defs([{:identifier, _, :defmodule} | tl], acc, nested_modules) do
tl
|> Definition.all()
|> Enum.reduce(acc, fn [{:identifier, {line, _, nil}, _def_identifier} | tl], a ->
[{:end, {last_line, _, nil}} | _] = Enum.reverse(tl)
a
|> Map.put(line, %{
first_line: line,
last_line: last_line,
lines: tl,
polyfurcation_expressions: polyfurcation_expressions(tl),
sections: group_expressions(tl)
})
|> Map.put(:nested_modules, nested_modules)
# |> Map.update(:def_lines, [line], fn ls -> ls ++ [line] end)
end)
end
@doc false
def groupify_defs(_, _, _), do: {:error, :no_defmodule}
@doc false
def polyfurcation_expressions(tokens) do
tokens
|> Enum.reduce(%{}, fn
{_, {line, _, _}, :case}, a ->
a
|> Map.put(line, :case)
|> Map.update(:case, [line], &(&1 ++ [line]))
{_, {line, _, _}, :cond}, a ->
a
|> Map.put(line, :cond)
|> Map.update(:cond, [line], &(&1 ++ [line]))
{_, {line, _, _}, :if}, a ->
a
|> Map.put(line, :if)
|> Map.update(:if, [line], &(&1 ++ [line]))
_, a ->
a
end)
end
@doc false
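  # Walks the token stream with an explicit stack: each `case`, `if` or `cond`
  # opens a group keyed by its starting line, `fn ... end` blocks are skipped
  # entirely, and every `end` token closes the most recently opened construct.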
def group_expressions(tokens) do
tokens
|> Enum.reduce({[:ignore], %{}}, fn
{:fn, _}, {stack, a} ->
{[:ignore_block_till_end | stack], a}
e = {_, {line, _, _}, :case}, {stack, a} ->
{[{:groupify_defs, line} | stack], Map.put(a, line, [e])}
e = {_, {line, _, _}, :if}, {stack, a} ->
{[{:groupify_defs, line} | stack], Map.put(a, line, [e])}
e = {_, {line, _, _}, :cond}, {stack, a} ->
{[{:groupify_defs, line} | stack], Map.put(a, line, [e])}
e = {:end, {line, _, _}}, {stack = [last_block | tl], a} ->
last_block
|> case do
:ignore ->
{stack, Map.put(a, :end, line)}
:ignore_block_till_end ->
{tl, a}
{:groupify_defs, line} ->
{tl, Map.update(a, line, [e], fn ls -> handle_sections(ls ++ [e]) end)}
end
e, {stack = [last_block | _], a} ->
last_block
|> case do
:ignore -> {stack, a}
:ignore_block_till_end -> {stack, a}
{:groupify_defs, line} -> {stack, Map.update(a, line, [e], fn ls -> ls ++ [e] end)}
end
end)
|> elem(1)
end
@doc false
def handle_sections(block = [{_, {_line, _, _}, _op} | _]) do
block
|> Enum.reverse()
|> Enum.reduce({nil, []}, fn
{:end, {line, _, _}}, {nil, []} ->
{{:end_section, line - 1}, []}
{_, {line, _, _}, :if}, {{:end_section, end_section}, a} ->
{{:end_section, line - 1}, [%{start_section: line, end_section: end_section} | a]}
{_, {line, _, _}, :else}, {{:end_section, end_section}, a} ->
{{:end_section, line - 1}, [%{start_section: line, end_section: end_section} | a]}
{_, {line, _, _}, :->}, {{:end_section, end_section}, a} ->
{{:end_section, line - 1}, [%{start_section: line, end_section: end_section} | a]}
_, acc ->
acc
end)
|> elem(1)
end
end
|
lib/ex_debugger/tokenizer.ex
| 0.834643
| 0.868269
|
tokenizer.ex
|
starcoder
|
defmodule Aya.Driver do
@moduledoc """
Backend driver which is used with Aya. It should implement a set of functions
which perform various forms of validation. These functions are:
* `check_passkey` - Takes passkey, returns a user variable which will be used in future validation reqs
* `check_torrent` - Validates a torrent hash and the previously defined user variable
* `check_event` - Validates an event and the user variable
* `handle_announce` - Handles a full announce request with new reported stats from a user
All driver functions are simply sugar around standard GenServer calls. They will
all be passed a state variable and should return a response in the form {response, new_state}.
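
  ## Example

  A minimal driver sketch (module name and passkey are illustrative):

      defmodule MyApp.Driver do
        use Aya.Driver

        def check_passkey(passkey, state) do
          if passkey == "letmein" do
            {{:ok, %{name: "user"}}, state}
          else
            {{:error, "invalid passkey"}, state}
          end
        end
      end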
"""
defmacro __using__(_args) do
quote do
use GenServer
@type event :: :seeding | :leeching | :stopped
@type hash :: String.t
@type passkey :: String.t
@type user :: any
@type ul :: number
@type dl :: number
@type left :: number
@type state :: any
def start_link(opts \\ []) do
{:ok, _pid} = GenServer.start_link(__MODULE__, :ok, opts)
end
defoverridable start_link: 1
def init(:ok) do
require Logger
Logger.log :debug, "Started driver!"
{:ok, nil}
end
defoverridable init: 1
def handle_call({:check_passkey, passkey}, _from, state) do
{reply, new_state} = check_passkey(passkey, state)
{:reply, reply, new_state}
end
@spec check_passkey(passkey, state) :: {{:ok, user}, state} | {{:error, String.t}, state}
def check_passkey(_passkey, state) do
{{:ok, nil}, state}
end
defoverridable check_passkey: 2
def handle_call({:check_torrent, hash, user}, _from, state) do
{reply, new_state} = check_torrent(hash, user, state)
{:reply, reply, new_state}
end
@spec check_torrent(hash, user, state) :: {:ok, state} | {{:error, String.t}, state}
def check_torrent(_hash, _user, state) do
{:ok, state}
end
defoverridable check_torrent: 3
def handle_call({:check_event, event, user}, _from, state) do
{reply, new_state} = check_event(event, user, state)
{:reply, reply, new_state}
end
      @spec check_event(event, user, state) :: {:ok, state} | {{:error, String.t}, state}
def check_event(_event, _user, state) do
{:ok, state}
end
defoverridable check_event: 3
def handle_call({:announce, params, user}, _from, state) do
new_state = handle_announce(params, user, state)
        {:reply, :ok, new_state}
end
@spec handle_announce({hash, ul, dl, left, event}, user, state) :: state
def handle_announce(_params, _user, state) do
state
end
defoverridable handle_announce: 3
end
end
end
|
lib/aya/driver.ex
| 0.790652
| 0.553505
|
driver.ex
|
starcoder
|
defmodule Top52.Tasks do
@moduledoc """
The Tasks context.
"""
import Ecto.Query, warn: false
alias Ecto.Query
alias Top52.Repo
alias Top52.Tasks.Task
alias Top52.Tasks.Note
@doc """
Returns the list of tasks.
## Examples
iex> list_tasks()
[%Task{}, ...]
"""
def list_tasks do
Repo.all(Task)
end
@doc """
Gets a list of active tasks for a particular user
"""
def list_active_tasks_by_user(user_id) do
notes_query = from n in Note, order_by: n.id
query = Query.from(t in Task, where: t.user_id == ^user_id and t.status == "Active", order_by: t.deadline, preload: [notes: ^notes_query])
Repo.all(query)
end
@doc """
Gets a list of backlog tasks for a particular user
"""
def list_backlog_tasks_by_user(user_id) do
query = Query.from(t in Task, where: t.user_id == ^user_id and t.status == "Backlog", preload: [:notes])
Repo.all(query)
end
@doc """
Gets a list of completed tasks for a particular user
"""
def list_completed_tasks_by_user(user_id) do
query = Query.from(t in Task, where: t.user_id == ^user_id and t.status == "Completed", preload: [:notes])
Repo.all(query)
end
@doc """
Gets a single task.
Raises `Ecto.NoResultsError` if the Task does not exist.
## Examples
iex> get_task!(123)
%Task{}
iex> get_task!(456)
** (Ecto.NoResultsError)
"""
def get_task!(id), do: Repo.get!(Task, id)
@doc """
Gets a single task with its notes.
Raises `Ecto.NoResultsError` if the Task does not exist.
## Examples
iex> get_task_with_notes!(123)
%Task{}
iex> get_task_with_notes!(456)
** (Ecto.NoResultsError)
"""
def get_task_with_notes!(id), do: Repo.get!(Task, id) |> Repo.preload(:notes)
@doc """
Creates a task.
## Examples
iex> create_task(%{field: value})
{:ok, %Task{}}
iex> create_task(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_task(attrs \\ %{}) do
%Task{}
|> Task.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a task.
## Examples
iex> update_task(task, %{field: new_value})
{:ok, %Task{}}
iex> update_task(task, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_task(%Task{} = task, attrs) do
task
|> Task.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Task.
## Examples
iex> delete_task(task)
{:ok, %Task{}}
iex> delete_task(task)
{:error, %Ecto.Changeset{}}
"""
def delete_task(%Task{} = task) do
Repo.delete(task)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking task changes.
## Examples
iex> change_task(task)
%Ecto.Changeset{source: %Task{}}
"""
def change_task(%Task{} = task) do
Task.changeset(task, %{})
end
@doc """
Returns the list of notes.
## Examples
iex> list_notes()
[%Note{}, ...]
"""
def list_notes do
Repo.all(Note)
end
@doc """
Gets a single note.
Raises `Ecto.NoResultsError` if the Note does not exist.
## Examples
iex> get_note!(123)
%Note{}
iex> get_note!(456)
** (Ecto.NoResultsError)
"""
def get_note!(id), do: Repo.get!(Note, id)
@doc """
Creates a note.
## Examples
iex> create_note(%{field: value})
{:ok, %Note{}}
iex> create_note(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_note(attrs \\ %{}) do
%Note{}
|> Note.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a note.
## Examples
iex> update_note(note, %{field: new_value})
{:ok, %Note{}}
iex> update_note(note, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_note(%Note{} = note, attrs) do
note
|> Note.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Note.
## Examples
iex> delete_note(note)
{:ok, %Note{}}
iex> delete_note(note)
{:error, %Ecto.Changeset{}}
"""
def delete_note(%Note{} = note) do
Repo.delete(note)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking note changes.
## Examples
iex> change_note(note)
%Ecto.Changeset{source: %Note{}}
"""
def change_note(%Note{} = note) do
Note.changeset(note, %{})
end
end
|
lib/top5_2/tasks.ex
| 0.794544
| 0.433082
|
tasks.ex
|
starcoder
|
defmodule DiversityInTech.Companies do
@moduledoc """
The Companies context.
"""
import Ecto.Query, warn: false
alias DiversityInTech.Repo
alias DiversityInTech.Companies.Company
alias Ecto.Multi
@doc """
Returns the list of companies.
## Examples
iex> list_companies()
[%Company{}, ...]
"""
def list_companies do
Repo.all(Company)
end
@doc """
Returns the list of companies paginated.
## Examples
iex> paginate_companies(attrs)
%Scrivener.Page{entries: [%Company{}], page_number: 1,
page_size: 1, total_entries: 1, total_pages: 1}
"""
def paginate_companies(attrs) do
Repo.paginate(Company, attrs)
end
@doc """
Gets a single company.
Raises `Ecto.NoResultsError` if the Company does not exist.
## Examples
iex> get_company!(123)
%Company{}
iex> get_company!(456)
** (Ecto.NoResultsError)
"""
def get_company!(id), do: Repo.get!(Company, id)
@doc """
Gets a single company by its slug.
Raises `Ecto.NoResultsError` if the Company does not exist.
## Examples
iex> get_company_by_slug!("company-slug")
%Company{}
iex> get_company_by_slug!("company-slug")
** (Ecto.NoResultsError)
"""
def get_company_by_slug!(slug) do
query = from(company in Company, where: company.slug == ^slug)
Repo.one!(query)
end
@doc """
Creates a company.
## Examples
iex> create_company(%{field: value})
{:ok, %Company{}}
iex> create_company(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_company(attrs \\ %{}) do
changeset =
%Company{}
|> Company.changeset(attrs)
result =
Multi.new()
|> Multi.insert(:company, changeset)
|> Multi.run(:logo, &create_logo(&1, attrs))
|> Repo.transaction()
case result do
{:ok, changes} -> {:ok, changes.company}
{:error, _, changeset, _} -> {:error, changeset}
end
end
@doc """
Updates a company.
## Examples
iex> update_company(company, %{field: new_value})
{:ok, %Company{}}
iex> update_company(company, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_company(%Company{} = company, attrs) do
changeset =
company
|> Company.changeset(attrs)
result =
Multi.new()
|> Multi.update(:company, changeset)
|> Multi.run(:old_logo, &delete_logo(&1))
|> Multi.run(:logo, &create_logo(&1, attrs))
|> Repo.transaction()
case result do
{:ok, changes} -> {:ok, changes.company}
{:error, _, changeset, _} -> {:error, changeset}
end
end
@doc """
Deletes a Company.
## Examples
iex> delete_company(company)
{:ok, %Company{}}
iex> delete_company(company)
{:error, %Ecto.Changeset{}}
"""
def delete_company(%Company{} = company) do
result =
Multi.new()
|> Multi.delete(:company, company)
|> Multi.run(:old_logo, &delete_logo(&1))
|> Repo.transaction()
case result do
{:ok, changes} -> {:ok, changes.company}
{:error, _, changeset, _} -> {:error, changeset}
end
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking company changes.
## Examples
iex> change_company(company)
%Ecto.Changeset{source: %Company{}}
"""
def change_company(%Company{} = company) do
Company.changeset(company, %{})
end
alias DiversityInTech.Companies.Review
@doc """
Returns the list of reviews.
## Examples
iex> list_reviews()
[%Review{}, ...]
"""
def list_reviews do
Repo.all(Review)
end
@doc """
Returns the list of reviews of a certain company.
## Examples
iex> paginate_company_reviews(company_id, attrs)
%Scrivener.Page{entries: [%Review{}], page_number: 1,
page_size: 1, total_entries: 1, total_pages: 1}
"""
def paginate_company_reviews(company_id, attrs) do
from(review in Review, where: review.company_id == ^company_id)
|> Repo.paginate(attrs)
end
@doc """
Gets a single review.
Raises `Ecto.NoResultsError` if the Review does not exist.
## Examples
iex> get_review!(123)
%Review{}
iex> get_review!(456)
** (Ecto.NoResultsError)
"""
def get_review!(id), do: Repo.get!(Review, id)
@doc """
Creates a review.
## Examples
iex> create_review(%{field: value})
{:ok, %Review{}}
iex> create_review(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_review(attrs \\ %{}) do
%Review{}
|> Review.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates a review.
## Examples
iex> update_review(review, %{field: new_value})
{:ok, %Review{}}
iex> update_review(review, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_review(%Review{} = review, attrs) do
review
|> Review.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes a Review.
## Examples
iex> delete_review(review)
{:ok, %Review{}}
iex> delete_review(review)
{:error, %Ecto.Changeset{}}
"""
def delete_review(%Review{} = review) do
Repo.delete(review)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking review changes.
## Examples
iex> change_review(review)
%Ecto.Changeset{source: %Review{}}
"""
def change_review(%Review{} = review) do
Review.changeset(review, %{})
end
# Private functions
defp create_logo(%{company: company}, attrs) do
company
|> Company.logo_changeset(attrs)
|> Repo.update()
end
defp delete_logo(%{company: company}) do
path =
DiversityInTech.Uploader.file_path(
company,
company.logo,
DiversityInTech.Uploaders.Image
)
case DiversityInTech.Uploaders.Image.delete({path, company}) do
:ok -> {:ok, company}
_ -> {:error, company}
end
end
alias DiversityInTech.Companies.Attribute
@doc """
Returns the list of attributes.
## Examples
iex> list_attributes()
[%Attribute{}, ...]
"""
def list_attributes do
Repo.all(Attribute)
end
@doc """
Gets a single attribute.
Raises `Ecto.NoResultsError` if the Attribute does not exist.
## Examples
iex> get_attribute!(123)
%Attribute{}
iex> get_attribute!(456)
** (Ecto.NoResultsError)
"""
def get_attribute!(id), do: Repo.get!(Attribute, id)
@doc """
Creates an attribute.
## Examples
iex> create_attribute(%{field: value})
{:ok, %Attribute{}}
iex> create_attribute(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_attribute(attrs \\ %{}) do
%Attribute{}
|> Attribute.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates an attribute.
## Examples
iex> update_attribute(attribute, %{field: new_value})
{:ok, %Attribute{}}
iex> update_attribute(attribute, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_attribute(%Attribute{} = attribute, attrs) do
attribute
|> Attribute.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes an Attribute.
## Examples
iex> delete_attribute(attribute)
{:ok, %Attribute{}}
iex> delete_attribute(attribute)
{:error, %Ecto.Changeset{}}
"""
def delete_attribute(%Attribute{} = attribute) do
Repo.delete(attribute)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking attribute changes.
## Examples
iex> change_attribute(attribute)
%Ecto.Changeset{source: %Attribute{}}
"""
def change_attribute(%Attribute{} = attribute) do
Attribute.changeset(attribute, %{})
end
alias DiversityInTech.Companies.AttributeReview
@doc """
Returns the list of attributes_reviews.
## Examples
iex> list_attributes_reviews()
[%AttributeReview{}, ...]
"""
def list_attributes_reviews do
Repo.all(AttributeReview)
end
@doc """
Gets a single attribute_review.
Raises `Ecto.NoResultsError` if the Attribute review does not exist.
## Examples
iex> get_attribute_review!(123)
%AttributeReview{}
iex> get_attribute_review!(456)
** (Ecto.NoResultsError)
"""
def get_attribute_review!(id), do: Repo.get!(AttributeReview, id)
@doc """
Creates an attribute_review.
## Examples
iex> create_attribute_review(%{field: value})
{:ok, %AttributeReview{}}
iex> create_attribute_review(%{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def create_attribute_review(attrs \\ %{}) do
%AttributeReview{}
|> AttributeReview.changeset(attrs)
|> Repo.insert()
end
@doc """
Updates an attribute_review.
## Examples
iex> update_attribute_review(attribute_review, %{field: new_value})
{:ok, %AttributeReview{}}
iex> update_attribute_review(attribute_review, %{field: bad_value})
{:error, %Ecto.Changeset{}}
"""
def update_attribute_review(%AttributeReview{} = attribute_review, attrs) do
attribute_review
|> AttributeReview.changeset(attrs)
|> Repo.update()
end
@doc """
Deletes an AttributeReview.
## Examples
iex> delete_attribute_review(attribute_review)
{:ok, %AttributeReview{}}
iex> delete_attribute_review(attribute_review)
{:error, %Ecto.Changeset{}}
"""
def delete_attribute_review(%AttributeReview{} = attribute_review) do
Repo.delete(attribute_review)
end
@doc """
Returns an `%Ecto.Changeset{}` for tracking attribute_review changes.
## Examples
iex> change_attribute_review(attribute_review)
%Ecto.Changeset{source: %AttributeReview{}}
"""
def change_attribute_review(%AttributeReview{} = attribute_review) do
AttributeReview.changeset(attribute_review, %{})
end
end
|
lib/diversity_in_tech/companies/companies.ex
| 0.803598
| 0.415432
|
companies.ex
|
starcoder
|
defmodule Robot do
@moduledoc """
Emergency Hull Painting Robot
"""
defstruct heading: :up, position: {0, 0}, panels: %{}, next_input: :paint
@black 0
@white 1
@left 0
@right 1
def print(map) do
pts = Map.keys(map)
{min_x, max_x} = Enum.map(pts, fn {x, _y} -> x end) |> Enum.min_max()
{min_y, max_y} = Enum.map(pts, fn {_x, y} -> y end) |> Enum.min_max()
for y <- max_y..min_y do
for x <- min_x..max_x do
print_char(Map.get(map, {x,y}, @black))
end
IO.write("\n")
end
:ok
end
defp print_char(@black), do: IO.write(".")
defp print_char(@white), do: IO.write("#")
@doc """
Use an Intcode program to cause the robot to paint
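A sketch of painting from a white starting panel and printing the result
(`program` stands for the Intcode source string, which is not shown here):

    panels = Robot.paint(program, %{{0, 0} => 1})
    Robot.print(panels)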
"""
def paint(str, panels \\ %{}) do
{:ok, _pid} = Agent.start_link(fn -> %Robot{panels: panels} end, name: __MODULE__)
code = Intcode.load(str)
Intcode.run(code, [], &camera/0, &action/1)
panels = Agent.get(__MODULE__, fn state -> state.panels end)
Agent.stop(__MODULE__)
panels
end
def camera do
  Agent.get(__MODULE__, fn state -> Map.get(state.panels, state.position, @black) end)
end
def action(cmd) do
action(cmd, Agent.get(__MODULE__, fn state -> state.next_input end))
end
def action(cmd, :paint) do
{panels, pos} = Agent.get(__MODULE__, fn state -> {state.panels, state.position} end)
panels = Map.put(panels, pos, cmd)
Agent.update(__MODULE__, fn state -> %Robot{state | panels: panels, next_input: :move} end)
end
def action(cmd, :move) do
{pos, heading} = Agent.get(__MODULE__, fn state -> {state.position, state.heading} end)
heading = update_heading(heading, cmd)
pos = update_position(pos, heading)
Agent.update(__MODULE__, fn state -> %Robot{state | position: pos, heading: heading, next_input: :paint} end)
end
defp update_heading(:up, @left), do: :left
defp update_heading(:left, @left), do: :down
defp update_heading(:down, @left), do: :right
defp update_heading(:right, @left), do: :up
defp update_heading(:up, @right), do: :right
defp update_heading(:right, @right), do: :down
defp update_heading(:down, @right), do: :left
defp update_heading(:left, @right), do: :up
defp update_position({x, y}, :up), do: {x, y+1}
defp update_position({x, y}, :down), do: {x, y-1}
defp update_position({x, y}, :left), do: {x-1, y}
defp update_position({x, y}, :right), do: {x+1, y}
end
|
apps/day11/lib/robot.ex
| 0.688468
| 0.498901
|
robot.ex
|
starcoder
|
defmodule AdaptableCostsEvaluator.Outputs.Output do
@moduledoc """
An `AdaptableCostsEvaluator.Outputs.Output` holds the result of the evaluation
of the particular `AdaptableCostsEvaluator.Formulas.Formula`. The value of the
`AdaptableCostsEvaluator.Outputs.Output` is always validated against the linked
`AdaptableCostsEvaluator.FieldSchemas.FieldSchema`.
"""
use Ecto.Schema
import Ecto.Changeset
alias AdaptableCostsEvaluator.Validators.{FieldValueValidator, LabelValidator}
schema "outputs" do
field :label, :string
field :last_value, AdaptableCostsEvaluator.Types.JSONB
field :name, :string
belongs_to :computation, AdaptableCostsEvaluator.Computations.Computation
belongs_to :field_schema, AdaptableCostsEvaluator.FieldSchemas.FieldSchema
belongs_to :formula, AdaptableCostsEvaluator.Formulas.Formula
timestamps()
end
@doc false
def changeset(output, attrs) do
output
|> cast(attrs, [:name, :label, :last_value, :computation_id, :field_schema_id, :formula_id])
|> validate_required([:name, :label, :computation_id, :field_schema_id])
|> validate_length(:name, max: 100)
|> validate_length(:label, max: 100)
|> unique_constraint([:label, :computation_id])
|> FieldValueValidator.validate()
|> LabelValidator.validate()
|> validate_computation_context()
end
defp validate_computation_context(changeset) do
if changeset.valid? do
changes = changeset.changes
formula_id = changes[:formula_id] || changeset.data.formula_id
computation_id = changes[:computation_id] || changeset.data.computation_id
if formula_id == nil do
changeset
else
formula = AdaptableCostsEvaluator.Formulas.get_formula!(formula_id)
if formula.computation_id == computation_id do
changeset
else
add_error(
changeset,
:formula_id,
"formula is in different computation than the output record"
)
end
end
else
changeset
end
end
defdelegate authorize(action, user, params),
to: AdaptableCostsEvaluator.Policies.Outputs.OutputPolicy
end
|
lib/adaptable_costs_evaluator/outputs/output.ex
| 0.832747
| 0.537466
|
output.ex
|
starcoder
|
defmodule AWS.ACM do
@moduledoc """
AWS Certificate Manager
You can use AWS Certificate Manager (ACM) to manage SSL/TLS certificates for
your AWS-based websites and applications.
For more information about using ACM, see the [AWS Certificate Manager User Guide](https://docs.aws.amazon.com/acm/latest/userguide/).
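## Example

A sketch of requesting a public certificate (the credentials, region, and domain
below are placeholders):

    client = AWS.Client.create("ACCESS_KEY_ID", "SECRET_ACCESS_KEY", "us-east-1")
    AWS.ACM.request_certificate(client, %{"DomainName" => "www.example.com"})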
"""
alias AWS.Client
alias AWS.Request
def metadata do
%AWS.ServiceMetadata{
abbreviation: "ACM",
api_version: "2015-12-08",
content_type: "application/x-amz-json-1.1",
credential_scope: nil,
endpoint_prefix: "acm",
global?: false,
protocol: "json",
service_id: "ACM",
signature_version: "v4",
signing_name: "acm",
target_prefix: "CertificateManager"
}
end
@doc """
Adds one or more tags to an ACM certificate.
Tags are labels that you can use to identify and organize your AWS resources.
Each tag consists of a `key` and an optional `value`. You specify the
certificate on input by its Amazon Resource Name (ARN). You specify the tag by
using a key-value pair.
You can apply a tag to just one certificate if you want to identify a specific
characteristic of that certificate, or you can apply the same tag to multiple
certificates if you want to filter for a common relationship among those
certificates. Similarly, you can apply the same tag to multiple resources if you
want to specify a relationship among those resources. For example, you can add
the same tag to an ACM certificate and an Elastic Load Balancing load balancer
to indicate that they are both used by the same website. For more information,
see [Tagging ACM certificates](https://docs.aws.amazon.com/acm/latest/userguide/tags.html).
To remove one or more tags, use the `RemoveTagsFromCertificate` action. To view
all of the tags that have been applied to the certificate, use the
`ListTagsForCertificate` action.
"""
def add_tags_to_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "AddTagsToCertificate", input, options)
end
@doc """
Deletes a certificate and its associated private key.
If this action succeeds, the certificate no longer appears in the list that can
be displayed by calling the `ListCertificates` action or be retrieved by calling
the `GetCertificate` action. The certificate will not be available for use by
AWS services integrated with ACM.
You cannot delete an ACM certificate that is being used by another AWS service.
To delete a certificate that is in use, the certificate association must first
be removed.
"""
def delete_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DeleteCertificate", input, options)
end
@doc """
Returns detailed metadata about the specified ACM certificate.
"""
def describe_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "DescribeCertificate", input, options)
end
@doc """
Exports a private certificate issued by a private certificate authority (CA) for
use anywhere.
The exported file contains the certificate, the certificate chain, and the
encrypted private 2048-bit RSA key associated with the public key that is
embedded in the certificate. For security, you must assign a passphrase for the
private key when exporting it.
For information about exporting and formatting a certificate using the ACM
console or CLI, see [Export a Private Certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-export-private.html).
"""
def export_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ExportCertificate", input, options)
end
@doc """
Returns the account configuration options associated with an AWS account.
"""
def get_account_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetAccountConfiguration", input, options)
end
@doc """
Retrieves an Amazon-issued certificate and its certificate chain.
The chain consists of the certificate of the issuing CA and the intermediate
certificates of any other subordinate CAs. All of the certificates are base64
encoded. You can use
[OpenSSL](https://wiki.openssl.org/index.php/Command_Line_Utilities) to decode
the certificates and inspect individual fields.
"""
def get_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "GetCertificate", input, options)
end
@doc """
Imports a certificate into AWS Certificate Manager (ACM) to use with services
that are integrated with ACM.
Note that [integrated services](https://docs.aws.amazon.com/acm/latest/userguide/acm-services.html)
allow only certificate types and keys they support to be associated with their
resources. Further, their support differs depending on whether the certificate
is imported into IAM or into ACM. For more information, see the documentation
for each service. For more information about importing certificates into ACM,
see [Importing Certificates](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html)
in the *AWS Certificate Manager User Guide*.
ACM does not provide [managed renewal](https://docs.aws.amazon.com/acm/latest/userguide/acm-renewal.html) for
certificates that you import.
Note the following guidelines when importing third party certificates:
* You must enter the private key that matches the certificate you
are importing.
* The private key must be unencrypted. You cannot import a private
key that is protected by a password or a passphrase.
* The private key must be no larger than 5 KB (5,120 bytes).
* If the certificate you are importing is not self-signed, you must
enter its certificate chain.
* If a certificate chain is included, the issuer must be the subject
of one of the certificates in the chain.
* The certificate, private key, and certificate chain must be
PEM-encoded.
* The current time must be between the `Not Before` and `Not After`
certificate fields.
* The `Issuer` field must not be empty.
* The OCSP authority URL, if present, must not exceed 1000
characters.
* To import a new certificate, omit the `CertificateArn` argument.
Include this argument only when you want to replace a previously imported
certificate.
* When you import a certificate by using the CLI, you must specify
the certificate, the certificate chain, and the private key by their file names
preceded by `fileb://`. For example, you can specify a certificate saved in the
`C:\temp` folder as `fileb://C:\temp\certificate_to_import.pem`. If you are
making an HTTP or HTTPS Query request, include these arguments as BLOBs.
* When you import a certificate by using an SDK, you must specify
the certificate, the certificate chain, and the private key files in the manner
required by the programming language you're using.
* The cryptographic algorithm of an imported certificate must match
the algorithm of the signing CA. For example, if the signing CA key type is RSA,
then the certificate key type must also be RSA.
This operation returns the [Amazon Resource Name (ARN)](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
of the imported certificate.
"""
def import_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ImportCertificate", input, options)
end
@doc """
Retrieves a list of certificate ARNs and domain names.
You can request that only certificates that match a specific status be listed.
You can also filter by specific attributes of the certificate. Default filtering
returns only `RSA_2048` certificates. For more information, see `Filters`.
"""
def list_certificates(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListCertificates", input, options)
end
@doc """
Lists the tags that have been applied to the ACM certificate.
Use the certificate's Amazon Resource Name (ARN) to specify the certificate. To
add a tag to an ACM certificate, use the `AddTagsToCertificate` action. To
delete a tag, use the `RemoveTagsFromCertificate` action.
"""
def list_tags_for_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ListTagsForCertificate", input, options)
end
@doc """
Adds or modifies account-level configurations in ACM.
The supported configuration option is `DaysBeforeExpiry`. This option specifies
the number of days prior to certificate expiration when ACM starts generating
`EventBridge` events. ACM sends one event per day per certificate until the
certificate expires. By default, accounts receive events starting 45 days before
certificate expiration.
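A sketch of the expected input shape (the values here are illustrative; see the
ACM API reference for the authoritative schema):

    AWS.ACM.put_account_configuration(client, %{
      "ExpiryEvents" => %{"DaysBeforeExpiry" => 30},
      "IdempotencyToken" => "token-1"
    })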
"""
def put_account_configuration(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "PutAccountConfiguration", input, options)
end
@doc """
Remove one or more tags from an ACM certificate.
A tag consists of a key-value pair. If you do not specify the value portion of
the tag when calling this function, the tag will be removed regardless of value.
If you specify a value, the tag is removed only if it is associated with the
specified value.
To add tags to a certificate, use the `AddTagsToCertificate` action. To view all
of the tags that have been applied to a specific ACM certificate, use the
`ListTagsForCertificate` action.
"""
def remove_tags_from_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RemoveTagsFromCertificate", input, options)
end
@doc """
Renews an eligible ACM certificate.
At this time, only exported private certificates can be renewed with this
operation. In order to renew your ACM PCA certificates with ACM, you must first
[grant the ACM service principal permission to do so](https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaPermissions.html).
For more information, see [Testing Managed Renewal](https://docs.aws.amazon.com/acm/latest/userguide/manual-renewal.html)
in the ACM User Guide.
"""
def renew_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RenewCertificate", input, options)
end
@doc """
Requests an ACM certificate for use with other AWS services.
To request an ACM certificate, you must specify a fully qualified domain name
(FQDN) in the `DomainName` parameter. You can also specify additional FQDNs in
the `SubjectAlternativeNames` parameter.
If you are requesting a private certificate, domain validation is not required.
If you are requesting a public certificate, each domain name that you specify
must be validated to verify that you own or control the domain. You can use [DNS validation](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html)
or [email validation](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-email.html).
We recommend that you use DNS validation. ACM issues public certificates after
receiving approval from the domain owner.
"""
def request_certificate(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "RequestCertificate", input, options)
end
@doc """
Resends the email that requests domain ownership validation.
The domain owner or an authorized representative must approve the ACM
certificate before it can be issued. The certificate can be approved by clicking
a link in the mail to navigate to the Amazon certificate approval website and
then clicking **I Approve**. However, the validation email can be blocked by
spam filters. Therefore, if you do not receive the original mail, you can
request that the mail be resent within 72 hours of requesting the ACM
certificate. If more than 72 hours have elapsed since your original request or
since your last attempt to resend validation mail, you must request a new
certificate. For more information about setting up your contact email addresses,
see [Configure Email for your Domain](https://docs.aws.amazon.com/acm/latest/userguide/setup-email.html).
"""
def resend_validation_email(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "ResendValidationEmail", input, options)
end
@doc """
Updates a certificate.
Currently, you can use this function to specify whether to opt in to or out of
recording your certificate in a certificate transparency log. For more
information, see [ Opting Out of Certificate Transparency Logging](https://docs.aws.amazon.com/acm/latest/userguide/acm-bestpractices.html#best-practices-transparency).
"""
def update_certificate_options(%Client{} = client, input, options \\ []) do
Request.request_post(client, metadata(), "UpdateCertificateOptions", input, options)
end
end
|
lib/aws/generated/acm.ex
| 0.880463
| 0.45181
|
acm.ex
|
starcoder
|
defmodule Breadboard.Joystick do
@moduledoc """
Manage the 'Joystick' using the I2C protocol
Joystick module refer to a simple hardware consisting of an *analog joystick* connected to an 'ADS1115' *analog to digital converter* using the I2C Interface.
This hardware is tested on a [OrangePi board](http://www.orangepi.org/) (PC and PC2) by this default pins assignment:
Analog Joystic | ADS1115 | OrangePi board
-------------- | ------- | --------------
5V | | pin 1 (+3.3V)
GND | | pin 9 (Ground)
VRx | an0 | _
VRy | an1 | _
SW | an2 | _
_ | VDD | pin 1 (+3.3V)
_ | GND | pin 9 (Ground)
_ | SCL | pin 5 (TWI0_SCK)
_ | SDA | pin 3 (TWI0_SDA)
Note:
- I2C Addressing by ADR pin >> 0x48 (1001000) ADR -> GND
- Analog Inputs >> Single Ended type (Input Channel <-> GND)
- Internally `Breadboard` use the `ADS1115` library to manage I2C protocol
Every *Joystick* is supervised in the application, but if the *joystick* (*child*) process crashes, it is never restarted.
"""
@typedoc "Options for `Breadboard.Joystick.connect/1`"
@type connect_options ::
{:i2c_bus_name, any()}
| {:i2c_bus_addr, Circuits.I2C.address()}
| {:i2c_bus_gain, ADS1115.Config.gain()}
| {:push_button_in, ADS1115.Config.comparison()}
| {:x_axis_in, ADS1115.Config.comparison()}
| {:y_axis_in, ADS1115.Config.comparison()}
@doc """
Connect to a 'Joystick hardware'.
Options:
* `:i2c_bus_name` - any valid 'bus name' valid for the platform in the form "i2c-n" where "n" is the bus number (as defined in `Circuits.I2C`).
* `:i2c_bus_addr` - address of the device, as defined in `Circuits.I2C` (`0x48`)
* `:i2c_bus_gain` - range of the ADC scaling, as defined in `ADS1115` (`6144`)
* `:push_button_in` - Input Channel for 'push button' (`{:ain2, :gnd}`)
* `:x_axis_in` - Input Channel for the measurement of the X axis (`{:ain0, :gnd}`)
* `:y_axis_in` - Input Channel for the measurement of the Y axis (`{:ain1, :gnd}`)
Return values:
On success the function returns `{:ok, joystick}`, where `joystick` is the PID of the supervised 'Joystick'
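## Examples

A sketch using the default address on bus "i2c-0" (the bus name depends on your
board):

    {:ok, joystick} = Breadboard.Joystick.connect(i2c_bus_name: "i2c-0")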
"""
@spec connect([connect_options()]) :: {:ok, pid()} | {:error, atom()}
def connect(options) do
Breadboard.Supervisor.Joystick.start_child(options)
end
@doc """
Read the current state values of a joystick
## Examples
> {:ok, joystick} = Joystick.connect([i2c_bus_name: "i2c-0"])
{:ok, #PID<0.388.0>}
> Joystick.get_values(joystick)
[x_axis: 8940, y_axis: 8863, push_button: 17510]
*any single value will be between -32,768 and 32,767*
"""
@spec get_values(GenServer.server()) :: nonempty_list(any())
def get_values(joystick) do
GenServer.call(joystick, :get_values)
end
@doc """
Disconnect the joystick from the 'breadboard'
"""
@spec disconnect(GenServer.server()) :: :ok | {:error, :not_found}
def disconnect(joystick) do
Breadboard.Supervisor.Joystick.stop_child(joystick)
end
end
# SPDX-License-Identifier: Apache-2.0
|
lib/breadboard/joystick.ex
| 0.7696
| 0.562297
|
joystick.ex
|
starcoder
|
defmodule EctoTablestore.Migration do
@moduledoc """
Migrations are used to create your tables.
An autoincrementing partition key is supported via this library's wrapper; for this use
case, the migration automatically creates another, separate table to generate
the serial value when `:insert` (viz `ExAliyunOts.put_row/5`) or `:batch_write` (viz
`ExAliyunOts.batch_write/3`) is used with the `:put` option.
In practice, we don't create migration files by hand either, we typically use `mix
ecto.ots.gen.migration` to generate the file with the proper timestamp and then we just fill in
its contents:
$ mix ecto.ots.gen.migration create_posts_table
And then we can fill the table definition details:
defmodule EctoTablestore.TestRepo.Migrations.CreatePostsTable do
use EctoTablestore.Migration
def change do
create table("ecto_ots_test_posts") do
add :post_id, :integer, partition_key: true, auto_increment: true
end
end
end
After we filled the above migration content, you can run the migration above by going to the
root of your project and typing:
$ mix ecto.ots.migrate
Finally, we successfully create the "ecto_ots_test_posts" table. Since the above definition
added an autoincrementing column for the partition key, an
"ecto_ots_test_posts_seq" table is automatically created to generate a serial integer for the `:post_id` field when a
new record is inserted.
"""
require ExAliyunOts.Const.PKType, as: PKType
require Logger
alias EctoTablestore.{Sequence, Migration.Runner}
alias Ecto.MigrationError
alias ExAliyunOts.Var.Search
defmodule Table do
@moduledoc false
defstruct name: nil, prefix: nil, partition_key: true, meta: []
@type t :: %__MODULE__{
name: String.t(),
prefix: String.t() | nil,
partition_key: boolean(),
meta: Keyword.t()
}
end
defmodule SecondaryIndex do
@moduledoc false
defstruct table_name: nil, index_name: nil, prefix: nil, include_base_data: true
@type t :: %__MODULE__{
table_name: String.t(),
index_name: String.t(),
prefix: String.t() | nil,
include_base_data: boolean()
}
end
defmodule SearchIndex do
@moduledoc false
defstruct table_name: nil, index_name: nil, prefix: nil
@type t :: %__MODULE__{
table_name: String.t(),
index_name: String.t(),
prefix: String.t() | nil
}
end
@doc false
defmacro __using__(_) do
quote location: :keep do
import EctoTablestore.Migration,
only: [
table: 1,
table: 2,
secondary_index: 2,
secondary_index: 3,
search_index: 2,
search_index: 3,
create: 2,
drop: 1,
drop_if_exists: 1,
add: 2,
add: 3,
add_pk: 1,
add_pk: 2,
add_pk: 3,
add_column: 1,
add_column: 2,
add_index: 3
]
import ExAliyunOts.Search
def __migration__, do: :ok
end
end
@doc """
Returns a table struct that can be given to `create/2`.
Since Tablestore is a NoSQL service, up to 4 primary keys can be added at
creation; the first added key becomes the partition key when the `partition_key` option is set to `false`.
## Examples
create table("products") do
add :name, :string
add :price, :integer
end
create table("products", partition_key: false) do
add :name, :string
add :price, :integer
end
## Options
* `:partition_key` - `true` by default, which adds an `:id` field as the partition key
with a large autoincrementing integer type (like `bigserial`); Tablestore does not support
a `bigserial` type for primary keys, but the `ex_aliyun_ots` lib's Sequence wrapper can
implement it. When `false`, no partition key field is generated on table creation.
* `:prefix` - the prefix for the table.
* `:meta` - define the meta information when creating the table; see Tablestore's documentation for details:
* `:reserved_throughput_write` - reserved throughput for writes when creating the table, an
integer, the default value is 0;
* `:reserved_throughput_read` - reserved throughput for reads when creating the table, an integer,
the default value is 0;
* `:time_to_live` - the survival time of the saved data, a.k.a. TTL; an integer in seconds,
the default value is -1 (permanent preservation);
* `:deviation_cell_version_in_sec` - maximum version deviation, the default value is 86400
seconds, which is 1 day;
* `:stream_spec` - set the stream specification of Tablestore:
- `:is_enabled`, open or close the stream
- `:expiration_time`, the expiration time of the table's stream
"""
def table(name, opts \\ [])
def table(name, opts) when is_atom(name) do
table(Atom.to_string(name), opts)
end
def table(name, opts) when is_binary(name) and is_list(opts) do
struct(%Table{name: name}, opts)
end
@doc """
Returns a secondary index struct that can be given to `create/2`.
For more information see the [Chinese Docs](https://help.aliyun.com/document_detail/91947.html) | [English Docs](https://www.alibabacloud.com/help/doc-detail/91947.html)
## Examples
create secondary_index("posts", "posts_owner") do
add_pk(:owner_id)
add_pk(:id)
add_column(:title)
add_column(:content)
end
## Options
* `:include_base_data` - specifies whether the index table includes the existing data in the base
table; `true` means the index includes the existing data, while `false` means it excludes it.
Optional, defaults to `true`.
"""
def secondary_index(table_name, index_name, opts \\ [])
when is_binary(table_name) and is_binary(index_name) and is_list(opts) do
struct(%SecondaryIndex{table_name: table_name, index_name: index_name}, opts)
end
@doc """
Returns a search index struct that can be given to `create/2`.
For more information see the [Chinese Docs](https://help.aliyun.com/document_detail/117452.html) | [English Docs](https://www.alibabacloud.com/help/doc-detail/117452.html)
## Examples
create search_index("posts", "posts_owner") do
field_schema_keyword("title")
field_schema_keyword("content")
field_sort("title")
end
Please refer the [link](https://hexdocs.pm/ex_aliyun_ots/ExAliyunOts.Search.html#define-field-schema) to the available field schema definition options,
and the [link](https://hexdocs.pm/ex_aliyun_ots/ExAliyunOts.Search.html#sort) to the available sort options.
"""
def search_index(table_name, index_name, opts \\ [])
when is_binary(table_name) and is_binary(index_name) and is_list(opts) do
struct(%SearchIndex{table_name: table_name, index_name: index_name}, opts)
end
@doc """
Adds a primary key when creating a secondary index.
"""
defmacro add_pk(column) when is_binary(column), do: quote(do: {:pk, unquote(column)})
defmacro add_pk(column) when is_atom(column),
do: quote(do: {:pk, unquote(Atom.to_string(column))})
defmacro add_pk(column) do
raise ArgumentError,
"error type when defining pk column: #{inspect(column)} for secondary_index, only supported one of type: [:binary, :atom]"
end
@doc """
Adds a column what already be pre-defined column when creating a secondary index.
"""
defmacro add_column(column) when is_binary(column), do: quote(do: {:column, unquote(column)})
defmacro add_column(column) when is_atom(column),
do: quote(do: {:column, unquote(Atom.to_string(column))})
defmacro add_column(column) do
raise ArgumentError,
"error type when defining pre-defined column: #{inspect(column)} for secondary_index, only supported one of type: [:binary, :atom]"
end
@doc """
Define the primary key(s) of the table to create.
By default, the table will also include an `:id` primary key field (which is also the partition key)
with a type of `:integer`, an autoincrementing column. Check the `table/2` docs for
more information.
There are up to 4 primary key(s) can be added when creation.
## Example
create table("posts") do
add :title, :string
end
# The above is equivalent to
create table("posts") do
add :id, :integer, partition_key: true, auto_increment: true
add :title, :string
end
"""
defmacro create(object, do: block), do: expand_create(object, block)
defp expand_create(object, block) do
columns =
case block do
{:__block__, _, columns} -> columns
column -> [column]
end
quote do
map = unquote(__MODULE__).__create__(unquote(object), unquote(columns))
Runner.push_command(&unquote(__MODULE__).do_create(&1, map))
end
end
def __create__(%Table{} = table, columns) do
{index_metas, columns} = Enum.split_with(columns, &is_tuple(&1))
%{primary_key: pk_columns, pre_defined_column: pre_defined_columns} =
Map.merge(
%{primary_key: [], pre_defined_column: []},
Enum.group_by(columns, & &1.column_type)
)
partition_key_count = Enum.count(pk_columns, & &1.partition_key)
pk_columns =
cond do
partition_key_count == 1 ->
pk_columns
# Make the partition key `:id`, backed by an incrementing integer sequence
partition_key_count == 0 and table.partition_key ->
opts = Runner.repo_config(:migration_primary_key, [])
{name, opts} = Keyword.pop(opts, :name, "id")
{type, _opts} = Keyword.pop(opts, :type, :integer)
[
%{
name: name,
type: type,
column_type: :primary_key,
partition_key: true,
auto_increment: true
}
| pk_columns
]
# No partition key defined
partition_key_count == 0 ->
raise MigrationError,
message: "Please define at least one partition primary keys for table: " <> table.name
# The partition key only can define one
true ->
raise MigrationError,
message:
"The maximum number of partition primary keys is 1, now is #{partition_key_count} defined on table: " <>
table.name <> " columns:\n" <> inspect(pk_columns)
end
case Enum.count(pk_columns) do
# The number of primary keys can not be more than 4
pk_count when pk_count > 4 ->
raise MigrationError,
message:
"The maximum number of primary keys is 4, now is #{pk_count} defined on table: " <>
table.name <> " columns:\n" <> inspect(pk_columns)
# Only one primary key may be defined as an auto_increment integer
_pk_count ->
%{hashids: hashids_count, auto_increment: auto_increment_count} =
Enum.reduce(pk_columns, %{hashids: 0, auto_increment: 0, none: 0}, fn
%{type: :hashids}, acc -> Map.update!(acc, :hashids, &(&1 + 1))
%{auto_increment: true}, acc -> Map.update!(acc, :auto_increment, &(&1 + 1))
_, acc -> Map.update!(acc, :none, &(&1 + 1))
end)
if (total_increment_count = auto_increment_count + hashids_count) > 1 do
raise MigrationError,
message:
"The maximum number of [auto_increment & hashids] primary keys is 1, but now find #{
total_increment_count
} primary keys defined on table: " <> table.name
else
%{
table: table,
pk_columns: pk_columns,
pre_defined_columns: pre_defined_columns,
index_metas: index_metas,
create_seq_table?: auto_increment_count > 0 or hashids_count > 0
}
end
end
end
def __create__(%SecondaryIndex{} = secondary_index, columns) do
g_columns = Enum.group_by(columns, &elem(&1, 0), &elem(&1, 1))
case [:pk, :column] -- Map.keys(g_columns) do
[] ->
:ok
[missing] ->
raise MigrationError,
"Missing #{missing} definition when creating: #{inspect(secondary_index)}, please use add_#{
missing
}/1 when creating secondary index."
_ ->
raise MigrationError,
"Missing pk & column definition when creating: #{inspect(secondary_index)}, please use add_pk/1 and add_column/1 when creating secondary index."
end
%{
secondary_index: secondary_index,
primary_keys: g_columns.pk,
defined_columns: g_columns.column
}
end
def __create__(%SearchIndex{} = search_index, columns) do
group_key = fn column ->
if column.__struct__ in [
Search.PrimaryKeySort,
Search.FieldSort,
Search.GeoDistanceSort,
Search.ScoreSort
] do
:index_sorts
else
:field_schemas
end
end
g_columns = Enum.group_by(columns, group_key)
unless Map.get(g_columns, :field_schemas) do
raise MigrationError,
"Missing field_schemas definition when creating: #{inspect(search_index)}, please use field_schema_* functions when creating search index."
end
%{
search_index: search_index,
field_schemas: g_columns.field_schemas,
index_sorts: g_columns[:index_sorts] || []
}
end
@doc false
# create table
def do_create(repo, %{
table: table,
pk_columns: pk_columns,
pre_defined_columns: pre_defined_columns,
index_metas: index_metas,
create_seq_table?: create_seq_table?
}) do
table_name = get_table_name(table, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
repo_meta = Ecto.Adapter.lookup_meta(repo)
instance = repo_meta.instance
primary_keys = Enum.map(pk_columns, &transform_table_column/1)
defined_columns = Enum.map(pre_defined_columns, &transform_table_column/1)
print_list =
Enum.reject(
[
primary_keys: primary_keys,
defined_columns: defined_columns,
index_metas: index_metas
],
&match?({_, []}, &1)
)
Logger.info(fn ->
">> creating table: #{table_name_str} by #{
inspect(print_list, pretty: true, limit: :infinity)
} "
end)
options =
Keyword.merge(table.meta,
max_versions: 1,
defined_columns: defined_columns,
index_metas: index_metas
)
case ExAliyunOts.create_table(instance, table_name, primary_keys, options) do
:ok ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn -> ">>>> create table: #{table_name_str} result: #{result_str}" end)
create_seq_table!(create_seq_table?, table_name, instance)
:ok
{:error, error} ->
raise MigrationError, "create table: #{table_name} error: " <> error.message
end
end
# create secondary_index
def do_create(repo, %{
secondary_index: secondary_index,
primary_keys: primary_keys,
defined_columns: defined_columns
}) do
{table_name, index_name} = get_index_name(secondary_index, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
index_name_str = IO.ANSI.format([:green, index_name, :reset])
include_base_data = secondary_index.include_base_data
repo_meta = Ecto.Adapter.lookup_meta(repo)
Logger.info(fn ->
">> creating secondary_index: #{index_name_str} for table: #{table_name_str} by #{
inspect(
[
primary_keys: primary_keys,
defined_columns: defined_columns,
include_base_data: include_base_data
],
pretty: true,
limit: :infinity
)
} "
end)
case ExAliyunOts.create_index(
repo_meta.instance,
table_name,
index_name,
primary_keys,
defined_columns,
include_base_data: include_base_data
) do
:ok ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn ->
">>>> create secondary_index: #{index_name_str} for table: #{table_name_str} result: #{
result_str
}"
end)
:ok
{:error, error} ->
raise MigrationError,
"create secondary index: #{index_name} for table: #{table_name} error: " <>
error.message
end
end
# create search_index
def do_create(repo, %{
search_index: search_index,
field_schemas: field_schemas,
index_sorts: index_sorts
}) do
{table_name, index_name} = get_index_name(search_index, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
index_name_str = IO.ANSI.format([:green, index_name, :reset])
repo_meta = Ecto.Adapter.lookup_meta(repo)
Logger.info(fn ->
">> creating search index: #{index_name_str} for table: #{table_name_str} by #{
inspect(
[field_schemas: field_schemas, index_sorts: index_sorts],
pretty: true,
limit: :infinity
)
} "
end)
case ExAliyunOts.create_search_index(
repo_meta.instance,
table_name,
index_name,
field_schemas: field_schemas,
index_sorts: index_sorts
) do
{:ok, _} ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn ->
">>>> create search index: #{index_name_str} for table: #{table_name_str} result: #{
result_str
}"
end)
:ok
{:error, error} ->
raise MigrationError,
"create search index: #{index_name} for table: #{table_name} error: " <>
error.message
end
end
defp create_seq_table!(false, _table_name, _instance),
do: :ignore
defp create_seq_table!(true, table_name, instance) do
seq_table_name = Sequence.default_table()
# check if not exists
with {:list_table, {:ok, %{table_names: table_names}}} <-
{:list_table, ExAliyunOts.list_table(instance)},
true <- seq_table_name not in table_names,
:ok <-
ExAliyunOts.Sequence.create(instance, %ExAliyunOts.Var.NewSequence{
name: seq_table_name
}) do
Logger.info(fn ->
">> auto create table: #{seq_table_name} for table: " <> table_name
end)
:ok
else
{:list_table, {:error, error}} ->
raise MigrationError, "list_table error: " <> error.message
{:error, error} ->
raise MigrationError, "create table: #{seq_table_name} error: " <> error.message
false ->
:already_exists
end
end
defp get_table_name(%{prefix: prefix, name: name}, repo_config) do
prefix = prefix || Keyword.get(repo_config, :migration_default_prefix)
if prefix do
prefix <> name
else
name
end
end
defp get_index_name(
%{prefix: prefix, table_name: table_name, index_name: index_name},
repo_config
) do
prefix = prefix || Keyword.get(repo_config, :migration_default_prefix)
if prefix do
{prefix <> table_name, prefix <> index_name}
else
{table_name, index_name}
end
end
defp transform_table_column(%{column_type: :pre_defined_column, name: field_name, type: type}) do
{field_name, type}
end
defp transform_table_column(%{
column_type: :primary_key,
name: field_name,
type: type,
partition_key: partition_key?,
auto_increment: auto_increment?
}) do
case type do
:integer when auto_increment? and not partition_key? ->
{field_name, PKType.integer(), PKType.auto_increment()}
_ ->
type_mapping = %{
hashids: PKType.string(),
integer: PKType.integer(),
string: PKType.string(),
binary: PKType.binary()
}
{field_name, type_mapping[type]}
end
end
@doc """
Adds a primary key when creating a table.
This function only accepts types as `:string` | `:binary` | `:integer` | `:hashids` | `:id`.
About the `:auto_increment` option:
* set `:auto_increment` to `true` on a primary key that is not the partition key, and
Tablestore's auto-increment column processes it.
* set `:auto_increment` to `true` on the partition key, and
`ex_aliyun_ots`'s built-in Sequence function is used; the actual principle behind it is an
atomic update operation through another separate table that generates the serial integer. By default
an `:id` partition key of `:integer` type is added; the initial value of the sequence is
0, and the increment step is 1.
Tablestore can only have up to 4 primary keys, and the first defined primary key is the
partition key. Please note that the order of the primary key definitions is directly mapped
to the created table.
About the `:hashids` type for defining the partition key:
* setting `partition_key` to `true` is required.
* setting `auto_increment` to `true` is required.
## Examples
The auto generated serial integer for partition key:
create table("posts") do
add :title, :string
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :id, :integer, partition_key: true, auto_increment: true
add :title, :string
end
The explicitly defined field with `partition_key`:
create table("posts") do
add :title, :string
end
# The above is equivalent to
create table("posts") do
add :id, :integer, partition_key: true, auto_increment: true
add :title, :string
end
The `:auto_increment` integer for primary key of non-partitioned key:
create table("posts") do
add :tag, :integer, auto_increment: true
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :id, :integer, partition_key: true, auto_increment: true
add :version, :integer, auto_increment: true
end
The `:hashids` type for the partition key with the built-in sequence feature:
create table("posts") do
add :id, :hashids, auto_increment: true, partition_key: true
end
The `:id` type for the partition key with the built-in sequence feature:
create table("posts") do
add :id, :id
end
# The above is equivalent to
create table("posts", partition_key: false) do
add :id, :integer, partition_key: true, auto_increment: true
end
## Options
* `:partition_key` - when `true`, marks this field as the partition key; only the first
explicitly defined field may use this option.
* `:auto_increment` - when `true` and this field is not the partition key, Tablestore
automatically generates the primary key value, which is unique within the partition key and
increases progressively; when `true` and this field is a partition key, `ex_aliyun_ots`'s
Sequence is used to build a serial number for this field. The `auto_increment: true` option
can be bound to only one primary key.
"""
defmacro add(column, type, opts \\ []), do: _add_pk(column, type, opts)
@doc """
Adds a primary key when creating a table.
Same as `add/2`, see `add/2` for more information.
"""
defmacro add_pk(column, type, opts \\ []), do: _add_pk(column, type, opts)
defp _add_pk(column, type, opts)
when (is_atom(column) or is_binary(column)) and is_list(opts) do
validate_pk_type!(column, type)
if type == :id do
quote location: :keep do
%{
name: unquote(to_string(column)),
type: :integer,
column_type: :primary_key,
partition_key: true,
auto_increment: true
}
end
else
quote location: :keep do
%{
name: unquote(to_string(column)),
type: unquote(type),
column_type: :primary_key,
partition_key: Keyword.get(unquote(opts), :partition_key, false),
auto_increment: Keyword.get(unquote(opts), :auto_increment, false)
}
end
end
end
defp validate_pk_type!(column, type) do
# more information can be found in the [documentation](https://help.aliyun.com/document_detail/106536.html)
if type in [:integer, :string, :binary, :hashids, :id] do
:ok
else
raise ArgumentError,
"#{inspect(type)} is not a valid primary key type for column: `#{inspect(column)}`, " <>
"please use an atom as :integer | :string | :binary | :hashids | :id."
end
end
@doc """
Adds a pre-defined column when creating a table.
This function only accepts types as `:integer` | `:double` | `:boolean` | `:string` | `:binary`.
For more information see the [Chinese Docs](https://help.aliyun.com/document_detail/91947.html) | [English Docs](https://www.alibabacloud.com/help/doc-detail/91947.html)
## Examples
create table("posts") do
add_pk(:id, :integer, partition_key: true)
add_pk(:owner_id, :string)
add_column(:title, :string)
add_column(:content, :string)
end
"""
defmacro add_column(column, type), do: _add_column(column, type)
defp _add_column(column, type) when is_atom(column) or is_binary(column) do
validate_pre_defined_col_type!(column, type)
quote location: :keep do
%{
name: unquote(to_string(column)),
type: unquote(type),
column_type: :pre_defined_column
}
end
end
defp validate_pre_defined_col_type!(column, type) do
# more information can be found in the [documentation](https://help.aliyun.com/document_detail/106536.html)
if type in [:integer, :double, :boolean, :string, :binary] do
:ok
else
raise ArgumentError,
"#{inspect(type)} is not a valid pre-defined column type for column: `#{
inspect(column)
}`, " <>
"please use an atom as :integer | :double | :boolean | :string | :binary ."
end
end
@doc """
Adds a secondary index when creating a table.
For more information see the [Chinese Docs](https://help.aliyun.com/document_detail/91947.html) | [English Docs](https://www.alibabacloud.com/help/doc-detail/91947.html)
## Examples
create table("posts") do
add_pk(:id, :integer, partition_key: true)
add_pk(:owner_id, :string)
add_column(:title, :string)
add_column(:content, :string)
add_index("posts_owner", [:owner_id, :id], [:title, :content])
add_index("posts_title", [:title, :id], [:content])
end
"""
defmacro add_index(index_name, primary_keys, defined_columns)
when is_binary(index_name) and is_list(primary_keys) and is_list(defined_columns) do
check_and_transform_columns = fn columns ->
columns
|> Macro.prewalk(&Macro.expand(&1, __CALLER__))
|> Enum.map(fn
column when is_binary(column) ->
column
column when is_atom(column) ->
Atom.to_string(column)
column ->
raise ArgumentError,
"error type when defining column: #{inspect(column)} for add_index: #{index_name}, " <>
"only supported one of type: [:binary, :atom]"
end)
end
quote location: :keep do
{
unquote(index_name),
unquote(check_and_transform_columns.(primary_keys)),
unquote(check_and_transform_columns.(defined_columns))
}
end
end
@doc """
Drops a table or index if it exists.
Does not raise an error if the specified table or index does not exist.
## Examples
drop_if_exists table("posts")
drop_if_exists secondary_index("posts", "posts_owner")
drop_if_exists search_index("posts", "posts_index")
"""
def drop_if_exists(obj), do: drop(obj, true)
@doc """
Drops one of the following:
* a table
* a secondary index
* a search index
## Examples
drop table("posts")
drop secondary_index("posts", "posts_owner")
drop search_index("posts", "posts_index")
"""
def drop(obj), do: drop(obj, false)
def drop(%Table{} = table, if_exists) do
Runner.push_command(fn repo ->
table_name = get_table_name(table, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
repo_meta = Ecto.Adapter.lookup_meta(repo)
instance = repo_meta.instance
Logger.info(fn -> ">> dropping table: #{table_name_str}" end)
case ExAliyunOts.delete_table(instance, table_name) do
:ok ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn -> ">>>> dropping table: #{table_name_str} result: #{result_str}" end)
:ok
{:error, %{code: "OTSObjectNotExist"}} when if_exists ->
result_str = IO.ANSI.format([:green, "not exists", :reset])
Logger.info(fn -> ">>>> dropping table: #{table_name_str} result: #{result_str}" end)
:ok
{:error, error} ->
raise MigrationError, "dropping table: #{table_name} error: " <> error.message
end
end)
end
def drop(%SecondaryIndex{} = secondary_index, if_exists) do
Runner.push_command(fn repo ->
{table_name, index_name} = get_index_name(secondary_index, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
index_name_str = IO.ANSI.format([:green, index_name, :reset])
repo_meta = Ecto.Adapter.lookup_meta(repo)
Logger.info(fn ->
">> dropping secondary_index table: #{table_name_str}, index: #{index_name_str}"
end)
case ExAliyunOts.delete_index(repo_meta.instance, table_name, index_name) do
:ok ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn ->
          ">>>> dropping secondary_index table: #{table_name_str}, index: #{index_name_str} result: #{result_str}"
end)
:ok
{:error, %{code: "OTSObjectNotExist"}} when if_exists ->
result_str = IO.ANSI.format([:green, "not exists", :reset])
Logger.info(fn ->
          ">>>> dropping secondary_index table: #{table_name_str}, index: #{index_name_str} result: #{result_str}"
end)
:ok
{:error, %{code: "OTSParameterInvalid", message: "Index does not exist" <> _}} when if_exists ->
result_str = IO.ANSI.format([:green, "not exists", :reset])
Logger.info(fn ->
          ">>>> dropping secondary_index table: #{table_name_str}, index: #{index_name_str} result: #{result_str}"
end)
:ok
{:error, error} ->
raise MigrationError,
"dropping secondary_index index: #{index_name} for table: #{table_name} error: " <>
error.message
end
end)
end
def drop(%SearchIndex{} = search_index, if_exists) do
Runner.push_command(fn repo ->
{table_name, index_name} = get_index_name(search_index, repo.config())
table_name_str = IO.ANSI.format([:green, table_name, :reset])
index_name_str = IO.ANSI.format([:green, index_name, :reset])
repo_meta = Ecto.Adapter.lookup_meta(repo)
Logger.info(fn ->
">> dropping search index table: #{table_name_str}, index: #{index_name_str}"
end)
case ExAliyunOts.delete_search_index(repo_meta.instance, table_name, index_name) do
{:ok, _} ->
result_str = IO.ANSI.format([:green, "ok", :reset])
Logger.info(fn ->
          ">>>> dropping search index table: #{table_name_str}, index: #{index_name_str} result: #{result_str}"
end)
:ok
{:error, %{code: "OTSObjectNotExist"}} when if_exists ->
result_str = IO.ANSI.format([:green, "not exists", :reset])
Logger.info(fn ->
          ">>>> dropping search index table: #{table_name_str}, index: #{index_name_str} result: #{result_str}"
end)
:ok
{:error, error} ->
raise MigrationError,
"dropping search index index: #{index_name} for table: #{table_name} error: " <>
error.message
end
end)
end
end
|
lib/ecto_tablestore/migration.ex
| 0.811116
| 0.519338
|
migration.ex
|
starcoder
|
defmodule Plug.Parsers.MULTIPART do
@moduledoc """
Parses multipart request body.
## Options
All options supported by `Plug.Conn.read_body/2` are also supported here.
They are repeated here for convenience:
* `:length` - sets the maximum number of bytes to read from the request,
defaults to 8_000_000 bytes
* `:read_length` - sets the amount of bytes to read at one time from the
underlying socket to fill the chunk, defaults to 1_000_000 bytes
* `:read_timeout` - sets the timeout for each socket read, defaults to
15_000ms
So by default, `Plug.Parsers` will read 1_000_000 bytes at a time from the
socket with an overall limit of 8_000_000 bytes.
Besides the options supported by `Plug.Conn.read_body/2`, the multipart parser
also checks for `:headers` option that contains the same `:length`, `:read_length`
and `:read_timeout` options which are used explicitly for parsing multipart
headers.
"""
@behaviour Plug.Parsers
def init(opts) do
opts
end
def parse(conn, "multipart", subtype, _headers, opts) when subtype in ["form-data", "mixed"] do
{adapter, state} = conn.adapter
try do
adapter.parse_req_multipart(state, opts, &handle_headers/1)
    rescue
      # Do not ignore upload errors
      e in Plug.UploadError ->
        reraise e, __STACKTRACE__

      # All others are wrapped
      e ->
        reraise Plug.Parsers.ParseError.exception(exception: e), __STACKTRACE__
else
{:ok, params, state} ->
{:ok, params, %{conn | adapter: {adapter, state}}}
{:more, _params, state} ->
{:error, :too_large, %{conn | adapter: {adapter, state}}}
{:error, :timeout} ->
raise Plug.TimeoutError
{:error, _} ->
raise Plug.BadRequestError
end
end
def parse(conn, _type, _subtype, _headers, _opts) do
{:next, conn}
end
defp handle_headers(headers) do
case List.keyfind(headers, "content-disposition", 0) do
{_, disposition} -> handle_disposition(disposition, headers)
nil -> :skip
end
end
defp handle_disposition(disposition, headers) do
case :binary.split(disposition, ";") do
[_, params] ->
params = Plug.Conn.Utils.params(params)
if name = Map.get(params, "name") do
handle_disposition_params(name, params, headers)
else
:skip
end
[_] ->
:skip
end
end
defp handle_disposition_params(name, params, headers) do
case Map.get(params, "filename") do
nil -> {:binary, name}
"" -> :skip
filename ->
path = Plug.Upload.random_file!("multipart")
{:file, name, path, %Plug.Upload{filename: filename, path: path,
content_type: get_header(headers, "content-type")}}
end
end
defp get_header(headers, key) do
case List.keyfind(headers, key, 0) do
{^key, value} -> value
nil -> nil
end
end
end
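# A minimal usage sketch (illustration only; `MyApp.ParserPipeline` is a
# hypothetical module, not part of Plug) showing how the options documented
# above are passed through `Plug.Parsers`:
defmodule MyApp.ParserPipeline do
  use Plug.Builder

  plug Plug.Parsers,
    parsers: [:multipart],
    pass: ["*/*"],
    # overall limit on bytes read from the request
    length: 20_000_000,
    # bytes read from the socket at a time
    read_length: 1_000_000,
    # per-read timeout in milliseconds
    read_timeout: 15_000
end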
|
deps/plug/lib/plug/parsers/multipart.ex
| 0.753194
| 0.449151
|
multipart.ex
|
starcoder
|
defmodule BatchPlease.FileBatcher do
@typedoc ~S"""
The state of a single FileBatcher batch.
`filename` is a string holding the name of the file containing items
from the current batch.
`file` is a filehandle to the current batch file.
`encode` is a function which takes an item as input and encodes it
into string format. It returns either `{:ok, encoded_item}` or
  `{:error, message}`. Defaults to `Poison.encode/1`.
`decode` is a function which takes a string-encoded item as input
and decodes it, returning either `{:ok, item}` or `{:error, message}`.
Defaults to `Poison.decode/1`.
"""
@type batch :: %{
opts: Keyword.t,
filename: String.t,
file: File.io_device,
    encode: ((item) -> {:ok, binary} | {:error, String.t}),
    decode: ((binary) -> {:ok, item} | {:error, String.t})
  }
@typedoc ~S"""
Items can be of any type that is representable with the given
`encode` and `decode` functions (which, by default, use JSON).
"""
@type item :: any
@doc ~S"""
Callback to create a new FileBatcher batch.
`opts[:batch_directory]` can be used to specify where to put batch files
(default `/tmp`).
`opts[:encode]` can be used to manually specify an encoder function.
"""
def batch_init(opts) do
dir = (opts[:batch_directory] || "/tmp") |> String.replace_trailing("/", "")
with :ok <- File.mkdir_p(dir)
do
{:ok, %{
opts: opts,
dir: dir,
filename: nil,
file: nil,
        encode: opts[:encode],
        decode: opts[:decode]
      }}
end
end
defp create_file_unless_exists(%{file: nil, filename: nil}=batch) do
filename = make_filename(batch.dir)
with {:ok, file} <- File.open(filename, [:write])
do
{:ok, %{batch |
file: file,
        filename: filename
      }}
end
end
defp create_file_unless_exists(batch), do: {:ok, batch}
@doc false
def batch_add_item(batch_maybe_without_file, item) do
with {:ok, batch} <- create_file_unless_exists(batch_maybe_without_file),
{:ok, enc_item} <- do_encode(batch, item),
encoded_item <- String.replace_trailing(enc_item, "\n", ""),
:ok <- IO.binwrite(batch.file, encoded_item <> "\n")
do
{:ok, batch}
end
end
@doc false
def batch_pre_flush(batch) do
with :ok <- File.close(batch.file)
do
{:ok, batch}
end
end
@doc false
def batch_post_flush(batch) do
File.rm(batch.filename)
end
defp do_encode(batch, item) do
cond do
batch.encode ->
batch.encode.(item)
{:module, _} = Code.ensure_loaded(Poison) ->
Poison.encode(item)
:else ->
raise UndefinedFunctionError, message: "no `encode` function was provided, and `Poison.encode/1` is not available"
end
end
defp make_filename(dir) do
    rand = :rand.uniform() |> to_string |> String.replace(~r/^0\./, "")
"#{dir}/#{:erlang.system_time(:milli_seconds)}_#{rand}.batch"
end
defmacro __using__(opts) do
quote do
use BatchPlease, unquote(opts)
def batch_init(opts), do: BatchPlease.FileBatcher.batch_init(opts)
def batch_add_item(batch, item), do: BatchPlease.FileBatcher.batch_add_item(batch, item)
def batch_pre_flush(batch), do: BatchPlease.FileBatcher.batch_pre_flush(batch)
def batch_post_flush(batch), do: BatchPlease.FileBatcher.batch_post_flush(batch)
end
end
end
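# A minimal, hand-driven sketch of the batch lifecycle (normally BatchPlease
# drives these callbacks). The inline encoder avoids assuming Poison is
# installed; any function returning {:ok, binary} works:
#
#   {:ok, batch} =
#     BatchPlease.FileBatcher.batch_init(
#       batch_directory: "/tmp/batches",
#       encode: fn item -> {:ok, inspect(item)} end
#     )
#
#   {:ok, batch} = BatchPlease.FileBatcher.batch_add_item(batch, %{event: "signup"})
#   {:ok, batch} = BatchPlease.FileBatcher.batch_pre_flush(batch)
#   BatchPlease.FileBatcher.batch_post_flush(batch)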
|
lib/batch_please/file_batcher.ex
| 0.683525
| 0.55923
|
file_batcher.ex
|
starcoder
|
defmodule EWalletDB.ExchangePair do
@moduledoc """
Ecto Schema representing an exchange pair.
# What is an exchange rate?
The exchange rate is the amount of the destination token (`to_token`) that will be received
when exchanged with one unit of the source token (`from_token`).
For example:
```
%EWalletDB.ExchangePair{
from_token: AAA,
to_token: BBB,
rate: 2.00
}
```
  The struct above means that 1 AAA can be exchanged for 2 BBB.
"""
use Ecto.Schema
use EWalletDB.SoftDelete
use EWalletConfig.Types.ExternalID
import Ecto.Changeset
import EWalletDB.Helpers.Preloader
  import EWalletDB.Validator
alias Ecto.UUID
alias EWalletDB.{Repo, Token}
@primary_key {:uuid, UUID, autogenerate: true}
schema "exchange_pair" do
external_id(prefix: "exg_")
belongs_to(
:from_token,
Token,
references: :uuid,
type: UUID,
foreign_key: :from_token_uuid
)
belongs_to(
:to_token,
Token,
references: :uuid,
type: UUID,
foreign_key: :to_token_uuid
)
field(:rate, :float)
timestamps()
soft_delete()
end
defp changeset(exchange_pair, attrs) do
exchange_pair
|> cast(attrs, [:from_token_uuid, :to_token_uuid, :rate, :deleted_at])
|> validate_required([:from_token_uuid, :to_token_uuid, :rate])
|> validate_different_values(:from_token_uuid, :to_token_uuid)
|> validate_immutable(:from_token_uuid)
|> validate_immutable(:to_token_uuid)
|> validate_number(:rate, greater_than: 0)
|> assoc_constraint(:from_token)
|> assoc_constraint(:to_token)
|> unique_constraint(
:from_token,
name: "exchange_pair_from_token_uuid_to_token_uuid_index"
)
end
defp restore_changeset(exchange_pair, attrs) do
exchange_pair
|> cast(attrs, [:deleted_at])
|> unique_constraint(
:deleted_at,
name: "exchange_pair_from_token_uuid_to_token_uuid_index"
)
end
@doc """
Get all exchange pairs.
"""
@spec all(keyword()) :: [%__MODULE__{}] | []
def all(opts \\ []) do
__MODULE__
|> exclude_deleted()
|> Repo.all()
|> preload_option(opts)
end
@doc """
Retrieves an exchange pair with the given ID.
"""
@spec get(String.t(), keyword()) :: %__MODULE__{} | nil
def get(id, opts \\ [])
def get(id, opts) when is_external_id(id) do
get_by([id: id], opts)
end
def get(_id, _opts), do: nil
@doc """
Retrieves an exchange pair using one or more fields.
"""
@spec get_by(map() | keyword(), keyword()) :: %__MODULE__{} | nil
def get_by(fields, opts \\ []) do
__MODULE__
|> exclude_deleted()
|> Repo.get_by(fields)
|> preload_option(opts)
end
@doc """
Creates a new exchange pair with the passed attributes.
"""
@spec insert(map()) :: {:ok, %__MODULE__{}} | {:error, Ecto.Changeset.t()}
def insert(attrs) do
%__MODULE__{}
|> changeset(attrs)
|> Repo.insert()
end
@doc """
Updates an exchange pair with the passed attributes.
"""
@spec update(%__MODULE__{}, map()) :: {:ok, %__MODULE__{}} | {:error, Ecto.Changeset.t()}
def update(exchange_pair, attrs) do
exchange_pair
|> changeset(attrs)
|> Repo.update()
end
@doc """
Checks whether the given exchange pair is soft-deleted.
"""
@spec deleted?(%__MODULE__{}) :: boolean()
def deleted?(exchange_pair), do: SoftDelete.deleted?(exchange_pair)
@doc """
Soft-deletes the given exchange pair.
"""
@spec delete(%__MODULE__{}) :: {:ok, %__MODULE__{}} | {:error, Ecto.Changeset.t()}
def delete(exchange_pair), do: SoftDelete.delete(exchange_pair)
@doc """
Restores the given exchange pair from soft-delete.
"""
@spec restore(%__MODULE__{}) :: {:ok, %__MODULE__{}} | {:error, Ecto.Changeset.t()}
def restore(exchange_pair) do
changeset = restore_changeset(exchange_pair, %{deleted_at: nil})
case Repo.update(changeset) do
{:error, %{errors: [deleted_at: {"has already been taken", []}]}} ->
{:error, :exchange_pair_already_exists}
result ->
result
end
end
@doc """
Touches the given exchange pair and updates `updated_at` to the current date & time.
"""
@spec touch(%__MODULE__{}) :: {:ok, %__MODULE__{}} | {:error, Ecto.Changeset.t()}
def touch(exchange_pair) do
exchange_pair
|> change(updated_at: NaiveDateTime.utc_now())
|> Repo.update()
end
@doc """
Gets the standard name of the exchange pair.
"""
@spec get_name(%__MODULE__{}) :: String.t()
def get_name(exchange_pair) do
exchange_pair = Repo.preload(exchange_pair, [:from_token, :to_token])
exchange_pair.from_token.symbol <> "/" <> exchange_pair.to_token.symbol
end
@doc """
Retrieves an exchange pair using `from_token` and `to_token`.
If an exchange pair is found, `{:ok, pair}` is returned.
If an exchange pair could not be found, `{:error, :exchange_pair_not_found}` is returned.
"""
@spec fetch_exchangable_pair(%Token{} | String.t(), %Token{} | String.t(), keyword()) ::
{:ok, %__MODULE__{}} | {:error, :exchange_pair_not_found}
def fetch_exchangable_pair(from, to, opts \\ [])
def fetch_exchangable_pair(%Token{} = from_token, %Token{} = to_token, opts) do
fetch_exchangable_pair(from_token.uuid, to_token.uuid, opts)
end
def fetch_exchangable_pair(from_token_uuid, to_token_uuid, opts) do
case get_by([from_token_uuid: from_token_uuid, to_token_uuid: to_token_uuid], opts) do
%__MODULE__{} = pair ->
{:ok, pair}
nil ->
{:error, :exchange_pair_not_found}
end
end
end
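# Sketch (illustrative, not part of the module): applying a pair's rate, where
# `rate` is the amount of `to_token` received per 1 unit of `from_token` as
# described in the moduledoc. `from_token`, `to_token` and `from_amount` are
# assumed to be in scope:
#
#   {:ok, pair} = EWalletDB.ExchangePair.fetch_exchangable_pair(from_token, to_token)
#   to_amount = from_amount * pair.rate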
|
apps/ewallet_db/lib/ewallet_db/exchange_pair.ex
| 0.903236
| 0.710829
|
exchange_pair.ex
|
starcoder
|
defmodule Faker.Pokemon.De do
import Faker, only: [sampler: 2]
@moduledoc """
Functions for Pokemon names in German
"""
@doc """
Returns a Pokemon name
## Examples
iex> Faker.Pokemon.De.name()
"Viscogon"
iex> Faker.Pokemon.De.name()
"Lepumentas"
iex> Faker.Pokemon.De.name()
"Quajutsu"
iex> Faker.Pokemon.De.name()
"Pyroleo"
"""
@spec name() :: String.t()
sampler(:name, [
"Bisasam",
"Bisaknosp",
"Bisaflor",
"Glumanda",
"Glutexo",
"Glurak",
"Schiggy",
"Schillok",
"Turtok",
"Raupy",
"Safcon",
"Smettbo",
"Hornliu",
"Kokuna",
"Bibor",
"Taubsi",
"Tauboga",
"Tauboss",
"Rattfratz",
"Rattikarl",
"Habitak",
"Ibitak",
"Rettan",
"Arbok",
"Pikachu",
"Raichu",
"Sandan",
"Sandamer",
"Nidoranβ",
"Nidorina",
"Nidoqueen",
"Nidoranβ",
"Nidorino",
"Nidoking",
"Piepi",
"Pixi",
"Vulpix",
"Vulnona",
"Pummeluff",
"Knuddeluff",
"Zubat",
"Golbat",
"Myrapla",
"Duflor",
"Giflor",
"Paras",
"Parasek",
"Bluzuk",
"Omot",
"Digda",
"Digdri",
"Mauzi",
"Snobilikat",
"Enton",
"Entoron",
"Menki",
"Rasaff",
"Fukano",
"Arkani",
"Quapsel",
"Quaputzi",
"Quappo",
"Abra",
"Kadabra",
"Simsala",
"Machollo",
"Maschock",
"Machomei",
"Knofensa",
"Ultrigaria",
"Sarzenia",
"Tentacha",
"Tentoxa",
"Kleinstein",
"Georok",
"Geowaz",
"Ponita",
"Gallopa",
"Flegmon",
"Lahmus",
"Magnetilo",
"Magneton",
"Porenta",
"Dodu",
"Dodri",
"Jurob",
"Jugong",
"Sleima",
"Sleimok",
"Muschas",
"Austos",
"Nebulak",
"Alpollo",
"Gengar",
"Onix",
"Traumato",
"Hypno",
"Krabby",
"Kingler",
"Voltobal",
"Lektrobal",
"Owei",
"Kokowei",
"Tragosso",
"Knogga",
"Kicklee",
"Nockchan",
"Schlurp",
"Smogon",
"Smogmog",
"Rihorn",
"Rizeros",
"Chaneira",
"Tangela",
"Kangama",
"Seeper",
"Seemon",
"Goldini",
"Golking",
"Sterndu",
"Starmie",
"Pantimos",
"Sichlor",
"Rossana",
"Elektek",
"Magmar",
"Pinsir",
"Tauros",
"Karpador",
"Garados",
"Lapras",
"Ditto",
"Evoli",
"Aquana",
"Blitza",
"Flamara",
"Porygon",
"Amonitas",
"Amoroso",
"Kabuto",
"Kabutops",
"Aerodactyl",
"Relaxo",
"Arktos",
"Zapdos",
"Lavados",
"Dratini",
"Dragonir",
"Dragoran",
"Mewtu",
"Mew",
"Endivie",
"Lorblatt",
"Meganie",
"Feurigel",
"Igelavar",
"Tornupto",
"Karnimani",
"Tyracroc",
"Impergator",
"Wiesor",
"Wiesenior",
"Hoothoot",
"Noctuh",
"Ledyba",
"Ledian",
"Webarak",
"Ariados",
"Iksbat",
"Lampi",
"Lanturn",
"Pichu",
"Pii",
"Fluffeluff",
"Togepi",
"Togetic",
"Natu",
"Xatu",
"Voltilamm",
"Waaty",
"Ampharos",
"Blubella",
"Marill",
"Azumarill",
"Mogelbaum",
"Quaxo",
"Hoppspross",
"Hubelupf",
"Papungha",
"Griffel",
"Sonnkern",
"Sonnflora",
"Yanma",
"Felino",
"Morlord",
"Psiana",
"Nachtara",
"Kramurx",
"Laschoking",
"Traunfugil",
"Icognito",
"Woingenau",
"Girafarig",
"Tannza",
"Forstellka",
"Dummisel",
"Skorgla",
"Stahlos",
"Snubbull",
"Granbull",
"Baldorfish",
"Scherox",
"Pottrott",
"Skaraborn",
"Sniebel",
"Teddiursa",
"Ursaring",
"Schneckmag",
"Magcargo",
"Quiekel",
"Keifel",
"Corasonn",
"Remoraid",
"Octillery",
"Botogel",
"Mantax",
"Panzaeron",
"Hunduster",
"Hundemon",
"Seedraking",
"Phanpy",
"Donphan",
"Porygon2",
"Damhirplex",
"Farbeagle",
"Rabauz",
"Kapoera",
"Kussilla",
"Elekid",
"Magby",
"Miltank",
"Heiteira",
"Raikou",
"Entei",
"Suicune",
"Larvitar",
"Pupitar",
"Despotar",
"Lugia",
"Ho-Oh",
"Celebi",
"Geckarbor",
"Reptain",
"Gewaldro",
"Flemmli",
"Jungglut",
"Lohgock",
"Hydropi",
"Moorabbel",
"Sumpex",
"Fiffyen",
"Magnayen",
"Zigzachs",
"Geradaks",
"Waumpel",
"Schaloko",
"Papinella",
"Panekon",
"Pudox",
"Loturzel",
"Lombrero",
"Kappalores",
"Samurzel",
"Blanas",
"Tengulist",
"Schwalbini",
"Schwalboss",
"Wingull",
"Pelipper",
"Trasla",
"Kirlia",
"Guardevoir",
"Gehweiher",
"Maskeregen",
"Knilz",
"Kapilz",
"Bummelz",
"Muntier",
"Letarking",
"Nincada",
"Ninjask",
"Ninjatom",
"Flurmel",
"Krakeelo",
"Krawumms",
"Makuhita",
"Hariyama",
"Azurill",
"Nasgnet",
"Eneco",
"Enekoro",
"Zobiris",
"Flunkifer",
"Stollunior",
"Stollrak",
"Stolloss",
"Meditie",
"Meditalis",
"Frizelbliz",
"Voltenso",
"Plusle",
"Minun",
"Volbeat",
"Illumise",
"Roselia",
"Schluppuck",
"Schlukwech",
"Kanivanha",
"Tohaido",
"Wailmer",
"Wailord",
"Camaub",
"Camerupt",
"Qurtel",
"Spoink",
"Groink",
"Pandir",
"Knacklion",
"Vibrava",
"Libelldra",
"Tuska",
"Noktuska",
"Wablu",
"Altaria",
"Sengo",
"Vipitis",
"Lunastein",
"Sonnfel",
"Schmerbe",
"Welsar",
"Krebscorps",
"Krebutack",
"Puppance",
"Lepumentas",
"Liliep",
"Wielie",
"Anorith",
"Armaldo",
"Barschwa",
"Milotic",
"Formeo",
"Kecleon",
"Shuppet",
"Banette",
"Zwirrlicht",
"Zwirrklop",
"Tropius",
"Palimpalim",
"Absol",
"Isso",
"Schneppke",
"Firnontor",
"Seemops",
"Seejong",
"Walraisa",
"Perlu",
"Aalabyss",
"Saganabyss",
"Relicanth",
"Liebiskus",
"Kindwurm",
"Draschel",
"Brutalanda",
"Tanhel",
"Metang",
"Metagross",
"Regirock",
"Regice",
"Registeel",
"Latias",
"Latios",
"Kyogre",
"Groudon",
"Rayquaza",
"Jirachi",
"Deoxys",
"Chelast",
"Chelcarain",
"Chelterrar",
"Panflam",
"Panpyro",
"Panferno",
"Plinfa",
"Pliprin",
"Impoleon",
"Staralili",
"Staravia",
"Staraptor",
"Bidiza",
"Bidifas",
"Zirpurze",
"Zirpeise",
"Sheinux",
"Luxio",
"Luxtra",
"Knospi",
"Roserade",
"Koknodon",
"Rameidon",
"Schilterus",
"Bollterus",
"Burmy",
"Burmadame",
"Moterpel",
"Wadribie",
"Honweisel",
"Pachirisu",
"Bamelin",
"Bojelin",
"Kikugi",
"Kinoso",
"Schalellos",
"Gastrodon",
"Ambidiffel",
"Driftlon",
"Drifzepeli",
"Haspiror",
"Schlapor",
"Traunmagil",
"Kramshef",
"Charmian",
"Shnurgarst",
"Klingplim",
"Skunkapuh",
"Skunktank",
"Bronzel",
"Bronzong",
"Mobai",
"Pantimimi",
"Wonneira",
"Plaudagei",
"Kryppuk",
"Kaumalat",
"Knarksel",
"Knakrack",
"Mampfaxo",
"Riolu",
"Lucario",
"Hippopotas",
"Hippoterus",
"Pionskora",
"Piondragi",
"Glibunkel",
"Toxiquak",
"Venuflibis",
"Finneon",
"Lumineon",
"Mantirps",
"Shnebedeck",
"Rexblisar",
"Snibunna",
"Magnezone",
"Schlurplek",
"Rihornior",
"Tangoloss",
"Elevoltek",
"Magbrant",
"Togekiss",
"Yanmega",
"Folipurba",
"Glaziola",
"Skorgro",
"Mamutel",
"Porygon-Z",
"Galagladi",
"Voluminas",
"Zwirrfinst",
"Frosdedje",
"Rotom",
"Selfe",
"Vesprit",
"Tobutz",
"Dialga",
"Palkia",
"Heatran",
"Regigigas",
"Giratina",
"Cresselia",
"Phione",
"Manaphy",
"Darkrai",
"Shaymin",
"Arceus",
"Victini",
"Serpifeu",
"Efoserp",
"Serpiroyal",
"Floink",
"Ferkokel",
"Flambirex",
"Ottaro",
"Zwottronin",
"Admurai",
"Nagelotz",
"Kukmarda",
"Yorkleff",
"Terribark",
"Bissbark",
"Felilou",
"Kleoparda",
"Vegimak",
"Vegichita",
"Grillmak",
"Grillchita",
"Sodamak",
"Sodachita",
"Somniam",
"Somnivora",
"Dusselgurr",
"Navitaub",
"Fasasnob",
"Elezeba",
"Zebritz",
"Kiesling",
"Sedimantur",
"Brockoloss",
"Fleknoil",
"Fletiamo",
"Rotomurf",
"Stalobor",
"Ohrdoch",
"Praktibalk",
"Strepoli",
"Meistagrif",
"Schallquap",
"Mebrana",
"Branawarz",
"Jiutesto",
"Karadonis",
"Strawickl",
"Folikon",
"Matrifol",
"Toxiped",
"Rollum",
"Cerapendra",
"Waumboll",
"Elfun",
"Lilminip",
"Dressella",
"Barschuft",
"Ganovil",
"Rokkaiman",
"Rabigator",
"Flampion",
"Flampivian",
"Maracamba",
"Lithomith",
"Castellith",
"Zurrokex",
"Irokex",
"Symvolara",
"Makabaja",
"Echnatoll",
"Galapaflos",
"Karippas",
"Flapteryx",
"Aeropteryx",
"UnratΓΌtox",
"Deponitox",
"Zorua",
"Zoroark",
"Picochilla",
"Chillabell",
"Mollimorba",
"Hypnomorba",
"Morbitesse",
"Monozyto",
"Mitodos",
"Zytomega",
"Piccolente",
"Swaroness",
"Gelatini",
"Gelatroppo",
"Gelatwino",
"Sesokitz",
"Kronjuwild",
"Emolga",
"Laukaps",
"Cavalanzas",
"Tarnpignon",
"Hutsassa",
"Quabbel",
"Apoquallyp",
"Mamolida",
"Wattzapf",
"Voltula",
"Kastadur",
"Tentantel",
"Klikk",
"Kliklak",
"Klikdiklak",
"Zapplardin",
"Zapplalek",
"Zapplarang",
"Pygraulon",
"Megalon",
"Lichtel",
"Laternecto",
"Skelabra",
"Milza",
"Sharfax",
"Maxax",
"Petznief",
"Siberio",
"Frigometri",
"Schnuthelm",
"Hydragil",
"Flunschlik",
"Fu",
"Shu",
"Shardrago",
"Golbit",
"Golgantes",
"Gladiantri",
"Caesurio",
"Bisofank",
"Geronimatz",
"Washakwil",
"Skallyk",
"Grypheldis",
"FurnifraΓ",
"Fermicula",
"Kapuno",
"Duodino",
"Trikephalo",
"Ignivor",
"Ramoth",
"Kobalium",
"Terrakium",
"Viridium",
"Boreos",
"Voltolos",
"Reshiram",
"Zekrom",
"Demeteros",
"Kyurem",
"Keldeo",
"Meloetta",
"Genesect",
"Igamaro",
"Igastarnish",
"Brigaron",
"Fynx",
"Rutena",
"Fennexis",
"Froxy",
"Amphizel",
"Quajutsu",
"Scoppel",
"Grebbit",
"Dartiri",
"Dartignis",
"Fiaro",
"Purmel",
"Puponcho",
"Vivillon",
"Leufeo",
"Pyroleo",
"FlabΓ©bΓ©",
"Floette",
"Florges",
"MΓ€hikel",
"Chevrumm",
"Pam",
"Pandagro",
"Coiffwaff",
"Psiau",
"Psiaugon",
"Gramokles",
"Duokles",
"Durengard",
"Parfi",
"Parfinesse",
"Flauschling",
"Sabbaione",
"Iscalar",
"Calamanero",
"Bithora",
"Thanathora",
"Algitt",
"Tandrak",
"Scampisto",
"Wummer",
"Eguana",
"Elezard",
"Balgoras",
"Monargoras",
"Amarino",
"Amagarga",
"Feelinara",
"Resladero",
"Dedenne",
"Rocara",
"Viscora",
"Viscargot",
"Viscogon",
"Clavion",
"Paragoni",
"Trombork",
"Irrbis",
"Pumpdjinn",
"Arktip",
"Arktilas",
"eF-eM",
"UHaFnir",
"Xerneas",
"Yveltal",
"Zygarde",
"Diancie",
"Hoopa",
"Volcanion",
"Bauz",
"Arboretoss",
"Silvarro",
"Flamiau",
"Miezunder",
"Fuegro",
"Robball",
"Marikeck",
"Primarene",
"Peppeck",
"Trompeck",
"Tukanon",
"Mangunior",
"Manguspektor",
"Mabula",
"Akkup",
"Donarion",
"Krabbox",
"Krawell",
"Choreogel",
"Wommel",
"Bandelby",
"Wuffels",
"Wolwerock",
"Lusardin",
"Garstella",
"Aggrostella",
"Pampuli",
"Pampross",
"Araqua",
"Aranestro",
"Imantis",
"Mantidea",
"Bubungus",
"Lamellux",
"Molunk",
"Amfira",
"Velursi",
"Kosturso",
"Frubberl",
"Frubaila",
"Fruyal",
"Curelei",
"Kommandutan",
"Quartermak",
"ReiΓlaus",
"Tectass",
"Sankabuh",
"Colossand",
"Gufa",
"Typ:Null",
"Amigento",
"Meteno",
"Koalelu",
"Tortunator",
"Togedemaru",
"Mimigma",
"Knirfish",
"Sen-Long",
"Moruda",
"Miniras",
"Mediras",
"Grandiras",
"Kapu-Riki",
"Kapu-Fala",
"Kapu-Toro",
"Kapu-Kime",
"Cosmog",
"Cosmovum",
"Solgaleo",
"Lunala",
"Anego",
"Masskito",
"Schabelle",
"Voltriant",
"Kaguron",
"Katagami",
"Schlingking",
"Necrozma",
"Magearna",
"Marshadow",
"Venicro",
"Agoyon",
"Muramura",
"Kopplosio",
"Zeraora",
"Meltan",
"Melmetal"
])
@doc """
Returns a location from Pokemon universe
## Examples
iex> Faker.Pokemon.De.location()
"Blumenparadies"
iex> Faker.Pokemon.De.location()
"Kraterberg"
iex> Faker.Pokemon.De.location()
"Zweiblattdorf"
iex> Faker.Pokemon.De.location()
"Sandgemme"
"""
@spec location() :: String.t()
sampler(:location, [
"<NAME>",
"Zweiblattdorf",
"Sandgemme",
"Flori",
"Trostu",
"Elyses",
"Jubelstadt",
"Fleetburg",
"Erzelingen",
"Ewigenau",
"Herzhofen",
"Weideburg",
"Schleiede",
"Sonnewik",
"Blizzach",
"<NAME>",
"Erzelingen-Mine",
"Windkraftwerk",
"Ewigwald",
"FeuriohΓΌtte",
"Kraterberg",
"SpeersΓ€ule",
"GroΓmoor",
"Trostu-Ruinen",
"SiegesstraΓe",
"Park der Freunde",
"Platz der Treue",
"VerwΓΌsteter Pfad",
"<NAME> Flori",
"Erzelingen-Tor",
"Vollmond-Insel",
"Scheidequelle",
"HΓΆhle der Umkehr",
"Blumenparadies",
"Blizzach-Tempel",
"<NAME>le",
"Maniac-HΓΆhle",
"Maniac-Tunnel",
"TrophΓ€engarten",
"Eiseninsel",
"Alte Villa",
"Galakti-Zentrale",
"Wahrheitsufer",
"KΓΌhnheitsufer",
"StΓ€rkeufer",
"FrΓΌhlingspfad",
"See der Wahrheit",
"See der KΓΌhnheit",
"See der StΓ€rke",
"Neumond-Insel",
"Duellturm",
"Kampfareal",
"Γberlebensareal",
"Erholungsgebiet",
"Kahlberg",
"Buhnen-Pfad",
"Halle d. Beginns",
"Wahrheitsgrotte",
"KΓΌhnheitsgrotte",
"StΓ€rkegrotte",
"Jubelstadt TV",
"PokΓ©tch Ges.",
"GTS",
"Trainerschule",
"Bergbaumuseum",
"Blumenladen",
"Fahrradladen",
"Wettbewerbshalle",
"Knursperei",
"BegegnungsstΓ€tte",
"PokΓ©mon-Pension",
"Schleiede-Kaufh.",
"Spielhalle",
"Fleetburg-Bibl.",
"Leuchtturm",
"Sonnewik-Markt",
"PKMN-Landgut",
"Schrittritt-Haus",
"CafΓ©",
"Prachtsee",
"Restaurant",
"Duellpark",
"Kampfzone",
"Kampffabrik",
"Kampfpalais",
"Kampfarkaden",
"Kampfsaal",
"Zerrwelt",
"Global. Terminal",
"Villa",
"Duellareal",
"Raum von ROTOM",
"Galaktik-GebΓ€ude",
"Eisen-Ruinen",
"Eisberg-Ruinen",
"Gipfelruinen",
"Neuborkia",
"Rosalia City",
"Viola City",
"Azalea City",
"Anemonia City",
"Dukatia City",
"Oliviana City",
"Teak City",
"Mahagonia City",
"See des Zorns",
"Ebenholz City",
"Silberberg",
"Alabastia",
"Vertania City",
"Marmoria City",
"Azuria City",
"Lavandia",
"Orania City",
"Prismania City",
"Fuchsania City",
"Zinnoberinsel",
"Indigo Plateau",
"Saffronia City",
"DIGDAs HΓΆhle",
"Mondberg",
"Azuria-HΓΆhle",
"Felstunnel",
"Kraftwerk",
"Safari-Zone",
"Seeschauminseln",
"KNOFENSA-Turm",
"Glockenturm",
"Turmruine",
"Nationalpark",
"Radioturm",
"Alph-Ruinen",
"Einheitstunnel",
"FLEGMON-Brunnen",
"Leuchtturm",
"Rocket-Versteck",
"Steineichenwald",
"Dukatia-Passage",
"Kesselberg",
"Eispfad",
"Strudelinseln",
"SilberberghΓΆhle",
"DunkelhΓΆhle",
"SiegesstraΓe",
"DrachenhΓΆhle",
"Tohjo-FΓ€lle",
"Vertania-Wald",
"PokΓ©athlonhallen",
"<NAME>",
"Safari-Eingang",
"FelsschlundhΓΆhle",
"Kampfzonenzugang",
"Glockenklangpfad",
"Sinjoh-Ruinen",
"Felsenherzturm",
"PokΓ©walker",
"Felsklippentor"
])
end
|
lib/faker/pokemon/de.ex
| 0.589598
| 0.475057
|
de.ex
|
starcoder
|
defmodule YipyipExAuth.Plugs.ProcessAccessToken do
@moduledoc """
Plug to process and verify access tokens. Must be initialized with a `YipyipExAuth.Config`-struct, which can be initialized itself using `YipyipExAuth.Config.from_enum/1`.
The plug does not reject unauthenticated requests by itself. If a request is successfully verified, the user ID, session ID and extra payload are assigned to the conn. If not, an authentication error message is put in the conn's private map, which can be retrieved using `YipyipExAuth.Utils.get_auth_error/1`. This allows applications to implement their own plug to reject unauthenticated requests, for example:
## Usage example that rejects unauthenticated requests
```
defmodule MyPhoenixAppWeb.Router do
use MyPhoenixAppWeb, :router
@config YipyipExAuth.Config.from_enum(
session_ttl: 68400,
refresh_token_ttl: 3600,
session_store_module: MyModule
)
pipeline :valid_access_token do
plug YipyipExAuth.Plugs.ProcessAccessToken, @config
plug :only_authenticated
end
@doc \"\"\"
Reject unauthenticated requests
\"\"\"
def only_authenticated(%{assigns: %{current_user_id: _}} = conn, _opts), do: conn
def only_authenticated(conn, _opts) do
auth_error = YipyipExAuth.Utils.get_auth_error(conn)
conn |> Plug.Conn.send_resp(401, auth_error) |> halt()
end
end
```
In this way, applications can completely customize how to respond to unauthenticated requests and how much information to expose to the client.
## Examples / doctests
alias Plug.Conn
alias YipyipExAuth.Plugs.ProcessAccessToken
alias YipyipExAuth.Utils
import YipyipExAuth.TestHelpers
# only available when Mix env = test
alias YipyipExAuth.TestSupport.FakeSessionStore
import YipyipExAuth.TestSupport.Shared
@config YipyipExAuth.Config.from_enum(
session_ttl: 68400,
refresh_token_ttl: 3600,
session_store_module: FakeSessionStore
)
@plug_opts ProcessAccessToken.init(@config)
# "reject" requests without bearer token
iex> conn = %Conn{} |> ProcessAccessToken.call(@plug_opts)
iex> "bearer token not found" = Utils.get_auth_error(conn)
iex> conn.assigns
%{}
# "reject" requests with invalid token
iex> config = %{@config | access_token_salt: "different"}
iex> conn = build_conn() |> put_access_token(config) |> ProcessAccessToken.call(@plug_opts)
iex> "bearer token invalid" = Utils.get_auth_error(conn)
iex> conn.assigns
%{}
# "reject" requests with expired bearer token
iex> plug_opts = ProcessAccessToken.init(%{@config | access_token_ttl: -1})
iex> conn = build_conn() |> put_access_token(@config) |> ProcessAccessToken.call(plug_opts)
iex> "bearer token expired" = Utils.get_auth_error(conn)
iex> conn.assigns
%{}
# "reject" requests where the signature transport mechanism does not match the session's initial value
iex> token = generate_access_token(build_conn(), @config, %{tst: :cookie})
iex> conn = build_conn() |> put_access_token(@config, token) |> ProcessAccessToken.call(@plug_opts)
iex> "token signature transport invalid" = Utils.get_auth_error(conn)
iex> conn.assigns
%{}
# "reject" requests with an expired session
iex> token = generate_access_token(build_conn(), @config, %{exp: 1})
iex> conn = build_conn() |> put_access_token(@config, token) |> ProcessAccessToken.call(@plug_opts)
iex> "session expired" = Utils.get_auth_error(conn)
iex> conn.assigns
%{}
# "allow" requests with valid bearer token
iex> conn = build_conn() |> put_access_token(@config) |> ProcessAccessToken.call(@plug_opts)
iex> nil = Utils.get_auth_error(conn)
iex> conn.assigns
%{current_session_id: "a", current_user_id: 1, extra_access_token_payload: %{}}
# "allow" requests with valid bearer token with signature in cookie
iex> token = generate_access_token(build_conn(), @config, %{tst: :cookie})
iex> [header, encoded_payload, signature] = String.split(token, ".", parts: 3)
iex> conn = build_conn()
...> |> put_access_token(@config, header <> "." <> encoded_payload)
...> |> Plug.Test.put_req_cookie(@config.access_cookie_name, "." <> signature)
...> |> ProcessAccessToken.call(@plug_opts)
iex> nil = Utils.get_auth_error(conn)
iex> conn.assigns
%{current_session_id: "a", current_user_id: 1, extra_access_token_payload: %{}}
"""
@behaviour Plug
alias Phoenix.Token
alias Plug.Conn
use YipyipExAuth.Utils.Constants
alias YipyipExAuth.{SharedInternals, Config}
require Logger
@doc false
@impl true
@spec init(YipyipExAuth.Config.t()) :: Plug.opts()
def init(%Config{
access_cookie_name: cookie_name,
access_token_salt: salt,
access_token_ttl: max_age,
access_token_key_digest: digest,
session_store_module: session_store
}) do
{session_store, salt, cookie_name, key_digest: digest, max_age: max_age}
end
@doc false
@impl true
@spec call(Conn.t(), Plug.opts()) :: Conn.t()
def call(conn, {session_store, salt, cookie_name, verification_opts}) do
with {:token, {sig_transport, token}} <- SharedInternals.get_token(conn, cookie_name),
{:ok, payload} <- Token.verify(conn, salt, token, verification_opts),
{:pl, %{uid: uid, tst: exp_sig_trans, sid: sid, exp: exp, epl: epl}} <- {:pl, payload},
{:transport_matches, true} <- {:transport_matches, sig_transport == exp_sig_trans},
{:session_expired, false} <-
SharedInternals.session_expired?(sid, uid, exp, session_store) do
conn
|> Conn.assign(:current_user_id, uid)
|> Conn.assign(:current_session_id, sid)
|> Conn.assign(:extra_access_token_payload, epl)
|> Conn.put_private(@private_access_token_payload_key, payload)
|> Conn.put_private(@private_token_signature_transport_key, sig_transport)
else
{:token, nil} ->
SharedInternals.auth_error(conn, "bearer token not found")
{:error, :expired} ->
SharedInternals.auth_error(conn, "bearer token expired")
{:error, :invalid} ->
SharedInternals.auth_error(conn, "bearer token invalid")
{:pl, _} ->
SharedInternals.auth_error(conn, "invalid bearer token payload")
{:transport_matches, false} ->
SharedInternals.auth_error(conn, "token signature transport invalid")
{:session_expired, true} ->
SharedInternals.auth_error(conn, "session expired")
error ->
Logger.error("Unexpected auth error: #{inspect(error)}")
SharedInternals.auth_error(conn, "unexpected error")
end
end
end
|
lib/plugs/process_access_token.ex
| 0.888414
| 0.40204
|
process_access_token.ex
|
starcoder
|
defmodule Scenic.Primitive.Rectangle do
@moduledoc """
Draw a rectangle on the screen.
## Data
`{width, height}`
The data for a line is a tuple containing two numbers.
* `width` - width of the rectangle
* `height` - height of the rectangle
## Styles
This primitive recognizes the following styles
* [`hidden`](Scenic.Primitive.Style.Hidden.html) - show or hide the primitive
* [`fill`](Scenic.Primitive.Style.Fill.html) - fill in the area of the primitive
  * [`stroke`](Scenic.Primitive.Style.Stroke.html) - stroke the outline of the primitive
* [`join`](Scenic.Primitive.Style.Join.html) - control how segments are joined.
* [`miter_limit`](Scenic.Primitive.Style.MiterLimit.html) - control how segments are joined.
## Usage
You should add/modify primitives via the helper functions in
[`Scenic.Primitives`](Scenic.Primitives.html#rectangle/3)
```elixir
graph
|> rect( {100, 50}, stroke: {1, :yellow} )
|> rectangle( {100, 50}, stroke: {1, :yellow} )
```
Note: `rect` is a shortcut for `rectangle` and they can be used interchangeably.
"""
use Scenic.Primitive
alias Scenic.Script
alias Scenic.Primitive
alias Scenic.Primitive.Style
@type t :: {width :: number, height :: number}
@type styles_t :: [
:hidden | :scissor | :fill | :stroke_width | :stroke_fill | :join | :miter_limit
]
@styles [:hidden, :scissor, :fill, :stroke_width, :stroke_fill, :join, :miter_limit]
@impl Primitive
@spec validate(t()) :: {:ok, t()} | {:error, String.t()}
def validate({width, height}) when is_number(width) and is_number(height) do
{:ok, {width, height}}
end
def validate(data) do
{
:error,
"""
#{IO.ANSI.red()}Invalid Rectangle specification
Received: #{inspect(data)}
#{IO.ANSI.yellow()}
        The data for a Rectangle is {width, height}#{IO.ANSI.default_color()}
"""
}
end
# --------------------------------------------------------
@doc """
Returns a list of styles recognized by this primitive.
"""
@impl Primitive
@spec valid_styles() :: styles_t()
def valid_styles(), do: @styles
# --------------------------------------------------------
@doc """
Compile the data for this primitive into a mini script. This can be combined with others to
generate a larger script and is called when a graph is compiled.
"""
@impl Primitive
@spec compile(primitive :: Primitive.t(), styles :: Style.t()) :: Script.t()
def compile(%Primitive{module: __MODULE__, data: {width, height}}, styles) do
Script.draw_rectangle([], width, height, Script.draw_flag(styles))
end
# --------------------------------------------------------
def default_pin(data), do: centroid(data)
# --------------------------------------------------------
@doc """
Returns the centroid of the rectangle. This is used as the default pin when applying
rotate or scale transforms.
"""
def centroid(data)
def centroid({width, height}) do
{width / 2, height / 2}
end
# --------------------------------------------------------
def contains_point?({w, h}, {xp, yp}) do
# width and xp must be the same sign
# height and yp must be the same sign
# xp must be less than the width
# yp must be less than the height
xp * w >= 0 && yp * h >= 0 && abs(xp) <= abs(w) && abs(yp) <= abs(h)
end
# --------------------------------------------------------
@doc false
def default_pin({width, height}, _styles) do
{width / 2, height / 2}
end
end
|
lib/scenic/primitive/rectangle.ex
| 0.957596
| 0.922343
|
rectangle.ex
|
starcoder
|
defmodule Crux.Rest.ApiError do
@moduledoc """
Represents a Discord API error.
Raised or returned whenever the api responded with a non `200` / `204` status code
"""
defexception(
status_code: nil,
code: nil,
message: nil,
path: nil,
method: nil
)
@typedoc """
| Property | Description | Example(s) |
| ------------- | ------------------------------------------------------------------------------------------------------------------------------ | ------------------- |
| `status_code` | HTTP status code | `400`, `404`, `403` |
| `code` | See Discord's [JSON Error Codes](https://discordapp.com/developers/docs/topics/opcodes-and-status-codes#json-json-error-codes) | `10006`, `90001` |
| `message` | Message describing the error | `Unknown Invite` |
  | `path`        | Path of the request                                                                                                              | `/invites/broken`   |
  | `method`      | HTTP method of the request                                                                                                      | `get`               |
"""
@type t :: %{
status_code: integer(),
code: integer() | nil,
message: String.t(),
path: String.t(),
method: String.t()
}
@doc """
Default implementation only providing a `message` for `raise/2`
"""
@spec exception(message :: String.t()) :: __MODULE__.t()
def exception(message) when is_bitstring(message) do
%__MODULE__{message: message}
end
@doc """
Creates a full `Crux.Rest.ApiError` struct, returned / raised by all `Crux.Rest` functions in case of an API error.
"""
@spec exception(
error :: map(),
status_code :: pos_integer(),
path :: String.t(),
method :: String.t()
) :: __MODULE__.t()
def exception(%{"message" => message} = error, status_code, path, method) do
code = Map.get(error, "code")
inner =
error
|> Map.get("errors")
|> map_inner()
message = if inner, do: "#{message}\n#{inner}", else: message
%__MODULE__{
status_code: status_code,
code: code,
message: message,
path: path,
method: method
}
end
defp map_inner(error, key \\ nil)
defp map_inner(nil, _key), do: nil
defp map_inner(error, key) when is_map(error) do
Enum.map_join(error, "\n", fn {k, v} ->
cond do
key && Regex.match?(~r/\d+/, k) -> "#{key}[#{k}]"
key -> "#{key}.#{k}"
true -> k
end
|> transform_value(v)
end)
end
defp map_inner(_error, _key), do: nil
defp transform_value(_key, value) when is_bitstring(value), do: value
defp transform_value(key, %{"_errors" => errors}),
do: "#{key}: #{Enum.map_join(errors, " ", &Map.get(&1, "message"))}"
defp transform_value(_key, %{"code" => code, "message" => message}), do: "#{code}: #{message}"
defp transform_value(_key, %{"message" => message}), do: message
defp transform_value(key, value), do: map_inner(value, key)
end
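# Sketch (illustrative values only): building the full error from an API
# response, as `Crux.Rest` functions do internally via `exception/4`:
#
#   Crux.Rest.ApiError.exception(
#     %{"message" => "Unknown Invite", "code" => 10006},
#     404,
#     "/invites/broken",
#     "get"
#   )
#   #=> %Crux.Rest.ApiError{status_code: 404, code: 10006,
#   #     message: "Unknown Invite", path: "/invites/broken", method: "get"}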
|
lib/rest/api_error.ex
| 0.841728
| 0.41947
|
api_error.ex
|
starcoder
|
defmodule JsonApiDeserializer do
@moduledoc """
Json api deserializer able to deserialize json api documents with relationships.
For instance, this payload:
```
{
"data": [
{
"type": "posts",
"id": "13608770-76dd-47e5-a1c4-4d0d9c2483ad",
"links": {
"self": "http://link-to-post/1"
},
"attributes": {
"title": "First post",
"content": "First post content"
},
"relationships": {
"creator": {
"data": {
"type": "creators",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ad"
},
"links": {
"related": "http://link-to-creator/1"
}
},
"comments": {
"links": {},
"data": [
{
"type": "comment",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ab"
},
{
"type": "comment",
"id": "cb0759b0-03ab-4291-b067-84a9017fea6f"
}
]
}
}
},
{
"type": "posts",
"id": "13608770-76dd-47e5-a1c4-4d0d9c2483ae",
"links": {
"self": "http://link-to-post/2"
},
"attributes": {
"title": "Second post",
"content": "Second post content"
},
"relationships": {
"creator": {
"data": {
"type": "creators",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ad"
},
"links": {
"related": "http://lnk-to-creator/1"
}
},
"comments": {
"links": {},
"data": [
{
"type": "comment",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ac"
}
]
}
}
}
],
"included": [
{
"type": "creators",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ad",
"attributes": {
"firstname": "John",
"lastname": "Doe"
},
"links": {
"self": "http://link-to-creator/1"
},
"relationships": {}
},
{
"type": "comment",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ac",
"attributes": {
"content": "Comment 1 content",
"email": "<EMAIL>"
},
"links": {
"self": "http://link-to-comment/1"
},
"relationships": {}
},
{
"type": "comment",
"id": "22208770-76dd-47e5-a1c4-4d0d9c2483ab",
"attributes": {
"content": "Comment 2 content",
"email": "<EMAIL>"
},
"links": {
"self": "http://link-to-comment/2"
},
"relationships": {}
},
{
"type": "comment",
"id": "cb0759b0-03ab-4291-b067-84a9017fea6f",
"attributes": {
"content": "Comment 3 content",
"email": "<EMAIL>"
},
"links": {
"self": "http://link-to-comment/3"
},
"relationships": {}
}
]
}
```
Will be deserialized in a map like this one:
```
[
%{
"__metadata" => %{
"links" => %{"self" => "http://link-to-post/1"},
"type" => "posts"
},
"comments" => [
%{
"__metadata" => %{
"links" => %{"self" => "http://link-to-comment/2"},
"type" => "comment"
},
"content" => "Comment 2 content",
"email" => "<EMAIL>",
"id" => "22208770-76dd-47e5-a1c4-4d0d9c2483ab"
},
%{
"__metadata" => %{
"links" => %{"self" => "http://link-to-comment/3"},
"type" => "comment"
},
"content" => "Comment 3 content",
"email" => "<EMAIL>",
"id" => "cb0759b0-03ab-4291-b067-84a9017fea6f"
}
],
"content" => "First post content",
"creator" => %{
"__metadata" => %{
"links" => %{"self" => "http://link-to-creator/1"},
"type" => "creators"
},
"firstname" => "John",
"id" => "22208770-76dd-47e5-a1c4-4d0d9c2483ad",
"lastname" => "Doe"
},
"id" => "13608770-76dd-47e5-a1c4-4d0d9c2483ad",
"title" => "First post"
},
%{
"__metadata" => %{
"links" => %{"self" => "http://link-to-post/2"},
"type" => "posts"
},
"comments" => [
%{
"__metadata" => %{
"links" => %{"self" => "http://link-to-comment/1"},
"type" => "comment"
},
"content" => "Comment 1 content",
"email" => "<EMAIL>",
"id" => "22208770-76dd-47e5-a1c4-4d0d9c2483ac"
}
],
"content" => "Second post content",
"creator" => %{
"__metadata" => %{
"links" => %{"self" => "http://link-to-creator/1"},
"type" => "creators"
},
"firstname" => "John",
"id" => "22208770-76dd-47e5-a1c4-4d0d9c2483ad",
"lastname" => "Doe"
},
"id" => "13608770-76dd-47e5-a1c4-4d0d9c2483ae",
"title" => "Second post"
}
]
```
"""
@doc """
Deserialize a payload.
Payload can be a map or a string that will be decoded with `Jason`.
  The return value is `{:ok, data}` with data being the decoded document as a list
or a map. Or `{:error, error}` if something went wrong when decoding.
Possible errors are:
* `Jason.DecodeError.t()` is an error from json decoding
* `:invalid_data` is when `"data"` is not a map or a list
* `:bad_relationships_type` is when `"relationships"` is not a map
* `:bad_included_type` is when `"included"` is not a list
* `:relationship_not_found` is when no relationship could be found for a specified type and id
  * `:bad_relationship_data` is when the relationship data does not have an `id` or a `type` field
  * `:bad_relationship_object` is when a relationship value is not a map with a `data` key
"""
@spec deserialize(binary() | map()) ::
{:ok, list() | map()}
| {:error, Jason.DecodeError.t()}
| {:error, :invalid_data}
| {:error, :bad_relationships_type}
| {:error, :bad_included_type}
| {:error, :relationship_not_found}
          | {:error, :bad_relationship_data}
          | {:error, :bad_relationship_object}
def deserialize(body) when is_binary(body) do
case Jason.decode(body) do
{:ok, data} ->
deserialize(data)
{:error, error} ->
{:error, error}
end
end
def deserialize(%{"data" => data} = body) when is_list(data) do
data
|> Enum.map(&deserialize(&1, Map.get(body, "included")))
|> Enum.reduce({:ok, []}, fn
{:ok, data}, {:ok, list} ->
{:ok, [data | list]}
{:ok, _data}, {:error, error} ->
{:error, error}
{:error, error}, _ ->
{:error, error}
end)
|> case do
{:ok, list} -> {:ok, Enum.reverse(list)}
{:error, error} -> {:error, error}
end
end
def deserialize(%{"data" => data} = body), do: deserialize(data, Map.get(body, "included"))
def deserialize(_), do: {:error, :invalid_data}
defp deserialize(nil, _), do: {:error, :invalid_data}
defp deserialize({:error, error}, _), do: {:error, error}
defp deserialize(data, included) do
case find_relationships(Map.get(data, "relationships", nil), included) do
{:ok, relationships} ->
{:ok,
data
|> Map.get("attributes", %{})
|> Map.merge(%{"id" => Map.get(data, "id", nil)})
|> Map.merge(%{
"__metadata" => %{
"type" => Map.get(data, "type", nil),
"links" => Map.get(data, "links", nil)
}
})
|> Map.merge(relationships)
|> JsonApiDeserializer.KeyFormatting.parse()}
{:error, error} ->
{:error, error}
end
end
defp find_relationships(nil, _), do: {:ok, %{}}
defp find_relationships(relationships, included)
when is_map(relationships) do
relationships
|> Map.to_list()
|> Enum.reduce({:ok, %{}}, fn
{key, value}, {:ok, map} ->
case find_relationship(value, included) do
{:ok, relationship} ->
{:ok, Map.put(map, key, relationship)}
{:error, error} ->
{:error, error}
end
_key, {:error, error} ->
{:error, error}
end)
end
defp find_relationships(_, _included),
do: {:error, :bad_relationships_type}
defp find_relationship(%{"data" => data}, included) when is_list(data) do
data
|> Enum.map(&find_relationship_in_included(&1, included))
|> Enum.reduce({:ok, []}, fn
{:ok, data}, {:ok, list} ->
{:ok, [data | list]}
{:ok, _data}, {:error, error} ->
{:error, error}
{:error, error}, _ ->
{:error, error}
end)
|> case do
{:ok, list} -> {:ok, Enum.reverse(list)}
{:error, error} -> {:error, error}
end
end
defp find_relationship(%{"data" => data}, included),
do: find_relationship_in_included(data, included)
defp find_relationship(%{}, _included), do: {:ok, nil}
defp find_relationship(_, _),
do: {:error, :bad_relationship_object}
defp find_relationship_in_included(%{"type" => type, "id" => id}, included)
when is_list(included) do
included
|> Enum.find({:error, :relationship_not_found}, &is_relationship(type, id, &1))
|> deserialize(included)
end
defp find_relationship_in_included(_, included) when is_list(included),
do: {:error, :bad_relationship_data}
defp find_relationship_in_included(_, _), do: {:error, :bad_included_type}
defp is_relationship(type, id, %{"type" => type_, "id" => id_})
when type == type_ and id == id_,
do: true
defp is_relationship(_, _, _), do: false
end
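# Usage sketch: `body` is a raw JSON:API response binary such as the payload
# shown in the moduledoc above.
#
#   case JsonApiDeserializer.deserialize(body) do
#     {:ok, posts} -> Enum.map(posts, & &1["title"])
#     {:error, reason} -> {:error, reason}
#   end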
|
lib/json_api_deserializer.ex
| 0.739046
| 0.488344
|
json_api_deserializer.ex
|
starcoder
|
defmodule RGBMatrix.Animation.SolidReactive do
@moduledoc """
Static single hue, pulses keys hit to shifted hue then fades to current hue.
"""
use RGBMatrix.Animation
alias Chameleon.HSV
import RGBMatrix.Utils, only: [mod: 2]
field :speed, :integer,
default: 4,
min: 0,
max: 32,
doc: [
name: "Speed",
description: """
The speed at which the hue shifts back to base.
"""
]
field :distance, :integer,
default: 180,
min: 0,
max: 360,
step: 10,
doc: [
name: "Distance",
description: """
The distance that the hue shifts on key-press.
"""
]
field :direction, :option,
default: :random,
options: ~w(random negative positive)a,
doc: [
name: "Direction",
description: """
The direction (through the color wheel) that the hue shifts on key-press.
"""
]
defmodule State do
@moduledoc false
defstruct [:first_render, :paused, :tick, :color, :leds, :hits]
end
@delay_ms 17
@impl true
def new(leds, _config) do
color = HSV.new(190, 100, 100)
%State{first_render: true, paused: false, tick: 0, color: color, leds: leds, hits: %{}}
end
@impl true
def render(%{first_render: true} = state, _config) do
%{color: color, leds: leds} = state
colors = Enum.map(leds, &{&1.id, color})
{:never, colors, %{state | first_render: false, paused: true}}
end
def render(%{paused: true} = state, _config),
do: {:never, [], state}
def render(state, config) do
%{tick: tick, color: color, leds: leds, hits: hits} = state
%{speed: _speed, distance: distance} = config
colors =
Enum.map(leds, fn
led when is_map_key(hits, led) ->
{hit_tick, direction_modifier} = hits[led]
if tick - hit_tick >= distance do
{led.id, color}
else
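            # On the hit tick the shift is -distance * modifier (a full hue
            # jump); it decays linearly back to 0 over `distance` ticks,
            # fading the key back to the base hue.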
hue_shift = (tick - hit_tick - distance) * direction_modifier
hue = mod(color.h + hue_shift, 360)
{led.id, HSV.new(hue, color.s, color.v)}
end
led ->
{led.id, color}
end)
updated_hits =
hits
|> Enum.reject(fn {_led, {hit_tick, _direction_modifier}} ->
tick - hit_tick >= distance
end)
|> Enum.into(%{})
state = %{
state
| tick: tick + 1,
hits: updated_hits,
paused: updated_hits == %{}
}
{@delay_ms, colors, state}
end
@impl true
def interact(state, config, led) do
direction = direction_modifier(config.direction)
render_in =
case state.paused do
true -> 0
false -> :ignore
end
{render_in, %{state | paused: false, hits: Map.put(state.hits, led, {state.tick, direction})}}
end
defp direction_modifier(:random), do: Enum.random([-1, 1])
defp direction_modifier(:negative), do: -1
defp direction_modifier(:positive), do: 1
end
|
lib/rgb_matrix/animation/solid_reactive.ex
| 0.91331
| 0.55914
|
solid_reactive.ex
|
starcoder
|
defmodule Svalinn do
@on_load :preload_tokens
@default_encoding Svalinn.Encodings.Binary
@version 1
import Svalinn.Util, only: [prefix: 1, prefix: 2]
import Svalinn.Encoder, only: [encode: 3]
import Svalinn.Decoder, only: [decode: 3]
defprotocol Tokenize do
@doc ~S"""
"""
@spec token(map) :: map
def token(data)
end
@doc ~S"""
Encode a structure or token.
"""
@spec encode(map, Keyword.t()) :: {:ok, any} | {:error, atom}
def encode(token, opts \\ []) do
version = opts[:version] || @version
encoding = opts[:encoding] || @default_encoding
with {:ok, prefix} <- prefix(version, encoding),
{:ok, token} <- prepare_token(token),
{:ok, packed} <- encode(version, token, opts),
{:ok, data} <- encoding.encode(packed) do
{:ok, prefix <> data}
end
end
@doc ~S"""
Decode a token.
"""
@spec decode(binary, Keyword.t()) :: {:ok, any} | {:error, atom}
def decode(token, opts \\ [])
def decode(<<prefix::binary-1, data::binary>>, opts) do
with {:ok, version, encoding} <- prefix(prefix),
{:ok, packed} <- encoding.decode(data),
{:ok, token} <- decode(version, packed, opts),
{:ok, data} <- load_token(token, opts) do
{:ok, data}
end
end
def decode(_, _), do: {:error, :invalid_token_data}
  @spec prepare_token(struct) :: {:ok, map} | {:error, :invalid_token}
defp prepare_token(token) do
types = Application.fetch_env!(:svalinn, :types)
module = token.__struct__
cond do
types[module] ->
{:ok, %{type: types[module], data: module.__token_parse__(token)}}
Tokenize.impl_for(token) ->
with {:ok, data} <- Tokenize.token(token), do: prepare_token(data)
      true ->
{:error, :invalid_token}
end
end
@spec load_token(map, Keyword.t()) :: any
  defp load_token(%{"type" => type, "data" => data}, opts) do
    types = Application.fetch_env!(:svalinn, :reverse_types)

    # A `with {type, defaults} <- ...` here would also match the
    # `{:error, :invalid_type}` fallback tuple, so match explicitly instead.
    case Map.get(types, type) do
      nil -> {:error, :invalid_type}
      {module, defaults} -> module.__token_load__(data, Keyword.merge(defaults, opts))
      module when is_atom(module) -> module.__token_load__(data, opts)
    end
  end
@doc false
@spec preload_tokens :: :ok
def preload_tokens do
types =
:svalinn
|> Application.get_env(:tokens, [])
|> Enum.map(fn {k, v} ->
type = to_string(k)
value = if is_atom(v), do: v, else: elem(v, 0)
case String.split(type, "@", parts: 2) do
[a, b] -> {a, String.to_integer(b), value}
_ -> {type, nil, value}
end
end)
|> Enum.group_by(&elem(&1, 2))
|> Enum.map(fn {_, values} ->
{type, version, value} =
values
|> Enum.sort_by(&elem(&1, 1))
|> List.last()
if is_nil(version), do: {value, type}, else: {value, "#{type}@#{version}"}
end)
|> Enum.into(%{})
reverse_types =
:svalinn
|> Application.get_env(:tokens, [])
|> Enum.map(fn {k, v} -> {to_string(k), v} end)
|> Enum.into(%{})
Application.put_env(:svalinn, :types, types, persistent: true)
Application.put_env(:svalinn, :reverse_types, reverse_types, persistent: true)
:ok
end
end
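# Round-trip sketch (assumption: a `MyApp.UserToken` struct registered under
# `config :svalinn, tokens: [...]` so that prepare_token/1 can resolve it):
#
#   {:ok, token} = Svalinn.encode(%MyApp.UserToken{id: 1})
#   {:ok, data} = Svalinn.decode(token)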
|
lib/svalinn.ex
| 0.721645
| 0.448064
|
svalinn.ex
|
starcoder
|
defmodule Sanbase.Metric.SqlQuery.Helper do
@aggregations [:any, :sum, :avg, :min, :max, :last, :first, :median, :count, :ohlc]
@type operator ::
:inside_channel
| :outside_channel
| :less_than
| :less_than_or_equal_to
| :greater_than
| :greater_than_or_equal_to
| :inside_channel_inclusive
| :inside_channel_exclusive
| :outside_channel_inclusive
| :outside_channel_exclusive
def aggregations(), do: @aggregations
def aggregation(:ohlc, value_column, dt_column) do
"""
argMin(#{value_column}, #{dt_column}) AS open,
max(#{value_column}) AS high,
min(#{value_column}) AS low,
argMax(#{value_column}, #{dt_column}) AS close
"""
end
def aggregation(:last, value_column, dt_column), do: "argMax(#{value_column}, #{dt_column})"
def aggregation(:first, value_column, dt_column), do: "argMin(#{value_column}, #{dt_column})"
def aggregation(:count, value_column, _dt_column), do: "coalesce(count(#{value_column}), 0)"
def aggregation(:sum, value_column, _dt_column), do: "sumKahan(#{value_column})"
def aggregation(aggr, value_column, _dt_column), do: "#{aggr}(#{value_column})"
def generate_comparison_string(column, :inside_channel, value),
do: generate_comparison_string(column, :inside_channel_inclusive, value)
def generate_comparison_string(column, :outside_channel, value),
do: generate_comparison_string(column, :outside_channel_inclusive, value)
def generate_comparison_string(column, :less_than, threshold)
when is_number(threshold),
do: "#{column} < #{threshold}"
def generate_comparison_string(column, :less_than_or_equal_to, threshold)
when is_number(threshold),
do: "#{column} <= #{threshold}"
def generate_comparison_string(column, :greater_than, threshold)
when is_number(threshold),
do: "#{column} > #{threshold}"
def generate_comparison_string(column, :greater_than_or_equal_to, threshold)
when is_number(threshold),
do: "#{column} >= #{threshold}"
def generate_comparison_string(column, :inside_channel_inclusive, [low, high])
when is_number(low) and is_number(high),
do: "#{column} >= #{low} AND #{column} <= #{high}"
def generate_comparison_string(column, :inside_channel_exclusive, [low, high])
when is_number(low) and is_number(high),
do: "#{column} > #{low} AND #{column} < #{high}"
def generate_comparison_string(column, :outside_channel_inclusive, [low, high])
when is_number(low) and is_number(high),
do: "#{column} <= #{low} OR #{column} >= #{high}"
def generate_comparison_string(column, :outside_channel_exclusive, [low, high])
when is_number(low) and is_number(high),
do: "#{column} < #{low} OR #{column} > #{high}"
def asset_id_filter(slug, opts) when is_binary(slug) do
arg_position = Keyword.fetch!(opts, :argument_position)
"asset_id = ( SELECT asset_id FROM asset_metadata FINAL PREWHERE name = ?#{arg_position} LIMIT 1 )"
end
def asset_id_filter(slugs, opts) when is_list(slugs) do
arg_position = Keyword.fetch!(opts, :argument_position)
"asset_id IN ( SELECT DISTINCT(asset_id) FROM asset_metadata FINAL PREWHERE name IN (?#{arg_position}) )"
end
def metric_id_filter(metric, opts) when is_binary(metric) do
arg_position = Keyword.fetch!(opts, :argument_position)
"metric_id = ( SELECT metric_id FROM metric_metadata FINAL PREWHERE name = ?#{arg_position} LIMIT 1 )"
end
def metric_id_filter(metrics, opts) when is_list(metrics) do
arg_position = Keyword.fetch!(opts, :argument_position)
"metric_id IN ( SELECT DISTINCT(metric_id) FROM metric_metadata FINAL PREWHERE name IN (?#{arg_position}) )"
end
def label_id_by_label_fqn_filter(label_fqn, opts) when is_binary(label_fqn) do
arg_position = Keyword.fetch!(opts, :argument_position)
"label_id = dictGetUInt64('default.label_ids_dict', 'label_id', tuple(?#{arg_position}))"
end
def label_id_by_label_fqn_filter(label_fqns, opts) when is_list(label_fqns) do
arg_position = Keyword.fetch!(opts, :argument_position)
"label_id IN (
SELECT dictGetUInt64('default.label_ids_dict', 'label_id', tuple(fqn)) AS label_id
FROM system.one
ARRAY JOIN [?#{arg_position}] AS fqn
)"
end
def label_id_by_label_key_filter(label_key, opts) when is_binary(label_key) do
arg_position = Keyword.fetch!(opts, :argument_position)
"label_id IN (SELECT label_id FROM label_metadata PREWHERE key = ?#{arg_position})"
end
def label_id_by_label_key_filter(label_keys, opts) when is_list(label_keys) do
arg_position = Keyword.fetch!(opts, :argument_position)
"label_id IN (SELECT label_id FROM label_metadata PREWHERE key IN (?#{arg_position}))"
end
# Add additional `=`/`in` filters to the query. This is mostly used with labeled
# metrics where additional column filters must be applied.
def additional_filters([], args, _opts), do: {"", args}
def additional_filters(filters, args, opts) do
{filters_str_list, args} =
Enum.reduce(filters, {[], args}, fn {column, value}, {list_acc, args_acc} ->
{filter_str, updated_args} = do_additional_filters(column, value, args_acc)
{[filter_str | list_acc], updated_args}
end)
filters_string = filters_str_list |> Enum.reverse() |> Enum.join(" AND\n")
filters_string =
case Keyword.get(opts, :trailing_and, false) do
false -> filters_string
true -> filters_string <> " AND"
end
{filters_string, args}
end
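  # For illustration:
  #
  #   additional_filters([owner: "Binance"], [], [])
  #   #=> {"lower(owner) = ?1", ["binance"]}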
def dt_to_unix(:from, dt) do
Enum.max([dt, ~U[2009-01-01 00:00:00Z]], DateTime) |> DateTime.to_unix()
end
def dt_to_unix(:to, dt) do
Enum.min([dt, DateTime.utc_now()], DateTime) |> DateTime.to_unix()
end
# Private functions
  defp do_additional_filters(:label_fqn, value, args) when is_binary(value) do
    pos = length(args) + 1
    # A single fully-qualified label name maps to a single label_id lookup,
    # mirroring label_id_by_label_fqn_filter/2 above.
    str = "label_id = dictGetUInt64('default.label_ids_dict', 'label_id', tuple(?#{pos}))"
    args = args ++ [value]
    {str, args}
  end

  defp do_additional_filters(:label_fqn, [value | _] = list, args) when is_binary(value) do
    pos = length(args) + 1
    # A list of fully-qualified label names maps to an IN (...) lookup.
    str = "label_id IN (
      SELECT dictGetUInt64('default.label_ids_dict', 'label_id', tuple(fqn)) AS label_id
      FROM system.one
      ARRAY JOIN [?#{pos}] AS fqn
    )"
    args = args ++ [list]
    {str, args}
  end
defp do_additional_filters(column, [value | _] = list, args)
when is_binary(value) do
pos = length(args) + 1
str = "lower(#{column}) IN (?#{pos})"
list = Enum.map(list, &String.downcase/1)
args = args ++ [list]
{str, args}
end
defp do_additional_filters(column, [value | _] = list, args) when is_number(value) do
pos = length(args) + 1
str = "#{column} IN (?#{pos})"
args = args ++ [list]
{str, args}
end
defp do_additional_filters(column, value, args) when is_binary(value) do
pos = length(args) + 1
str = "lower(#{column}) = ?#{pos}"
args = args ++ [String.downcase(value)]
{str, args}
end
defp do_additional_filters(column, value, args) when is_number(value) do
pos = length(args) + 1
str = "#{column} = ?#{pos}"
args = args ++ [value]
{str, args}
end
end
|
lib/sanbase/metric/sql_query_helper.ex
| 0.640748
| 0.513546
|
sql_query_helper.ex
|
starcoder
|
defmodule Lapin.Pattern do
@moduledoc """
Extensible behaviour to define pattern modules.
Lapin provides a number of submodules which impelment the patterns found in
the [RabbitMQ Tutorials](http://www.rabbitmq.com/getstarted.html).
```
defmodule ExampleApp.SomePattern do
use Lapin.Pattern
[... callbacks implementation ...]
end
```
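A pattern module can override any of the callbacks to change the defaults,
e.g. (the module name below is illustrative):
```
defmodule ExampleApp.AckingConsumer do
use Lapin.Pattern

# consumers must explicitly acknowledge messages
def consumer_ack(_channel), do: true
end
```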
"""
alias Lapin.Channel
@typedoc "Lapin Pattern Behaviour"
@type t :: __MODULE__
@doc """
Consumer acknowledgements enabled
"""
@callback consumer_ack(channel :: Channel.t()) :: boolean
@doc """
Consumer message prefetch count
"""
@callback consumer_prefetch(channel :: Channel.t()) :: Channel.consumer_prefetch()
@doc """
Declare exchange type
"""
@callback exchange_type(channel :: Channel.t()) :: atom
@doc """
Declare exchange durable
"""
@callback exchange_durable(channel :: Channel.t()) :: boolean
@doc """
Request publisher confirms (RabbitMQ only)
"""
@callback publisher_confirm(channel :: Channel.t()) :: boolean
@doc """
Request message persistence when publishing
"""
@callback publisher_persistent(channel :: Channel.t()) :: boolean
@doc """
Request message mandatory routing when publishing
"""
@callback publisher_mandatory(channel :: Channel.t()) :: boolean
@doc """
Declare queue arguments
"""
@callback queue_arguments(channel :: Channel.t()) :: Channel.queue_arguments()
@doc """
Declare queue durable
"""
@callback queue_durable(channel :: Channel.t()) :: boolean
@doc """
Bind queue to routing_key
"""
@callback routing_key(channel :: Channel.t()) :: Channel.routing_key()
defmacro __using__([]) do
quote do
alias Lapin.Channel
@behaviour Lapin.Pattern
@consumer_ack false
@consumer_prefetch nil
@exchange_durable true
@exchange_type :direct
@publisher_confirm false
@publisher_mandatory false
@publisher_persistent false
@queue_arguments []
@queue_durable true
@routing_key ""
def consumer_ack(%Channel{config: config}),
do: Keyword.get(config, :consumer_ack, @consumer_ack)
def consumer_prefetch(%Channel{config: config}),
do: Keyword.get(config, :consumer_prefetch, @consumer_prefetch)
def exchange_durable(%Channel{config: config}),
do: Keyword.get(config, :exchange_durable, @exchange_durable)
def exchange_type(%Channel{config: config}),
do: Keyword.get(config, :exchange_type, @exchange_type)
def publisher_confirm(%Channel{config: config}),
do: Keyword.get(config, :publisher_confirm, @publisher_confirm)
def publisher_mandatory(%Channel{config: config}),
do: Keyword.get(config, :publisher_mandatory, @publisher_mandatory)
def publisher_persistent(%Channel{config: config}),
do: Keyword.get(config, :publisher_persistent, @publisher_persistent)
def queue_arguments(%Channel{config: config}),
do: Keyword.get(config, :queue_arguments, @queue_arguments)
def queue_durable(%Channel{config: config}),
do: Keyword.get(config, :queue_durable, @queue_durable)
def routing_key(%Channel{config: config}),
do: Keyword.get(config, :routing_key, @routing_key)
defoverridable Lapin.Pattern
end
end
end
|
lib/lapin/pattern.ex
| 0.841158
| 0.607925
|
pattern.ex
|
starcoder
|
defmodule Plexy.Config do
@moduledoc """
Provides access to a hard coded config value, a value stored as a environment
variable on the current system at runtime or a default provided value.
The config.exs can look like this
```
config :plexy,
redis_url: {:system, "REDIS_URL"},
port: {:system, "PORT", 5000},
normal: "normal"
```
When using this module's `get/3` function, `System.get_env("REDIS_URL")` will
be run at runtime.
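To bind these lookups to an application, `use` this module (the `MyApp.Config`
module name below is illustrative):
```
defmodule MyApp.Config do
use Plexy.Config, name: :plexy
end

MyApp.Config.get(:redis_url)  # runs System.get_env("REDIS_URL") at runtime
MyApp.Config.get_int(:port)   # PORT from the environment, or the default 5000
```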
"""
alias __MODULE__
defmacro __using__(opts \\ [name: :plexy]) do
unless opts[:name] do
raise "Option `:name` missing from configuration"
end
quote do
def get(key, default \\ nil) do
Config.get(unquote(opts[:name]), key, default)
end
def get_int(key, default \\ nil) do
Config.get_int(unquote(opts[:name]), key, default)
end
def get_bool(key, default \\ nil) do
Config.get_bool(unquote(opts[:name]), key, default)
end
end
end
@doc """
Used to gain access to the application env.
## Examples
iex> Application.put_env(:my_config, HerokuApi, heroku_api_url: "https://api.heroku.com")
iex> Plexy.Config.get(:my_config, {HerokuApi, :heroku_api_url})
"https://api.heroku.com"
iex> Plexy.Config.get(:my_config, {HerokuApi, :not_set}, "and a default")
"and a default"
iex> Application.put_env(:my_config, :redis_url, "redis://localhost:6379")
iex> Plexy.Config.get(:my_config, :redis_url)
"redis://localhost:6379"
iex> Plexy.Config.get(:my_config, :foo, "and a default")
"and a default"
"""
@spec get(atom(), atom() | {atom(), atom()}, any()) :: any()
def get(config_name, key, default \\ nil)
def get(config_name, {module_name, key}, default) when is_atom(module_name) and is_atom(key) do
default_resolver = fn
nil ->
default
found ->
found
end
config_name
|> Application.get_env(module_name)
|> get_in([key])
|> default_resolver.()
|> resolve(default)
end
def get(config_name, key, default) do
config_name
|> Application.get_env(key, default)
|> resolve(default)
end
@doc """
Like `get/3` except it attempts to convert the value to an integer.
## Examples
iex> Application.put_env(:my_config, :port, "5000")
iex> Plexy.Config.get_int(:my_config, :port, 9999)
5000
iex> Plexy.Config.get_int(:my_config, :foo, "123")
123
"""
def get_int(config_name, key, default \\ nil) do
case get(config_name, key, default) do
value when is_integer(value) ->
value
value when is_binary(value) ->
String.to_integer(value)
_error ->
raise "Attempted to parse the value for #{key}, but it could not be converted to an integer"
end
end
@doc """
Like `get/3` except it attempts to convert the value to an bool.
## Examples
iex> Plexy.Config.get_bool(:my_config, :bar, "true")
true
iex> Plexy.Config.get_bool(:my_config, :bar, "yes")
true
iex> Plexy.Config.get_bool(:my_config, :foo, "0")
false
iex> Plexy.Config.get_bool(:my_config, :baz, "no")
false
iex> Plexy.Config.get_bool(:my_config, :baz, "false")
false
iex> Plexy.Config.get_bool(:my_config, :baz, nil)
false
"""
def get_bool(config_name, key, default \\ nil) do
case get(config_name, key, default) do
value when value in [false, "false", "f", 0, "0", "no", "n", nil] ->
false
_value ->
true
end
end
# Resolves {:system, ...} tuples at runtime; plain values pass through.
defp resolve({:system, var_name, config_default}, _default) do
System.get_env(var_name) || config_default
end
defp resolve({:system, var_name}, default) do
System.get_env(var_name) || default
end
defp resolve(value, _default) do
value
end
end
|
lib/plexy/config.ex
| 0.840717
| 0.661255
|
config.ex
|
starcoder
|
defmodule Hextille.Offset do
alias Hextille.Offset, as: Offset
alias Hextille.Cube
use Bitwise
@moduledoc """
Hexagon module that represents hexagon tiles using offset coordinates.
Instead of names x, y this module uses names col and row.
Coordinates in the r-offset system represent hexagons in pointy top orientation,
q-offset in flat top orientation.
By default this module uses even-r and even-q offset coordinates,
but the offset can be optionally specified.
This module should only be used to display coordinates as offset coordinates.
All math should be done by using cube coordinates.
"""
defstruct col: 0, row: 0
@doc """
Converts hexagon in Cube coordinates to pointy top r-offset coordinates.
## Examples:
iex> h = %Cube{q: 4, r: 3, s: -7}
iex> Offset.roffset_from_cube(h)
%Offset{col: 5, row: 3}
iex> Offset.roffset_from_cube(h, 0)
%Offset{col: 5, row: 3}
iex> Offset.roffset_from_cube(h, 1)
%Offset{col: 6, row: 3}
"""
def roffset_from_cube(%Cube{} = h, offset \\ 0) do
col = h.q + div((h.r + offset * (h.r &&& 1)), 2)
row = h.r
%Offset{col: col, row: row}
end
@doc """
Converts Offset in pointy top r-offset coordinates to Cube.
## Examples:
iex> a = %Offset{col: 5, row: 3}
iex> b = %Offset{col: 6, row: 3}
iex> Offset.roffset_to_cube(a)
%Cube{q: 4, r: 3, s: -7}
iex> Offset.roffset_to_cube(a, 0)
%Cube{q: 4, r: 3, s: -7}
iex> Offset.roffset_to_cube(b, 1)
%Cube{q: 4, r: 3, s: -7}
"""
def roffset_to_cube(%Offset{} = h, offset \\ 0) do
q = h.col - div((h.row + offset * (h.row &&& 1)), 2)
r = h.row
s = -q - r
%Cube{q: q, r: r, s: s}
end
@doc """
Converts Cube in cube coordinates to flat top q-offset coordinates.
## Examples:
iex> h = %Cube{q: 3, r: 4, s: -7}
iex> Offset.qoffset_from_cube(h)
%Offset{col: 3, row: 5}
iex> Offset.qoffset_from_cube(h, 0)
%Offset{col: 3, row: 5}
iex> Offset.qoffset_from_cube(h, 1)
%Offset{col: 3, row: 6}
"""
def qoffset_from_cube(%Cube{} = h, offset \\ 0) do
col = h.q
row = h.r + div((h.q + offset * (h.q &&& 1)), 2)
%Offset{col: col, row: row}
end
@doc """
Converts Offset in flat top q-offset coordinates to Cube.
## Examples:
iex> a = %Offset{col: 3, row: 5}
iex> b = %Offset{col: 3, row: 6}
iex> Offset.qoffset_to_cube(a)
%Cube{q: 3, r: 4, s: -7}
iex> Offset.qoffset_to_cube(a, 0)
%Cube{q: 3, r: 4, s: -7}
iex> Offset.qoffset_to_cube(b, 1)
%Cube{q: 3, r: 4, s: -7}
"""
def qoffset_to_cube(%Offset{} = h, offset \\ 0) do
q = h.col
r = h.row - div((h.col + offset * (h.col &&& 1)), 2)
s = -q - r
%Cube{q: q, r: r, s: s}
end
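# Round-trip sanity check: converting to offset coordinates and back is
# lossless, e.g.
#
#   cube = %Cube{q: 3, r: 4, s: -7}
#   cube |> Offset.qoffset_from_cube() |> Offset.qoffset_to_cube()
#   #=> %Cube{q: 3, r: 4, s: -7}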
end
|
lib/offset.ex
| 0.901721
| 0.845177
|
offset.ex
|
starcoder
|
defmodule CLI.UI do
@moduledoc """
Renders the CLI's UI. This should be the **only** way in which the UI is drawn from the CLI,
except for the prompt.
"""
require Integer
@enforce_keys [:game_pid]
defstruct(game_pid: nil, below_board: "", above_board: "")
@doc ~S"""
Render the UI to stdout, drawing a visual representation of the board between
the `above_board` and `below_board` text.

## Examples

    iex> {:ok, game_pid} = Game.start_link()
    iex> Game.move(game_pid, [3, 3, 3, 4, 4])
    {:ok, %{moves: [3, 3, 3, 4, 4], result: nil}}
    iex> UI.render(%UI{game_pid: game_pid})

prints the cleared screen followed by (ANSI piece colors omitted):

    ┌───┬───┬───┬───┬───┬───┬───┐
    │   │   │   │   │   │   │   │
    ├───┼───┼───┼───┼───┼───┼───┤
    │   │   │   │   │   │   │   │
    ├───┼───┼───┼───┼───┼───┼───┤
    │   │   │   │   │   │   │   │
    ├───┼───┼───┼───┼───┼───┼───┤
    │   │   │   │ ● │   │   │   │
    ├───┼───┼───┼───┼───┼───┼───┤
    │   │   │   │ ● │ ● │   │   │
    ├───┼───┼───┼───┼───┼───┼───┤
    │   │   │   │ ● │ ● │   │   │
    └───┴───┴───┴───┴───┴───┴───┘
      1   2   3   4   5   6   7
"""
def render(%__MODULE__{game_pid: game_pid, below_board: below_board, above_board: above_board}) do
{:ok, %{moves: moves}} = Game.look(game_pid)
board = render_board(moves)
ui = IO.ANSI.clear() <> above_board <> "\n" <> board <> "\n" <> below_board
IO.puts(ui)
end
@spec render_board(Game.moves()) :: String.t()
defp render_board(moves) do
board_data =
moves
|> Enum.with_index()
|> Enum.reduce(
%{0 => [], 1 => [], 2 => [], 3 => [], 4 => [], 5 => [], 6 => []},
fn move_with_index, acc ->
{column, index} = move_with_index
player_color = if Integer.is_even(index), do: :yellow, else: :red
Map.update!(acc, column, &(&1 ++ [player_color]))
end
)
"┌───┬───┬───┬───┬───┬───┬───┐\n" <>
board_row(board_data, 5) <>
"├───┼───┼───┼───┼───┼───┼───┤\n" <>
board_row(board_data, 4) <>
"├───┼───┼───┼───┼───┼───┼───┤\n" <>
board_row(board_data, 3) <>
"├───┼───┼───┼───┼───┼───┼───┤\n" <>
board_row(board_data, 2) <>
"├───┼───┼───┼───┼───┼───┼───┤\n" <>
board_row(board_data, 1) <>
"├───┼───┼───┼───┼───┼───┼───┤\n" <>
board_row(board_data, 0) <>
"└───┴───┴───┴───┴───┴───┴───┘\n" <> "  1   2   3   4   5   6   7"
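# For moves [3, 3, 3, 4, 4] the board_data built above maps each column to
# its bottom-up stack of piece colors:
# %{0 => [], 1 => [], 2 => [], 3 => [:yellow, :red, :yellow],
#   4 => [:red, :yellow], 5 => [], 6 => []}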
end
defp board_row(board_data, row_num) when row_num in 0..5 do
0..6
|> Enum.reduce("│", fn column_num, row ->
row_color_symbol =
case Enum.at(board_data[column_num], row_num) do
:yellow -> IO.ANSI.format([:yellow, "●"], true)
:red -> IO.ANSI.format([:red, "●"], true)
nil -> " "
end
"#{row} #{row_color_symbol} │"
end)
|> (&"#{&1}\n").()
end
end
|
apps/cli/lib/cli/ui.ex
| 0.725454
| 0.696771
|
ui.ex
|
starcoder
|