Dataset columns: code (string, lengths 114 to 1.05M); path (string, lengths 3 to 312); quality_prob (float64, 0.5 to 0.99); learning_prob (float64, 0.2 to 1); filename (string, lengths 3 to 168); kind (stringclasses, 1 value).
defmodule Chapter_2 do
  @moduledoc """
  Chapter 2 - Induction and Recursion
  """

  @spec are_equal?([any], [any]) :: boolean
  @doc """
  Def. 2.12 - Two sequences S and T are equal iff
  1. they are both empty or
  2. the head of S equals the head of T and the tail of S equals the tail of T
  """
  def are_equal?(_s = [], _t = []), do: true
  def are_equal?(_s = [], _t), do: false
  def are_equal?(_s, _t = []), do: false

  def are_equal?([head_s | tail_s], [head_t | tail_t]),
    do: head_s == head_t and are_equal?(tail_s, tail_t)

  @spec is_member_of?(any, [any]) :: boolean
  @doc """
  Def 2.1.3 - ElementOf(x, S) is
  1. FALSE if S is empty
  2. TRUE if S is nonempty and x = head(S)
  2'. ElementOf(x, tail(S)) otherwise.
  """
  def is_member_of?(_x, []), do: false
  def is_member_of?(x, [head_s | _tail_s]) when x == head_s, do: true
  def is_member_of?(x, [_head_s | tail_s]), do: is_member_of?(x, tail_s)

  @spec get_length([any]) :: non_neg_integer
  @doc """
  Def 2.1.4 - If S is a sequence, then Length(S) is
  1. 0, if S is empty
  2. 1 + Length(tail(S)), if not.
  """
  def get_length([]), do: 0
  # Recurse on the tail rather than calling Kernel.length/1, so the
  # code follows Def 2.1.4.
  def get_length([_s_head | s_tail]), do: 1 + get_length(s_tail)

  @spec get_elem(any, [any]) :: any
  @doc """
  Def 2.15 - Kth(k, S) is
  1. undefined, if S is empty
  1'. head(S) if k = 1
  2. Kth(k - 1, tail(S)) otherwise
  Note: This utilizes 0th indexing
  """
  def get_elem(_idx, []), do: nil
  def get_elem(-1, _), do: nil
  def get_elem(0, [s_head | _s_tail]), do: s_head
  def get_elem(idx, [_s_head | s_tail]), do: get_elem(idx - 1, s_tail)

  @spec traverse([any]) :: :ok
  @doc """
  Def 2.18 - Traverse(S) if S is nonempty then
  1. print head(S)
  2. traverse(tail(S))
  Note: This prints to the screen.
  """
  def traverse([]), do: IO.write("")

  def traverse([s_head | s_tail]) do
    IO.write(s_head)
    traverse(s_tail)
  end

  @spec concat([any], [any]) :: [any]
  @doc """
  Def 2.19 - Concatenation(S, T) is
  1. T, if S is empty
  2. (Head(S) . Concatenation(tail(S), T)) otherwise.
  """
  def concat([], t), do: t
  def concat([s_head | s_tail], t), do: [s_head | concat(s_tail, t)]

  @spec sum([float]) :: float
  @doc """
  Def 2.20 - Sum(S) {S is a nonempty sequence to be summed}
  1. a if S is the sequence (a) - {i.e., if S has length 1}
  2. head(S) + Sum(tail(S)) otherwise.
  """
  def sum([]), do: 0
  def sum([s_head]), do: s_head
  def sum([s_head | s_tail]), do: s_head + sum(s_tail)

  @spec max([any]) :: any
  @doc """
  Def 2.21 - Max(S)
  1. Max(<a>) = a
  2. Max(<a.S>) = a if a > Max(S)
  2'. Max(<a.S>) = Max(S) if a <= Max(S)
  """
  def max([]), do: nil
  def max([s_head]), do: s_head

  def max([s_head | s_tail]) do
    # Bind the recursive result once instead of recomputing it in both branches.
    max_tail = max(s_tail)
    if s_head <= max_tail, do: max_tail, else: s_head
  end

  @spec min([any]) :: any
  @doc """
  Def 2.21.b - Min(S)
  1. Min(<a>) = a
  2. Min(<a.S>) = a if a < Min(S)
  2'. Min(<a.S>) = Min(S) if a >= Min(S)
  Note: This was left as a supplemental exercise
  """
  # def max(a) when not is_list(a), do: a
  def min([]), do: nil
  def min([s_head]), do: s_head

  def min([s_head | s_tail]) do
    min_tail = min(s_tail)
    if s_head >= min_tail, do: min_tail, else: s_head
  end

  @spec concatlist([[any]]) :: [any]
  @doc """
  Def 2.22 - Concatlist
  If elements of S are all sequences then a function concatlist may be defined by:
  1. Concatlist(<>) = <>
  2. Concatlist(<s.S>) = Concatenation(s, Concatlist(S))
  """
  def concatlist([]), do: []
  def concatlist([s_head | s_tail]), do: concat(s_head, concatlist(s_tail))

  @spec sorted?(maybe_improper_list) :: false | nil | true
  @doc """
  Def 2.23 - Sorted(S) - {S is a nonempty sequence}
  1. TRUE if tail(S) is empty
  2. FALSE if head(S) > head(tail(S))
  3. Sorted(tail(S)) otherwise.
  """
  def sorted?([]), do: nil
  def sorted?([_]), do: true

  def sorted?([s_head | s_tail]) do
    [tail_head | _] = s_tail
    if s_head > tail_head, do: false, else: sorted?(s_tail)
  end

  @spec insert_into_sorted(any, [any]) :: [any]
  @doc """
  Def 2.24 - InsertIntoSorted(x, S) is
  1. <x> if S is empty
  2. <x . S> if x <= head(S)
  3. <head(S) . InsertIntoSorted(x, tail(S))> otherwise.
  """
  def insert_into_sorted(x, []), do: [x]

  def insert_into_sorted(x, [s_head | s_tail]) when x <= s_head,
    do: concatlist([[x], [s_head], s_tail])

  def insert_into_sorted(x, [s_head | s_tail]),
    do: concat([s_head], insert_into_sorted(x, s_tail))

  @spec insertion_sort([any]) :: [any]
  @doc """
  Def 2.25 InsertionSort(S)
  1. () if S is empty
  2. InsertIntoSorted(head(S), InsertionSort(tail(S))) otherwise.
  """
  def insertion_sort([]), do: []

  def insertion_sort([s_head | s_tail]),
    do: insert_into_sorted(s_head, insertion_sort(s_tail))
end
lib/chapter_2.ex
0.823328
0.610541
chapter_2.ex
starcoder
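A quick IEx pass over `Chapter_2`; each result follows directly from the recursive definitions:

```elixir
iex> Chapter_2.are_equal?([1, 2, 3], [1, 2, 3])
true
iex> Chapter_2.get_length([:a, :b, :c])
3
iex> Chapter_2.insertion_sort([3, 1, 2])
[1, 2, 3]
```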
defmodule ExType.Example.Foo.StructExample do @moduledoc false @type t() :: %__MODULE__{ hi: binary(), ok: integer() } defstruct [:hi, :ok] end defmodule ExType.Example.Foo do @moduledoc false require ExType.T alias ExType.T @spec unquote_example() :: integer() hi = 12 def unquote_example() do x = 10 unquote(hi) + x end @okk 333 @spec attribute_example() :: integer() def attribute_example() do @okk + 33 end @spec inspect() :: any() def inspect() do T.inspect({1, 2}) end @spec assert() :: any() def assert() do T.assert(10 == integer()) end @spec hello() :: integer() def hello() do %{1 => 2, 3 => 4} |> Enum.map(fn {a, b} -> a + b end) |> Enum.flat_map(fn k -> [k, k] end) |> Enum.reduce(1, fn x, y -> x + y end) end @spec enum_into() :: :ok def enum_into() do Enum.into([{"hello", 123}], %{}) :ok end @spec for_fn() :: [integer()] def for_fn() do for x <- [1, 2, 3] do x + 1 end end @spec with_fn() :: integer() def with_fn() do with {:ok, a} <- {:ok, 123} do a + 1 end end @spec call_defp(integer()) :: float() def call_defp(x) do add(x, 2.2) end defp add(a, b) do a + b end @spec fab(integer()) :: integer() def fab(0), do: 0 def fab(1), do: 1 def fab(n), do: fab(n - 1) + fab(n - 2) @spec hint() :: integer() def hint() do x = :erlang.binary_to_term("xxxxx") T.assert(x == any()) T.assert(x :: integer()) T.assert(x == integer()) x end @hi 123 @spec module_attribute() :: integer() def module_attribute() do @hi end @spec receive_example() :: any() def receive_example() do receive do {x} -> x [x] -> x end end @spec case_example(integer()) :: integer() def case_example(x) do case x do i -> i end end @spec cond_example(integer(), integer()) :: integer() def cond_example(a, b) do cond do a > b -> a true -> b end end @spec type_guard_case(any(), any(), any()) :: {integer(), float(), atom()} def type_guard_case(x, y, z) do case {x, y, z} do {a, b, c} when is_integer(a) and is_float(b) and is_atom(c) -> {a, b, c} end end @spec type_guard_receive() :: {integer(), float(), atom()} def type_guard_receive() do receive do {x, y, z} when is_integer(x) and is_float(y) and is_atom(z) -> {x, y, z} end end @spec struct() :: ExType.Example.Foo.StructExample.t() def struct() do %ExType.Example.Foo.StructExample{ hi: "yes", ok: 123 } end @spec destruct(ExType.Example.Foo.StructExample.t()) :: {binary(), integer()} def destruct(%ExType.Example.Foo.StructExample{hi: hi, ok: ok} = _f) do T.inspect({hi, ok}) end @spec none_example() :: none() def none_example() do raise ArgumentError, "test" end @spec union_example({:ok, binary()} | :error) :: binary() def union_example({:ok, x}) do x end def union_example(:error) do "error" end @spec binary_example(binary()) :: {integer(), integer(), bitstring()} def binary_example(<<a::1, b::1, c::bits>>) do {a, b, c} end @spec range_example() :: [float()] def range_example() do for i <- 1..10 do i + 2.0 end end @spec map_to_list() :: [{binary(), integer()}] def map_to_list() do %{"hello" => 123} |> Map.to_list() end @spec map_values() :: [integer()] def map_values() do %{"hello" => 123} |> Map.values() end @spec add_number(number(), number()) :: number() def add_number(x, y) do x + y end end
lib/example/foo.ex
0.839043
0.632545
foo.ex
starcoder
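Two of the fixtures above, evaluated by hand (a sketch; it assumes the module compiles as-is, which requires the ExType dependency for the `T` macros): `fab/1` is the Fibonacci recurrence, and `hello/0` maps `%{1 => 2, 3 => 4}` to `[3, 7]`, duplicates each element, then folds with `+` starting from 1.

```elixir
iex> ExType.Example.Foo.fab(10)
55
iex> ExType.Example.Foo.hello()
21
```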
defmodule Storage.Object do @moduledoc """ Use of this module helps with easy configuration to store, retrieve and delete similar files. ## Definition A new file module can be created easily: defmodule Photo do use Storage.Object, directory: "photos" end As options, you can specify a keyword list: * `directory:` - a subdirectory within the path of the configuration variable `root:`, where files saved using this module will be stored * `adapter:` - if you would like to use a different adapter for this object ## Functions After you `use Storage.Object` you will have access to these functions: * `store(source, scope \\\\ "")` - Source can be a path or a `Plug.Upload` struct * `url(filename, scope \\\\ "")` - Returns the URL of the given file if it exists * `delete(filename, scope \\\\ "")` - Deletes the file in the given scope `scope` can be a simple value (a number or a string), or a list of values ## Overridable functions There are also two overridable functions: * `filename(%Storage.File{} = file, scope)` - this function returns the name of the file that will be saved. It can be modified in any way using the data from `file` or `scope`, or the file name can even be generated randomly. * `valid?(%Storage.File{} = file)` - this function returns a true/false value and makes sure the `file` is valid. Extension, size, or other attributes can be used to determine whether the file is valid. Let's have a look at an example where we override the functions above. We will generate a random filename and check that the original file type is valid: defmodule Photo do use Storage.Object, directory: "photos" def filename(%Storage.File{} = file, scope) do Ecto.UUID.generate() end @allowed_extensions ~w(jpg jpeg png) def valid?(%Storage.File{} = file) do file.extension in @allowed_extensions and file.metadata.size < 2_000_000 end end After definition we can use the module like this: iex> album = "some_album" iex> user = %{id: 1} iex> Photo.store("path/to/file", [user.id, album]) Here we use the user's ID and album as a scope and the file will be saved to `photos/1/some_album/`. 
Then we can retrieve URL or delete the file using file's name and the scope: iex> Photo.url("d72dfb2a-2ab9-4466-bf3b-cd059296026e.jpg", [user.id, album]) "http://localhost:4000/photos/1/some_album/d72dfb2a-2ab9-4466-bf3b-cd059296026e.jpg" """ defmacro __using__(opts \\ []) do adapter = Keyword.get(opts, :adapter, Storage.Adapters.Local) object_scope = Keyword.get(opts, :directory, "") quote bind_quoted: [adapter: adapter, object_scope: object_scope] do def store(source, scope \\ "") def store(%Plug.Upload{filename: filename, path: path}, scope) do file = path |> Storage.File.new([ adapter: unquote(adapter), scope: [unquote(object_scope), scope], filename: filename ]) store_object(path, scope, file) end def store(source_path, scope) do file = source_path |> Storage.File.new([ adapter: unquote(adapter), scope: [unquote(object_scope), scope] ]) store_object(source_path, scope, file) end defp store_object(source, scope, file) do filename = filename(file, scope) file = replace_filename(file, filename) if valid?(file) do unquote(adapter).put(file, source) else {:error, :file_not_valid} end end defp replace_filename(file, new_filename) do path = file.path |> Path.split() |> List.replace_at(-1, new_filename) |> Path.join() file |> Map.put(:filename, new_filename) |> Map.put(:path, path) end def url(filename, scope \\ "") do path = build_path(filename, scope) unquote(adapter).url(path) end def delete(filename, scope \\ "") do path = build_path(filename, scope) unquote(adapter).delete(path) end defp build_path(filename, scope) do scope = Storage.Support.convert_scope(scope) Path.join([unquote(object_scope), scope, filename]) end defp filename(file, _scope), do: file.filename defp valid?(_file), do: true defoverridable [filename: 2, valid?: 1] end end end
lib/object.ex
0.791499
0.582966
object.ex
starcoder
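To make the scope handling concrete, here is a hedged trace of the generated `build_path/2` (it assumes `Storage.Support.convert_scope/1` joins a list scope into path segments, which the `photos/1/some_album/` example above implies but the excerpt does not show):

```elixir
# With `use Storage.Object, directory: "photos"`:
#
#   build_path("a.jpg", [1, "some_album"])
#   #=> Path.join(["photos", convert_scope([1, "some_album"]), "a.jpg"])
#   #=> "photos/1/some_album/a.jpg"
```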
defmodule Search.Paginator do @moduledoc """ Handles the formatting of JSONAPI pagination params to Elasticsearch `from` and `size`. Also translates the result counts to the proper page links for the response. """ import Destructure alias Plug.Conn alias Plug.Conn.Query, as: ConnQuery @default_page_size 10 @max_page_size 100 @typedoc """ * from - the starting position for the pagination results * size - the number of results for the pagination to display """ @type offset :: %{from: non_neg_integer, size: non_neg_integer} @typedoc """ Mapping of the types of pages for pagination and the links to access those pages. """ @type page_links :: %{ required(:first) => String.t(), required(:last) => String.t(), optional(:prev) => String.t(), optional(:next) => String.t() } @typedoc """ Mapping of the types of pages for pagination and the numbers of the pages. """ @type page_numbers :: %{ required(:first) => non_neg_integer, required(:last) => non_neg_integer, required(:prev) => non_neg_integer | nil, required(:next) => non_neg_integer | nil } ## Public functions @doc """ Converts page number and size params to values Elasticsearch uses for pagination """ @spec page_to_offset(params :: map()) :: offset() def page_to_offset(params = %{}) do size = get_in(params, ["page", "size"]) number = get_in(params, ["page", "number"]) size = verify_page_size(size) from = page_number_into_from(number, size) %{from: from, size: size} end @doc """ Takes the params from the Conn and the total number of hits (results) and generates pagination links. """ @spec to_page_links(Conn.t(), non_neg_integer) :: page_links() def to_page_links(conn = %Conn{params: params}, total_hits) do %{from: from, size: size} = page_to_offset(params) pages = Enum.chunk_every(0..(total_hits - 1), size) first_page_number = 1 last_page_number = Enum.count(pages) current_page_number = determine_current_page_number(pages, from) {prev, next} = prev_next_number(current_page_number, last_page_number) numbers = %{first: first_page_number, last: last_page_number, next: next, prev: prev} page_numbers_to_links(numbers, size, conn) end ## Public but not documented functions # Function left public so it can be unit tested. 
@doc false @spec prev_next_number(current :: non_neg_integer, last :: non_neg_integer, first :: non_neg_integer) :: {prev :: non_neg_integer | nil, next :: non_neg_integer | nil} def prev_next_number(current, last), do: prev_next_number(current, last, 1) def prev_next_number(_, 1, 1), do: {nil, nil} def prev_next_number(1, _last, 1), do: {nil, 2} def prev_next_number(current, last, 1) when current == last, do: {current - 1, nil} def prev_next_number(current, _last, 1), do: {current - 1, current + 1} ## Private functions @spec determine_current_page_number(list(list(non_neg_integer)), non_neg_integer) :: non_neg_integer defp determine_current_page_number(pages, from) do Enum.reduce_while(pages, 0, fn page, acc -> if Enum.member?(page, from) do {:halt, acc + 1} else {:cont, acc + 1} end end) end @spec verify_page_size(size :: String.t() | integer) :: non_neg_integer defp verify_page_size(size) when is_binary(size) do size |> String.to_integer() |> verify_page_size() end defp verify_page_size(nil), do: @default_page_size defp verify_page_size(0), do: @default_page_size defp verify_page_size(size) when is_integer(size) and size < 0, do: @default_page_size defp verify_page_size(size) when is_integer(size) and size > @max_page_size, do: @max_page_size defp verify_page_size(size) when is_integer(size) and size <= @max_page_size, do: size @spec page_number_into_from(number :: String.t() | integer, size :: non_neg_integer) :: non_neg_integer defp page_number_into_from(number, size) when is_binary(number) and is_integer(size) do number |> String.to_integer() |> page_number_into_from(size) end defp page_number_into_from(nil, _size), do: 0 defp page_number_into_from(0, _size), do: 0 defp page_number_into_from(1, _size), do: 0 defp page_number_into_from(number, _size) when is_integer(number) and number < 0, do: 0 defp page_number_into_from(number, size) when is_integer(number) and is_integer(size) do size * (number - 1) end @spec page_numbers_to_links(numbers :: page_numbers(), size :: non_neg_integer, Conn.t()) :: page_links() defp page_numbers_to_links(numbers, size, conn = d(%Conn{params})) do url = build_url(conn) Enum.reduce(numbers, %{}, fn {type, number}, acc -> if number do encoded_params = params |> Map.put("page", %{"size" => size, "number" => number}) |> ConnQuery.encode() |> String.replace("[", "%5B") |> String.replace("]", "%5D") link = url <> "?" <> encoded_params Map.put(acc, type, link) else acc end end) end @spec build_url(Conn.t()) :: String.t() defp build_url(d(%Conn{host, request_path, req_headers, scheme})) do fallback = "#{scheme}://#{host}#{request_path}" Enum.find_value(req_headers, fallback, fn {header, value} -> if header == "x-forwarded-url" do value end end) end end
lib/elastic_jsonapi/paginator.ex
0.762998
0.476884
paginator.ex
starcoder
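The offset arithmetic in action; the defaults follow from `@default_page_size 10` and `@max_page_size 100`:

```elixir
iex> Search.Paginator.page_to_offset(%{"page" => %{"number" => "3", "size" => "20"}})
%{from: 40, size: 20}
iex> Search.Paginator.page_to_offset(%{})
%{from: 0, size: 10}
iex> Search.Paginator.prev_next_number(1, 5)
{nil, 2}
```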
defmodule Phoenix.HTML do @moduledoc """ Helpers for working with HTML strings and templates. When used, it brings the given functionality: * `use Phoenix.HTML.Controller` - imports controller functions commonly used in views; * `import Phoenix.HTML` - imports functions for handling HTML safety; * `import Phoenix.HTML.Tag` - imports functions for generating HTML tags; * `import Phoenix.HTML.Form` - imports functions for working with forms; * `import Phoenix.HTML.Link` - imports functions for generating links and URLs; ## HTML Safe One of the main responsibilities of this module is to provide convenience functions for escaping and marking HTML code as safe. By default, data output in templates is not considered safe: <%= "<hello>" %> will be shown as: &lt;hello&gt; User data or data coming from the database is almost never considered safe. However, in some cases, you may want to tag it as safe and show its original contents: <%= safe "<hello>" %> Keep in mind most helpers will automatically escape your data and return safe content: <%= tag :p, "<hello>" %> will properly output: <p>&lt;hello&gt;</p> """ @doc false defmacro __using__(_) do quote do use Phoenix.HTML.Controller import Phoenix.HTML import Phoenix.HTML.Form import Phoenix.HTML.Link import Phoenix.HTML.Tag end end @typedoc "Guaranteed to be safe" @type safe :: {:safe, iodata} @typedoc "May be safe or unsafe (i.e. it needs to be converted)" @type unsafe :: Phoenix.HTML.Safe.t @doc """ Provides `~e` sigil with HTML safe EEx syntax inside source files. iex> ~e"\"" ...> Hello <%= "world" %> ...> "\"" {:safe, [[["" | "Hello "] | "world"] | "\\n"]} """ defmacro sigil_e(expr, opts) do handle_sigil(expr, opts, __CALLER__.line) end @doc """ Provides `~E` sigil with HTML safe EEx syntax inside source files. This sigil does not support interpolation and should be preferred over `~e`. iex> ~E"\"" ...> Hello <%= "world" %> ...> "\"" {:safe, [[["" | "Hello "] | "world"] | "\\n"]} """ defmacro sigil_E(expr, opts) do handle_sigil(expr, opts, __CALLER__.line) end defp handle_sigil({:<<>>, _, [expr]}, [], line) do EEx.compile_string(expr, engine: Phoenix.HTML.Engine, line: line + 1) end defp handle_sigil(_, _, _) do raise ArgumentError, "interpolation not allowed in ~e sigil. " <> "Remove the interpolation or use ~E instead" end @doc """ Marks the given value as safe. iex> Phoenix.HTML.safe("<hello>") {:safe, "<hello>"} iex> Phoenix.HTML.safe({:safe, "<hello>"}) {:safe, "<hello>"} """ @spec safe(iodata | safe) :: safe def safe({:safe, value}), do: {:safe, value} def safe(value) when is_binary(value) or is_list(value), do: {:safe, value} @doc """ Safely concatenates the data in the given list. iex> safe_concat(["<hello>", "safe", "<world>"]) {:safe, "&lt;hello&gt;safe&lt;world&gt;"} """ @spec safe_concat([iodata | safe]) :: safe def safe_concat(list) when is_list(list) do Enum.reduce(list, {:safe, ""}, &safe_concat(&2, &1)) end @doc """ Concatenates data safely. 
iex> safe_concat("<hello>", "<world>") {:safe, "&lt;hello&gt;&lt;world&gt;"} iex> safe_concat({:safe, "<hello>"}, "<world>") {:safe, "<hello>&lt;world&gt;"} iex> safe_concat("<hello>", {:safe, "<world>"}) {:safe, "&lt;hello&gt;<world>"} iex> safe_concat({:safe, "<hello>"}, {:safe, "<world>"}) {:safe, "<hello><world>"} iex> safe_concat({:safe, "<hello>"}, {:safe, '<world>'}) {:safe, ["<hello>"|'<world>']} """ @spec safe_concat(iodata | safe, iodata | safe) :: safe def safe_concat({:safe, data1}, {:safe, data2}), do: {:safe, io_concat(data1, data2)} def safe_concat({:safe, data1}, data2), do: {:safe, io_concat(data1, io_escape(data2))} def safe_concat(data1, {:safe, data2}), do: {:safe, io_concat(io_escape(data1), data2)} def safe_concat(data1, data2), do: {:safe, io_concat(io_escape(data1), io_escape(data2))} defp io_escape(data) when is_binary(data), do: Phoenix.HTML.Safe.BitString.to_iodata(data) defp io_escape(data) when is_list(data), do: Phoenix.HTML.Safe.List.to_iodata(data) defp io_concat(d1, d2) when is_binary(d1) and is_binary(d2), do: d1 <> d2 defp io_concat(d1, d2), do: [d1|d2] @doc """ Escapes the HTML entities in the given term, returning iodata. iex> html_escape("<hello>") {:safe, "&lt;hello&gt;"} iex> html_escape('<hello>') {:safe, ["&lt;", 104, 101, 108, 108, 111, "&gt;"]} iex> html_escape(1) {:safe, "1"} iex> html_escape({:safe, "<hello>"}) {:safe, "<hello>"} """ @spec html_escape(unsafe) :: safe def html_escape({:safe, _} = safe), do: safe def html_escape(nil), do: {:safe, ""} def html_escape(other) when is_binary(other), do: {:safe, Phoenix.HTML.Safe.BitString.to_iodata(other)} def html_escape(other), do: {:safe, Phoenix.HTML.Safe.to_iodata(other)} end
lib/phoenix/html.ex
0.785267
0.515193
html.ex
starcoder
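Mixing safe and unsafe operands escapes only the unsafe side, consistent with the doctests above (hedged: this assumes the escaper returns a plain binary for short strings, as those doctests show):

```elixir
iex> Phoenix.HTML.safe_concat(Phoenix.HTML.safe("<b>"), "<i>")
{:safe, "<b>&lt;i&gt;"}
```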
defmodule Apoc.Hazmat.RSA do @moduledoc """ RSA Public Key Cryptography with Elixir. This module wraps the Erlang `:crypto` and `:public_key` modules (which in turn wrap OpenSSL) but simplifies the syntax, uses its own types and combines the main functions into one module. ## RSA Basics Public Key cryptography is one of the most important building blocks in secure communications. The original SSL (TLS) implementations used RSA. While modern TLS versions have [moved away from RSA](https://tools.ietf.org/html/rfc8446#section-1.2) for key exchange (moving instead to [Elliptic Curve Diffie-Hellman in TLS 1.3](https://tools.ietf.org/html/rfc8446#section-4.2.7)), RSA still has many useful applications and is still considered very secure when used correctly (see [FIPS-140-2 Implementation Guide](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/fips140-2/fips1402ig.pdf) section A.14). RSA uses two encryption keys: a public and a private (sometimes called secret) key. A user's public key is not considered sensitive and may be published on the web, for example. Any user with the public key can encrypt a message but *cannot* decrypt it. Only the holder of the private key can do that. The reverse is also true: any message encrypted with the private key can only be decrypted with the public key. ### Security For an RSA key to be secure (based on 2018 computer hardware) it should be at least 2048 bits in size. This is the Apoc default but sizes of 3072 or 4096 are perfectly fine. Just remember that bigger keys will take more storage and increase the encryption/decryption time. The most common attacks on RSA (such as [Bleichenbacher’s attack on RSA-PKCS1](https://asecuritysite.com/encryption/c_c3)) exploit how text is padded before it is encrypted. An early scheme for this, defined in [PKCS1 v1.5](https://tools.ietf.org/html/rfc2313), is vulnerable. A modern scheme called Optimal Asymmetric Encryption Padding (OAEP) is used in Apoc which is not vulnerable to the attack. Bear in mind that security is limited by the underlying OTP and OpenSSL versions on your system. Be sure to use the latest Erlang/OTP compiled against the latest OpenSSL. ### Speed RSA compared to many other encryption schemes is very slow. If you don't need Public Key cryptography I recommend using `Apoc.AES`. ## Usage To encrypt with an existing public key (PEM/ASN1 encoded): {:ok, ciphertext} = "public.pem" |> File.read! |> Apoc.RSA.PublicKey.load_pem |> Apoc.RSA.encrypt("A very secret message") And to decrypt with the private key, the code is very similar. "private.pem" |> File.read! |> Apoc.RSA.PrivateKey.load_pem |> Apoc.RSA.decrypt(ciphertext) |> case do {:ok, message} -> IO.puts(message) :error -> IO.puts("Decryption failed") end The `encrypt/2` and `decrypt/2` functions pattern match on the key type and then call the relevant function for the key. If you prefer to make the code more explicit you can call these functions directly: Apoc.RSA.PublicKey.encrypt(pkey, "A very secret message") See `Apoc.RSA.PublicKey` and `Apoc.RSA.PrivateKey`. ## Generating Keys Often folks will want to use existing tools to generate a key, say with `openssl`: ```sh openssl genrsa -out private.pem 2048 openssl rsa -in private.pem -outform PEM -pubout -out public.pem ``` However, you can also generate your keys with Apoc. {:ok, pkey, skey} = Apoc.RSA.generate_key_pair The keys can then be PEM encoded and stored somewhere safe (in the case of the private key). 
Apoc.RSA.PublicKey.dump_pem(pkey) Apoc.RSA.PrivateKey.dump_pem(skey) For details of the Erlang implementation, see [crypto](http://erlang.org/doc/man/crypto.html) and [public_key](http://erlang.org/doc/man/public_key.html). """ alias __MODULE__.{PrivateKey, PublicKey} @public_exponent 65537 defguardp valid_mod?(size) when size in [2048, 3072, 4096] @typedoc "Valid RSA modulus size" @type rsa_mod() :: 2048 | 3072 | 4096 @doc """ Encrypt a message with the given Key """ @spec encrypt(PublicKey.t | PrivateKey.t, binary()) :: {:ok, binary()} | :error def encrypt(%PublicKey{} = pubkey, message) do PublicKey.encrypt(pubkey, message) end def encrypt(%PrivateKey{} = seckey, message) do PrivateKey.encrypt(seckey, message) end @doc """ Decrypt a message with the given Key """ @spec decrypt(PublicKey.t | PrivateKey.t, binary()) :: {:ok, binary()} | :error def decrypt(%PublicKey{} = pubkey, ciphertext) do PublicKey.decrypt(pubkey, ciphertext) end def decrypt(%PrivateKey{} = seckey, ciphertext) do PrivateKey.decrypt(seckey, ciphertext) end @doc """ Generates an RSA key pair. Remember that the secret key is *sensitive*. Don't share it! The function only takes one argument: `size` which is set to 2048 by default. You can also use a value of 3072 or 4096. """ @spec generate_key_pair(size :: rsa_mod()) :: {:ok, PublicKey.t, PrivateKey.t} | {:error, String.t} def generate_key_pair(size \\ 2048) def generate_key_pair(size) when valid_mod?(size) do with {pub, priv} <- :crypto.generate_key(:rsa, {size, @public_exponent}), %PublicKey{} = pkey <- public_key_struct(pub), %PrivateKey{} = skey <- private_key_struct(priv) do {:ok, pkey, skey} else _ -> {:error, "Failed"} end end def generate_key_pair(_) do {:error, "Key size should be 2048, 3072 or 4096"} end defp public_key_struct([e, n]) do %PublicKey{ modulus: :crypto.bytes_to_integer(n), public_exponent: :crypto.bytes_to_integer(e) } end # TODO: Use from_erlang_type defp private_key_struct(values) do [e, n, d, p1, p2, e1, e2, c] = Enum.map(values, &:crypto.bytes_to_integer/1) %PrivateKey{ version: :"two-prime", modulus: n, public_exponent: e, private_exponent: d, prime1: p1, prime2: p2, exponent1: e1, exponent2: e2, coefficient: c } end end
lib/apoc/hazmat/rsa.ex
0.853134
0.874614
rsa.ex
starcoder
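An end-to-end sketch tying the pieces together, following the specs above (key generation, then an encrypt/decrypt round trip):

```elixir
{:ok, pkey, skey} = Apoc.Hazmat.RSA.generate_key_pair(2048)
{:ok, ciphertext} = Apoc.Hazmat.RSA.encrypt(pkey, "a very secret message")
{:ok, "a very secret message"} = Apoc.Hazmat.RSA.decrypt(skey, ciphertext)
```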
defmodule Idefix.AST do @doc """ Parse Elixir code in a format for easy manipulation """ def parse(input) do opts = [ columns: true, token_metadata: true, warn_on_unnecessary_quotes: false ] Code.string_to_quoted(input, opts) end @doc """ Given a line/column pair; find the nearest AST node. """ def find_node(ast, line, col) do Macro.prewalk(ast, nil, fn {_, meta, _} = n, nil -> if meta[:line] == line do {n, {n, meta}} else {n, nil} end {_, newmeta, _} = n0, {n, oldmeta} -> if newmeta[:line] == line and abs(newmeta[:column] - col) <= abs(oldmeta[:column] - col) do {n0, {n0, newmeta}} else {n0, {n, oldmeta}} end n, acc -> {n, acc} end) |> elem(1) |> elem(0) end @doc """ Find the closest surrounding do: block given the AST and a target node. """ def find_nearest_do_block(ast, node) do {:ok, stack} = collect_ast_stack(ast, &(&1 == node)) stack |> Enum.reverse() |> Enum.find(&match?({:do, _}, &1)) end @doc """ Collect the AST stack; test each AST node against a predicate function. When the predicate matches the function, it will return the entire stack of the AST up to the matched node. """ def collect_ast_stack({_, args} = node, predicate) do collect_ast_stack(args, predicate) |> opt_stack(node) end def collect_ast_stack(args, predicate) when is_list(args) do reduce_ast_stack_list(args, predicate) end def collect_ast_stack({x, _, args} = node, predicate) when is_atom(x) and is_list(args) do case predicate.(node) do true -> {:ok, []} false -> reduce_ast_stack_list(args, predicate) end end def collect_ast_stack(node, predicate) do case predicate.(node) do true -> {:ok, []} false -> nil end end defp reduce_ast_stack_list(args, predicate) do Enum.reduce(args, nil, fn _, {:ok, _} = r -> r sub, nil -> case collect_ast_stack(sub, predicate) do {:ok, s} -> {:ok, [sub | s]} nil -> nil end end) end defp opt_stack({:ok, stack}, node), do: {:ok, [node | stack]} defp opt_stack(nil, _), do: nil @doc """ Given an AST node, return a pair of {line, column} tuples which are the textual span of the node. """ def get_node_text_range({_, _, _} = node) do {lines, columns} = Macro.prewalk(node, [], fn {_, m, _} = node, acc -> all = [ line_plus_column(m), line_plus_column(m, :end_of_expression), line_plus_column(m, :closing) ] |> Enum.reject(&is_nil/1) {node, all ++ acc} node, acc -> {node, acc} end) |> elem(1) |> Enum.unzip() {:ok, {Enum.min(lines), Enum.min(columns)}, {Enum.max(lines), Enum.max(columns)}} end def get_node_text_range(_node) do {:error, :no_node_metadata} end defp line_plus_column(m, key \\ nil) defp line_plus_column(m, nil) do {m[:line], m[:column]} end defp line_plus_column(m, key) do case m[key] do nil -> nil k -> {k[:line], k[:column]} end end end
lib/idefix/ast.ex
0.646349
0.457803
ast.ex
starcoder
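A small sketch of `parse/1` feeding `find_node/3` (the exact metadata keys in the result depend on the parser options set above, so they are elided):

```elixir
{:ok, ast} = Idefix.AST.parse("1 + 2")
Idefix.AST.find_node(ast, 1, 3)
#=> {:+, meta, [1, 2]}, the node on line 1 nearest to column 3
```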
defmodule Owl.IO do @moduledoc "A set of functions for handling IO with support of `t:Owl.Data.t/0`." @type select_option :: {:label, Owl.Data.t() | nil} | {:render_as, (any() -> Owl.Data.t())} @doc """ Selects one item from the given nonempty list. Returns value immediately if list contains only 1 element. ## Options * `:label` - a text label. Defaults to `nil` (no label). * `:render_as` - a function that renders given item. Defaults to `Function.identity/1`. ## Examples Owl.IO.select(["one", "two", "three"]) #=> 1. one #=> 2. two #=> 3. three #=> #=> > 1 #=> "one" ~D[2001-01-01] |> Date.range(~D[2001-01-03]) |> Enum.to_list() |> Owl.IO.select(render_as: &Date.to_iso8601/1, label: "Please select a date") #=> 1. 2001-01-01 #=> 2. 2001-01-02 #=> 3. 2001-01-03 #=> #=> Please select a date #=> > 2 #=> ~D[2001-01-02] packages = [ %{name: "elixir", description: "programming language"}, %{name: "asdf", description: "version manager"}, %{name: "neovim", description: "fork of vim"} ] Owl.IO.select(packages, render_as: fn %{name: name, description: description} -> [Owl.Data.tag(name, :cyan), "\\n ", Owl.Data.tag(description, :light_black)] end ) #=> 1. elixir #=> programming language #=> 2. asdf #=> version manager #=> 3. neovim #=> fork of vim #=> #=> > 3 #=> %{description: "fork of vim", name: "neovim"} """ @spec select(nonempty_list(item), [select_option()]) :: item when item: any() def select([_ | _] = list, opts \\ []) do label = Keyword.get(opts, :label) render_item = Keyword.get(opts, :render_as, &Function.identity/1) case list do [item] -> if label, do: puts(label) puts(["Autoselect: ", render_item.(item), "\n"]) item list -> list |> Enum.with_index(1) |> puts_ordered_list(render_item) IO.puts([]) index = input(cast: {:integer, min: 1, max: length(list)}, label: label) - 1 Enum.at(list, index) end end @type multiselect_option :: {:label, Owl.Data.t() | nil} | {:render_as, (any() -> Owl.Data.t())} | {:min, non_neg_integer() | nil} | {:max, non_neg_integer() | nil} @doc """ Select multiple values from the given nonempty list. Input item numbers must be separated by any non-digit character. Most likely you'd want to use spaces or commas. It is possible to specify a range of numbers using hyphen. ## Options * `:label` - a text label. Defaults to `nil` (no label). * `:render_as` - a function that renders given item. Defaults to `Function.identity/1`. * `:min` - a minimum output list length. Defaults to `nil` (no lower bound). * `:max` - a maximum output list length. Defaults to `nil` (no upper bound). ## Examples Owl.IO.multiselect(["one", "two", "three"], min: 2, label: "Select 2 numbers:", render_as: &String.upcase/1) #=> 1. ONE #=> 2. TWO #=> 3. THREE #=> #=> Select 2 numbers: #=> > 1 #=> the number of elements must be greater than or equal to 2 #=> Select 2 numbers: #=> > 1 3 #=> ["one", "three"] Owl.IO.multiselect(Enum.to_list(1..5), render_as: &to_string/1) #=> 1. 1 #=> 2. 2 #=> 3. 3 #=> 4. 4 #=> 5. 
5 #=> #=> > 1-3 5 #=> [1, 2, 3, 5] """ @spec multiselect(nonempty_list(item), [multiselect_option()]) :: [item] when item: any() def multiselect([_ | _] = list, opts \\ []) do label = Keyword.get(opts, :label) render_item = Keyword.get(opts, :render_as, &Function.identity/1) min_elements = Keyword.get(opts, :min) max_elements = Keyword.get(opts, :max) ordered_list = Enum.with_index(list, 1) indexed_values = Map.new(ordered_list, fn {value, index} -> {index, value} end) list_size = map_size(indexed_values) if is_integer(min_elements) and min_elements > list_size do raise ArgumentError, "input list must contain at least #{min_elements} elements" end puts_ordered_list(ordered_list, render_item) IO.puts([]) bounds = 1..list_size numbers = input( cast: &cast_multiselect_input(&1, bounds, min_elements, max_elements), label: label, optional: true ) indexed_values |> Map.take(numbers) |> Map.values() end defp cast_multiselect_input(value, bounds, min_elements, max_elements) do numbers = ~r/(\d+)\-?(\d+)?/ |> Regex.scan(to_string(value), capture: :all_but_first) |> Enum.flat_map(fn [string] -> [String.to_integer(string)] [first, second] -> Enum.to_list(String.to_integer(first)..String.to_integer(second)) end) |> Enum.uniq() case Enum.reject(numbers, &(&1 in bounds)) do [] -> numbers_length = length(numbers) with :ok <- validate_bounds(numbers_length, :min, min_elements), :ok <- validate_bounds(numbers_length, :max, max_elements) do {:ok, numbers} else {:error, reason} -> {:error, "the number of elements #{reason}"} end invalid_numbers -> {:error, "unknown values: #{Kernel.inspect(invalid_numbers, charlists: :as_lists)}"} end end defp puts_ordered_list(ordered_list, render_item) do last_index_width = ordered_list |> Enum.reverse() |> hd() |> elem(1) |> to_string() |> String.length() # 2 is length of ". " max_width = last_index_width + 2 ordered_list |> Enum.map(fn {item, index} -> rendered_item = render_item.(item) [Owl.Data.tag(to_string(index), :blue), ". "] |> Owl.Box.new( border_style: :none, min_height: length(Owl.Data.lines(rendered_item)), min_width: max_width, horizontal_align: :right ) |> Owl.Data.zip(rendered_item) end) |> Owl.Data.unlines() |> puts() end @doc """ Opens `data` in editor for editing. Returns updated data when file is saved and editor is closed. Similarly to `IEx.Helpers.open/1`, this function uses `ELIXIR_EDITOR` environment variable by default. `__FILE__` notation is supported as well. 
## Example # use neovim in alacritty terminal emulator as an editor $ export ELIXIR_EDITOR="alacritty -e nvim" # open editor from Elixir code Owl.IO.open_in_editor("hello\\nworld") # specify editor explicitly Owl.IO.open_in_editor("hello\\nworld", "alacritty -e nvim") """ @spec open_in_editor(iodata()) :: String.t() def open_in_editor(data, elixir_editor \\ System.fetch_env!("ELIXIR_EDITOR")) do dir = System.tmp_dir!() filename = "owl-#{random_string()}" tmp_file = Path.join(dir, filename) File.write!(tmp_file, data) elixir_editor = if String.contains?(elixir_editor, "__FILE__") do String.replace(elixir_editor, "__FILE__", tmp_file) else elixir_editor <> " " <> tmp_file end {_, 0} = System.shell(elixir_editor) File.read!(tmp_file) end defp random_string do length = 9 length |> :crypto.strong_rand_bytes() |> Base.url_encode64() |> binary_part(0, length) end @type confirm_option :: {:message, Owl.Data.t()} | {:default, boolean()} | {:answers, [ true: {primary_true_answer :: binary(), other_true_answers :: [binary()]}, false: {primary_false_answer :: binary(), other_false_answers :: [binary()]} ]} @default_confirmation_message "Are you sure?" @doc """ Asks user to type a confirmation. Valid inputs are a blank string and values specified in `:answers` option. User will be asked to type a confirmation again on invalid input. ## Options * `:message` - typically a question about performing operation. Defaults to `#{Kernel.inspect(@default_confirmation_message)}`. * `:default` - a value that is used when user responds with a blank string. Defaults to `false`. * `:answers` - allows to specify alternative answers. Defaults to `[true: {"y", ["yes"]}, false: {"n", ["no"]}]`. ## Examples Owl.IO.confirm() #=> Are you sure? [yN] n false Owl.IO.confirm(message: Owl.Data.tag("Really?", :red), default: true) #=> Really? [Yn] true Owl.IO.confirm( message: Owl.Data.tag("Справді?", :red), answers: [true: {"т", ["так", "y", "yes"]}, false: {"н", ["ні", "n", "no"]}] ) #=> Справді? [тН] НІ false """ @spec confirm([confirm_option()]) :: boolean() def confirm(opts \\ []) do message = Keyword.get(opts, :message, @default_confirmation_message) default = Keyword.get(opts, :default, false) {primary_true_answer, other_true_answers} = get_in(opts, [:answers, true]) || {"y", ["yes"]} {primary_false_answer, other_false_answers} = get_in(opts, [:answers, false]) || {"n", ["no"]} answers = if default do String.upcase(primary_true_answer) <> String.downcase(primary_false_answer) else String.downcase(primary_true_answer) <> String.upcase(primary_false_answer) end result = gets(false, [message, " [", answers, "]: "]) cond do is_nil(result) -> default String.downcase(result) in Enum.map( [primary_true_answer | other_true_answers], &String.downcase/1 ) -> true String.downcase(result) in Enum.map( [primary_false_answer | other_false_answers], &String.downcase/1 ) -> false true -> report_error("unknown answer") confirm(opts) end end @type cast_input :: (String.t() | nil -> {:ok, value :: any()} | {:error, reason :: String.Chars.t()}) @type input_option :: {:label, Owl.Data.t()} | {:cast, atom() | {atom(), Keyword.t()} | cast_input()} | {:optional, boolean()} @doc """ Reads a line from the `stdio` and casts a value to the given type. After reading a line from `stdio` it will be automatically trimmed with `String.trim/2`. The end value will be returned when user types a valid value. ## Options * `:secret` - set to `true` if you want to make input invisible. Defaults to `false`. * `:label` - a text label. Defaults to `nil` (no label). 
* `:optional` - a boolean that sets whether value is optional. Defaults to `false`. * `:cast` - casts a value after reading it from `stdio`. Defaults to `:string`. Possible values: * an anonymous function with arity 1 that is described by `t:cast_input/0` * a pair with built-in type represented as atom and a keyword-list with options. Built-in types: * `:integer`, options: * `:min` - a minimum allowed value. Defaults to `nil` (no lower bound). * `:max` - a maximum allowed value. Defaults to `nil` (no upper bound). * `:string`, options: * no options * an atom which is simply an alias to `{atom(), []}` ## Examples Owl.IO.input() #=> > hello world "hello world" Owl.IO.input(secret: true) #=> > "password" Owl.IO.input(optional: true) #=> > nil Owl.IO.input(label: "Your age", cast: {:integer, min: 18, max: 100}) #=> Your age #=> > 12 #=> must be greater than or equal to 18 #=> Your age #=> > 102 #=> must be less than or equal to 100 #=> Your age #=> > 18 18 Owl.IO.input(label: "Birth date in ISO 8601 format:", cast: &Date.from_iso8601/1) #=> Birth date in ISO 8601 format: #=> > 1 January #=> invalid_format #=> Birth date in ISO 8601 format: #=> > 2021-01-01 ~D[2021-01-01] """ @spec input([input_option()]) :: any() def input(opts \\ []) do cast = case Keyword.get(opts, :cast) || :string do type_name when is_atom(type_name) -> &cast_input(type_name, &1, []) {type_name, opts} when is_atom(type_name) and is_list(opts) -> &cast_input(type_name, &1, opts) callback when is_function(callback, 1) -> callback end label = case Keyword.get(opts, :label) do nil -> [] value -> [value, "\n"] end secret = Keyword.get(opts, :secret, false) value = gets(secret, [label, Owl.Data.tag("> ", :blue)]) [&validate_required(&1, opts), cast] |> Enum.reduce_while({:ok, value}, fn callback, {:ok, value} -> case callback.(value) do {:ok, value} -> {:cont, {:ok, value}} {:error, reason} -> {:halt, {:error, to_string(reason)}} end end) |> case do {:ok, value} -> IO.puts([]) value {:error, reason} -> report_error(reason) input(opts) end end # https://github.com/hexpm/hex/blob/1523f44e8966d77a2c71738629912ad59627b870/lib/mix/hex/utils.ex#L32-L58 defp gets(true = _secret, prompt) do [last_row | rest] = prompt |> Owl.Data.lines() |> Enum.reverse() case rest do [] -> :noop rest -> puts(rest |> Enum.reverse() |> Owl.Data.unlines()) end prompt = Owl.Data.to_ansidata(last_row) pid = spawn_link(fn -> loop_prompt(prompt) end) ref = make_ref() value = IO.gets(prompt) send(pid, {:done, self(), ref}) receive do: ({:done, ^pid, ^ref} -> :ok) normalize_gets_result(value) end defp gets(false = _secret, prompt) do prompt |> Owl.Data.to_ansidata() |> IO.gets() |> normalize_gets_result() end defp normalize_gets_result(value) when is_binary(value) do case String.trim(value) do "" -> nil string -> string end end defp normalize_gets_result(_) do nil end defp loop_prompt(prompt) do receive do {:done, parent, ref} -> send(parent, {:done, self(), ref}) IO.write(:standard_error, "\e[2K\r") after 1 -> IO.write(:standard_error, ["\e[2K\r", prompt]) loop_prompt(prompt) end end defp report_error(text) do Owl.IO.puts(Owl.Data.tag(text, :red)) end defp validate_required(value, opts) do optional? = Keyword.get(opts, :optional, false) if is_nil(value) and not optional? 
do {:error, "is required"} else {:ok, value} end end defp cast_input(:integer, nil, _opts), do: {:ok, nil} defp cast_input(:integer, binary, opts) do case Integer.parse(binary) do {number, ""} -> with :ok <- validate_bounds(number, :min, opts[:min]), :ok <- validate_bounds(number, :max, opts[:max]) do {:ok, number} end _ -> {:error, "not an integer"} end end defp cast_input(:string, binary, _opts), do: {:ok, binary} defp validate_bounds(_number, _, nil), do: :ok defp validate_bounds(number, :max, limit) do if number > limit do {:error, "must be less than or equal to #{limit}"} else :ok end end defp validate_bounds(number, :min, limit) do if number < limit do {:error, "must be greater than or equal to #{limit}"} else :ok end end @doc """ Wrapper around `IO.puts/2` that accepts `t:Owl.Data.t/0`. The other difference is that `device` argument is moved to second argument. ## Examples Owl.IO.puts(["Hello ", Owl.Data.tag("world", :green)]) #=> Hello world # specify Owl.LiveScreen as a device in order to print data above rendered live blocks Owl.IO.puts(["Hello ", Owl.Data.tag("world", :green)], Owl.LiveScreen) #=> Hello world """ @spec puts(Owl.Data.t(), device :: IO.device()) :: :ok def puts(data, device \\ :stdio) do data = Owl.Data.to_ansidata(data) IO.puts(device, data) end @doc """ Wrapper around `IO.inspect/3` with changed defaults. As in `puts/2`, `device` argument is moved to the end. Options are the same as for `IO.inspect/3` with small changes: * `:pretty` is `true` by default. * `:syntax_colors` uses color schema from `IEx` by default. * `:label` is extended and accepts `t:Owl.Data.t/0`. ## Examples "Hello" |> Owl.IO.inspect(label: "Greeting") |> String.upcase() |> Owl.IO.inspect(label: Owl.Data.tag("GREETING", :cyan)) #=> Greeting: "Hello" #=> GREETING: "HELLO" # inspect data above rendered live blocks Owl.IO.inspect("Hello", [], Owl.LiveScreen) #=> "Hello" """ @spec inspect(item, keyword(), IO.device()) :: item when item: var def inspect(item, opts \\ [], device \\ :stdio) do IO.inspect( device, item, [ pretty: true, syntax_colors: [ atom: :cyan, string: :green, list: :default_color, boolean: :magenta, nil: :magenta, tuple: :default_color, binary: :default_color, map: :default_color, reset: :yellow ] ] |> Keyword.merge(opts) |> Keyword.update(:label, nil, fn nil -> nil value -> Owl.Data.to_ansidata(value) end) ) end @doc """ Returns a width of a terminal. A wrapper around `:io.columns/0`, but returns `nil` if terminal is not found. This is useful for convenient falling back to other value using `||/2` operator. ## Example Owl.IO.columns() || 80 """ @spec columns() :: pos_integer() | nil def columns do case :io.columns() do {:ok, value} -> value _ -> nil end end end
lib/owl/io.ex
0.832305
0.485966
io.ex
starcoder
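A custom `:cast` callback matching `t:cast_input/0` above (a sketch; the callback receives the trimmed line, or `nil` for blank input, so the `nil` clause matters when `optional: true`):

```elixir
even = fn
  nil -> {:ok, nil}
  line ->
    case Integer.parse(line) do
      {n, ""} when rem(n, 2) == 0 -> {:ok, n}
      _ -> {:error, "not an even integer"}
    end
end

Owl.IO.input(label: "Even number", cast: even, optional: true)
```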
defmodule Raxx.SimpleServer do @moduledoc """ Server interface for simple `request -> response` interactions. *Modules that use Raxx.SimpleServer implement the Raxx.Server behaviour. Default implementations are provided for the streaming interface to buffer the request before a single call to `handle_request/2`.* ## Example Echo the body of a request to the client defmodule EchoServer do use Raxx.SimpleServer, maximum_body_length: 12 * 1024 * 1024 def handle_request(%Raxx.Request{method: :POST, path: [], body: body}, _state) do response(:ok) |> set_header("content-type", "text/plain") |> set_body(body) end end ## Options - **maximum_body_length** (default 8MB) the maximum sized body that will be automatically buffered. For large requests, e.g. file uploads, consider implementing a streaming server. """ @typedoc """ State of application server. Original value is the configuration given when starting the raxx application. """ @type state :: any() @doc """ Called with a complete request once all the data parts of a body are received. Passed a `Raxx.Request` and server configuration. Note the value of the request body will be a string. """ @callback handle_request(Raxx.Request.t(), state()) :: Raxx.Response.t() @eight_MB 8 * 1024 * 1024 defmacro __using__(options) do {options, []} = Module.eval_quoted(__CALLER__, options) maximum_body_length = Keyword.get(options, :maximum_body_length, @eight_MB) quote do @behaviour unquote(__MODULE__) import Raxx @behaviour Raxx.Server def handle_head(request = %{body: false}, state) do response = __MODULE__.handle_request(%{request | body: ""}, state) case response do %{body: true} -> raise "Incomplete response" _ -> response end end def handle_head(request = %{body: true}, state) do {[], {request, [], state}} end def handle_data(data, {request, iodata_buffer, state}) do iodata_buffer = [data | iodata_buffer] if :erlang.iolist_size(iodata_buffer) <= unquote(maximum_body_length) do {[], {request, iodata_buffer, state}} else Raxx.error_response(:payload_too_large) end end def handle_tail([], {request, iodata_buffer, state}) do body = :erlang.iolist_to_binary(Enum.reverse(iodata_buffer)) response = __MODULE__.handle_request(%{request | body: body}, state) case response do %{body: true} -> raise "Incomplete response" _ -> response end end def handle_info(message, state) do require Logger Logger.warn( "#{inspect(self())} received unexpected message in handle_info/2: #{inspect(message)}" ) {[], state} end end end end
lib/raxx/simple_server.ex
0.938969
0.487246
simple_server.ex
starcoder
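Traced by hand, the generated streaming callbacks buffer a chunked body and make a single `handle_request/2` call at the tail (hypothetical request and state values):

```elixir
# handle_head(%{body: true} = req, state)         #=> {[], {req, [], state}}
# handle_data("hello ", {req, [], state})         #=> {[], {req, ["hello "], state}}
# handle_data("world", {req, ["hello "], state})  #=> {[], {req, ["world", "hello "], state}}
# handle_tail([], {req, buffer, state})
#   reverses and flattens the buffer to "hello world",
#   then calls handle_request(%{req | body: "hello world"}, state)
```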
defmodule BusDetective.GTFS do @moduledoc """ The GTFS context module contains all of the functions needed to serve requests about scheduled or realtime GTFS data """ import Ecto.Query require Logger alias BusDetective.GTFS.{ Departure, Feed, ProjectedStopTime, Stop, StopSearch, StopTime, Trip } alias BusDetective.Repo alias Realtime.{StopTimeUpdate, TripUpdates} @doc """ This function takes projected stop times for the given stop and time range, and overlays realtime data for the stop times if available. """ def departures_for_stop(stop, start_time, end_time) do stop |> projected_stop_times_for_stop(start_time, end_time) |> Enum.map(fn projected_stop_time -> %ProjectedStopTime{ stop_time: %StopTime{ feed: %Feed{name: feed_name}, trip: %Trip{remote_id: trip_remote_id}, stop_sequence: stop_sequence } } = projected_stop_time case trip_updates_source().find_stop_time(feed_name, trip_remote_id, stop_sequence) do {:ok, %StopTimeUpdate{} = stop_time_update} -> %Departure{ scheduled_time: projected_stop_time.scheduled_departure_time, time: stop_time_update.departure_time, realtime?: true, delay: stop_time_update.delay, trip: projected_stop_time.stop_time.trip, route: projected_stop_time.stop_time.trip.route, agency: projected_stop_time.stop_time.trip.route.agency } _ -> %Departure{ scheduled_time: projected_stop_time.scheduled_departure_time, time: projected_stop_time.scheduled_departure_time, realtime?: false, delay: 0, trip: projected_stop_time.stop_time.trip, route: projected_stop_time.stop_time.trip.route, agency: projected_stop_time.stop_time.trip.route.agency } end end) |> Enum.sort_by(&Timex.to_erl(&1.time)) end def projected_stop_times_for_stop(%Stop{id: stop_id}, %DateTime{} = start_time, %DateTime{} = end_time) do Repo.all( from( projected in ProjectedStopTime, join: stop_time in assoc(projected, :stop_time), where: stop_time.stop_id == ^stop_id, where: projected.scheduled_departure_time >= ^start_time, where: projected.scheduled_departure_time <= ^end_time, order_by: [:scheduled_departure_time], preload: [stop_time: [:feed, trip: [:shape, route: :agency]]] ) ) end def search_stops(options) do search_string = Keyword.get(options, :query) latitude = Keyword.get(options, :latitude) longitude = Keyword.get(options, :longitude) pagination_options = options Stop |> preload([:routes, :feed]) |> StopSearch.query_string(search_string) |> StopSearch.query_nearby(latitude, longitude) |> Repo.paginate(pagination_options) end def get_stop(nil), do: nil def get_stop(id) do case Repo.get(Stop, id) do nil -> {:error, :not_found} stop -> {:ok, Repo.preload(stop, [:feed, :routes])} end end def get_stop(feed_id, stop_remote_id) do query = from( s in Stop, where: s.feed_id == ^feed_id and s.remote_id == ^stop_remote_id, preload: [:feed, :routes] ) case Repo.one(query) do nil -> {:error, :not_found} stop -> {:ok, stop} end end def get_stops([]), do: [] def get_stops(stop_ids) do query = from( s in Stop, preload: [:feed, :routes] ) query = stop_ids |> Enum.map(&String.split(&1, "-", parts: 2)) |> Enum.group_by(&Enum.at(&1, 0), &Enum.at(&1, 1)) |> Enum.reduce(query, fn {feed_id, remote_ids}, query -> from(stop in query, or_where: stop.feed_id == ^feed_id and stop.remote_id in ^remote_ids) end) Repo.all(query) end def subscribe_to_realtime(event_type) when event_type in [:trip_updates, :vehicle_positions] do Registry.register(Registry.Realtime, event_type, []) :ok end defp trip_updates_source do Application.get_env(:realtime, :trip_updates_source) || TripUpdates end end
apps/bus_detective/lib/bus_detective/gtfs/gtfs.ex
0.736874
0.427217
gtfs.ex
starcoder
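`get_stops/1` expects composite IDs of the form `"<feed_id>-<remote_id>"`; the grouping step it performs, shown with hypothetical IDs:

```elixir
iex> ["1-HAMBELL", "1-4THMAIN", "2-XYZ"]
...> |> Enum.map(&String.split(&1, "-", parts: 2))
...> |> Enum.group_by(&Enum.at(&1, 0), &Enum.at(&1, 1))
%{"1" => ["HAMBELL", "4THMAIN"], "2" => ["XYZ"]}
```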
defmodule OliWeb.Common.PagingParams do defstruct [ :rendered_pages_count, :start_page_index, :current_page_index, :last_page_index, :label ] def calculate(total_count, offset, limit, max_rendered_pages) do half = ceil((max_rendered_pages - 1) / 2) # Calculate the index of the absolute last page (not last rendered page choice) # of these results. For instance, if we have 1000 results and a limit of 10, we'd # have a total of 100 pages, the last index of those being 99 last_page_index = ceil(total_count / limit) - 1 # Determine how many visible page choices we are going to render. Because we want to # center the "current" page, we only show a maximum of 9. rendered_pages = min(last_page_index + 1, max_rendered_pages) # Based off of the current offset and limit, set our current page index. We assume # that our offset is either 0 or always a multiple of the limit current_page = floor(offset / limit) # Now determine our starting rendered page index. We want to center the current # page within the rendered pages, if we can start = cond do # When there are nine or less pages, the start is the first page # 0 1 2 # S C L last_page_index < max_rendered_pages -> 0 # See if there aren't enough pages to the left of the current to center it # 0 1 2 3 4 5 6 7 8 9 10 11 12 # S C L current_page <= half -> 0 # See if there aren't enough to the right of current to center # 0 1 2 3 4 5 6 7 8 9 10 11 12 # S C L last_page_index - current_page < half -> last_page_index - (max_rendered_pages - 1) # When the current page can be centered, do it # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 # S C L true -> current_page - half end upper = min(offset + limit, total_count) label = "Showing result #{offset + 1} - #{upper} of #{total_count} total" %__MODULE__{ rendered_pages_count: rendered_pages, start_page_index: start, current_page_index: current_page, last_page_index: last_page_index, label: label } end end
lib/oli_web/live/common/paging_params.ex
0.832849
0.439687
paging_params.ex
starcoder
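A worked call: with 1000 results, offset 500, limit 10, and 9 rendered pages, `half` is 4 and the current page can be centered, so the final `cond` branch applies:

```elixir
iex> OliWeb.Common.PagingParams.calculate(1000, 500, 10, 9)
%OliWeb.Common.PagingParams{
  rendered_pages_count: 9,
  start_page_index: 46,
  current_page_index: 50,
  last_page_index: 99,
  label: "Showing result 501 - 510 of 1000 total"
}
```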
defmodule GrovePi.Ultrasonic do alias GrovePi.Board use GrovePi.Poller, default_trigger: GrovePi.Ultrasonic.DefaultTrigger, read_type: GrovePi.Digital.level() @moduledoc """ Read distance and subscribe to changes from the Grove Ultrasonic Ranger sensor. Listen for events from a GrovePi Ultrasonic Ranger sensor. This module is configured for the Ultrasonic Ranger, the one that comes with the GrovePi+ Starter Kit. There is only one type of event by default: `:changed`. When registering for an event the Ultrasonic will send a message in the form of `{pin, :changed, %{value: 13}}` with the distance as an integer. The `GrovePi.Ultrasonic` module works by polling the pin that you have registered to an Ultrasonic sensor. Example use: ``` iex> pin = 3 iex> {:ok, pid} = GrovePi.Ultrasonic.start_link(pin) {:ok, #PID<0.205.0>} iex> GrovePi.Ultrasonic.read(pin) 20 iex> GrovePi.Ultrasonic.read(pin) 23 iex> GrovePi.Ultrasonic.subscribe(3, :changed) :ok ``` The `GrovePi.Ultrasonic.DefaultTrigger` is written so when the value of the ultrasonic sensor changes, the subscribed process will receive a message in the form of `{pid, :changed, %{value: 44}}`. The message should be received using GenServer handle_info/2. For example: ``` def handle_info({_pid, :changed, %{value: value}}, state) do # do something with value {:noreply, state} end ``` """ @type distance :: integer @doc false @deprecated "Use GrovePi.Ultrasonic.read/1 instead" @spec read_distance(GrovePi.pin(), atom) :: distance def read_distance(pin, prefix \\ Default) do GenServer.call(Pin.name(prefix, pin), :read) end @doc false @spec read_value(atom, GrovePi.pin()) :: distance def read_value(prefix, pin) do with :ok <- Board.send_request(prefix, <<7, pin, 0, 0>>), :ok <- wait_for_sensor(), <<_, distance::big-integer-size(16)>> <- Board.get_response(prefix, 3), do: distance end defp wait_for_sensor do Process.sleep(60) end end
lib/grovepi/ultrasonic.ex
0.853425
0.900879
ultrasonic.ex
starcoder
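The exchange inside `read_value/2`, annotated as a sketch (command byte `7` and the 3-byte response framing are from the code above; the distance unit is typically centimeters for this sensor family):

```elixir
# Board.send_request(prefix, <<7, pin, 0, 0>>)  # command 7: trigger an ultrasonic read
# Process.sleep(60)                             # wait_for_sensor/0 gives the ranger time to respond
# <<_, distance::big-integer-size(16)>> =
#   Board.get_response(prefix, 3)               # one status byte, then a 16-bit big-endian distance
```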
defmodule LiveStruct do @moduledoc """ LiveStruct is a tool that lets you use a struct as the 'assigns' of a `Phoenix.LiveView`. Note this is probably done, but it's still alpha, and it *might* cause problems with LiveView in the future. ## Installation The package can be installed by adding `live_struct` to your list of dependencies in `mix.exs`: ```elixir def deps do [ {:live_struct, "~> 0.1.0", runtime: false} ] end ``` ## Usage Use LiveStruct as follows: ```elixir defmodule MyAppWeb.PageLive do use MyAppWeb, :live_view use LiveStruct # <-- add this defassigns [:value, :other_value, :yet_another_value] # <-- and this # optional: @opaque state :: %__MODULE__{ value: String.t, other_value: [atom], yet_another_value: integer, } # also optional: @opaque socket :: %Phoenix.LiveView.Socket{ assigns: state } # ninja in your struct here: @impl true def mount(params, session, socket) do ... #your mount code {:ok, struct_assigns(socket)} #<-- with this code. end end ``` """ @doc false defmacro __using__(_) do quote do import LiveStruct, only: [defassigns: 1, struct_assigns: 1] @before_compile LiveStruct end end @doc false defmacro defassigns(assigns) do quote do defstruct unquote(assigns) ++ [:flash, :live_action] end end defmacro struct_assigns(socket) do quote do Map.put( unquote(socket), :assigns, struct(__MODULE__, unquote(socket).assigns) ) end end @doc false defmacro __before_compile__(context) do keys = context.module |> Module.get_attribute(:struct) |> Map.keys() |> Enum.reject(&(&1 == :__struct__)) quote do @behaviour Access @keys unquote(keys) @impl Access defdelegate fetch(struct, key), to: Map @impl Access defdelegate get_and_update(struct, key, function), to: Map @impl Access def pop(struct, key) when key in @keys do {Map.get(struct, key), %{struct | key => nil}} end def pop(_struct, _key), do: raise(KeyError) defimpl Enumerable do def count(struct) do struct |> Map.from_struct() |> Enumerable.Map.count() end def member?(struct, kv) do struct |> Map.from_struct() |> Enumerable.Map.member?(kv) end def slice(struct) do struct |> Map.from_struct() |> Enumerable.Map.slice() end def reduce(struct, acc, fun) do struct |> Map.from_struct() |> Enumerable.Map.reduce(acc, fun) end end end end end
lib/live_struct.ex
0.825695
0.822581
live_struct.ex
starcoder
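# A hedged sketch of what the Access and Enumerable implementations generated
# above enable, assuming a MyAppWeb.PageLive module that called
# `use LiveStruct` and `defassigns [:value, :other_value, :yet_another_value]`
# as in the moduledoc.
assigns = struct(MyAppWeb.PageLive, value: "hello", other_value: [:a, :b])

# Access is delegated to Map, so bracket access works on the assigns struct:
"hello" = assigns[:value]

# Enumerable goes through Map.from_struct/1, so Enum functions work too;
# note the :flash and :live_action keys that defassigns always adds:
keys = Enum.map(assigns, fn {key, _value} -> key end)
true = :flash in keys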
defmodule Vivid.Arc do
  alias Vivid.{Arc, Point, Path}
  import Vivid.Math

  defstruct ~w(center radius start_angle range steps)a

  @moduledoc ~S"""
  This module represents an Arc, otherwise known as a circle segment.

  ## Example

      iex> use Vivid
      ...> Arc.init(Point.init(10,10), 10, 0, 45)
      ...> |> to_string()
      "@@@@@@\n" <>
      "@@@ @\n" <>
      "@@@ @@\n" <>
      "@@ @@@\n" <>
      "@@ @@@\n" <>
      "@ @@@\n" <>
      "@ @@@@\n" <>
      "@ @@@@\n" <>
      "@ @@@@\n" <>
      "@@@@@@\n"
  """

  @opaque t :: %Arc{
            center: Point.t(),
            radius: number,
            start_angle: number,
            range: number,
            steps: integer
          }

  @doc ~S"""
  Creates an Arc.

  * `center` is a Point defining the center point of the arc's parent circle.
  * `radius` is the radius of the parent circle.
  * `start_angle` is the angle at which to start drawing the arc; `0` is
    parallel to the X axis, to the left.
  * `range` is the number of degrees to draw the arc.
  * `steps` - the arc is drawn by dividing it into this number of lines.
    Defaults to 12.

  ## Examples

      iex> Vivid.Arc.init(Vivid.Point.init(5,5), 4, 45, 15)
      %Vivid.Arc{
        center: %Vivid.Point{x: 5, y: 5},
        radius: 4,
        start_angle: 45,
        range: 15,
        steps: 12
      }
  """
  @spec init(Point.t(), number, number, number, integer) :: Arc.t()
  def init(%Point{} = center, radius, start_angle, range, steps \\ 12)
      when is_number(radius) and is_number(start_angle) and is_number(range) and
             is_integer(steps) do
    %Arc{
      center: center,
      radius: radius,
      start_angle: start_angle,
      range: range,
      steps: steps
    }
  end

  @doc """
  Returns the center point of an `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.center
      #Vivid.Point<{10, 10}>
  """
  @spec center(Arc.t()) :: Point.t()
  def center(%Arc{center: p} = _arc), do: p

  @doc """
  Changes the center `point` of `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.center(Vivid.Point.init(15,15))
      ...> |> Vivid.Arc.center
      #Vivid.Point<{15, 15}>
  """
  @spec center(Arc.t(), Point.t()) :: Arc.t()
  def center(%Arc{} = arc, %Point{} = point), do: %{arc | center: point}

  @doc """
  Returns the radius of an `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.radius
      5
  """
  @spec radius(Arc.t()) :: number
  def radius(%Arc{radius: r} = _arc), do: r

  @doc """
  Change the `radius` of `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.radius(10)
      ...> |> Vivid.Arc.radius
      10
  """
  @spec radius(Arc.t(), number) :: Arc.t()
  def radius(%Arc{} = arc, radius) when is_number(radius), do: %{arc | radius: radius}

  @doc """
  Returns the start angle of an `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.start_angle
      0
  """
  @spec start_angle(Arc.t()) :: number
  def start_angle(%Arc{start_angle: a} = _arc), do: a

  @doc """
  Change the start angle of an `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.start_angle(45)
      ...> |> Vivid.Arc.start_angle
      45
  """
  @spec start_angle(Arc.t(), number) :: Arc.t()
  def start_angle(%Arc{} = arc, theta), do: %{arc | start_angle: theta}

  @doc """
  Returns the range of the `arc`.

  ## Example

      iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12)
      ...> |> Vivid.Arc.range
      90
  """
  @spec range(Arc.t()) :: number
  def range(%Arc{range: r} = _arc), do: r

  @doc """
  Change the range of an `arc`.
## Example iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12) ...> |> Vivid.Arc.range(270) ...> |> Vivid.Arc.range 270 """ @spec range(Arc.t(), number) :: Arc.t() def range(%Arc{} = arc, theta) when is_number(theta), do: %{arc | range: theta} @doc """ Returns the number of steps in the `arc`. ## Example iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12) ...> |> Vivid.Arc.steps 12 """ @spec steps(Arc.t()) :: integer def steps(%Arc{steps: s} = _arc), do: s @doc """ Changes the number of `steps` in `arc`. ## Example iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 12) ...> |> Vivid.Arc.steps(19) ...> |> Vivid.Arc.steps 19 """ @spec steps(Arc.t(), integer) :: Arc.t() def steps(%Arc{} = arc, steps) when is_integer(steps), do: %{arc | steps: steps} @doc """ Converts the `arc` into a Path, which is used for a bunch of things like Transforms, Bounds calculation, Rasterization, etc. ## Example iex> Vivid.Arc.init(Vivid.Point.init(10,10), 5, 0, 90, 3) ...> |> Vivid.Arc.to_path #Vivid.Path<[#Vivid.Point<{5, 10}>, #Vivid.Point<{6, 13}>, #Vivid.Point<{8, 14}>, #Vivid.Point<{10, 15}>]> """ @spec to_path(Arc.t()) :: Path.t() def to_path( %Arc{center: center, radius: radius, start_angle: start_angle, range: range, steps: steps} = _arc ) do h = center |> Point.x() k = center |> Point.y() step_degree = range / steps start_angle = start_angle - 180 points = Enum.map(0..steps, fn step -> theta = step_degree * step + start_angle theta = degrees_to_radians(theta) x = round(h + radius * cos(theta)) y = round(k - radius * sin(theta)) Point.init(x, y) end) points |> Path.init() end end
lib/vivid/arc.ex
0.934035
0.443721
arc.ex
starcoder
defmodule AMQPX.ConnectionPool do
  use GenServer
  require Logger

  @moduledoc """
  A pool of connections for shared use.

  AMQPX encourages using multiple channels per TCP connection instead of
  multiple connections, wherever possible. `AMQPX.ConnectionPool` stores open
  connections that other modules can retrieve from it.

  # Configuration

  ```elixir
  config :ulfnet_amqpx,
    connections: %{name => url}
  ```
  """

  defstruct connections: %{}, config: %{}

  @doc false
  def child_spec(_) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, []}
    }
  end

  @doc false
  def start_link(), do: GenServer.start_link(__MODULE__, nil, name: __MODULE__)

  @doc "Fetch a connection by its name."
  @spec get(name :: atom(), timeout :: integer() | :infinity) ::
          {:ok, AMQP.Connection.t()} | {:error, reason :: any()}
  def get(name, timeout \\ :infinity), do: GenServer.call(__MODULE__, {:get, name}, timeout)

  @doc "Register a connection config outside of `sys.config`."
  @spec register(name :: atom(), url :: any()) :: any()
  def register(name, url), do: GenServer.call(__MODULE__, {:register, name, url})

  @doc false
  def update_config, do: GenServer.call(__MODULE__, :update_config)

  @impl GenServer
  def init(_) do
    config = Application.get_env(:ulfnet_amqpx, :connections, %{})
    state = %__MODULE__{config: config}
    {:ok, state}
  end

  @impl GenServer
  def handle_call({:get, id}, _, state = %__MODULE__{connections: connections, config: config}) do
    case connections do
      %{^id => conn} ->
        {:reply, {:ok, conn}, state}

      _ ->
        case try_connect(id, config) do
          resp = {:ok, conn} ->
            setup_monitor(id, conn)
            connections = Map.put(connections, id, conn)
            {:reply, resp, %__MODULE__{state | connections: connections}}

          err ->
            {:reply, err, state}
        end
    end
  end

  def handle_call(:update_config, _, state = %__MODULE__{config: config}) do
    new_config = Application.get_env(:ulfnet_amqpx, :connections, %{})
    config = Map.merge(config, new_config)
    state = %__MODULE__{state | config: config}
    {:reply, config, state}
  end

  def handle_call({:register, name, url}, _, state = %__MODULE__{config: config}) do
    state = %__MODULE__{state | config: Map.put(config, name, url)}
    {:reply, :ok, state}
  end

  @impl GenServer
  def handle_info({:connection_down, id, _reason}, state = %__MODULE__{connections: connections}) do
    Logger.error("lost connection #{id}")
    connections = Map.delete(connections, id)
    {:noreply, %__MODULE__{state | connections: connections}}
  end

  defp try_connect(id, config) do
    case config do
      %{^id => url} -> AMQP.Connection.open(url)
      _ -> {:error, :not_configured}
    end
  end

  defp setup_monitor(id, conn) do
    ref = make_ref()
    pool = self()

    spawn(fn ->
      pid = conn.pid
      Process.monitor(pid)
      send(pool, {:monitor_setup, ref})

      receive do
        {:DOWN, _, :process, ^pid, reason} -> send(pool, {:connection_down, id, reason})
      end
    end)

    receive do
      {:monitor_setup, ^ref} -> nil
    end
  end
end
lib/amqpx/connection_pool.ex
0.813757
0.591753
connection_pool.ex
starcoder
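# A usage sketch for the pool above. The :main name and broker URL are
# examples; AMQP.Connection/AMQP.Channel come from the `amqp` library.
:ok = AMQPX.ConnectionPool.register(:main, "amqp://guest:guest@localhost")

case AMQPX.ConnectionPool.get(:main) do
  {:ok, %AMQP.Connection{} = conn} ->
    # Open a cheap channel on the shared TCP connection instead of dialing
    # a new connection per consumer.
    {:ok, _channel} = AMQP.Channel.open(conn)

  {:error, reason} ->
    IO.inspect(reason, label: "could not get connection")
end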
defmodule Paginator.Cursor do @moduledoc false def decode(nil), do: nil def decode(encoded_cursor) do encoded_cursor |> Base.url_decode64!() |> safe_binary_to_term() |> Enum.map(&Paginator.Cursor.Decode.convert/1) end def encode(values) when is_list(values) do values |> Enum.map(&Paginator.Cursor.Encode.convert/1) |> safe_term_to_binary() |> Base.url_encode64() end def encode(value) do encode([value]) end # Adapted from https://github.com/elixir-plug/plug_crypto/blob/f53977806ab4ee82850fb11fd552a663b60e12ab/lib/plug/crypto.ex#L27 @spec safe_binary_to_term(binary()) :: term() def safe_binary_to_term(binary) when is_binary(binary) do term = :erlang.binary_to_term(binary, [:safe]) safe_terms(term) term end def safe_term_to_binary(term) do safe_terms(term) :erlang.term_to_binary(term) end defp safe_terms(list) when is_list(list) do safe_list(list) end defp safe_terms(tuple) when is_tuple(tuple) do safe_tuple(tuple, tuple_size(tuple)) end defp safe_terms(map) when is_map(map) do folder = fn key, value, acc -> safe_terms(key) safe_terms(value) acc end :maps.fold(folder, map, map) end defp safe_terms(other) when is_atom(other) or is_number(other) or is_bitstring(other) or is_pid(other) or is_reference(other) do other end defp safe_terms(other) do raise ArgumentError, "cannot deserialize #{inspect(other)}, the term is not safe for deserialization" end defp safe_list([]), do: :ok defp safe_list([h | t]) when is_list(t) do safe_terms(h) safe_list(t) end defp safe_list([h | t]) do safe_terms(h) safe_terms(t) end defp safe_tuple(_tuple, 0), do: :ok defp safe_tuple(tuple, n) do safe_terms(:erlang.element(n, tuple)) safe_tuple(tuple, n - 1) end end defprotocol Paginator.Cursor.Encode do @fallback_to_any true def convert(term) end defprotocol Paginator.Cursor.Decode do @fallback_to_any true def convert(term) end defimpl Paginator.Cursor.Encode, for: Any do def convert(term), do: term end defimpl Paginator.Cursor.Decode, for: Any do def convert(term), do: term end
lib/paginator/cursor.ex
0.766294
0.433802
cursor.ex
starcoder
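# A round-trip sketch for the cursor module above: values pass through
# term_to_binary/1 and URL-safe Base64, and decoding refuses terms that are
# unsafe to deserialize, such as functions.
cursor = Paginator.Cursor.encode([42, "2021-01-01"])
[42, "2021-01-01"] = Paginator.Cursor.decode(cursor)

# A crafted cursor containing an executable term raises instead of decoding;
# the error comes from :erlang.binary_to_term/2 or safe_terms/1.
evil = :erlang.term_to_binary(fn -> :boom end) |> Base.url_encode64()

try do
  Paginator.Cursor.decode(evil)
rescue
  ArgumentError -> :rejected
end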
defmodule HandheldHalting do @spec run_until_done(String.t()) :: {non_neg_integer, %{acc: integer, ip: integer}} @doc """ iex> import HandheldHalting iex> run_until_done(sample_input()) {7, %{ip: 9, acc: 8}} """ def run_until_done(code), do: load_code(code) |> run_until_done(0) @spec run_until_done(:array.array(String.t()), non_neg_integer) :: {non_neg_integer, %{acc: integer, ip: integer}} def run_until_done(code, change_here) do {changed_code, changed_pos} = change(code, change_here) run_until_loop_or_done(changed_code) |> case do {:done, state} -> {changed_pos, state} {:loop, _} -> run_until_done(code, changed_pos + 1) end end @spec run_until_loop_or_done(:array.array(String.t()), %{acc: integer, ip: non_neg_integer}, MapSet.t(integer())) :: {:done, %{acc: integer, ip: integer}} | {:loop, %{acc: integer, ip: integer}} def run_until_loop_or_done(code, state \\ %{ip: 0, acc: 0}, visited \\ MapSet.new()) do new_state = step(code, state) cond do MapSet.member?(visited, new_state[:ip]) -> {:loop, new_state} new_state[:ip] == :array.size(code) -> {:done, new_state} true -> run_until_loop_or_done(code, new_state, MapSet.put(visited, state[:ip])) end end @spec change(:array.array(String.t()), non_neg_integer) :: {:array.array(String.t()), non_neg_integer} def change(code, pos) do case :array.get(pos, code) do "nop " <> arg -> {:array.set(pos, "jmp " <> arg, code), pos} "jmp " <> arg -> {:array.set(pos, "nop " <> arg, code), pos} _ -> change(code, pos + 1) end end @doc """ iex> import HandheldHalting iex> run_until_loop(sample_input()) %{ip: 1, acc: 5} """ def run_until_loop(code) when is_binary(code), do: run_until_loop(load_code(code), %{ip: 0, acc: 0}) def run_until_loop(code, state, visited \\ MapSet.new()) do new_state = step(code, state) MapSet.member?(visited, new_state[:ip]) |> case do true -> new_state false -> run_until_loop(code, new_state, MapSet.put(visited, state[:ip])) end end @spec step(binary | :array.array(any), %{acc: integer(), ip: non_neg_integer}) :: %{ acc: integer(), ip: integer() } @doc """ iex> import HandheldHalting iex> step("nop +0", %{ip: 0, acc: 0}) %{ip: 1, acc: 0} iex> step("nop +0\\nacc +5", %{ip: 1, acc: 3}) %{ip: 2, acc: 8} iex> step("nop +0\\njmp -1", %{ip: 1, acc: 3}) %{ip: 0, acc: 3} """ def step(code, state) when is_binary(code), do: step(load_code(code), state) def step(code, %{ip: ip, acc: acc} = state) do :array.get(ip, code) |> case do "nop " <> _ -> %{state | ip: ip + 1} "acc " <> arg -> %{state | ip: ip + 1, acc: acc + String.to_integer(arg)} "jmp " <> arg -> %{state | ip: ip + String.to_integer(arg)} end end @spec load_code(String.t()) :: :array.array(String.t()) def load_code(str) do str |> String.split("\n", trim: true) |> :array.from_list() end @spec sample_input :: String.t() def sample_input do """ nop +0 acc +1 jmp +4 acc +3 jmp -3 acc -99 acc +1 jmp -4 acc +6 """ end end
8-HandheldHalting/lib/handheld_halting.ex
0.785309
0.469703
handheld_halting.ex
starcoder
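# A usage sketch for the solver above against its own sample_input/0; the
# expected values mirror the module's doctests.
import HandheldHalting

# Part 1: machine state just before an instruction would run a second time.
%{ip: 1, acc: 5} = run_until_loop(sample_input())

# Part 2: index of the patched nop/jmp plus the final state after termination.
{7, %{ip: 9, acc: 8}} = run_until_done(sample_input())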
defmodule Elixircom do
  alias Elixircom.Server

  @moduledoc """
  A serial port terminal emulator for IEx

  Run interactively by starting it from the `IEx` prompt. Here's an example
  that uses `Elixircom` to interact with a modem:

  ```elixir
  iex> Elixircom.run("/dev/tty.usbmodem14103", speed: 115_200)
  AT
  OK
  ^B
  iex>
  ```
  """

  @type uart_opt :: {:speed, non_neg_integer}
  @type uart_opts :: [uart_opt()]

  @doc """
  Run `Elixircom`

  This will, in effect, make your IEx session into a serial port terminal
  emulator. You can always get back to your original IEx session by pressing:
  `Ctrl+B`

  The first argument is the serial port name: a string naming the serial port
  device you are trying to connect to.

  The second argument is a keyword list of options:

  * `:speed` - the baud rate
  """
  @spec run(serial_port_name :: String.t(), uart_opts()) :: :ok | {:error, term()}
  def run(serial_port_name, opts \\ []) do
    gl = Process.group_leader()
    orig_opts = :io.getopts(gl)

    :io.setopts(gl, echo: false, expand_fun: fn _ -> {:no, "", []} end, binary: false)

    case Server.start(
           group_leader: gl,
           uart_opts: opts,
           serial_port_name: serial_port_name,
           io_restore_opts: orig_opts
         ) do
      {:ok, server} ->
        get_chars(gl, server)
        :io.setopts(gl, orig_opts)

      {:error, _} = error ->
        :io.setopts(gl, orig_opts)
        log_error(error)
    end
  end

  defp get_chars(gl, server) do
    case :io.get_chars(gl, "", 1) do
      :eof ->
        get_chars(gl, server)

      # Ctrl+B (ASCII 2) stops the emulator and returns control to IEx
      [2] ->
        Server.stop(server)
        :ok

      [char] ->
        Server.handle_input(server, char)
        get_chars(gl, server)

      _ ->
        :ok
    end
  end

  defp log_error({:error, :enoent} = error) do
    IO.puts("""
    Unable to find specified port. Please make sure your device is plugged in
    and ready to be connected to.
    """)

    error
  end

  defp log_error({:error, :eagain} = error) do
    IO.puts("""
    Serial port is already open. Make sure you are not connecting to the port
    in another terminal or IEx session.
    """)

    error
  end

  defp log_error({:error, :eacces} = error) do
    IO.puts("""
    Permission denied when opening port. Make sure you have the correct
    permissions to access the port.
    """)

    error
  end
end
lib/elixircom.ex
0.81841
0.805364
elixircom.ex
starcoder
defmodule ConciergeSite.TripCardHelper do @moduledoc """ Display our Trip Card in templates """ import Phoenix.HTML.Tag, only: [content_tag: 3] import Phoenix.HTML.Link, only: [link: 2] import Phoenix.Controller, only: [get_csrf_token: 0] import ConciergeSite.TimeHelper, only: [format_time_string: 2, time_to_string: 1] alias AlertProcessor.Model.{Trip, Subscription} alias ConciergeSite.{FontAwesomeHelpers, IconViewHelper, RouteHelper} @station_features [ elevator: "Elevators", escalator: "Escalators", bike_storage: "Bike storage", parking_area: "Parking" ] @spec render(Plug.Conn.t(), atom | Trip.t()) :: Phoenix.HTML.safe() def render(conn, %Trip{trip_type: :accessibility, id: id} = trip) do content_tag :div, class: "card trip__card #{paused_class(trip)}", data: [trip_card: "link", link_type: "accessibility", trip_id: id] do accessibility_content(trip, conn, id) end end def render(conn, %Trip{id: id} = trip) do content_tag :div, class: "card trip__card #{paused_class(trip)}", data: [trip_card: "link", link_type: "commute", trip_id: id] do commute_content(trip, conn, id) end end def render(_), do: "" @spec display(Plug.Conn.t(), atom | Trip.t()) :: Phoenix.HTML.safe() def display(conn, %Trip{trip_type: :accessibility, id: id} = trip) do content_tag :div, class: "card trip__card trip__card--display #{paused_class(trip)}" do accessibility_content(trip, conn, id) end end def display(_), do: "" @spec accessibility_content(Trip.t(), Plug.Conn.t(), String.t()) :: [Phoenix.HTML.safe()] defp accessibility_content( %Trip{ trip_type: :accessibility, relevant_days: relevant_days, facility_types: facility_types, subscriptions: subscriptions } = trip, conn, id ) do [ content_tag :div, class: "trip__card--top" do [ content_tag :span, class: "trip__card--route-icon" do IconViewHelper.icon(:t) end, content_tag :span, class: "trip__card--route" do "Station features" end, content_tag :div, class: "trip__card--type" do "#{stops_and_routes(subscriptions)}" end ] end, content_tag :div, class: "trip__card--bottom" do [ content_tag :div, class: "trip__card--type" do "#{facility_types(facility_types)}" end, content_tag :div, class: "trip__card--type" do "#{days(relevant_days)}" end ] end, footer(conn, :accessibility, id, Trip.paused?(trip)) ] end @spec commute_content(Trip.t(), Plug.Conn.t(), String.t()) :: [Phoenix.HTML.safe()] defp commute_content( %Trip{ subscriptions: subscriptions, roundtrip: roundtrip, relevant_days: relevant_days, start_time: start_time, end_time: end_time, return_start_time: return_start_time, return_end_time: return_end_time, facility_types: facility_types } = trip, conn, id ) do [ content_tag :div, class: "trip__card--top" do [ routes(subscriptions), content_tag :div, class: "trip__card--top-details" do [ roundtrip(roundtrip), stops(subscriptions) ] end ] end, content_tag :div, class: "trip__card--bottom" do [ content_tag :div, class: "trip__card--type" do "#{facility_types(facility_types)}" end, days(relevant_days), trip_times({start_time, end_time}, {return_start_time, return_end_time}) ] end, footer(conn, :commute, id, Trip.paused?(trip)) ] end @spec footer(Plug.Conn.t(), atom, String.t(), boolean) :: Phoenix.HTML.safe() defp footer(conn, trip_type, id, trip_paused?) do content_tag :div, class: "trip__card--footer" do [ delete_link(id), edit_link(conn, trip_type, id), pause_link(conn, id, trip_paused?) 
] end end @spec routes([Subscription.t()]) :: [Phoenix.HTML.safe()] defp routes(subscriptions) do subscriptions |> exclude_return_trip_subscriptions() |> RouteHelper.collapse_duplicate_green_legs() |> Enum.map(fn subscription -> content_tag :div, class: "trip__card--route-container" do [ content_tag :span, class: "trip__card--route-icon" do IconViewHelper.icon_for_route(subscription.type, subscription.route) end, content_tag :span, class: "trip__card--route" do RouteHelper.route_name(subscription.route) end ] end end) end @spec stops([Subscription.t()]) :: Phoenix.HTML.safe() defp stops(subscriptions) do first_trip_subscriptions = subscriptions |> exclude_return_trip_subscriptions() origin = List.first(first_trip_subscriptions).origin destination = List.last(first_trip_subscriptions).destination content_tag :span, class: "trip__card--stops" do case {origin, destination} do {nil, nil} -> "" {nil, destination} -> [" to ", RouteHelper.stop_name(destination)] {origin, nil} -> [" from ", RouteHelper.stop_name(origin)] {origin, destination} -> ": #{RouteHelper.stop_name(origin)} — #{RouteHelper.stop_name(destination)}" end end end @spec roundtrip(boolean) :: String.t() defp roundtrip(true), do: "Round-trip" defp roundtrip(false), do: "One-way" @spec days([atom]) :: String.t() defp days([:monday, :tuesday, :wednesday, :thursday, :friday]), do: "Weekdays" defp days([:saturday, :sunday]), do: "Weekends" defp days([:monday, :tuesday, :wednesday, :thursday, :friday, :saturday, :sunday]), do: "Every day" defp days(days) do days |> Enum.map(&String.slice(String.capitalize(Atom.to_string(&1)), 0..2)) |> Enum.join(", ") end @spec trip_times({Time.t(), Time.t()}, {Time.t(), Time.t()} | {nil, nil}) :: Phoenix.HTML.safe() defp trip_times(start_time, {nil, nil}) do content_tag :div, class: "trip__card--times" do format_time(start_time) end end defp trip_times(start_time, end_time) do content_tag :div, class: "trip__card--times" do "#{format_time(start_time)}, #{format_time(end_time)}" end end @spec format_time({Time.t(), Time.t()}) :: String.t() defp format_time({start_time, end_time}) do "#{String.slice(format_time_string(time_to_string(start_time), "%l:%M%p"), 0..-2)} - #{ String.slice(format_time_string(time_to_string(end_time), "%l:%M%p"), 0..-2) }" end @spec facility_types([atom]) :: String.t() defp facility_types(facility_types) do facility_types |> Enum.map(&@station_features[&1]) |> Enum.join(", ") end @spec stops_and_routes([Subscription.t()]) :: [String.t()] defp stops_and_routes(subscriptions) do subscriptions |> Enum.map(fn subscription -> case {subscription.origin, subscription.route} do {stop_id, nil} -> RouteHelper.stop_name(stop_id) {nil, route_id} -> RouteHelper.route_name(route_id) end end) |> Enum.intersperse(", ") end @spec delete_link(String.t()) :: Phoenix.HTML.safe() defp delete_link(trip_id) do content_tag :a, class: "trip__card--delete-link", tabindex: "0", data: [toggle: "modal", target: "#deleteModal", trip_id: trip_id, token: get_csrf_token()] do [ "Delete", FontAwesomeHelpers.fa("trash") ] end end @spec edit_link(Plug.Conn.t(), atom, String.t()) :: Phoenix.HTML.safe() defp edit_link(conn, :commute, id) do conn |> ConciergeSite.Router.Helpers.trip_path(:edit, id) |> edit_link_for_path() end defp edit_link(conn, :accessibility, id) do conn |> ConciergeSite.Router.Helpers.accessibility_trip_path(:edit, id) |> edit_link_for_path() end @spec edit_link_for_path(String.t()) :: Phoenix.HTML.safe() defp edit_link_for_path(path) do link to: path, class: "trip__card--edit-link" do [ "Edit", 
FontAwesomeHelpers.fa("pencil") ] end end @spec pause_link(Plug.Conn.t(), String.t(), boolean) :: Phoenix.HTML.safe() defp pause_link(conn, trip_id, false = trip_paused?) do conn |> ConciergeSite.Router.Helpers.trip_pause_path(:pause, trip_id) |> pause_link_for_path(trip_paused?) end defp pause_link(conn, trip_id, true = trip_paused?) do conn |> ConciergeSite.Router.Helpers.trip_resume_path(:resume, trip_id) |> pause_link_for_path(trip_paused?) end @spec pause_link_for_path(String.t(), boolean) :: Phoenix.HTML.safe() defp pause_link_for_path(path, trip_paused?) do link to: path, method: :patch, class: "trip__card--pause-link" do pause_link_content(trip_paused?) end end @spec pause_link_content(boolean) :: [Phoenix.HTML.safe()] defp pause_link_content(false = _trip_paused?) do [ "Pause", FontAwesomeHelpers.fa("pause-circle") ] end defp pause_link_content(true = _trip_paused?) do [ "Resume", FontAwesomeHelpers.fa("play-circle") ] end defp exclude_return_trip_subscriptions(subscriptions) do subscriptions |> Enum.reject(& &1.return_trip) end @spec paused_class(Trip.t()) :: String.t() defp paused_class(%Trip{} = trip), do: if(Trip.paused?(trip), do: "paused", else: "") end
apps/concierge_site/lib/views/trip_card_helper.ex
0.700895
0.426799
trip_card_helper.ex
starcoder
defmodule Benchee.Formatters.Console.Helpers do @moduledoc false # These are common functions shared between the formatting of the run time and # memory usage statistics. alias Benchee.Conversion.{Count, DeviationPercent, Format, Scale, Unit} alias Benchee.Scenario alias Benchee.Statistics @type unit_per_statistic :: %{atom => Unit.t()} # Length of column header @default_label_width 4 @spec mode_out(Statistics.mode(), Benchee.Conversion.Unit.t()) :: String.t() def mode_out(modes, _run_time_unit) when is_nil(modes) do "None" end def mode_out(modes, run_time_unit) when is_list(modes) do Enum.map_join(modes, ", ", fn mode -> unit_output(mode, run_time_unit) end) end def mode_out(mode, run_time_unit) when is_number(mode) do unit_output(mode, run_time_unit) end defp unit_output(value, unit) do Format.format({Scale.scale(value, unit), unit}) end @spec label_width([Scenario.t()]) :: number def label_width(scenarios) do max_label_width = scenarios |> Enum.map(fn scenario -> String.length(scenario.name) end) |> Stream.concat([@default_label_width]) |> Enum.max() max_label_width + 1 end @spec count_output(number, Count.units()) :: binary def count_output(count, unit) do Count.format({Count.scale(count, unit), unit}) end @spec deviation_output(number) :: binary def deviation_output(std_dev_ratio) do DeviationPercent.format(std_dev_ratio) end @spec descriptor(String.t()) :: String.t() def descriptor(header_str), do: "\n#{header_str}: \n" def format_comparison( name, statistics, display_value, comparison_name, display_unit, label_width, column_width ) do "~*s~*s ~ts" |> :io_lib.format([ -label_width, name, column_width, display_value, comparison_display(statistics, comparison_name, display_unit) ]) |> to_string end defp comparison_display(%Statistics{relative_more: nil, absolute_difference: nil}, _, _), do: "" defp comparison_display(statistics, comparison_name, unit) do "- #{comparison_text(statistics, comparison_name)} #{ absolute_difference_text(statistics, unit) }\n" end defp comparison_text(%Statistics{relative_more: :infinity}, name), do: "∞ x #{name}" defp comparison_text(%Statistics{relative_more: nil}, _), do: "N/A" defp comparison_text(statistics, comparison_name) do "~.2fx ~s" |> :io_lib.format([statistics.relative_more, comparison_name]) |> to_string end defp absolute_difference_text(statistics, unit) do formatted_value = Format.format({Scale.scale(statistics.absolute_difference, unit), unit}) if statistics.absolute_difference >= 0 do "+#{formatted_value}" else formatted_value end end end
lib/benchee/formatters/console/helpers.ex
0.78316
0.458773
helpers.ex
starcoder
defmodule Itsy.Bit do use Bitwise @doc """ A guard expression that checks whether the integer is a power of 2 or not. """ @spec is_power_of_2(integer) :: Macro.t defmacro is_power_of_2(x), do: quote do: (unquote(x) &&& ~~~-unquote(x)) == 0 @doc """ Get the lowest unset bit. iex> Itsy.Bit.lowest_unset(0b1101) 0b0010 iex> Itsy.Bit.lowest_unset(0b1100) 0b0001 iex> Itsy.Bit.lowest_unset(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 0x0010000000000000000000000000000000000000000000000 iex> Itsy.Bit.lowest_unset(0x1fefffffffffffffffffffffffffffffffffffffffffffffe) 0x0000000000000000000000000000000000000000000000001 iex> Itsy.Bit.lowest_unset(-7) 2 iex> Itsy.Bit.lowest_unset(-8) 1 """ @spec lowest_unset(integer) :: integer def lowest_unset(x), do: ~~~x &&& (x + 1) @doc """ Get the lowest set bit. iex> Itsy.Bit.lowest_set(0b1101) 0b0001 iex> Itsy.Bit.lowest_set(0b1100) 0b0100 iex> Itsy.Bit.lowest_set(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 0x0000000000000000000000000000000000000000000000001 iex> Itsy.Bit.lowest_set(0x1fefffffffffffffffffffffffffffffffffffffffffffffe) 0x0000000000000000000000000000000000000000000000002 iex> Itsy.Bit.lowest_set(-7) 1 iex> Itsy.Bit.lowest_set(-8) 8 """ @spec lowest_set(integer) :: integer def lowest_set(x), do: x &&& -x @doc """ Get the highest set bit. iex> Itsy.Bit.highest_set(0b1101) 0b1000 iex> Itsy.Bit.highest_set(0b1100) 0b1000 iex> Itsy.Bit.highest_set(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 0x1000000000000000000000000000000000000000000000000 iex> Itsy.Bit.highest_set(0x1feff00000000000000000000000000000000000000000000) 0x1000000000000000000000000000000000000000000000000 iex> Itsy.Bit.highest_set(-7) 4 iex> Itsy.Bit.highest_set(-8) 8 """ @spec highest_set(integer) :: integer def highest_set(x) do x = mask(x) x ^^^ (x >>> 1) end @doc """ Find the minimum power of 2 that is equal or greater than the value. iex> Itsy.Bit.min_power_of_2(0b1101) 0b10000 iex> Itsy.Bit.min_power_of_2(0b1000) 0b1000 iex> Itsy.Bit.min_power_of_2(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 0x2000000000000000000000000000000000000000000000000 iex> Itsy.Bit.min_power_of_2(0x1000000000000000000000000000000000000000000000000) 0x1000000000000000000000000000000000000000000000000 iex> Itsy.Bit.min_power_of_2(-7) -8 iex> Itsy.Bit.min_power_of_2(-8) -8 """ @spec min_power_of_2(integer) :: integer def min_power_of_2(0), do: 0 def min_power_of_2(x) when x < 0, do: mask(x + 1) def min_power_of_2(x), do: mask(x - 1) + 1 @doc """ Create a mask for the entire value. An interesting property of this function is that a positive integer produces a mask that is the opposite to a negative integer. iex> Itsy.Bit.mask(0b1101) 0b1111 iex> Itsy.Bit.mask(0b1100) 0b1111 iex> Itsy.Bit.mask(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 0x1ffffffffffffffffffffffffffffffffffffffffffffffff iex> Itsy.Bit.mask(0x1feff00000000000000000000000000000000000000000000) 0x1ffffffffffffffffffffffffffffffffffffffffffffffff iex> Itsy.Bit.mask(-3) -4 iex> Itsy.Bit.mask(-4) -8 iex> :erlang.band(0b1111, Itsy.Bit.mask(2)) + :erlang.band(0b1111, Itsy.Bit.mask(-2)) 0b1111 iex> { :erlang.band(0b1111, Itsy.Bit.mask(2)), :erlang.band(0b1111, Itsy.Bit.mask(-2)) } { 3, 12 } """ @spec mask(integer) :: integer def mask(x) when x < 0, do: unmask(mask(-x)) def mask(x), do: mask(x, 1) defp mask(x, _) when is_power_of_2(x + 1), do: x defp mask(x, size), do: mask(x ||| (x >>> size), size <<< 1) @doc """ Create a mask for the range that exceeds the value. Produces a mask opposite to `mask/1`. 
An interesting property of this function is that a positive integer produces a mask that is the opposite to a negative integer. iex> Itsy.Bit.unmask(0b1101) -0b10000 iex> Itsy.Bit.unmask(0b1100) -0b10000 iex> Itsy.Bit.unmask(0x1feffffffffffffffffffffffffffffffffffffffffffffff) -0x2000000000000000000000000000000000000000000000000 iex> Itsy.Bit.unmask(0x1feff00000000000000000000000000000000000000000000) -0x2000000000000000000000000000000000000000000000000 iex> Itsy.Bit.unmask(-3) 3 iex> Itsy.Bit.unmask(-4) 7 iex> :erlang.band(0b1111, Itsy.Bit.unmask(2)) + :erlang.band(0b1111, Itsy.Bit.unmask(-2)) 0b1111 iex> { :erlang.band(0b1111, Itsy.Bit.unmask(2)), :erlang.band(0b1111, Itsy.Bit.unmask(-2)) } { 12, 3 } """ @spec unmask(integer) :: integer def unmask(x) when x < 0, do: ~~~unmask(-x) def unmask(x), do: ~~~mask(x) @doc """ Masks the lower bits of a power of 2 value. Value must be a power of 2. iex> Itsy.Bit.mask_lower_power_of_2(0b1101) ** (Itsy.PowerOf2Error) value 13 is not a power of 2 iex> Itsy.Bit.mask_lower_power_of_2(0b1000) 0b0111 iex> Itsy.Bit.mask_lower_power_of_2(0x10e0000000000000000000000000000000000000000000000) ** (Itsy.PowerOf2Error) value 6620380736540639868108059157289335673232953007833161400320 is not a power of 2 iex> Itsy.Bit.mask_lower_power_of_2(0x1000000000000000000000000000000000000000000000000) 0x0ffffffffffffffffffffffffffffffffffffffffffffffff """ @spec mask_lower_power_of_2(integer) :: integer def mask_lower_power_of_2(x) when (x != 0) and is_power_of_2(x), do: ~~~-x def mask_lower_power_of_2(x) when x < 0, do: -mask_lower_power_of_2(-x) def mask_lower_power_of_2(x), do: raise Itsy.PowerOf2Error, value: x @doc """ Count the set bits. iex> Itsy.Bit.count(0b1101) 3 iex> Itsy.Bit.count(0b1100) 2 iex> Itsy.Bit.count(0x1feffffffffffffffffffffffffffffffffffffffffffffff) 192 iex> Itsy.Bit.count(0x1feff00000000000000000000000000000000000000000000) 16 """ @spec count(non_neg_integer) :: non_neg_integer def count(x) when x >= 0, do: count(x, 0) defp count(0, total), do: total defp count(x, total) do c = x &&& 0xffffffff c = c - ((c >>> 1) &&& 0x55555555) c = (c &&& 0x33333333) + ((c >>> 2) &&& 0x33333333) c = (c + (c >>> 4)) &&& 0x0f0f0f0f c = ((c * 0x01010101) &&& 0xffffffff) >>> 24 count(x >>> 32, total + c) end @doc """ Sets `n` least significant bits. iex> Itsy.Bit.set(0) 0 iex> Itsy.Bit.set(4) 0b1111 iex> Itsy.Bit.set(70) 0b1111111111111111111111111111111111111111111111111111111111111111111111 iex> Itsy.Bit.set(192) 0xffffffffffffffffffffffffffffffffffffffffffffffff """ @spec set(non_neg_integer) :: non_neg_integer def set(n) when n >= 0, do: (1 <<< n) - 1 @doc """ Check whether the value is a power of 2. iex> Itsy.Bit.power_of_2?(0b1101) false iex> Itsy.Bit.power_of_2?(0b1000) true iex> Itsy.Bit.power_of_2?(0x10e0000000000000000000000000000000000000000000000) false iex> Itsy.Bit.power_of_2?(0x1000000000000000000000000000000000000000000000000) true """ @spec power_of_2?(integer) :: boolean() def power_of_2?(x) when x < 0, do: power_of_2?(-x) def power_of_2?(x), do: (x != 0) && is_power_of_2(x) end
lib/itsy/bit.ex
0.842766
0.600979
bit.ex
starcoder
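# A quick cross-check sketch for Itsy.Bit.count/1 above: the 32-bit-chunked
# SWAR popcount should agree with a naive base-2 digit count.
naive_count = fn x -> x |> Integer.digits(2) |> Enum.count(&(&1 == 1)) end

Enum.each([0, 0b1101, 0xDEADBEEF, Itsy.Bit.set(192)], fn x ->
  true = Itsy.Bit.count(x) == naive_count.(x)
end)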
defmodule Toby.Data.Node do
  @moduledoc """
  Retrieves system information for a particular connected node via RPC call.
  """

  # General

  def system_info(node, key) do
    call(node, :erlang, :system_info, [key])
  end

  def memory(node) do
    call(node, :erlang, :memory)
  end

  def statistics(node, key) do
    call(node, :erlang, :statistics, [key])
  end

  def where_is(node, name) do
    call(node, :erlang, :whereis, [name])
  end

  def cookie(node) do
    call(node, :erlang, :get_cookie)
  end

  def monotonic_time(node) do
    call(node, :erlang, :monotonic_time)
  end

  def sample_schedulers(node) do
    call(node, :scheduler, :sample)
  end

  def lookup(node, name) when is_atom(name) do
    lookup(node, where_is(node, name))
  end

  def lookup(node, pid) when is_pid(pid) do
    Map.merge(process_info_extended(node, pid), %{type: :process})
  end

  def lookup(node, port) when is_port(port) do
    Map.merge(port_info_extended(node, port), %{type: :port})
  end

  def lookup(_node, nil) do
    nil
  end

  # Processes

  def processes(node) do
    call(node, :erlang, :processes)
  end

  def processes_extended(node) do
    for pid <- processes(node), do: process_info_extended(node, pid)
  end

  def process_info(node, pid) do
    call(node, :erlang, :process_info, [pid])
  end

  def process_info(node, pid, key) do
    call(node, :erlang, :process_info, [pid, key])
  end

  def process_info_extended(node, pid) do
    with {:memory, memory} <- process_info(node, pid, :memory),
         {:monitors, monitors} <- process_info(node, pid, :monitors),
         {:monitored_by, monitored_by} <- process_info(node, pid, :monitored_by),
         info <- process_info(node, pid) do
      info
      |> Enum.into(%{})
      |> Map.merge(%{
        pid: pid,
        memory: memory,
        monitors: monitors,
        monitored_by: monitored_by
      })
    else
      _ -> %{pid: pid, links: [], monitors: [], monitored_by: []}
    end
  end

  # Ports

  def ports(node) do
    call(node, :erlang, :ports)
  end

  def ports_extended(node) do
    for port <- ports(node), do: port_info_extended(node, port)
  end

  def port_info(node, port) do
    call(node, :erlang, :port_info, [port])
  end

  def port_info(node, port, key) do
    call(node, :erlang, :port_info, [port, key])
  end

  def port_info_extended(node, port) do
    with {:memory, memory} <- port_info(node, port, :memory),
         {:queue_size, queue_size} <- port_info(node, port, :queue_size),
         {:parallelism, parallelism} <- port_info(node, port, :parallelism),
         {:locking, locking} <- port_info(node, port, :locking),
         {:monitors, monitors} <- port_info(node, port, :monitors),
         {:monitored_by, monitored_by} <- port_info(node, port, :monitored_by),
         info <- port_info(node, port) do
      info
      |> Enum.into(%{})
      |> Map.merge(%{
        id: port,
        slot: info[:id],
        memory: memory,
        queue_size: queue_size,
        parallelism: parallelism,
        locking: locking,
        monitors: monitors,
        monitored_by: monitored_by
      })
    else
      :undefined -> %{id: port, links: [], monitors: [], monitored_by: []}
    end
  end

  # Tables

  @doc """
  Returns ETS tables for a given node.

  Table memory is returned in bytes, converted from the number of words
  reported by ETS.
""" def ets_tables(node) do word_size = system_info(node, :wordsize) with tables <- call(node, :ets, :all) do for id <- tables do data = node |> call(:ets, :info, [id]) |> Enum.into(%{}) owner_name = case process_info(node, data.owner, :registered_name) do {:registered_name, name} when is_atom(name) -> name _ -> nil end Map.merge(data, %{ source: "ets", memory: data.memory * word_size, owner_name: owner_name }) end end end # Applications def application(node, name) do call(node, :application, :get_all_key, [name]) end def application_by_pid(node, pid) do call(node, :application, :get_application, [pid]) end # Allocators @doc """ Returns aggregated allocator data for the given node. """ def allocators(node) do alloc_names = system_info(node, :alloc_util_allocators) allocs = for alloc <- alloc_names, into: %{} do {alloc, allocator(node, alloc)} end total = Enum.reduce( allocs, %{block_size: 0, carrier_size: 0}, fn {_, alloc}, acc -> %{ block_size: acc.block_size + alloc.block_size, carrier_size: acc.carrier_size + alloc.carrier_size } end ) Map.merge(allocs, %{"Total" => total}) end @doc """ Returns aggregated data for the allocator on the given node. """ def allocator(node, alloc) do data = call(node, :erlang, :alloc_sizes, [alloc]) Enum.reduce( data, %{block_size: 0, carrier_size: 0}, fn {:instance, _, values}, acc -> with [ {:blocks_size, mbcs_block_size, _, _}, {:carriers_size, mbcs_carrier_size, _, _} ] <- values[:mbcs], [ {:blocks_size, sbcs_block_size, _, _}, {:carriers_size, sbcs_carrier_size, _, _} ] <- values[:sbcs] do %{ block_size: acc.block_size + mbcs_block_size + sbcs_block_size, carrier_size: acc.carrier_size + mbcs_carrier_size + sbcs_carrier_size } else _ -> acc end end ) end # Nodes def visible_nodes do case :net_adm.names() do {:ok, visible} -> visible {:error, _} -> [] end end def connected_nodes, do: [Node.self() | Node.list()] def self, do: Node.self() # Utility defp call(node, module, func, args \\ []) do :rpc.call(node, module, func, args) end end
lib/toby/data/node.ex
0.706697
0.500366
node.ex
starcoder
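# A usage sketch for the module above, run against the local node so no
# distribution setup is needed; every call goes through :rpc.call/4.
node = Toby.Data.Node.self()

# Plain passthroughs to :erlang.system_info/1 and :erlang.memory/0.
_otp_release = Toby.Data.Node.system_info(node, :otp_release)
_total_bytes = Toby.Data.Node.memory(node)[:total]

# lookup/2 resolves a registered name to an extended info map.
%{type: :process} = Toby.Data.Node.lookup(node, :application_controller)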
defmodule S3DirectUpload do @moduledoc """ Pre-signed S3 upload helper for client-side multipart POSTs. See: [Browser-Based Upload using HTTP POST (Using AWS Signature Version 4)](http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html) [Task 3: Calculate the Signature for AWS Signature Version 4](http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html) This module expects three application configuration settings for the AWS access and secret keys and the S3 bucket name. You may also supply an AWS region (the default if you do not is `us-east-1`). Here is an example configuration that reads these from environment variables. Add your own configuration to `config.exs`. ``` config :s3_direct_upload, aws_access_key: System.get_env("AWS_ACCESS_KEY_ID"), aws_secret_key: System.get_env("AWS_SECRET_ACCESS_KEY"), aws_s3_bucket: System.get_env("AWS_S3_BUCKET"), aws_region: System.get_env("AWS_REGION") ``` """ @doc """ The `S3DirectUpload` struct represents the data necessary to generate an S3 pre-signed upload object. The required fields are: - `file_name` the name of the file being uploaded - `mimetype` the mimetype of the file being uploaded - `path` the path where the file will be uploaded in the bucket Fields that can be over-ridden are: - `acl` defaults to `public-read` """ defstruct file_name: nil, mimetype: nil, path: nil, acl: "public-read" @date_util Application.get_env(:s3_direct_upload, :date_util, S3DirectUpload.DateUtil) @doc """ Returns a map with `url` and `credentials` keys. - `url` - the form action URL - `credentials` - name/value pairs for hidden input fields ## Examples iex> %S3DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file"} ...> |> S3DirectUpload.presigned ...> |> Map.get(:url) "https://s3-bucket.s3.amazonaws.com" iex> %S3DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file"} ...> |> S3DirectUpload.presigned ...> |> Map.get(:credentials) |> Map.get(:"x-amz-credential") "123abc/20170101/us-east-1/s3/aws4_request" iex> %S3DirectUpload{file_name: "image.jpg", mimetype: "image/jpeg", path: "path/to/file"} ...> |> S3DirectUpload.presigned ...> |> Map.get(:credentials) |> Map.get(:key) "path/to/file/image.jpg" """ def presigned(%S3DirectUpload{} = upload) do %{ url: "https://#{bucket()}.s3.amazonaws.com", credentials: %{ policy: policy(upload), "x-amz-algorithm": "AWS4-HMAC-SHA256", "x-amz-credential": credential(), "x-amz-date": @date_util.today_datetime(), "x-amz-signature": signature(upload), acl: upload.acl, key: file_path(upload) } } end @doc """ Returns a json object with `url` and `credentials` properties. - `url` - the form action URL - `credentials` - name/value pairs for hidden input fields """ def presigned_json(%S3DirectUpload{} = upload) do presigned(upload) |> Poison.encode! end defp signature(upload) do signing_key() |> hmac(policy(upload)) |> Base.encode16(case: :lower) end defp signing_key do "AWS4#{secret_key()}" |> hmac(@date_util.today_date()) |> hmac(region()) |> hmac("s3") |> hmac("aws4_request") end defp policy(upload) do %{ expiration: @date_util.expiration_datetime, conditions: conditions(upload) } |> Poison.encode! 
|> Base.encode64 end defp conditions(upload) do [ %{"bucket" => bucket()}, %{"acl" => upload.acl}, %{"x-amz-algorithm": "AWS4-HMAC-SHA256"}, %{"x-amz-credential": credential()}, %{"x-amz-date": @date_util.today_datetime()}, ["starts-with", "$Content-Type", upload.mimetype], ["starts-with", "$key", upload.path] ] end defp credential() do "#{access_key()}/#{@date_util.today_date()}/#{region()}/s3/aws4_request" end defp file_path(upload) do "#{upload.path}/#{upload.file_name}" end defp hmac(key, data) do :crypto.hmac(:sha256, key, data) end defp access_key, do: Application.get_env(:s3_direct_upload, :aws_access_key) defp secret_key, do: Application.get_env(:s3_direct_upload, :aws_secret_key) defp bucket, do: Application.get_env(:s3_direct_upload, :aws_s3_bucket) defp region, do: Application.get_env(:s3_direct_upload, :aws_region) || "us-east-1" end
lib/s3_direct_upload.ex
0.885593
0.710101
s3_direct_upload.ex
starcoder
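# A hedged sketch of serving the presigned payload from a Phoenix controller.
# The controller, route params, and "uploads" path are hypothetical; only
# S3DirectUpload.presigned_json/1 comes from the module above.
defmodule MyAppWeb.UploadController do
  use MyAppWeb, :controller

  def create(conn, %{"file_name" => file_name, "mimetype" => mimetype}) do
    json =
      %S3DirectUpload{file_name: file_name, mimetype: mimetype, path: "uploads"}
      |> S3DirectUpload.presigned_json()

    # The client uses the returned url and credentials as the form action
    # and hidden fields of a multipart POST straight to S3.
    conn
    |> put_resp_content_type("application/json")
    |> send_resp(200, json)
  end
end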
defmodule Niffler.Library do
  @moduledoc """
  `Niffler.Library` lets you wrap dynamically loaded libraries and create
  foreign function interfaces (FFIs) for them. This usually requires:

  * Shared headers, such as type definitions
  * Some init code involving `dlopen()`
  * Static variables/state that is part of the library
  * Potential deinit code involving `dlclose()`

  `Niffler.Library` lets you produce modules at runtime that do all these
  things and expose access to the C functions of a dynamic library.

  ```
  defmodule Gmp do
    use Niffler.Library, thread_safe: false

    @impl true
    def header() do
      \"""
      // library handle
      void *gmp;

      // types
      typedef struct {
        int _mp_alloc;
        int _mp_size;
        void *_mp_d;
      } __mpz_struct;
      typedef __mpz_struct mpz_t[1];
      void (*mpz_init)(mpz_t);
      // ...
      \"""
    end

    @impl true
    def on_load() do
      \"""
      // loading the library with dlopen()
      gmp = dlopen("libgmp.\#{library_suffix()}", RTLD_LAZY);
      if (!gmp) { return "could not load libgmp"; }

      // loading symbols:
      dlerror();
      if (!(mpz_init = dlsym(gmp, "__gmpz_init"))) { return dlerror(); }
      // other symbols ...
      \"""
    end

    @impl true
    def on_destroy() do
      \"""
      if (!gmp) { return; }
      // unloading
      dlclose(gmp);
      \"""
    end

    # defining one or more operations here...
    defnif :mul, [a: :int, b: :int], ret: :int do
      \"""
      mpz_set_si(ma, $a);
      mpz_set_si(mb, $b);
      mpz_mul(mc, ma, mb);
      $ret = mpz_get_si(mc);
      \"""
    end
  end
  ```

  Once defined, the module's functions can be used:

  ```
  {:ok, [result]} = Gmp.mul(4, 5)
  ```
  """

  @doc false
  defmacro __using__(_opts) do
    quote do
      @module __MODULE__
      @nifs :niffler_nifs
      @on_load :pre_compile
      @behaviour Niffler.Library

      def pre_compile() do
        program = Niffler.Library.compile(@module, header(), on_load())
        :persistent_term.put({@module, :niffler_program}, program)
        :ok
      end

      import Niffler.Library
    end
  end

  @doc """
  Return a C fragment of a common header for type definitions, static
  variables and other necessary C code.

  Example:

  ```
  @impl true
  def header() do
    \"""
    // library handle
    void *gmp;
    \"""
  end
  ```
  """
  @callback header() :: binary

  @doc """
  Return a C fragment that is called on the first call to the module.

  Typically this fragment would contain a call to `dlopen()` when loading a
  dynamic library. This C fragment should return a char* (a common string in
  C) when any error has occurred.

  Example:

  ```
  @impl true
  def on_load() do
    \"""
    // loading the library with dlopen()
    gmp = dlopen("libgmp.\#{library_suffix()}", RTLD_LAZY);
    if (!gmp) { return "could not load libgmp"; }
    \"""
  end
  ```
  """
  @callback on_load() :: binary

  @doc """
  Return a C fragment that is called when the module is unloaded.

  Typically this fragment would contain a call to `dlclose()` for a dynamic
  library.

  Example:

  ```
  @impl true
  def on_destroy() do
    \"""
    if (!gmp) { return; }
    // unloading
    dlclose(gmp);
    \"""
  end
  ```
  """
  @callback on_destroy() :: binary

  @spec defnif(atom(), keyword, keyword, [{:do, binary()}]) ::
          {:__block__, [], [{any, any, any}, ...]}
  @doc """
  Defines a new nif function in the current module.

  Same as `Niffler.defnif/4` but with access to the current module context.
""" defmacro defnif(name, inputs, outputs, do: source) do keys = Keyword.keys(inputs) |> Enum.map(fn n -> Macro.var(n, __MODULE__) end) key = {name, length(inputs)} quote do nifs = Module.get_attribute(@module, @nifs, []) Module.put_attribute(@module, unquote(name), length(nifs)) @idx length(nifs) Module.put_attribute( @module, @nifs, nifs ++ [{unquote(key), unquote(inputs), unquote(outputs), unquote(source)}] ) def unquote(name)(unquote_splicing(keys)) do :persistent_term.get({@module, :niffler_program}) |> Niffler.run(@idx, [unquote_splicing(keys)]) end end end @doc """ Returns the current platforms default library suffix: * `dll` on windows * `dylib` on mac * `so` on linux Useful for dlopen() code to load the correct library: ``` @impl true def on_load() do \""" // loading the library with dlopen() gmp = dlopen("libgmp.\#{library_suffix()}", RTLD_LAZY); if (!gmp) { return "could not load libgmp"; } \""" end ``` """ def library_suffix() do case :os.type() do {:unix, :darwin} -> "dylib" {:unix, _} -> "so" {:win32, _} -> "dll" end end @nifs :niffler_nifs @doc false def compile(module, header, on_load) do funs = Module.get_attribute(module, @nifs, []) cases = Enum.with_index(funs) |> Enum.map(fn {{_, inputs, outputs, source}, idx} -> """ case #{idx}: { #{Niffler.type_defs(inputs, outputs)} #{source} #{Niffler.type_undefs(inputs, outputs)} break; } """ end) |> Enum.join("\n ") params = Enum.map(funs, fn {_key, inputs, outputs, _source} -> {inputs, outputs} end) Niffler.compile!( """ #{header} DO_RUN static int niffler_initialized = 0; if (!niffler_initialized) { #{on_load} niffler_initialized = 1; } switch (niffler_env->method) { #{cases} default: return "failed fo fetch requested method"; } END_RUN """, params ) end end
lib/niffler/library.ex
0.905982
0.561275
library.ex
starcoder
defmodule StreamStats do
  @moduledoc """
  Enables concurrent calculation of count, mean and standard deviation.

  New values can be aggregated into an existing stat tuple and two stat
  tuples can be merged into one.

  Inspired by the following article by John D. Cook:
  https://www.johndcook.com/blog/skewness_kurtosis/
  """

  @type t() :: {count(), mean(), m2()}
  @type count() :: non_neg_integer()
  @type mean() :: number()
  @type m2() :: number()

  @doc """
  Adds a value to the aggregated stats tuple.

  Implemented as Welford's online algorithm.
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_Online_algorithm
  """
  @spec push_value(number(), nil | t()) :: t()
  def push_value(value, nil), do: {1, value, 0.0}

  def push_value(value, {prev_count, prev_mean, prev_m2}) do
    count = prev_count + 1
    prev_delta = value - prev_mean
    mean = prev_mean + prev_delta / count
    new_delta = value - mean
    m2 = prev_m2 + prev_delta * new_delta
    {count, mean, m2}
  end

  @doc """
  Merges two stats tuples.

  Implemented as Chan's parallel algorithm.
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
  """
  @spec combine_stats(nil | t(), t()) :: t()
  def combine_stats({_, _, _} = stats_a, nil), do: stats_a
  def combine_stats(nil, {_, _, _} = stats_b), do: stats_b

  def combine_stats({count_a, mean_a, m2_a}, {count_b, mean_b, m2_b}) do
    count = count_a + count_b
    delta = mean_b - mean_a
    # I think this way of calculating the mean is more stable than the obvious way
    mean = mean_a + delta * count_b / count
    m2 = m2_a + m2_b + delta * delta * count_a * count_b / count
    {count, mean, m2}
  end

  @doc """
  Aggregates the values in a list to a stats tuple.
  """
  @spec reduce(Enum.t(), t() | nil) :: t() | nil
  def reduce(values, stats \\ nil) do
    Enum.reduce(values, stats, &combine/2)
  end

  @doc """
  Aggregates a number or stats tuple into a stats tuple.

  First argument can be a number or stats tuple.
  """
  @spec combine(number() | t(), t() | nil) :: t()
  def combine({_, _, _} = stats_a, stats_b), do: combine_stats(stats_a, stats_b)
  def combine(value, stats), do: push_value(value, stats)

  @doc """
  Calculates the variance using a stats tuple.
  """
  @spec variance(t()) :: number()
  def variance(stats) do
    {count, _mean, m2} = stats

    if count <= 1 do
      0.0
    else
      m2 / (count - 1.0)
    end
  end

  @doc """
  Calculates the standard deviation using a stats tuple.
  """
  @spec standard_deviation(t()) :: number()
  def standard_deviation(stats) do
    :math.sqrt(variance(stats))
  end
end
lib/stream_stats.ex
0.78572
0.819641
stream_stats.ex
starcoder
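# A small check of the merge property described above: folding a list in one
# pass and merging the stats of its two halves should agree on {count, mean, m2}.
whole = StreamStats.reduce([1, 2, 3, 4, 5, 6])

merged =
  StreamStats.combine_stats(
    StreamStats.reduce([1, 2, 3]),
    StreamStats.reduce([4, 5, 6])
  )

{6, 3.5, _m2} = whole
# Allow for floating-point rounding between the two computation orders.
true = abs(StreamStats.variance(whole) - StreamStats.variance(merged)) < 1.0e-9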
defmodule Loppers do
  alias Loppers.{Walk, Validate, Match, List}

  @type error :: {:not_allowed, ast :: term}
  @type function_ref ::
          {module :: atom, :__all__}
          | {module :: atom, :__submodules_all__}
          | {module :: atom, function :: atom}
          | (function :: atom)
  @type validate_option ::
          {:whitelist, [function_ref]}
          | {:blacklist, [function_ref]}

  @moduledoc ~S"""
  A code validator for the Elixir AST.

  It can operate on both white- and blacklists.

  ## Basic example:

      iex> quoted = quote do "hello" |> String.upcase |> String.pad_leading(4, "0") end
      iex> whitelist = Loppers.special_forms ++ [{Kernel, :|>}, {String, :upcase}, {String, :pad_leading}]
      iex> Loppers.validate(quoted, whitelist: whitelist)
      :ok
  """

  @doc ~S"""
  Validates a syntax tree against the given whitelist.

  Use `Code.string_to_quoted/2` to get the syntax tree out of source code.

  When no whitelist is defined, it is assumed that all function calls are ok,
  except when they exist in the blacklist.

  Supplying both a white- and a blacklist can be useful, for example when you
  want to allow all functions of a module, except a few that you don't want:

      iex> whitelist = Loppers.special_forms ++ [{Enum, :__all__}]
      iex> blacklist = [{Enum, :map_reduce}]
      iex> quoted = quote do Enum.map_reduce([], nil, &({&1, nil})) end
      iex> Loppers.validate(quoted, [whitelist: whitelist, blacklist: blacklist])
      {:error, [
        not_allowed: {{:., [parent_modules: []], [
          {:__aliases__, [parent_modules: [], alias: false], [:Enum]},
          :map_reduce
        ]}, [parent_modules: []], [
          [],
          nil,
          {:&, [parent_modules: []], [{{:&, [parent_modules: []], [1]}, nil}]}
        ]}
      ]}

  ## Options

  * `:whitelist` - a list of `function_ref`s that are allowed in the code
  * `:blacklist` - a list of `function_ref`s that are forbidden in the code
  """
  @spec validate(quoted :: term, opts :: [validate_option]) :: :ok | {:error, [error]}
  def validate(quoted, opts) do
    {quoted, _acc} = Walk.walk(quoted, {%{}, [{Kernel, []}]}, &Walk.gen_meta/2)
    {quoted, _acc} = Walk.walk(quoted, [], &Walk.module_functions/2)
    {quoted, _acc} = Walk.recurse(quoted, %{}, &Walk.track_parent_modules/2)

    whitelist = Keyword.get(opts, :whitelist, nil)
    blacklist = Keyword.get(opts, :blacklist, [])

    acc =
      Validate.validate(quoted, [], fn {_, meta, _} = ast, acc ->
        if Match.is_fn?(ast) do
          if (Keyword.get(meta, :allow, false) or whitelist == nil or
                List.in_list?(ast, whitelist)) and not List.in_list?(ast, blacklist) do
            acc
          else
            [{:not_allowed, ast} | acc]
          end
        else
          acc
        end
      end)

    case acc do
      [] -> :ok
      errors -> {:error, errors}
    end
  end

  @doc """
  Convenience list of commonly used operators
  """
  def operators do
    [
      {Kernel, :+},
      {Kernel, :*},
      {Kernel, :/},
      {Kernel, :-},
      {Kernel, :<<>>},
      {Kernel, :<>},
      :"->",
      :<-
    ]
  end

  @doc """
  All functions and macros needed to define modules, functions and set attributes
  """
  def module_support do
    [
      {Kernel, :@},
      {Kernel, :defmodule},
      {Kernel, :def},
      :when
    ]
  end

  @doc """
  A list of all macros contained in `Kernel.SpecialForms`.

  Without those it's going to be hard to write any Elixir code.
  """
  def special_forms do
    special_forms =
      [:functions, :macros]
      |> Enum.flat_map(&Kernel.SpecialForms.__info__/1)
      |> Keyword.keys()

    special_forms ++
      [
        :{},
        :doc,
        :->,
        {Kernel, :is_binary}
      ]
  end
end
lib/loppers.ex
0.645232
0.505066
loppers.ex
starcoder
defmodule Spat.Geometry.Box do use Bitwise @doc """ Obtain the indexes of a box within the subdivided bounds. iex> Spat.Geometry.Box.index({ 0 }, { 1 }, Spat.Bounds.new({ 10 }), 1) [[0]] iex> Spat.Geometry.Box.index({ 5 }, { 6 }, Spat.Bounds.new({ 10 }), 1) [[0], [1]] iex> Spat.Geometry.Box.index({ 10 }, { 11 }, Spat.Bounds.new({ 10 }), 1) [[1]] iex> Spat.Geometry.Box.index({ -1 }, { -0.5 }, Spat.Bounds.new({ 10 }), 1) [] iex> Spat.Geometry.Box.index({ 2.5 }, { 3 }, Spat.Bounds.new({ 10 }), 1) [[0]] iex> Spat.Geometry.Box.index({ 2.5 }, { 3 }, Spat.Bounds.new({ 10 }), 2) [[0, 0], [0, 1]] iex> Spat.Geometry.Box.index({ 5, 5 }, { 6, 6 }, Spat.Bounds.new({ 10, 10 }), 2) [[0, 3], [1, 2], [2, 1], [3, 0]] iex> Spat.Geometry.Box.index({ 2.5, 5 }, { 3, 6 }, Spat.Bounds.new({ 10, 10 }), 2) [[0, 2], [0, 3], [2, 0], [2, 1]] iex> Spat.Geometry.Box.index({ 0, 0 }, { 1, 1 }, Spat.Bounds.new({ 10, 10 }), 2) [[0, 0]] iex> Spat.Geometry.Box.index({ 12.5, 5 }, { 13.5, 6 }, Spat.Bounds.new({ 10, 0 }, { 20, 10 }), 2) [[0, 2], [0, 3], [2, 0], [2, 1]] iex> Spat.Geometry.Box.index({ 10, 0 }, { 11, 1 }, Spat.Bounds.new({ 10, 0 }, { 20, 10 }), 2) [[0, 0]] iex> Spat.Geometry.Box.index({ 0 }, { 1 }, Spat.Bounds.new({ -10 }, { 10 }), 1) [[0], [1]] iex> Spat.Geometry.Box.index({ -5 }, { -4 }, Spat.Bounds.new({ -10 }, { 10 }), 2) [[0, 0], [0, 1]] iex> Spat.Geometry.Box.index({ 11, 1 }, { 50, 50 }, Spat.Bounds.new({ 10, 10 }), 2) [] iex> Spat.Geometry.Box.index({ -25, -25 }, { 50, 50 }, Spat.Bounds.new({ 10, 10 }), 2) [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1], [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3]] """ @spec index(Spat.Coord.t, Spat.Coord.t, Spat.Bounds.t, pos_integer) :: [Spat.grid_index] def index(min, max, bounds, subdivisions), do: Spat.Geometry.index(&intersect(min, max, &1), bounds, subdivisions) @doc """ Check whether a box intersects with the given bounds (equal to or contained inside). """ @spec intersect(Spat.Coord.t, Spat.Coord.t, Spat.Bounds.t) :: boolean def intersect(box_min, box_max, %{ min: min, max: max, dimension: dimension }), do: intersect(box_min, box_max, min, max, dimension) @doc false @spec intersect(Spat.Coord.t, Spat.Coord.t, Spat.Coord.t, Spat.Coord.t, non_neg_integer) :: boolean defp intersect(_, _, _, _, 0), do: true defp intersect(box_min, box_max, min, max, dimension) do axis = dimension - 1 box_start = Spat.Coord.get(box_min, axis) box_stop = Spat.Coord.get(box_max, axis) start = Spat.Coord.get(min, axis) stop = Spat.Coord.get(max, axis) if (box_start <= stop) && (box_stop >= start) do intersect(box_min, box_max, min, max, axis) else false end end end
lib/spat/geometry/box.ex
0.842313
0.800809
box.ex
starcoder
defmodule Plug.Crypto do @moduledoc """ Namespace and module for crypto-related functionality. For low-level functionality, see `Plug.Crypto.KeyGenerator`, `Plug.Crypto.MessageEncryptor`, and `Plug.Crypto.MessageVerifier`. """ use Bitwise alias Plug.Crypto.{KeyGenerator, MessageVerifier, MessageEncryptor} @doc """ Prunes the stacktrace to remove any argument trace. This is useful when working with functions that receives secrets and we want to make sure those secrets do not leak on error messages. """ @spec prune_args_from_stacktrace(Exception.stacktrace()) :: Exception.stacktrace() def prune_args_from_stacktrace(stacktrace) def prune_args_from_stacktrace([{mod, fun, [_ | _] = args, info} | rest]), do: [{mod, fun, length(args), info} | rest] def prune_args_from_stacktrace(stacktrace) when is_list(stacktrace), do: stacktrace @doc false @deprecated "Use non_executable_binary_to_term/2" def safe_binary_to_term(binary, opts \\ []) do non_executable_binary_to_term(binary, opts) end @doc """ A restricted version of `:erlang.binary_to_term/2` that forbids *executable* terms, such as anonymous functions. The `opts` are given to the underlying `:erlang.binary_to_term/2` call, with an empty list as a default. By default this function does not restrict atoms, as an atom interned in one node may not yet have been interned on another (except for releases, which preload all code). If you want to avoid atoms from being created, then you can pass `[:safe]` as options, as that will also enable the safety mechanisms from `:erlang.binary_to_term/2` itself. """ @spec non_executable_binary_to_term(binary(), [atom()]) :: term() def non_executable_binary_to_term(binary, opts \\ []) when is_binary(binary) do term = :erlang.binary_to_term(binary, opts) non_executable_terms(term) term end defp non_executable_terms(list) when is_list(list) do non_executable_list(list) end defp non_executable_terms(tuple) when is_tuple(tuple) do non_executable_tuple(tuple, tuple_size(tuple)) end defp non_executable_terms(map) when is_map(map) do folder = fn key, value, acc -> non_executable_terms(key) non_executable_terms(value) acc end :maps.fold(folder, map, map) end defp non_executable_terms(other) when is_atom(other) or is_number(other) or is_bitstring(other) or is_pid(other) or is_reference(other) do other end defp non_executable_terms(other) do raise ArgumentError, "cannot deserialize #{inspect(other)}, the term is not safe for deserialization" end defp non_executable_list([]), do: :ok defp non_executable_list([h | t]) when is_list(t) do non_executable_terms(h) non_executable_list(t) end defp non_executable_list([h | t]) do non_executable_terms(h) non_executable_terms(t) end defp non_executable_tuple(_tuple, 0), do: :ok defp non_executable_tuple(tuple, n) do non_executable_terms(:erlang.element(n, tuple)) non_executable_tuple(tuple, n - 1) end @doc """ Masks the token on the left with the token on the right. Both tokens are required to have the same size. """ @spec mask(binary(), binary()) :: binary() def mask(left, right) do :crypto.exor(left, right) end @doc """ Compares the two binaries (one being masked) in constant-time to avoid timing attacks. It is assumed the right token is masked according to the given mask. 
""" @spec masked_compare(binary(), binary(), binary()) :: boolean() def masked_compare(left, right, mask) when is_binary(left) and is_binary(right) and is_binary(mask) do byte_size(left) == byte_size(right) and masked_compare(left, right, mask, 0) end defp masked_compare(<<x, left::binary>>, <<y, right::binary>>, <<z, mask::binary>>, acc) do xorred = bxor(x, bxor(y, z)) masked_compare(left, right, mask, acc ||| xorred) end defp masked_compare(<<>>, <<>>, <<>>, acc) do acc === 0 end @doc """ Compares the two binaries in constant-time to avoid timing attacks. See: http://codahale.com/a-lesson-in-timing-attacks/ """ @spec secure_compare(binary(), binary()) :: boolean() def secure_compare(left, right) when is_binary(left) and is_binary(right) do byte_size(left) == byte_size(right) and secure_compare(left, right, 0) end defp secure_compare(<<x, left::binary>>, <<y, right::binary>>, acc) do xorred = bxor(x, y) secure_compare(left, right, acc ||| xorred) end defp secure_compare(<<>>, <<>>, acc) do acc === 0 end @doc """ Encodes and signs data into a token you can send to clients. Plug.Crypto.sign(conn.secret_key_base, "user-secret", {:elixir, :terms}) A key will be derived from the secret key base and the given user secret. The key will also be cached for performance reasons on future calls. ## Options * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 1000 * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 32 * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to `:sha256` * `:signed_at` - set the timestamp of the token in seconds. Defaults to `System.system_time(:second)` * `:max_age` - the default maximum age of the token. Defaults to `86400` seconds (1 day) and it may be overridden on `verify/4`. """ def sign(key_base, salt, data, opts \\ []) when is_binary(key_base) and is_binary(salt) do data |> encode(opts) |> MessageVerifier.sign(get_secret(key_base, salt, opts)) end @doc """ Encodes, encrypts, and signs data into a token you can send to clients. Plug.Crypto.encrypt(conn.secret_key_base, "user-secret", {:elixir, :terms}) A key will be derived from the secret key base and the given user secret. The key will also be cached for performance reasons on future calls. ## Options * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 1000 * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 32 * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to `:sha256` * `:signed_at` - set the timestamp of the token in seconds. Defaults to `System.system_time(:second)` * `:max_age` - the default maximum age of the token. Defaults to `86400` seconds (1 day) and it may be overridden on `decrypt/4`. 
""" def encrypt(key_base, secret, data, opts \\ []) when is_binary(key_base) and is_binary(secret) do encrypt(key_base, secret, nil, data, opts) end @doc false def encrypt(key_base, secret, salt, data, opts) do data |> encode(opts) |> MessageEncryptor.encrypt( get_secret(key_base, secret, opts), get_secret(key_base, salt, opts) ) end defp encode(data, opts) do signed_at_seconds = Keyword.get(opts, :signed_at) signed_at_ms = if signed_at_seconds, do: trunc(signed_at_seconds * 1000), else: now_ms() max_age_in_seconds = Keyword.get(opts, :max_age, 86400) :erlang.term_to_binary({data, signed_at_ms, max_age_in_seconds}) end @doc """ Decodes the original data from the token and verifies its integrity. ## Examples In this scenario we will create a token, sign it, then provide it to a client application. The client will then use this token to authenticate requests for resources from the server. See `Plug.Crypto` summary for more info about creating tokens. iex> user_id = 99 iex> secret = "<KEY>" iex> user_salt = "<PASSWORD>" iex> token = Plug.Crypto.sign(secret, user_salt, user_id) The mechanism for passing the token to the client is typically through a cookie, a JSON response body, or HTTP header. For now, assume the client has received a token it can use to validate requests for protected resources. When the server receives a request, it can use `verify/4` to determine if it should provide the requested resources to the client: iex> Plug.Crypto.verify(secret, user_salt, token, max_age: 86400) {:ok, 99} In this example, we know the client sent a valid token because `verify/4` returned a tuple of type `{:ok, user_id}`. The server can now proceed with the request. However, if the client had sent an expired or otherwise invalid token `verify/4` would have returned an error instead: iex> Plug.Crypto.verify(secret, user_salt, expired, max_age: 86400) {:error, :expired} iex> Plug.Crypto.verify(secret, user_salt, invalid, max_age: 86400) {:error, :invalid} ## Options * `:max_age` - verifies the token only if it has been generated "max age" ago in seconds. Defaults to the max age signed in the token (86400) * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 1000 * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 32 * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to `:sha256` """ def verify(key_base, salt, token, opts \\ []) def verify(key_base, salt, token, opts) when is_binary(key_base) and is_binary(salt) and is_binary(token) do secret = get_secret(key_base, salt, opts) case MessageVerifier.verify(token, secret) do {:ok, message} -> decode(message, opts) :error -> {:error, :invalid} end end def verify(_key_base, salt, nil, _opts) when is_binary(salt) do {:error, :missing} end @doc """ Decrypts the original data from the token and verifies its integrity. ## Options * `:max_age` - verifies the token only if it has been generated "max age" ago in seconds. A reasonable value is 1 day (86400 seconds) * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 1000 * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. Defaults to 32 * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` when generating the encryption and signing keys. 
Defaults to `:sha256` """ def decrypt(key_base, secret, token, opts \\ []) when is_binary(key_base) and is_binary(secret) and is_list(opts) do decrypt(key_base, secret, nil, token, opts) end @doc false def decrypt(key_base, secret, salt, token, opts) when is_binary(token) do secret = get_secret(key_base, secret, opts) salt = get_secret(key_base, salt, opts) case MessageEncryptor.decrypt(token, secret, salt) do {:ok, message} -> decode(message, opts) :error -> {:error, :invalid} end end def decrypt(_key_base, _secret, _salt, nil, _opts) do {:error, :missing} end defp decode(message, opts) do {data, signed, max_age} = case non_executable_binary_to_term(message) do {data, signed, max_age} -> {data, signed, max_age} # For backwards compatibility with Plug.Crypto v1.1 {data, signed} -> {data, signed, 86400} # For backwards compatibility with Phoenix which had the original code %{data: data, signed: signed} -> {data, signed, 86400} end if expired?(signed, Keyword.get(opts, :max_age, max_age)) do {:error, :expired} else {:ok, data} end end ## Helpers # Gathers configuration and generates the key secrets and signing secrets. defp get_secret(_secret_key_base, nil, _opts) do "" end defp get_secret(secret_key_base, salt, opts) do iterations = Keyword.get(opts, :key_iterations, 1000) length = Keyword.get(opts, :key_length, 32) digest = Keyword.get(opts, :key_digest, :sha256) cache = Keyword.get(opts, :cache, Plug.Crypto.Keys) KeyGenerator.generate(secret_key_base, salt, iterations, length, digest, cache) end defp expired?(_signed, :infinity), do: false defp expired?(_signed, max_age_secs) when max_age_secs <= 0, do: true defp expired?(signed, max_age_secs), do: signed + trunc(max_age_secs * 1000) < now_ms() defp now_ms, do: System.system_time(:millisecond) end
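Tying the pieces above together, a minimal sign/verify round trip. This is a sketch only: the key base and salt are illustrative placeholders, and it assumes the `Plug.Crypto.Keys` cache used by `get_secret/3` is available, as it is inside a running Plug application.

```elixir
# Hypothetical values; any sufficiently long random binary works as a key base.
secret_key_base = :crypto.strong_rand_bytes(64)
salt = "user session"

token = Plug.Crypto.sign(secret_key_base, salt, %{user_id: 42})

# Succeeds while the token is within its max age:
{:ok, %{user_id: 42}} = Plug.Crypto.verify(secret_key_base, salt, token, max_age: 86_400)

# A tampered or foreign token fails verification:
{:error, :invalid} = Plug.Crypto.verify(secret_key_base, salt, "garbage")
```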
lib/plug/crypto.ex
0.910275
0.607867
crypto.ex
starcoder
defmodule Segment do
  @moduledoc """
  The Segment analytics-elixir library is a non-official third-party client for [Segment](https://segment.com). Since version `0.2.0` it supports
  batch delivery of events and retries for the API.

  ## Installation

  Add `segment` to your list of dependencies in mix.exs
  ```
  def deps do
    [
      {:segment, "~> 0.2.0"}
    ]
  end
  ```

  ## Documentation

  Documentation can be found at [https://hexdocs.pm/segment](https://hexdocs.pm/segment).

  ## Usage

  Start the Segment agent with your write_key from Segment for an HTTP API Server Source
  ```elixir
  Segment.start_link("YOUR_WRITE_KEY")
  ```

  There are then two ways to call the different methods on the API.
  The basic way is through `Segment.Analytics` functions, with either the full event struct or some helper methods (also allowing Context and Integrations to be set manually).
  This way will use the configured GenServer implementation, such as `Segment.Analytics.Batcher`, which will queue and batch events to Segment.

  The other way is to drop down lower and use `Segment.Http` `send` and `batch` directly. This will require first creating a `client` with `Segment.Http.client/1`/`Segment.Http.client/2`

  ### Track
  ```elixir
  Segment.Analytics.track(user_id, event, %{property1: "", property2: ""})
  ```
  or the full way using a struct with all the possible options for the track call
  ```elixir
  %Segment.Analytics.Track{userId: "sdsds",
                           event: "eventname",
                           properties: %{property1: "", property2: ""}}
  |> Segment.Analytics.track
  ```

  ### Identify
  ```elixir
  Segment.Analytics.identify(user_id, %{trait1: "", trait2: ""})
  ```
  or the full way using a struct with all the possible options for the identify call
  ```elixir
  %Segment.Analytics.Identify{userId: "sdsds",
                              traits: %{trait1: "", trait2: ""}}
  |> Segment.Analytics.identify
  ```

  ### Screen
  ```elixir
  Segment.Analytics.screen(user_id, name)
  ```
  or the full way using a struct with all the possible options for the screen call
  ```elixir
  %Segment.Analytics.Screen{userId: "sdsds",
                            name: "dssd"}
  |> Segment.Analytics.screen
  ```

  ### Alias
  ```elixir
  Segment.Analytics.alias(user_id, previous_id)
  ```
  or the full way using a struct with all the possible options for the alias call
  ```elixir
  %Segment.Analytics.Alias{userId: "sdsds",
                           previousId: "dssd"}
  |> Segment.Analytics.alias
  ```

  ### Group
  ```elixir
  Segment.Analytics.group(user_id, group_id)
  ```
  or the full way using a struct with all the possible options for the group call
  ```elixir
  %Segment.Analytics.Group{userId: "sdsds",
                           groupId: "dssd"}
  |> Segment.Analytics.group
  ```

  ### Page
  ```elixir
  Segment.Analytics.page(user_id, name)
  ```
  or the full way using a struct with all the possible options for the page call
  ```elixir
  %Segment.Analytics.Page{userId: "sdsds",
                          name: "dssd"}
  |> Segment.Analytics.page
  ```

  ### Using the Segment Context

  If you want to set the Context manually you should create a `Segment.Analytics.Context` struct with `Segment.Analytics.Context.new/1`
  ```elixir
  context = Segment.Analytics.Context.new(%{active: false})
  Segment.Analytics.track(user_id, event, %{property1: "", property2: ""}, context)
  ```

  ## Configuration

  The library has a number of configuration options you can use to overwrite default values and behaviours

  * `config :segment, :sender_impl` Allows selection of a sender implementation. At the moment this defaults to
  `Segment.Analytics.Batcher` which will send all events in batch.
  Change this value to `Segment.Analytics.Sender` to have all messages sent immediately (asynchronously)
  * `config :segment, :max_batch_size` The maximum batch size of messages that will be sent to Segment at one time. Default value is 100.
  * `config :segment, :batch_every_ms` The time (in ms) between every batch request. Default value is 2000 (2 seconds)
  * `config :segment, :retry_attempts` The number of times to retry sending against the Segment API. Default value is 3
  * `config :segment, :retry_expiry` The maximum time (in ms) spent retrying. Default value is 10000 (10 seconds)
  * `config :segment, :retry_start` The time (in ms) to start the first retry. Default value is 100
  * `config :segment, :send_to_http` If set to `false`, the library will override the Tesla Adapter implementation to only log Segment calls at the `debug` level but not make any actual API calls. This can be useful if you want to switch off Segment for test or dev. Default value is true
  * `config :segment, :tesla, :adapter` This config option allows for overriding the HTTP Adapter for Tesla (which the library defaults to Hackney). This can be useful if you prefer something else, or want to mock the adapter for testing.

  ## Usage in Phoenix

  This is how I add it to a Phoenix project (may not be your preferred way)

  1. Add the following to the deps section of your mix.exs: `{:segment, "~> 0.2.0"}`
  and then `mix deps.get`
  2. Add a config variable for your write_key (you may want to make this load from ENV) ie.
  ```elixir
  config :segment,
    write_key: "<KEY>"
  ```
  3. Start the Segment GenServer in the supervised children list. In `application.ex` add to the children list:
  ```elixir
  {Segment, Application.get_env(:segment, :write_key)}
  ```
  """

  @type segment_event ::
          Segment.Analytics.Track.t()
          | Segment.Analytics.Identify.t()
          | Segment.Analytics.Screen.t()
          | Segment.Analytics.Alias.t()
          | Segment.Analytics.Group.t()
          | Segment.Analytics.Page.t()

  @doc """
  Start the configured GenServer for handling Segment events with the Segment HTTP Source API Write Key.

  By default if nothing is configured it will start `Segment.Analytics.Batcher`
  """
  @spec start_link(String.t()) :: GenServer.on_start()
  def start_link(api_key) do
    Segment.Config.service().start_link(api_key)
  end

  @doc """
  Start the configured GenServer for handling Segment events with the Segment HTTP Source API Write Key and a custom Tesla Adapter.

  By default if nothing is configured it will start `Segment.Analytics.Batcher`
  """
  @spec start_link(String.t(), Segment.Http.adapter()) :: GenServer.on_start()
  def start_link(api_key, adapter) do
    Segment.Config.service().start_link(api_key, adapter)
  end

  @spec child_spec(map()) :: map()
  def child_spec(opts) do
    Segment.Config.service().child_spec(opts)
  end
end
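For reference, the options documented above can be combined in one config block; a hypothetical test configuration that keeps batching but never reaches the network might look like this (values are arbitrary examples):

```elixir
# config/test.exs
import Config

config :segment,
  sender_impl: Segment.Analytics.Batcher,
  max_batch_size: 10,
  batch_every_ms: 100,
  retry_attempts: 1,
  send_to_http: false
```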
lib/segment.ex
0.896622
0.956553
segment.ex
starcoder
defmodule Logi.BuiltIn.Sink.IoDevice do @moduledoc """ A built-in IO device sink. Behaviours: `Logi.SinkWriter`. This sink writes log messages to an IO device (e.g. standard output, file, etc). The default layout is `Logi.BuiltIn.Layout.Default.new`. ## Note This module is provided for debugging/testing purposes only. (e.g. Overload protection is missing) ## Examples The default IO device is `:standard_io`: ```elixir iex> require Logi iex> {:ok, _} = Logi.Channel.install_sink Logi.BuiltIn.Sink.IoDevice.new(:foo), :info iex> Logi.info "hello world" #OUTPUT# 2016-12-05 09:55:25.213 [info] nonode@nohost <0.142.0> nil:nil:28 [] hello world ``` Outputs to a file: ```elixir iex> {:ok, fd} = :file.open("foo.tmp", [:write]) iex> sink = Logi.BuiltIn.Sink.IoDevice.new(:foo, [io_device: fd]) iex> {:ok, _} = Logi.Channel.install_sink sink, :info iex> Logi.info "hello world" iex> :file.read_file "foo.tmp" {:ok, "2016-12-05 09:57:25.879 [info] nonode@nohost <0.142.0> nil:nil:32 [] hello world\\n"} ``` Customizes message layout: ```elixir iex> layout = Logi.BuiltIn.Layout.Fun.new fn(_, format, data) -> :io_lib.format("[may_layout] " <> format <> "\\n", data) end iex> sink = Logi.BuiltIn.Sink.IoDevice.new :foo, [layout: layout] iex> {:ok, _} = Logi.Channel.install_sink sink, :info iex> Logi.info "hello world" #OUTPUT# [may_layout] hello world ``` """ @behaviour Logi.SinkWriter @doc """ Creates a new sink instance. ## Default Values - `:io_device`: `:standard_io` - `:layout`: `Logi.BuiltIn.Layout.Default.new` """ @spec new(Logi.Sink.id, options) :: Logi.Sink.sink when options: [ {:io_device, :io.device} | {:layout, Logi.Layout.layout} ] def new(id, options \\ []) do :logi_builtin_sink_io_device.new id, options end @doc false def write(context, format, data, state) do :logi_builtin_sink_io_device.write context, format, data, state end @doc false def get_writee(state) do :logi_builtin_sink_io_device.get_writee state end end
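Combining the options shown in the doctests above, a file-backed sink with a custom layout could be installed like this (the path, sink id, and prefix are arbitrary examples; only constructors demonstrated in the moduledoc are used):

```elixir
{:ok, fd} = :file.open("app.log", [:write, :append])

layout = Logi.BuiltIn.Layout.Fun.new(fn _, format, data ->
  :io_lib.format("[app] " <> format <> "\n", data)
end)

sink = Logi.BuiltIn.Sink.IoDevice.new(:app_log, io_device: fd, layout: layout)
{:ok, _} = Logi.Channel.install_sink(sink, :info)
```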
lib/logi/builtin/sink/io_device.ex
0.795975
0.67874
io_device.ex
starcoder
defmodule OMG.State.Transaction.Recovered do @moduledoc """ Representation of a signed transaction, with addresses recovered from signatures (from `OMG.State.Transaction.Signed`) Intent is to allow concurrent processing of signatures outside of serial processing in `OMG.State`. `Transaction.Recovered` represents a transaction that can be sent to `OMG.State.exec/1` """ alias OMG.Crypto alias OMG.State.Transaction alias OMG.Utxo require Utxo @empty_signature <<0::size(520)>> @type recover_tx_error() :: :bad_signature_length | :duplicate_inputs | :malformed_transaction | :malformed_transaction_rlp | :signature_corrupt | :missing_signature defstruct [:signed_tx, :tx_hash, spenders: nil] @type t() :: %__MODULE__{ tx_hash: Transaction.tx_hash(), spenders: [Crypto.address_t()], signed_tx: Transaction.Signed.t() } @doc """ Transforms a RLP-encoded child chain transaction (binary) into a: - decoded - statelessly valid (mainly inputs logic) - recovered (i.e. signatures get recovered into spenders) transaction See docs/transaction_validation.md for more information about stateful and stateless validation. """ @spec recover_from(binary) :: {:ok, Transaction.Recovered.t()} | {:error, recover_tx_error()} def recover_from(encoded_signed_tx) do with {:ok, signed_tx} <- Transaction.Signed.decode(encoded_signed_tx), true <- valid?(signed_tx), do: recover_from_struct(signed_tx) end @doc """ Throwing version of `recover_from/1` """ @spec recover_from!(binary) :: Transaction.Recovered.t() def recover_from!(encoded_signed_tx) do {:ok, recovered} = Transaction.Recovered.recover_from(encoded_signed_tx) recovered end @doc """ Checks if input spenders and recovered transaction's spenders are the same and have the same order """ @spec all_spenders_authorized(t(), list()) :: :ok | {:error, :unauthorized_spent} def all_spenders_authorized(%__MODULE__{spenders: spenders}, inputs_spenders) do if spenders == inputs_spenders, do: :ok, else: {:error, :unauthorized_spent} end @spec recover_from_struct(Transaction.Signed.t()) :: {:ok, t()} | {:error, recover_tx_error()} defp recover_from_struct(%Transaction.Signed{} = signed_tx) do with {:ok, spenders} <- Transaction.Signed.get_spenders(signed_tx), do: {:ok, %__MODULE__{tx_hash: Transaction.raw_txhash(signed_tx), spenders: spenders, signed_tx: signed_tx}} end defp valid?(%Transaction.Signed{sigs: sigs} = tx) do inputs = Transaction.get_inputs(tx) with true <- no_duplicate_inputs?(inputs) || {:error, :duplicate_inputs}, do: all_inputs_signed?(inputs, sigs) end defp no_duplicate_inputs?(inputs) do number_of_unique_inputs = inputs |> Enum.uniq() |> Enum.count() inputs_length = Enum.count(inputs) inputs_length == number_of_unique_inputs end defp all_inputs_signed?(non_zero_inputs, sigs) do count_non_zero_signatures = Enum.count(sigs, &(&1 != @empty_signature)) count_non_zero_inputs = length(non_zero_inputs) cond do count_non_zero_signatures > count_non_zero_inputs -> {:error, :superfluous_signature} count_non_zero_signatures < count_non_zero_inputs -> {:error, :missing_signature} true -> true end end end
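For orientation, here is a hedged sketch of how a caller might use the functions above; `tx_bytes` and `input_owners` are placeholders supplied by the surrounding system:

```elixir
alias OMG.State.Transaction

with {:ok, recovered} <- Transaction.Recovered.recover_from(tx_bytes),
     # the addresses recovered from signatures must equal the inputs' owners, in order
     :ok <- Transaction.Recovered.all_spenders_authorized(recovered, input_owners) do
  # statelessly valid and authorized; ready for stateful checks in OMG.State.exec/1
  {:ok, recovered}
end
```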
apps/omg/lib/omg/state/transaction/recovered.ex
0.89878
0.466724
recovered.ex
starcoder
defmodule Snitch.Data.Model.CountryZone do @moduledoc """ CountryZone API """ use Snitch.Data.Model use Snitch.Data.Model.Zone import Ecto.Query alias Snitch.Data.Model.Zone, as: ZoneModel alias Snitch.Data.Schema.{Country, CountryZoneMember, Zone} @doc """ Creates a new country `Zone` whose members are `country_ids`. `country_ids` is a list of primary keys of the `Snitch.Data.Schema.CountryZoneMember`s that make up this zone. Duplicate IDs are ignored. ## Note The list of `CountryZoneMember.t` is put in `zone.members`. """ @spec create(String.t(), String.t(), [non_neg_integer]) :: term def create(name, description, country_ids) do zone_params = %{name: name, description: description, zone_type: "C"} zone_changeset = Zone.create_changeset(%Zone{}, zone_params) multi = ZoneModel.creation_multi(zone_changeset, country_ids) case Repo.transaction(multi) do {:ok, %{zone: zone, members: members}} -> {:ok, struct(zone, members: members)} error -> error end end @spec delete(non_neg_integer | Zone.t()) :: {:ok, Zone.t()} | {:error, Ecto.Changeset.t()} | {:error, :not_found} def delete(id_or_instance) do QH.delete(Zone, id_or_instance, Repo) end @spec get(map | non_neg_integer) :: {:ok, Zone.t()} | {:error, atom} def get(query_fields_or_primary_key) do QH.get(Zone, query_fields_or_primary_key, Repo) end @spec get_all() :: [Zone.t()] def get_all, do: Repo.all(from(z in Zone, where: z.zone_type == "C")) @doc """ Returns the list of `Country` IDs that make up this zone. """ @spec member_ids(Zone.t()) :: [non_neg_integer] def member_ids(%Zone{} = zone) do zone |> members() |> Enum.into([], fn x -> x.id end) end @doc """ Returns the list of `Country` structs that make up this zone. """ @spec members(Zone.t()) :: [Country.t()] def members(%Zone{} = zone) do query = from( c in Country, join: m in CountryZoneMember, on: m.country_id == c.id, where: m.zone_id == ^zone.id ) Repo.all(query) end @doc """ Updates Zone params and sets the members as per `new_country_ids`. This replaces the old members with the new ones. Duplicate IDs in the list are ignored. ## Note The `zone.members` is set to `nil`! """ @spec update(Zone.t(), map, [non_neg_integer]) :: {:ok, Zone.t()} | {:error, Ecto.Changeset.t()} def update(%Zone{} = zone, zone_params, new_country_ids) do zone_changeset = Zone.update_changeset(zone, zone_params) multi = ZoneModel.update_multi(zone, zone_changeset, new_country_ids) case Repo.transaction(multi) do {:ok, %{zone: zone}} -> {:ok, zone} error -> error end end @doc """ Returns a query to bulk remove `CountryZoneMember` records as per `to_be_removed` in `zone`. """ @spec remove_members_query([non_neg_integer], Zone.t()) :: Ecto.Query.t() def remove_members_query(to_be_removed, zone) do from( m in CountryZoneMember, where: m.country_id in ^to_be_removed and m.zone_id == ^zone.id ) end @doc """ Returns `CountryZoneMember` changesets for given `country_ids` for `country_zone` as a stream. """ @spec member_changesets([non_neg_integer], Zone.t()) :: Enumerable.t() def member_changesets(country_ids, %Zone{} = country_zone) do country_ids |> Stream.uniq() |> Stream.map( &CountryZoneMember.create_changeset(%CountryZoneMember{}, %{ country_id: &1, zone_id: country_zone.id }) ) end @doc """ Returns a query to fetch the country zones shared by (aka. common to) given `country_id`s. 
""" @spec common_zone_query(non_neg_integer, non_neg_integer) :: Ecto.Query.t() def common_zone_query(country_a_id, country_b_id) do from( czm_a in CountryZoneMember, join: czm_b in CountryZoneMember, join: z in Zone, on: czm_a.zone_id == czm_b.zone_id and czm_a.zone_id == z.id, where: czm_a.country_id == ^country_a_id and czm_b.country_id == ^country_b_id, select: z ) end end
apps/snitch_core/lib/core/data/model/zone/country_zone.ex
0.900985
0.502014
country_zone.ex
starcoder
defmodule Day7 do
  def from_file(path) do
    File.read!(path)
    |> String.split(",")
    |> Enum.map(&Integer.parse/1)
    |> Enum.map(&(elem(&1, 0)))
  end

  def modify(memory, address, value) do
    memory
    |> List.delete_at(address)
    |> List.insert_at(address, value)
  end

  def read_instruction(value) do
    {params, inst} = Integer.digits(value) |> Enum.split((value |> Integer.digits |> length) - 2)
    {Enum.reverse(params), Integer.undigits(inst)}
  end

  def execute(%{:memory => memory, :pc => pc} = runtime) do
    {modes, inst} = read_instruction(Enum.at(memory, pc))

    cond do
      inst == 99 -> Map.put(runtime, :done, true)
      inst == 3 && runtime.inputs == [] -> Map.put(runtime, :done, false)
      true ->
        case exec_inst(runtime, inst, modes) do
          %{} = runtime -> execute(runtime)
          :error -> runtime
        end
    end
  end

  def value(memory, address, mode) do
    if mode == 0 do
      Enum.at(memory, Enum.at(memory, address))
    else
      Enum.at(memory, address)
    end
  end

  def mode(modes, param) do
    case Enum.fetch(modes, param) do
      {:ok, mode} -> mode
      :error -> 0
    end
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 1, modes) do
    x = value(memory, pc + 1, mode(modes, 0))
    y = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)
    %{runtime | :memory => memory |> modify(address, x + y), :pc => pc + 4}
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 2, modes) do
    x = value(memory, pc + 1, mode(modes, 0))
    y = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)
    %{runtime | :memory => memory |> modify(address, x * y), :pc => pc + 4}
  end

  def exec_inst(%{:memory => memory, :pc => pc, :inputs => inputs} = runtime, 3, _) do
    address = Enum.at(memory, pc + 1)
    [input | rest] = inputs
    %{runtime | :memory => memory |> modify(address, input), :inputs => rest, :pc => pc + 2}
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 4, _) do
    address = Enum.at(memory, pc + 1)
    %{runtime | :memory => memory, :pc => pc + 2, :output => Enum.at(memory, address)}
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 5, modes) do
    jump_if_true = value(memory, pc + 1, mode(modes, 0))
    jump_to = value(memory, pc + 2, mode(modes, 1))

    if jump_if_true != 0 do
      %{runtime | :pc => jump_to}
    else
      %{runtime | :pc => pc + 3}
    end
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 6, modes) do
    jump_if_false = value(memory, pc + 1, mode(modes, 0))
    jump_to = value(memory, pc + 2, mode(modes, 1))

    if jump_if_false == 0 do
      %{runtime | :pc => jump_to}
    else
      %{runtime | :pc => pc + 3}
    end
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 7, modes) do
    first = value(memory, pc + 1, mode(modes, 0))
    second = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)

    if first < second do
      %{runtime | :memory => memory |> modify(address, 1), :pc => pc + 4}
    else
      %{runtime | :memory => memory |> modify(address, 0), :pc => pc + 4}
    end
  end

  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 8, modes) do
    first = value(memory, pc + 1, mode(modes, 0))
    second = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)

    if first == second do
      %{runtime | :memory => memory |> modify(address, 1), :pc => pc + 4}
    else
      %{runtime | :memory => memory |> modify(address, 0), :pc => pc + 4}
    end
  end

  # Fallback for unknown opcodes. It must be exec_inst/3 like the clauses
  # above, so that `execute/1` receives `:error` instead of crashing with a
  # FunctionClauseError.
  def exec_inst(_runtime, inst, _modes) do
    IO.puts("invalid instruction #{inst}")
    :error
  end

  def run(program, inputs) do
    execute(%{:memory => program, :pc => 0, :inputs => inputs, :output => nil})
  end

  def thruster_signal(program, inputs) do
    Enum.scan(inputs, {0, []}, fn input, {prev, _} ->
      %{:output => output} = run(program, [input, prev])
{output, inputs} end) |> List.last end def max_thruster_signal(program) do permutations(0..4 |> Enum.to_list) |> Enum.map(&(thruster_signal(program, &1))) |> Enum.max_by(fn {signal, _} -> signal end) end def permutations([]), do: [[]] def permutations(list), do: for elem <- list, rest <- permutations(list -- [elem]), do: [elem | rest] def feedback_thruster_signal(program, inputs) do signal = Stream.cycle([inputs]) |> Enum.reduce_while(%{}, fn sequence, program_states -> states = Enum.reduce(sequence, program_states, fn input, program_states -> program_state = Map.get(program_states, input, %{:memory => program, :pc => 0, :inputs => [], :output => 0}) prev_output = Map.get(program_states, :output, 0) next_inputs = if Map.has_key?(program_states, input) do [prev_output] else [input, prev_output] end new_state = execute(%{program_state | :inputs => next_inputs}) program_states |> Map.put(input, new_state) |> Map.put(:output, new_state.output) end) is_done = states |> Map.split(inputs) |> elem(0) |> Map.values |> Enum.map(fn %{:done => done} -> done end) |> Enum.any? if is_done do {:halt, states} else {:cont, states} end end) |> Map.get(:output) {signal, inputs} end def max_feedback_thruster_signal(program) do permutations(5..9 |> Enum.to_list) |> Enum.map(&(feedback_thruster_signal(program, &1))) |> Enum.max_by(fn {signal, _} -> signal end) end def solution do IO.puts("#{from_file("day7_input.txt") |> max_thruster_signal |> inspect}") IO.puts("#{from_file("day7_input.txt") |> max_feedback_thruster_signal |> inspect}") end end
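A quick self-contained check of the interpreter above: the program `[3, 0, 4, 0, 99]` reads one input, echoes it as output, and halts, so it exercises opcodes 3, 4, and 99 in one run.

```elixir
result = Day7.run([3, 0, 4, 0, 99], [42])
result.output # => 42
result.done   # => true

# The full amplifier search tries every phase permutation and keeps the best:
{best_signal, _phase_order} =
  Day7.max_thruster_signal(Day7.from_file("day7_input.txt"))
```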
lib/day7.ex
0.608943
0.540985
day7.ex
starcoder
defmodule REnum.Ruby do
  @moduledoc """
  Summarizes all of Ruby's Enumerable functions.
  If a function with the same name already exists in Elixir, it is not reimplemented.
  Also, each function that returns an Enumerator in Ruby has its behavior adapted to fit its characteristics in Elixir.
  All of the functions here are defined when you `use REnum.Ruby`.
  """
  @spec __using__(any) :: list
  defmacro __using__(_opts) do
    RUtils.define_all_functions!(__MODULE__)
  end

  @type type_enumerable :: Enumerable.t()
  @type type_pattern :: number() | String.t() | Range.t() | Regex.t()

  import REnum.Support

  # https://ruby-doc.org/core-3.1.0/Enumerable.html
  # ruby_enumerable = [:all?, :any?, :chain, :chunk, :chunk_while, :collect, :collect_concat, :compact, :count, :cycle, :detect, :drop, :drop_while, :each_cons, :each_entry, :each_slice, :each_with_index, :each_with_object, :entries, :filter, :filter_map, :find, :find_all, :find_index, :first, :flat_map, :grep, :grep_v, :group_by, :include?, :inject, :lazy, :map, :max, :max_by, :member?, :min, :min_by, :minmax, :minmax_by, :none?, :one?, :partition, :reduce, :reject, :reverse_each, :select, :slice_after, :slice_before, :slice_when, :sort, :sort_by, :sum, :take, :take_while, :tally, :to_a, :to_h, :uniq, :zip]
  # |> Enum.reject(fn method ->
  #   Enum.module_info()[:exports]
  #   |> Keyword.keys()
  #   |> Enum.find(&(&1 == method))
  # end)
  # ✔ chain
  # ✔ collect
  # ✔ collect_concat
  # ✔ compact
  # ✔ cycle
  # ✔ detect
  # ✔ each_cons
  # ✔ each_entry
  # ✔ each_slice
  # ✔ each_with_index
  # ✔ each_with_object
  # ✔ entries
  # ✔ find_all
  # ✔ first
  # ✔ grep
  # ✔ grep_v
  # ✔ include?
  # ✔ inject
  # ✔ lazy
  # ✔ minmax
  # ✔ minmax_by
  # ✔ none?
  # ✔ one?
  # ✔ reverse_each
  # ✔ select
  # ✔ slice_after
  # ✔ slice_before
  # ✔ slice_when
  # ✔ tally
  # ✔ to_a
  # ✔ to_h

  @doc """
  Returns a list of all non-nil elements.
  ## Examples
      iex> REnum.compact([1, nil, 2, 3])
      [1, 2, 3]

      iex> REnum.compact(%{
      ...>  :truthy => true,
      ...>  false => false,
      ...>  nil => nil,
      ...>  :map => %{key: :value}
      ...> })
      %{
        :truthy => true,
        false => false,
        :map => %{key: :value}
      }
  """
  @spec compact(type_enumerable) :: type_enumerable
  def compact(enumerable) when is_list(enumerable) do
    enumerable
    |> Enum.reject(&(&1 |> is_nil()))
  end

  def compact(enumerable) when is_map(enumerable) do
    enumerable
    |> Enum.reject(fn {key, value} ->
      is_nil(key) && is_nil(value)
    end)
    |> Enum.into(%{})
  end

  @doc """
  Returns the first element.
  ## Examples
      iex> REnum.first([1, 2, 3])
      1

      iex> REnum.first(%{a: 1, b: 2})
      {:a, 1}
  """
  @spec first(type_enumerable) :: any()
  def first(enumerable) do
    result = Enum.at(enumerable, 0)

    cond do
      result |> is_nil() -> nil
      true -> result
    end
  end

  @doc """
  Returns leading elements.
  ## Examples
      iex> REnum.first([1, 2, 3], 2)
      [1, 2]

      iex> REnum.first(%{a: 1, b: 2}, 2)
      [{:a, 1}, {:b, 2}]
  """
  @spec first(type_enumerable, non_neg_integer()) :: type_enumerable()
  def first(enumerable, n) do
    0..(n - 1)
    |> Enum.with_index(fn _, index ->
      enumerable
      |> Enum.at(index)
    end)
    |> compact()
  end

  @doc """
  Returns true if enumerable has only one truthy element; false otherwise.
  ## Examples
      iex> REnum.one?([1, nil, false])
      true

      iex> REnum.one?(1..4)
      false
  """
  @spec one?(type_enumerable) :: boolean()
  def one?(enumerable) do
    truthy_count(enumerable) == 1
  end

  @doc """
  Returns true if exactly one element meets a specified criterion; false otherwise.
## Examples iex> REnum.one?(1..4, 1..2) false iex> REnum.one?(1..4, &(&1 < 2)) true iex> REnum.one?(1..4, 1) true """ @spec one?(type_enumerable, function() | type_pattern) :: boolean() def one?(enumerable, pattern_or_func) do truthy_count(enumerable, pattern_or_func) == 1 end @doc """ Returns true if enumerable does not include truthy value; false otherwise. ## Examples iex> REnum.none?(1..4) false iex> REnum.none?([nil, false]) true iex> REnum.none?([foo: 0, bar: 1]) false """ @spec none?(type_enumerable) :: boolean() def none?(enumerable) do truthy_count(enumerable) == 0 end @doc """ Returns whether no element meets a given criterion. ## Examples iex> REnum.none?(1..4, &(&1 < 1)) true iex> REnum.none?(%{foo: 0, bar: 1, baz: 2}, fn {_, v} -> v < 0 end) true iex> REnum.none?(1..4, 5) true iex> REnum.none?(1..4, 2..3) false """ @spec none?(type_enumerable, function() | type_pattern) :: boolean() def none?(enumerable, pattern_or_func) do truthy_count(enumerable, pattern_or_func) == 0 end @doc """ When called with positive integer argument n and a function, calls the block with each element, then does so again, until it has done so n times; returns given enumerable When called with a function and n is nil, returns Stream cycled forever. ## Examples iex> REnum.cycle(["a", "b"], 2, &IO.puts(&1)) # a # b # a # b ["a", "b"] iex> REnum.cycle(%{a: 1, b: 2}, nil, &IO.inspect(&1)) |> Enum.take(2) # {:a, 1} # {:b, 2} # {:a, 1} # {:b, 2} [:ok, :ok] """ @spec cycle(type_enumerable, non_neg_integer(), function()) :: Stream | type_enumerable def cycle(enumerable, n, func) when is_nil(n) do Stream.repeatedly(fn -> enumerable |> Enum.each(func) end) end def cycle(enumerable, n, _) when n < 1, do: enumerable def cycle(enumerable, n, func) do enumerable |> Enum.each(func) enumerable |> cycle(n - 1, func) end @doc """ Calls the function with each successive overlapped n-list of elements; returns given enumerable. ## Examples iex> ["a", "b", "c", "d", "e"] iex> |> REnum.each_cons(3, &IO.inspect(&1)) # ["a", "b", "c"] # ["b", "c", "d"] # ["c", "d", "e"] ["a", "b", "c", "d", "e"] iex> %{a: 1, b: 2, c: 3, d: 4, e: 5, f: 6} iex> |> REnum.each_cons(4, &IO.inspect(&1)) # [a: 1, b: 2, c: 3, d: 4] # [b: 2, c: 3, d: 4, e: 5] # [c: 3, d: 4, e: 5, f: 6] %{a: 1, b: 2, c: 3, d: 4, e: 5, f: 6} """ @spec each_cons(type_enumerable, integer(), function()) :: type_enumerable def each_cons(enumerable, n, _) when n < 1, do: enumerable def each_cons(enumerable, n, func) do if Enum.count(enumerable) >= n do [_ | next_els] = enumerable |> Enum.to_list() enumerable |> Enum.take(n) |> func.() each_cons(next_els, n, func) end enumerable end @doc """ Calls the function with each element, but in reverse order; returns given enumerable. ## Examples iex> REnum.reverse_each([1, 2, 3], &IO.inspect(&1)) # 3 # 2 # 1 [1, 2, 3] """ @spec reverse_each(type_enumerable(), function()) :: type_enumerable() def reverse_each(enumerable, func) do enumerable |> Enum.reverse() |> Enum.each(func) enumerable end @doc """ Returns a map each of whose entries is the key-value pair formed from one of those list. ## Examples iex> REnum.to_h([[:a, 1], [:b, 2]]) %{a: 1, b: 2} iex> REnum.to_h(a: 1, b: 2) %{a: 1, b: 2} """ @spec to_h(type_enumerable()) :: map() def to_h(enumerable) do if(list_and_not_keyword?(enumerable)) do enumerable |> Enum.map(&{Enum.at(&1, 0), Enum.at(&1, 1)}) |> Map.new() else Map.new(enumerable) end end @doc """ The function is called with each element. 
  The function should return a 2-element tuple which becomes a key-value pair in the returned map.
  ## Examples
      iex> REnum.to_h([[:a, 1], [:b, 2]], fn el ->
      ...>  {REnum.at(el, 0), REnum.at(el, 1)}
      ...> end)
      %{a: 1, b: 2}

      iex> REnum.to_h(%{a: 1, b: 2}, fn {key, value} -> {key, value * 2} end)
      %{a: 2, b: 4}
  """
  @spec to_h(type_enumerable(), function()) :: map()
  def to_h(enumerable, func) do
    Map.new(enumerable, func)
  end

  @doc """
  Calls the given function with each element; returns the given enumerable:
  ## Examples
      iex> ["a", "b", "c"]
      iex> |> REnum.each_entry(&IO.inspect(&1))
      # "a"
      # "b"
      # "c"
      ["a", "b", "c"]

      iex> %{a: 1, b: 2}
      iex> |> REnum.each_entry(&IO.inspect(&1))
      # {:a, 1}
      # {:b, 2}
      %{a: 1, b: 2}
  """
  @spec each_entry(type_enumerable(), function()) :: type_enumerable()
  def each_entry(enumerable, func) do
    enumerable
    |> Enum.each(func)

    enumerable
  end

  @doc """
  Returns a Stream of the given enumerable sliced into lists of the given amount.
  ## Examples
      iex> ["a", "b", "c", "d", "e"]
      iex> |> REnum.each_slice(2)
      iex> |> Enum.to_list()
      [["a", "b"], ["c", "d"], ["e"]]

      iex> %{a: 1, b: 2, c: 3}
      iex> |> REnum.each_slice(2)
      iex> |> Enum.to_list()
      [[a: 1, b: 2], [c: 3]]
  """
  @spec each_slice(type_enumerable(), non_neg_integer()) :: type_enumerable() | atom()
  def each_slice(enumerable, amount) do
    if(amount < 1) do
      []
    else
      enumerable
      |> each_slice(
        0,
        amount
      )
    end
    |> lazy()
  end

  def each_slice(enumerable, start_index, amount_or_func) when is_integer(amount_or_func) do
    sliced =
      enumerable
      |> Enum.slice(start_index, amount_or_func)

    next_start_index = start_index + amount_or_func

    [sliced] ++
      if Enum.count(enumerable) > next_start_index,
        do: each_slice(enumerable, next_start_index, amount_or_func),
        else: []
  end

  @doc """
  Calls the given function with each slice; returns `:ok`.
  ## Examples
      iex> ["a", "b", "c", "d", "e"]
      iex> |> REnum.each_slice(2, &IO.inspect(&1))
      # ["a", "b"]
      # ["c", "d"]
      # ["e"]
      :ok

      iex> %{a: 1, b: 2, c: 3}
      iex> |> REnum.each_slice(2, &IO.inspect(&1))
      # [a: 1, b: 2]
      # [c: 3]
      :ok
  """
  @spec each_slice(type_enumerable(), non_neg_integer(), function() | non_neg_integer()) :: atom()
  def each_slice(enumerable, amount, amount_or_func) when is_function(amount_or_func) do
    each_slice(enumerable, amount)
    |> Enum.each(fn els ->
      amount_or_func.(els)
    end)
  end

  @doc """
  Returns a Stream, which redefines most Enumerable functions to postpone enumeration and enumerate values only on an as-needed basis.
  ## Examples
      iex> [1, 2, 3]
      iex> |> REnum.lazy()
      iex> |> REnum.to_list()
      [1, 2, 3]
  """
  @spec lazy(type_enumerable()) :: type_enumerable()
  def lazy(enumerable) do
    enumerable
    |> chain([])
    |> Stream.take(Enum.count(enumerable))
  end

  @doc """
  With a pattern argument, returns the elements partitioned into lists (“slices”) using the pattern. An element ends the current slice if it matches the pattern.
  With a function, returns the elements partitioned into lists using the function. An element ends the current slice if the function returns a truthy value.
  ## Examples
      iex> [0, 2, 4, 1, 2, 4, 5, 3, 1, 4, 2]
      iex> |> REnum.slice_after(&(rem(&1, 2) == 0))
      [[0], [2], [4], [1, 2], [4], [5, 3, 1, 4], [2]]

      iex> ["a", "b", "c"]
      iex> |> REnum.slice_after(~r/b/)
      [["a", "b"], ["c"]]
  """
  @spec slice_after(type_enumerable(), function() | type_pattern()) :: type_enumerable()
  def slice_after(enumerable, func) when is_function(func) do
    if(Enum.count(enumerable) < 1) do
      enumerable
    else
      index =
        enumerable
        |> Enum.find_index(func) || Enum.count(enumerable)

      [Enum.slice(enumerable, 0..index)] ++
        slice_after(
          Enum.slice(enumerable, (index + 1)..Enum.count(enumerable)),
          func
        )
    end
  end

  def slice_after(enumerable, pattern) do
    slice_after(
      enumerable,
      match_function(pattern)
    )
  end

  @doc """
  With a pattern argument, returns the elements partitioned into lists (“slices”) using the pattern. An element begins a new slice if it matches the pattern (or if it is the first element).
  With a function, returns the elements partitioned into lists using the function. An element begins a new slice if the function returns a truthy value.
  ## Examples
      iex> [0, 2, 4, 1, 2, 4, 5, 3, 1, 4, 2]
      iex> |> REnum.slice_before(&(rem(&1, 2) == 0))
      [[0], [2], [4, 1], [2], [4, 5, 3, 1], [4], [2]]

      iex> ["a", "b", "c"]
      iex> |> REnum.slice_before(~r/b/)
      [["a"], ["b", "c"]]
  """
  @spec slice_before(type_enumerable(), function() | type_pattern()) :: type_enumerable()
  def slice_before(enumerable, func) when is_function(func) do
    enumerable
    |> Enum.reverse()
    |> slice_after(func)
    |> Enum.reverse()
    |> Enum.map(&Enum.reverse(&1))
  end

  def slice_before(enumerable, pattern) do
    enumerable
    |> Enum.reverse()
    |> slice_after(match_function(pattern))
    |> Enum.reverse()
    |> Enum.map(&Enum.reverse(&1))
  end

  @doc """
  Returns the elements partitioned into lists (“slices”) using the function. The function is called with each element and its successor; a new slice begins if and only if the function returns a truthy value.
  &1 is current_element and &2 is next_element in function arguments.
  ## Examples
      iex> [1, 2, 4, 9, 10, 11, 12, 15, 16, 19, 20, 21]
      iex> |> REnum.slice_when(&(&1 + 1 != &2))
      [[1, 2], [4], [9, 10, 11, 12], [15, 16], [19, 20, 21]]
  """
  @spec slice_when(type_enumerable(), function() | type_pattern()) :: type_enumerable()
  def slice_when(enumerable, func) do
    if(Enum.count(enumerable) < 1) do
      enumerable
    else
      index =
        enumerable
        |> find_index_with_index(fn el, index ->
          next_el = Enum.at(enumerable, index + 1)
          func.(el, next_el)
        end) || Enum.count(enumerable)

      [Enum.slice(enumerable, 0..index)] ++
        slice_when(
          Enum.slice(enumerable, (index + 1)..Enum.count(enumerable)),
          func
        )
    end
  end

  @doc """
  Returns elements selected by a given pattern or function.
  ## Examples
      iex> ["foo", "bar", "car", "moo"]
      iex> |> REnum.grep(~r/ar/)
      ["bar", "car"]

      iex> 1..10
      iex> |> REnum.grep(3..8)
      [3, 4, 5, 6, 7, 8]
  """
  @spec grep(type_enumerable(), function() | type_pattern()) :: type_enumerable()
  def grep(enumerable, func) when is_function(func) do
    enumerable
    |> select(func)
  end

  def grep(enumerable, pattern) do
    grep(
      enumerable,
      match_function(pattern)
    )
  end

  @doc """
  Calls the function with each matching element and returns the results.
  ## Examples
      iex> ["foo", "bar", "car", "moo"]
      iex> |> REnum.grep(~r/ar/, &String.upcase(&1))
      ["BAR", "CAR"]

      iex> 1..10
      iex> |> REnum.grep(3..8, &to_string(&1))
      ["3", "4", "5", "6", "7", "8"]
  """
  @spec grep(type_enumerable(), function() | type_pattern(), function()) :: type_enumerable()
  def grep(enumerable, pattern, func) do
    enumerable
    |> grep(pattern)
    |> Enum.map(func)
  end

  @doc """
  Returns elements rejected by a given pattern or function.
  ## Examples
      iex> ["foo", "bar", "car", "moo"]
      iex> |> REnum.grep_v(~r/ar/)
      ["foo", "moo"]

      iex> 1..10
      iex> |> REnum.grep_v(3..8)
      [1, 2, 9, 10]
  """
  @spec grep_v(type_enumerable(), function() | type_pattern()) :: type_enumerable()
  def grep_v(enumerable, pattern) do
    greped =
      enumerable
      |> grep(pattern)

    enumerable
    |> Enum.reject(&(&1 in greped))
  end

  @doc """
  Calls the function with each non-matching element and returns the results.
  ## Examples
      iex> ["foo", "bar", "car", "moo"]
      iex> |> REnum.grep_v(~r/ar/, &String.upcase(&1))
      ["FOO", "MOO"]

      iex> 1..10
      iex> |> REnum.grep_v(3..8, &to_string(&1))
      ["1", "2", "9", "10"]
  """
  @spec grep_v(type_enumerable(), function() | type_pattern(), function()) :: type_enumerable()
  def grep_v(enumerable, pattern, func) do
    enumerable
    |> grep_v(pattern)
    |> Enum.map(func)
  end

  # aliases
  defdelegate detect(enumerable, default, func), to: Enum, as: :find
  defdelegate detect(enumerable, func), to: Enum, as: :find
  defdelegate select(enumerable, func), to: Enum, as: :filter
  defdelegate find_all(enumerable, func), to: Enum, as: :filter
  defdelegate inject(enumerable, acc, func), to: Enum, as: :reduce
  defdelegate inject(enumerable, func), to: Enum, as: :reduce
  defdelegate collect(enumerable, func), to: Enum, as: :map
  defdelegate include?(enumerable, element), to: Enum, as: :member?
  defdelegate collect_concat(enumerable, func), to: Enum, as: :flat_map
  defdelegate entries(enumerable), to: __MODULE__, as: :to_a
  defdelegate each_with_object(enumerable, collectable, func), to: Enum, as: :reduce
  defdelegate each_with_index(enumerable, func), to: Enum, as: :with_index
  defdelegate each_with_index(enumerable), to: Enum, as: :with_index
  defdelegate minmax(enumerable), to: Enum, as: :min_max
  defdelegate minmax(enumerable, func), to: Enum, as: :min_max
  defdelegate minmax_by(enumerable, func), to: Enum, as: :min_max_by
  defdelegate minmax_by(enumerable, func1, func2), to: Enum, as: :min_max_by
  defdelegate minmax_by(enumerable, func1, func2, func3), to: Enum, as: :min_max_by
  defdelegate tally(enumerable), to: Enum, as: :frequencies
  defdelegate chain(enumerables), to: Stream, as: :concat
  defdelegate chain(first, second), to: Stream, as: :concat
  defdelegate to_a(enumerables), to: Enum, as: :to_list
  defdelegate to_l(enumerables), to: Enum, as: :to_list
end
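As the moduledoc states, `use REnum.Ruby` defines all of these functions on the calling module (via `RUtils.define_all_functions!/1`); a minimal sketch of that pattern, with an illustrative module name:

```elixir
defmodule MyEnum do
  use REnum.Ruby
end

MyEnum.compact([1, nil, 2])          # => [1, 2]
MyEnum.grep(["foo", "bar"], ~r/ar/)  # => ["bar"]
```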
lib/r_enum/ruby.ex
0.864382
0.445168
ruby.ex
starcoder
defmodule Bencodex do
  @moduledoc """
  Bencodex is an encoder/decoder for the bencode protocol.

  Bencode supports four types of objects

  * `string` - Implemented as an ASCII encoded binary
  * `integer` - Encoded in base 10 ASCII
  * `list` - Ordered, heterogeneous list
  * `dictionary` - String keys with heterogeneous values in lexicographical order
  """

  @doc """
  Decode Bencode terms into Elixir terms

      Bencodex.decode("i1e") # => 1
      Bencodex.decode("3:foo") # => "foo"
      Bencodex.decode("li1e3:fooe") # => [1, "foo"]
      Bencodex.decode("d3:fooi1ee") # => %{"foo" => 1}
  """
  def decode(input) do
    Enum.at(Enum.reverse(decode(input, [])), 0)
  end

  @doc """
  Encode Elixir terms into Bencode terms

  The accepted types are integer, binary (string), list and map

      Bencodex.encode(1) # => "i1e"
      Bencodex.encode("foo") # => "3:foo"
      Bencodex.encode([1, "foo"]) # => "li1e3:fooe"
      Bencodex.encode(%{"foo" => 1}) # => "d3:fooi1ee"
  """
  def encode(output) do
    encode(output, "")
  end

  defp decode(<< "i", tail :: binary >>, acc) do
    { int, rest } = decode_integer(tail, [])
    acc = [int | acc]
    decode(rest, acc)
  end

  defp decode(<< "l", tail :: binary >>, acc) do
    { list, rest } = decode_list(tail, [])
    acc = [list | acc]
    decode(rest, acc)
  end

  defp decode(<< "d", tail :: binary >>, acc) do
    { map, rest } = decode_dictionary(tail, [])
    acc = [map | acc]
    decode(rest, acc)
  end

  defp decode("", acc), do: acc

  defp decode(remaining, acc) do
    { bin, rest } = decode_binary(remaining, [])
    acc = [bin | acc]
    decode(rest, acc)
  end

  defp decode_integer(<< "e", tail :: binary >>, acc) do
    { List.to_integer(Enum.reverse(acc)), tail }
  end

  defp decode_integer(<< i :: integer, tail :: binary >>, acc) do
    acc = [i | acc]
    decode_integer(tail, acc)
  end

  defp decode_list(<< "e", tail :: binary >>, acc), do: { Enum.reverse(acc), tail }

  defp decode_list(items, acc) do
    { item, rest } = decode_with_rest(items)
    acc = [item | acc]
    decode_list(rest, acc)
  end

  defp decode_binary(<< ":", tail :: binary >>, acc) do
    length = List.to_integer(Enum.reverse(acc))
    << bin :: size(length)-binary, rest :: binary >> = tail
    { bin, rest }
  end

  defp decode_binary(<< i :: integer, tail :: binary >>, acc) do
    acc = [i | acc]
    decode_binary(tail, acc)
  end

  defp decode_dictionary(<< "e", tail :: binary >>, acc) do
    result = Enum.reduce(acc, Map.new, fn({ key, val }, map) -> Map.put(map, key, val) end)
    { result, tail }
  end

  defp decode_dictionary(pairs, acc) do
    { key, val_with_rest } = decode_with_rest(pairs)
    { val, rest } = decode_with_rest(val_with_rest)
    acc = [{ key, val } | acc]
    decode_dictionary(rest, acc)
  end

  defp decode_with_rest(<< "i", tail :: binary >>), do: decode_integer(tail, [])
  defp decode_with_rest(<< "l", tail :: binary >>), do: decode_list(tail, [])
  defp decode_with_rest(<< "d", tail :: binary >>), do: decode_dictionary(tail, [])
  defp decode_with_rest(binary), do: decode_binary(binary, [])

  defp encode(i, acc) when is_integer(i), do: acc <> "i#{i}e"
  defp encode(b, acc) when is_binary(b), do: acc <> "#{byte_size(b)}:#{b}"

  defp encode(l, acc) when is_list(l) do
    "#{acc}l" <> Enum.reduce(l, "", fn(element, acc) -> acc <> encode(element, "") end) <> "e"
  end

  defp encode(m, acc) when is_map(m) do
    "#{acc}d" <> Enum.reduce(Map.keys(m), "", fn(k, acc) ->
      acc <> encode(k, "") <> encode(Map.get(m, k), "")
    end) <> "e"
  end
end
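A quick round trip through the supported types shows encode and decode acting as inverses (the term is an arbitrary example; note dictionary keys are emitted in lexicographical order, as bencode requires):

```elixir
term = %{"ints" => [1, 2, 3], "name" => "foo"}

encoded = Bencodex.encode(term)
# => "d4:intsli1ei2ei3ee4:name3:fooe"

^term = Bencodex.decode(encoded)
```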
lib/bencodex.ex
0.732209
0.539893
bencodex.ex
starcoder
defmodule Ash.Resource.Validation do @moduledoc """ Represents a validation in Ash. See `Ash.Resource.Validation.Builtins` for a list of builtin validations. To write your own validation, define a module that implements the `c:init/1` callback to validate options at compile time, and `c:validate/2` callback to do the validation. Then, in a resource, you can say: ``` validations do validation {MyValidation, [foo: :bar]} end ``` To make it more readable, you can define a function in the module that returns that tuple, and import it into your resource. ``` defmodule MyValidation do def my_validation(value) do {__MODULE__, foo: value} end end ``` ``` defmodule MyResource do ... import MyValidation validations do validate my_validation(:foo) end end ``` """ defstruct [ :validation, :module, :opts, :expensive?, :description, :message, :before_action?, :where, on: [] ] @type t :: %__MODULE__{ validation: {atom(), list(atom())}, module: atom(), opts: list(atom()), expensive?: boolean(), description: String.t() | nil, where: list({atom(), list(atom())}), on: list(atom()) } @type path :: [atom | integer] @callback init(Keyword.t()) :: {:ok, Keyword.t()} | {:error, String.t()} @callback validate(Ash.Changeset.t(), Keyword.t()) :: :ok | {:error, term} @schema [ validation: [ type: {:ash_behaviour, Ash.Resource.Validation, Ash.Resource.Validation.Builtins}, required: true, doc: "The module/opts pair of the validation" ], where: [ type: {:list, {:ash_behaviour, Ash.Resource.Validation, Ash.Resource.Validation.Builtins}}, required: false, doc: """ Validations that should pass in order for this validation to apply. These validations failing will not invalidate the changes, but instead just result in this validation being ignored. """ ], on: [ type: {:custom, __MODULE__, :on, []}, default: [:create, :update], doc: """ The action types the validation should run on. Many validations don't make sense in the context of deletion, so by default it is left out of the list. """ ], expensive?: [ type: :boolean, default: false, doc: "If a validation is expensive, it won't be run on invalid changes. All inexpensive validations are always run, to provide informative errors." ], message: [ type: :string, doc: "If provided, overrides any message set by the validation error" ], description: [ type: :string, doc: "An optional description for the validation" ], before_action?: [ type: :boolean, default: false, doc: "If set to `true`, the validation is not run when building changesets using `Ash.Changeset.for_*`. The validation will only ever be run once the action itself is called." ] ] @action_schema Keyword.delete(@schema, :on) defmacro __using__(_) do quote do @behaviour Ash.Resource.Validation def init(opts), do: {:ok, opts} defoverridable init: 1 end end @doc false def transform(%{validation: {module, opts}} = validation) do case module.init(opts) do {:ok, opts} -> {:ok, %{validation | validation: {module, opts}, module: module, opts: opts}} {:error, error} -> {:error, error} end end def opt_schema, do: @schema def action_schema, do: @action_schema def on(list) do list |> List.wrap() |> Enum.all?(&(&1 in [:create, :update, :destroy])) |> case do true -> {:ok, List.wrap(list)} false -> {:error, "Expected items of [:create, :update, :destroy], got: #{inspect(list)}"} end end end
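As an illustration of the two callbacks above, here is a hedged sketch of a custom validation. The module name is made up, and the use of `Ash.Changeset.get_attribute/2` to read the attribute value is an assumption about the wider Ash API, not something defined in this file:

```elixir
defmodule MyApp.Validations.NonNegative do
  use Ash.Resource.Validation

  @impl true
  def init(opts) do
    # Validate options at compile time, per the init/1 callback contract.
    if is_atom(opts[:attribute]) do
      {:ok, opts}
    else
      {:error, "attribute must be an atom, got: #{inspect(opts[:attribute])}"}
    end
  end

  @impl true
  def validate(changeset, opts) do
    # Assumption: get_attribute/2 returns the (possibly changing) attribute
    # value; the error is a plain term, as the callback spec allows.
    case Ash.Changeset.get_attribute(changeset, opts[:attribute]) do
      nil -> :ok
      value when value >= 0 -> :ok
      _ -> {:error, "#{opts[:attribute]} must be non-negative"}
    end
  end
end
```

It would then be referenced from a resource the same way as the moduledoc's examples, e.g. `validate {MyApp.Validations.NonNegative, attribute: :price}`.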
lib/ash/resource/validation.ex
0.843766
0.832985
validation.ex
starcoder
defmodule ZupplerUsers.Auth.Supervisor do
  @moduledoc """
  Provides OAuth token authorization with Zuppler

  ## Dependencies
    poolboy, lrucache

  ## Setup
    In the app module add a new child
      supervisor(ZupplerUsers.Auth.Supervisor, [])

  ## Usage
  ```
  user_info = ZupplerUsers.Auth.Supervisor.auth(token)
  ZupplerUsers.Auth.Supervisor.has_role(user_info, "restaurant")
  ZupplerUsers.Auth.Supervisor.has_role(user_info, ["restaurant", "channel"])
  ZupplerUsers.Auth.Supervisor.acls_for(user_info, :restaurant)
  ```
  """
  use Supervisor

  def start_link(state \\ []) do
    Supervisor.start_link(__MODULE__, state, [name: __MODULE__])
  end

  def pool_name, do: :zuppler_auth_pool

  def init(_state) do
    poolboy_config = [
      {:name, {:local, pool_name()}},
      {:worker_module, ZupplerUsers.Auth.Worker},
      {:size, 5},
      {:max_overflow, 10}
    ]

    children = [
      :poolboy.child_spec(pool_name(), poolboy_config, []),
      worker(LruCache, [:zuppler_auth_cache, 1000])
    ]

    supervise(children, strategy: :one_for_one)
  end

  @doc """
  Authenticates a user by token. Returns user_info on success or nil otherwise
  """
  def auth(token) do
    with_cache(token, fn token ->
      :poolboy.transaction(pool_name(), fn worker ->
        :gen_server.call(worker, {:auth, token})
      end)
    end)
  end

  def with_cache(token, producer) do
    case LruCache.get(:zuppler_auth_cache, token) do
      nil ->
        data = producer.(token)
        LruCache.put(:zuppler_auth_cache, token, data)
        data

      data ->
        data
    end
  end

  @doc """
  Verifies if the user has a given role. Roles can be either a single string
  *"admin"* or a list of strings *["restaurant", "channel"]*. It returns true
  if the user has any of the given roles
  """
  def has_role(user_info, roles) do
    :poolboy.transaction(pool_name(), fn worker ->
      :gen_server.call(worker, {:has_any_role?, user_info, roles})
    end)
  end

  @doc """
  Returns the object ids the user has access to. `subject` must be an atom

  Example
      acls_for(user_info, :restaurant) # => [2, 3, 4]
  """
  def acls_for(user_info, subject) do
    :poolboy.transaction(pool_name(), fn worker ->
      :gen_server.call(worker, {:acls_for, user_info, subject})
    end)
  end
end
lib/authorization/supervisor.ex
0.637708
0.540621
supervisor.ex
starcoder
defmodule MelodyMatch.Matchmaker do @moduledoc """ Server for matching users on the system with each other. Which matcher is configurable via application config, for example ```elixir config :melody_match, default_matcher: MelodyMatch.Matchmaker.MatcherAny ``` ## Attribution The code in this module is based on lecture notes, see https://github.com/NatTuck/scratch-2021-01/blob/master/4550/0219/hangman/lib/hangman/game_server.ex. """ use GenServer alias MelodyMatch.Accounts alias MelodyMatch.Matches alias MelodyMatch.MatchmakerSupervisor alias MelodyMatch.PoolBackupAgent alias MelodyMatch.Repo # Public interface def start(name) do spec = %{ id: __MODULE__, start: {__MODULE__, :start_link, [name]}, restart: :permanent, type: :worker } MatchmakerSupervisor.start_child(spec) end def start_link(name) do pool = PoolBackupAgent.get(name) || %{} GenServer.start_link(__MODULE__, pool, name: reg(name)) end @doc """ Attempt to match the user with the given ID to users who are available for matching currently. ## Arguments - matchmaker_name: the matchmaker server instance to use; for now, this should always be the same value - user_id: ID of the user to try matching """ @spec try_match(String.t, integer) :: term def try_match(matchmaker_name, user_id) do GenServer.call(reg(matchmaker_name), {:try_match, user_id}) end @doc """ Removes the given user from the matchmaking pool. ## Arguments - matchmaker_name: the matchmaker server instance to use - user_id: ID of the user to remove """ @spec remove_user(String.t, integer) :: term def remove_user(matchmaker_name, user_id) do GenServer.call(reg(matchmaker_name), {:remove_user, user_id}) end defp reg(name), do: {:via, Registry, {MelodyMatch.MatchmakerRegistry, name}} # Implementation def init(pool), do: {:ok, pool} def handle_call({:try_match, user_id}, _from, pool) do params = get_matching_params(user_id) recent_partners = get_recent_partners(user_id) other_user_id = matcher().best_match(recent_partners, params, pool) if other_user_id do {:ok, match} = %{first_user_id: other_user_id, second_user_id: user_id} |> Matches.create_match() send_match_found(user_id, match.id) send_match_found(other_user_id, match.id) {:reply, other_user_id, Map.delete(pool, other_user_id)} else {:reply, nil, Map.put(pool, user_id, params)} end end def handle_call({:remove_user, user_id}, _from, pool) do {:reply, nil, Map.delete(pool, user_id)} end defp get_matching_params(user_id) do user = Accounts.get_user!(user_id) |> Repo.preload(:top_track) if user.top_track do track = user.top_track %{ acousticness: track.acousticness, danceability: track.danceability, energy: track.energy, instrumentalness: track.instrumentalness, liveness: track.liveness, loudness: track.loudness, mode: track.mode, speechiness: track.speechiness, tempo: track.tempo, valence: track.valence, latitude: user.last_latitude, longitude: user.last_longitude } else %{ acousticness: 0.0, danceability: 0.0, energy: 0.0, instrumentalness: 0.0, liveness: 0.0, loudness: 0.0, mode: 0.0, speechiness: 0.0, tempo: 0.0, valence: 0.0, latitude: user.last_latitude, longitude: user.last_longitude } end end defp get_recent_partners(user_id) do Matches.get_user_recent_matches(user_id) |> Enum.map(fn m -> if m.first_user_id == user_id do m.second_user_id else m.first_user_id end end) end defp matcher(), do: Application.get_env(:melody_match, :default_matcher) defp send_match_found(user_id, match_id) do IO.puts "Broadcasting matches #{user_id} match Found" MelodyMatchWeb.Endpoint.broadcast( "matchmaker:#{user_id}", "matchFound", 
%{matchId: match_id}) end end
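A sketch of driving the server above end to end. The matchmaker name and user IDs are placeholders, and whether the second call actually produces a match depends on the configured matcher (a permissive one such as `MelodyMatch.Matchmaker.MatcherAny` is assumed here):

```elixir
alias MelodyMatch.Matchmaker

{:ok, _pid} = Matchmaker.start("lobby")

# No one else is waiting: returns nil and adds user 1 to the pool.
nil = Matchmaker.try_match("lobby", 1)

# With a permissive matcher, user 2 is paired with pooled user 1; a match row
# is created and both users are notified on their "matchmaker:<id>" channels.
1 = Matchmaker.try_match("lobby", 2)

# A pooled user can withdraw explicitly.
Matchmaker.remove_user("lobby", 1)
```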
server/lib/melody_match/matchmaker.ex
0.797281
0.645106
matchmaker.ex
starcoder
defmodule InlineSvg do
  require Logger

  @moduledoc """
  Simple and fast in-line SVG library and renderer for web applications.

  SVG files are images that are formatted as very simple, and usually small, text
  files. It is faster, and recommended, to include the svg data directly in-line
  with your web pages instead of asking the browser to make additional calls to
  servers before it can render your pages. This makes your pages load faster.

  `inline_svg` renders your svg files as quickly as possible. To do this, it reads
  the svg files at compile-time and provides runtime access through a term stored
  in your beam file. If you use `nimble_publisher`, this should be a familiar
  concept.

  To use `inline_svg`, you create a module in your project that wraps it, providing
  a compile-time place to build the library and runtime access to it. It also
  happens to make your template svg rendering code very simple.

  You do __not__ need to store your svg files in the "assets/static" directory.
  Those files are copied into your application via a file based mechanism, whereas
  `inline_svg` compiles them in directly. I recommend simply using "assets/svg".

  Each `*.svg` file must contain a single valid `<svg></svg>` tag set with data as
  appropriate. Anything before the `<svg>` tag or after the `</svg>` tag is treated
  as a comment and stripped from the text during compilation.

  ## Example wrapper module

      defmodule MyAppWeb.Svg do
        # Build the library at compile time
        @library InlineSvg.compile( "assets/svg" )

        # Accesses the library at run time
        defp library(), do: @library

        # Render an svg from the library
        def render( key, opts \\ [] ) do
          InlineSvg.render( library(), key, opts )
        end
      end

  To use the library, you would `alias MyAppWeb.Svg` in a controller, live_view or
  your main app module. This allows your template code to call Svg.render directly.

  ## Example use in a template

      <%= Svg.render( "heroicons/user", class: "h-5 w-5 inline" ) %>

  ### Live reloading

  If you are using Phoenix, you can enable live reloading by simply telling Phoenix
  to watch the svgs directory. Open up "config/dev.exs", search for `live_reload:`
  and add this to the list of patterns:

  ```elixir
  live_reload: [
    patterns: [
      ...,
      ~r"assets/svg/*/.*(svg)$"
    ]
  ]
  ```
  """

  defmodule Error do
    @moduledoc false
    defexception message: nil, svg: nil
  end

  #--------------------------------------------------------
  @doc """
  Compile a folder of `*.svg` files into a library you can render from.

  The folder and its subfolders will be traversed and all valid `*.svg` files will
  be added to the library.

  Each svg will be added to the library with a key that is the relative path of
  the svg file, minus the .svg part. For example, if you compile the folder
  "assets/svg" and it finds a file with the path
  "assets/svg/heroicons/calendar.svg", then the key for that svg is
  `"heroicons/calendar"` in the library.

  ## Usage

  The best way to use InlineSvg is to create a new module in your project that
  wraps it, providing storage for the generated library term. This also allows you
  to customize naming, rendering or compiling as required.

  ## Example

      defmodule MyAppWeb.Svg do
        # Build the library at compile time
        @library InlineSvg.compile( "assets/svg" )

        # Accesses the library at run time
        defp library(), do: @library

        # Render an svg from the library
        def render( key, opts \\ [] ) do
          InlineSvg.render( library(), key, opts )
        end
      end

  Note that @library is accessed through a function.
  The library could become large, so you want to wrap it with a function to
  ensure that it is only stored as a term in your beam file once.
  """
  @spec compile(map(), String.t()) :: map()
  def compile( %{} = library \\ %{}, svg_root ) when is_bitstring(svg_root) do
    svg_root
    |> Kernel.<>( "/**/*.svg" )
    |> Path.wildcard()
    |> Enum.reduce( library, fn(path, acc) ->
      case read_svg( path, svg_root ) do
        {:ok, key, svg} ->
          # warn on a duplicate key, then overwrite, as the log message promises
          if unique_key( acc, key, path ) != :ok do
            Logger.warn("SVG file: #{path} overwrites existing svg: #{key}")
          end

          Map.put( acc, key, svg <> "</svg>" )

        {:file_error, err, path} ->
          raise %Error{message: "SVG file #{inspect(path)} is invalid, err: #{err}", svg: path}
      end
    end)
  end

  defp read_svg( path, root ) do
    with {:ok, svg} <- File.read( path ),
         true <- String.valid?(svg),
         [_, svg] <- String.split(svg, "<svg"),
         [svg, _] <- String.split(svg, "</svg>") do
      {
        :ok,
        path # make the key
        |> String.trim(root)
        |> String.trim("/")
        |> String.trim_trailing(".svg"),
        svg
      }
    else
      err -> {:file_error, err, path}
    end
  end

  defp unique_key(library, key, path) do
    case Map.fetch( library, key ) do
      {:ok, _} -> {:duplicate, key, path}
      _ -> :ok
    end
  end

  #--------------------------------------------------------
  @doc """
  Renders an svg into a safe string that can be inserted directly into a Phoenix
  template.

  The named svg must be in the provided library, which should be built using the
  compile function.

  _Optional_: pass in a keyword list of attributes to insert into the svg tag.
  This can be used to add `class="something"` tag attributes, phoenix directives
  such as `phx-click`, or even alpine directives such as `@click="some action"`.

  Note that key names containing the underscore character `"_"` will be converted
  to the hyphen `"-"` character.

  You don't normally call `InlineSvg.render()` directly, except in your wrapper
  module. Instead, you would `alias MyAppWeb.Svg` in a controller, live view or
  your main app module. This allows your template code to call Svg.render
  directly, which is simple and looks nice.

  The following examples all use an aliased `MyAppWeb.Svg`, which wraps
  `InlineSvg`.

  ## Example use in a template

      <%= Svg.render( "heroicons/menu" ) %>
      <%= Svg.render( "heroicons/user", class: "h-5 w-5 inline" ) %>

  ## Other examples

  Without attributes:

      Svg.render( "heroicons/menu" )
      {:safe, "<svg xmlns= ... </svg>"}

  With options:

      Svg.render( "heroicons/menu", class: "h-5 w-5" )
      {:safe, "<svg class=\"h-5 w-5\" xmlns= ... </svg>"}

      Svg.render( "heroicons/menu", phx_click: "action" )
      {:safe, "<svg phx-click=\"action\" xmlns= ... </svg>"}

      Svg.render( "heroicons/menu", "@click": "alpine_action" )
      {:safe, "<svg @click=\"alpine_action\" xmlns= ... </svg>"}

  """
  @spec render(map(), String.t(), list()) :: {:safe, String.t()}
  def render( %{} = library, key, attrs \\ [] ) do
    case Map.fetch( library, key ) do
      {:ok, svg} -> {:safe, "<svg" <> render_attrs(attrs) <> svg}
      _ -> raise %Error{message: "SVG #{inspect(key)} not found", svg: key}
    end
  end

  #--------------------------------------------------------
  # transform an opts list into a string of tag options
  defp render_attrs( attrs ), do: do_render_attrs( attrs, "" )

  defp do_render_attrs( [], acc ), do: acc

  defp do_render_attrs( [{key, value} | tail], acc ) do
    key = to_string(key) |> String.replace("_", "-")
    do_render_attrs( tail, "#{acc} #{key}=#{inspect(value)}" )
  end
end
lib/inline_svg.ex
0.877207
0.707076
inline_svg.ex
starcoder
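As a quick sketch of the data flow above (paths and markup abbreviated and hypothetical): `compile/2` yields a plain map keyed by the relative path minus the `.svg` suffix, and `render/3` re-attaches the `<svg` prefix together with any extra attributes:

```elixir
library = InlineSvg.compile("assets/svg")
# => %{"heroicons/user" => " xmlns=\"...\">...</svg>", ...}

InlineSvg.render(library, "heroicons/user", class: "h-5 w-5", phx_click: "open")
# => {:safe, "<svg class=\"h-5 w-5\" phx-click=\"open\" xmlns=\"...\">...</svg>"}
```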
defmodule Instream.Encoder.Line do @moduledoc false @type point_map :: %{ required(:fields) => [{term, term}], required(:measurement) => term, optional(:tags) => [{term, term}], optional(:timestamp) => term } @doc """ Creates the write string for a list of data points. """ @spec encode([point_map()]) :: binary def encode(points), do: encode(points, []) defp encode([], lines) do lines |> Enum.reverse() |> Enum.join("\n") end defp encode([point | points], lines) do line = [encode_property(point.measurement)] |> append_tags(point) |> append_fields(point) |> append_timestamp(point) |> Enum.join() encode(points, [line | lines]) end defp append_fields(line, %{fields: fields}) do fields |> Enum.reduce([], fn {_, nil}, acc -> acc {field, value}, acc -> [",", encode_property(field), "=", encode_value(value) | acc] end) |> case do [] -> line ["," | encoded_fields] -> [line, " " | encoded_fields] end end defp append_tags(line, %{tags: tags}) do tags |> Enum.reduce([], fn {_, nil}, acc -> acc {tag, value}, acc -> [",", encode_property(tag), "=", encode_property(value) | acc] end) |> case do [] -> line encoded_tags -> [line | encoded_tags] end end defp append_tags(line, _), do: line defp append_timestamp(line, %{timestamp: nil}), do: line defp append_timestamp(line, %{timestamp: ts}), do: [line, " ", ts] defp append_timestamp(line, _), do: line defp encode_value(i) when is_integer(i), do: [Integer.to_string(i), "i"] defp encode_value(s) when is_binary(s), do: ["\"", String.replace(s, "\"", "\\\""), "\""] defp encode_value(true), do: "true" defp encode_value(false), do: "false" defp encode_value(other), do: inspect(other) defp encode_property(s) do s |> Kernel.to_string() |> String.replace(",", "\\,", global: true) |> String.replace(" ", "\\ ", global: true) |> String.replace("=", "\\=", global: true) end end
lib/instream/encoder/line.ex
0.762689
0.405419
line.ex
starcoder
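A small illustration of this encoder (values invented): fields and tags are keyword lists in this revision, `nil` values are dropped, and special characters in properties are escaped:

```elixir
alias Instream.Encoder.Line

Line.encode([
  %{measurement: "cpu load", tags: [host: "a"], fields: [value: 21, note: nil]}
])
# => "cpu\\ load,host=a value=21i"
```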
defmodule Bonfire.Epics.Epic do
  defstruct prev: [],    # list of steps we've already run.
            next: [],    # the remaining steps, may be modified during run.
            errors: [],  # any errors accrued along the way.
            assigns: %{} # any information accrued along the way

  alias Bonfire.Epics.{Act, Epic, Error}
  import Bonfire.Common.Extend
  require Where
  use Arrows
  require Act

  @type t :: %Epic{
          prev: [Act.t],
          next: [Act.t],
          errors: [any],
          assigns: %{optional(atom) => any}
        }

  @doc """
  Loads an epic from the app's config
  """
  def from_config!(module, name) when is_atom(module) and is_atom(name) do
    case Application.get_application(module) do
      nil ->
        raise RuntimeError, message: "Could not find an application for module #{module}"

      app ->
        Application.get_env(app, module, [])
        |> Keyword.fetch!(:epics)
        |> Keyword.fetch!(name)
        |> from_spec!()
    end
  end

  @doc """
  Loads an epic from a specification of steps
  """
  def from_spec!(acts) when is_list(acts) do
    for act <- acts do
      case act do
        module when is_atom(module) -> Act.new(module)
        {module, options} when is_atom(module) and is_list(options) -> Act.new(module, options)
        other -> raise RuntimeError, message: "Bad act specification: #{inspect(other)}"
      end
    end
    |> Epic.new()
  end

  def assign(%Epic{}=self, name) when is_atom(name), do: self.assigns[name]
  def assign(%Epic{}=self, name, value) when is_atom(name),
    do: %{self | assigns: Map.put(self.assigns, name, value)}

  def update(%Epic{}=self, name, default, fun),
    do: assign(self, name, fun.(Map.get(self.assigns, name, default)))

  def new(next \\ [])
  def new(next) when is_list(next), do: %Epic{next: next}

  def prepend(%Epic{}=self, acts) when is_list(acts), do: %{ self | next: acts ++ self.next }
  def prepend(%Epic{}=self, act), do: %{ self | next: [act | self.next] }

  def append(%Epic{}=self, acts) when is_list(acts), do: %{ self | next: self.next ++ acts }
  def append(%Epic{}=self, act), do: %{ self | next: self.next ++ [act] }

  def add_error(epic, %Error{}=error), do: %{epic | errors: [error | epic.errors]}
  def add_error(epic, act, error, source \\ nil, stacktrace \\ nil),
    do: add_error(epic, %Error{error: error, act: act, epic: epic, source: source, stacktrace: stacktrace})

  defmacro maybe_debug(epic, thing, label \\ "") do
    quote do
      require Where
      if unquote(epic).assigns.options[:debug], do: Where.warn(unquote(thing), unquote(label))
    end
  end

  def run(%Epic{}=self) do
    case self.next do
      [%Act{}=act | acts] -> run_act(act, acts, self)
      [] -> self
    end
  end

  defp run_act(act, rest, epic) do
    crash? = epic.assigns[:options][:crash]
    epic = %{ epic | next: rest }

    cond do
      not Code.ensure_loaded?(act.module) ->
        Where.warn(act.module, "Skipping act, module not found")
        run(epic)

      not module_enabled?(act.module) ->
        maybe_debug(epic, act.module, "Skipping act, module disabled")
        run(epic)

      not function_exported?(act.module, :run, 2) ->
        raise RuntimeError,
          message: "Could not run act (module callback not found), act #{inspect(act, pretty: true, printable_limit: :infinity)}"

      crash? ->
        run_act(epic, act)

      true ->
        try do
          run_act(epic, act)
        rescue
          error ->
            # IO.puts(Exception.format_banner(:error, error, __STACKTRACE__))
            run(add_error(epic, act, error, :error, __STACKTRACE__))
        catch
          :exit, error ->
            exit(error)
            # run(add_error(epic, act, error, :exit, __STACKTRACE__))
          error ->
            # IO.puts(Exception.format_banner(:throw, error, __STACKTRACE__))
            run(add_error(epic, act, error, :throw, __STACKTRACE__))
        end
    end
  end

  defp run_act(epic, act) do
    maybe_debug(epic, act.module, "Running act")

    case apply(act.module, :run, [epic, act]) do
      %Epic{}=epic -> run(%{ epic | prev: [act | epic.prev]})
      %Act{}=act -> run(%{ epic | prev: [act | epic.prev]})
      %Error{}=error -> run(add_error(epic, error))
      {:ok, %Epic{}=epic} -> run(%{ epic | prev: [act | epic.prev]})
      {:ok, %Epic{}=epic, %Act{}=act} -> run(%{ epic | prev: [act | epic.prev]})
      {:error, %Error{}=error} -> run(add_error(epic, error))
      {:error, other} -> run(add_error(epic, act, other, :return))
      other ->
        raise RuntimeError, message: """
        Invalid act return: #{inspect(other)}

        Act: #{inspect(act)}
        """
    end
  end

  defmacro debug(epic, thing, label \\ "") do
    quote do
      require Where
      Where.debug?(unquote(thing), unquote(label), unquote(epic).assigns.options)
    end
  end
end
lib/epic.ex
0.612078
0.48054
epic.ex
starcoder
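A hedged sketch of driving the runner above directly; `MyApp.Acts.Log` and `MyApp.Acts.Publish` are hypothetical act modules implementing `run/2`:

```elixir
alias Bonfire.Epics.Epic

epic =
  Epic.from_spec!([
    MyApp.Acts.Log,
    {MyApp.Acts.Publish, retries: 3}
  ])
  # the :options assign is read by maybe_debug/3 and the crash handling in run_act/3
  |> Epic.assign(:options, debug: true)
  |> Epic.run()

case epic.errors do
  [] -> :ok
  errors -> {:error, errors}
end
```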
defmodule Instream.Encoder.Line do @moduledoc """ Encoder for the InfluxDB line protocol. """ alias Instream.Decoder.RFC3339 @type point :: %{ required(:fields) => map, required(:measurement) => binary, optional(:tags) => map, optional(:timestamp) => non_neg_integer | binary | nil } | %{ __struct__: module, fields: map, tags: map, timestamp: non_neg_integer | binary | nil } @doc """ Creates protocol contents for a list of data points. """ @spec encode([point()]) :: binary def encode(points), do: encode(points, []) defp encode([point | points], lines) do line = encode_point(point) encode(points, ["\n", line | lines]) end defp encode([], ["\n" | lines]) do lines |> Enum.reverse() |> IO.iodata_to_binary() end defp encode([], []), do: "" defp append_fields(line, %{fields: fields}) do content = fields |> Enum.reduce([], fn {_, nil}, acc -> acc {field, value}, acc -> [[encode_property(field), "=", encode_value(value)], "," | acc] end) |> Enum.reverse() case content do [] -> line ["," | encoded_fields] -> [line, " " | encoded_fields] end end defp append_tags(line, %{tags: tags}) do content = tags |> Enum.reduce([], fn {_, nil}, acc -> acc {tag, value}, acc -> [[encode_property(tag), "=", encode_property(value)], "," | acc] end) |> Enum.reverse() case content do [] -> line encoded_tags -> [line | encoded_tags] end end defp append_tags(line, _), do: line defp append_timestamp(line, %{timestamp: nil}), do: line defp append_timestamp(line, %{timestamp: ts}) when is_integer(ts), do: [line, " ", Integer.to_string(ts)] defp append_timestamp(line, %{timestamp: ts}) when is_binary(ts), do: [line, " ", ts |> RFC3339.to_nanosecond() |> Integer.to_string()] defp append_timestamp(line, _), do: line defp encode_point(%{__struct__: series, fields: fields, tags: tags, timestamp: timestamp}) do encode_point(%{ measurement: series.__meta__(:measurement), fields: Map.from_struct(fields), tags: Map.from_struct(tags), timestamp: timestamp }) end defp encode_point(%{measurement: measurement} = point) do [encode_property(measurement)] |> append_tags(point) |> append_fields(point) |> append_timestamp(point) end defp encode_property(s) when is_binary(s) do s |> :binary.replace(",", "\\,", [:global]) |> :binary.replace(" ", "\\ ", [:global]) |> :binary.replace("=", "\\=", [:global]) end defp encode_property(s), do: Kernel.to_string(s) defp encode_value(i) when is_integer(i), do: [Integer.to_string(i), "i"] defp encode_value(s) when is_binary(s), do: ["\"", :binary.replace(s, "\"", "\\\"", [:global]), "\""] defp encode_value(true), do: "true" defp encode_value(false), do: "false" defp encode_value(other), do: inspect(other) end
lib/instream/encoder/line.ex
0.848235
0.430866
line.ex
starcoder
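For contrast with the earlier revision above, this version of the encoder takes maps for fields and tags and also supports timestamps, either as integer nanoseconds or as an RFC3339 string (values invented):

```elixir
alias Instream.Encoder.Line

Line.encode([
  %{
    measurement: "cpu",
    tags: %{host: "a"},
    fields: %{value: 0.64},
    timestamp: 1_546_300_800_000_000_000
  }
])
# => "cpu,host=a value=0.64 1546300800000000000"
```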
defmodule Benchee.Formatter do
  @moduledoc """
  Defines a behaviour for formatters in Benchee, and also defines functions to
  handle invoking that behaviour.

  When implementing a Benchee formatter, please adopt this behaviour, as it
  helps with uniformity and also allows at least the `format/2` function of
  formatters to be run in parallel.

  The module itself then has functions to deal with formatters defined in this
  way, allowing for parallel output through `output/1` or output of just a
  single formatter through `output/3`.
  """

  alias Benchee.{Suite, Utility.Parallel}
  alias Benchee.Utility.DeepConvert

  @type options :: any

  @doc """
  Takes the suite and returns whatever representation the formatter wants to use
  to output that information. It is important that this function is **pure**
  (aka has no side effects), as Benchee will run the `format/2` functions of
  multiple formatters in parallel. The result will then be passed to `write/2`.
  """
  @callback format(Suite.t(), options) :: any

  @doc """
  Takes the return value of `format/2` and then performs some I/O for the user
  to actually see the formatted data (UI, File IO, HTTP, ...)
  """
  @callback write(any, options) :: :ok | {:error, String.t()}

  @typep module_configuration :: module | {module, options}

  @doc """
  Format and output all configured formatters and formatting functions.

  Expects a suite that has already been run through all previous functions, so
  it has the aggregated statistics etc. that the formatters rely on.

  Works by invoking the `format/2` and `write/2` functions defined by each
  formatter module. The `format/2` functions are actually called in parallel
  (as they should be pure) - due to potential interference the `write/2`
  functions are called serially.

  Also handles plain functions, which will then be called with the suite.

  You can't rely on the formatters being called in a predetermined order. Right
  now, those that are parallelizable (Benchee formatter modules) are called
  first, then the plain functions.
  """
  @spec output(Suite.t()) :: Suite.t()
  def output(suite = %{configuration: %{formatters: formatters}}) do
    {parallelizable, serial} =
      formatters
      |> Enum.map(&normalize_module_configuration/1)
      |> Enum.split_with(&is_formatter_module?/1)

    # why do we ignore this suite? It shouldn't be changed anyway.
    # We assign it because dialyzer would complain otherwise :D
    _suite = parallel_output(suite, parallelizable)

    Enum.each(serial, fn function -> function.(suite) end)

    suite
  end

  @default_opts %{}

  defp normalize_module_configuration(module_configuration)
  defp normalize_module_configuration({module, opts}), do: {module, DeepConvert.to_map(opts)}

  defp normalize_module_configuration(formatter) when is_atom(formatter) do
    {formatter, @default_opts}
  end

  defp normalize_module_configuration(formatter), do: formatter

  defp is_formatter_module?({formatter, _options}) when is_atom(formatter) do
    module_attributes = formatter.module_info(:attributes)

    module_attributes
    |> Keyword.get(:behaviour, [])
    |> Enum.member?(Benchee.Formatter)
  end

  defp is_formatter_module?(_), do: false

  @doc """
  Output a suite with a given formatter and options.

  Replacement for the old `MyFormatter.output/1` - calls `format/2` and `write/2`
  one after another to create the output defined by the given formatter module.
  For the given options, please refer to the documentation of the formatters you
  use.
""" @spec output(Suite.t(), module, options) :: Suite.t() def output(suite, formatter, options \\ %{}) do :ok = suite |> formatter.format(options) |> formatter.write(options) suite end # Invokes `format/2` and `write/2` as defined by the `Benchee.Formatter` # behaviour. The output for all formatters is generated in parallel, and then # the results of that formatting are written in sequence. @spec parallel_output(Suite.t(), [module_configuration]) :: Suite.t() defp parallel_output(suite, module_configurations) do module_configurations |> Parallel.map(fn {module, options} -> {module, options, module.format(suite, options)} end) |> Enum.each(fn {module, options, output} -> module.write(output, options) end) suite end end
lib/benchee/formatter.ex
0.84966
0.721768
formatter.ex
starcoder
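A minimal formatter implementing the behaviour might look like the sketch below (module name and output are illustrative; it assumes the suite struct's `scenarios` field):

```elixir
defmodule MyApp.CountFormatter do
  @behaviour Benchee.Formatter

  # Pure: turn the suite into a string; may run in parallel with other formatters.
  @impl true
  def format(suite, _opts), do: "benchmarked #{length(suite.scenarios)} scenario(s)"

  # I/O happens here, serially.
  @impl true
  def write(output, _opts) do
    IO.puts(output)
    :ok
  end
end
```

It would then be listed in the run configuration, e.g. `Benchee.run(%{...}, formatters: [MyApp.CountFormatter])`.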
defmodule Ockam.Session.Spawner do
  @moduledoc """
  Simple worker spawner which does not track spawned workers

  Options:

  `worker_mod` - worker module to spawn, required
  `worker_options` - additional options for the spawned worker, defaults to []
  `message_parser` - function parsing the init message into a Keyword list,
  defaults to `&default_message_parser/1`

  Upon receiving a message, a `worker_mod` worker will be started with options
  from `worker_options` merged with the result of `message_parser`

  Example:
  ```
  ## Given a spawner
  {:ok, spawner} = Ockam.Session.Spawner.create(worker_mod: MyWorker, worker_options: [key: "val"])

  ## Sending init message
  Ockam.Router.route(%{onward_route: [spawner], return_route: ["me"], payload: "HI!"})

  ## Is equivalent to calling:
  MyWorker.create(key: "val", init_message: %{onward_route: [spawner], return_route: ["me"], payload: "HI!"})

  ## If spawner has a custom message parser:
  {:ok, spawner} = Ockam.Session.Spawner.create(worker_mod: MyWorker, message_parser: fn(msg) -> [pl: Ockam.Message.payload(msg)] end)

  ## Sending init message
  Ockam.Router.route(%{onward_route: [spawner], return_route: ["me"], payload: "HI!"})

  ## Is equivalent to calling:
  MyWorker.create(pl: "HI!")
  ```
  """
  use Ockam.Worker

  require Logger

  @impl true
  def address_prefix(_options), do: "SP_"

  @impl true
  def setup(options, state) do
    worker_mod = Keyword.fetch!(options, :worker_mod)
    worker_options = Keyword.get(options, :worker_options, [])
    message_parser = Keyword.get(options, :message_parser, &default_message_parser/1)

    {:ok,
     Map.merge(state, %{
       worker_mod: worker_mod,
       worker_options: worker_options,
       message_parser: message_parser
     })}
  end

  @impl true
  def handle_message(message, state) do
    worker_mod = Map.fetch!(state, :worker_mod)
    worker_options = Map.fetch!(state, :worker_options)

    case maybe_parse_message(message, state) do
      {:ok, result} ->
        ## NOTE: credo has false-positive here without additional variable
        worker_options = Keyword.merge(worker_options, result)
        Logger.info("Worker options: #{inspect(worker_options)}")
        worker_mod.create(worker_options)

      {:error, err} ->
        Logger.error("Invalid init message: #{inspect(message)}, reason: #{inspect(err)}")
    end

    {:ok, state}
  end

  def maybe_parse_message(message, state) do
    message_parser = Map.get(state, :message_parser)
    message_parser.(message)
  end

  def default_message_parser(message) do
    {:ok, [init_message: message]}
  end
end
implementations/elixir/ockam/ockam/lib/ockam/session/spawner.ex
0.881997
0.726062
spawner.ex
starcoder
defmodule SGP40 do @moduledoc """ Use Sensirion SGP40 air quality sensor in Elixir """ use GenServer, restart: :transient require Logger @typedoc """ SGP40 GenServer start_link options * `:name` - A name for the `GenServer` * `:bus_name` - Which I2C bus to use (defaults to `"i2c-1"`) * `:bus_address` - The address of the SGP40 (defaults to `0x59`) * `:humidity_rh` - Relative humidity in percent for compensation * `:temperature_c` - Temperature in degree Celsius for compensation """ @type options() :: [ {:name, GenServer.name()} | {:bus_name, bus_name} | {:bus_address, bus_address} | {:humidity_rh, number} | {:temperature_c, number} ] @type bus_name :: binary @type bus_address :: 0..127 defmodule State do @moduledoc false defstruct [:humidity_rh, :last_measurement, :serial_id, :temperature_c, :transport] end @default_bus_name "i2c-1" @default_bus_address 0x59 @polling_interval_ms 1000 @default_humidity_rh 50 @default_temperature_c 25 @doc """ Start a new GenServer for interacting with the SGP40 sensor. Normally, you'll want to pass the `:bus_name` option to specify the I2C bus going to the SGP40. """ @spec start_link(options()) :: GenServer.on_start() def start_link(init_arg \\ []) do GenServer.start_link(__MODULE__, init_arg, name: init_arg[:name]) end @doc """ Measure the current air quality. """ @spec measure(GenServer.server()) :: {:ok, SGP40.Measurement.t()} | {:error, any} def measure(server) do GenServer.call(server, :measure) end @doc """ Update relative ambient humidity (RH %) and ambient temperature (degree C) for the humidity compensation. """ @spec update_rht(GenServer.server(), number, number) :: :ok def update_rht(server, humidity_rh, temperature_c) when is_number(humidity_rh) and is_number(temperature_c) do GenServer.cast(server, {:update_rht, humidity_rh, temperature_c}) end @impl GenServer def init(init_arg) do bus_name = init_arg[:bus_name] || @default_bus_name bus_address = init_arg[:bus_address] || @default_bus_address humidity_rh = init_arg[:humidity_rh] || @default_humidity_rh temperature_c = init_arg[:temperature_c] || @default_temperature_c Logger.info( "[SGP40] Starting on bus #{bus_name} at address #{inspect(bus_address, base: :hex)}" ) case transport_mod().open(bus_name: bus_name, bus_address: bus_address) do {:ok, transport} -> {:ok, serial_id} = SGP40.Comm.serial_id(transport) state = %State{ humidity_rh: humidity_rh, last_measurement: nil, serial_id: serial_id, temperature_c: temperature_c, transport: transport } {:ok, state, {:continue, :init_sensor}} _error -> {:stop, :device_not_found} end end @impl GenServer def handle_continue(:init_sensor, state) do Logger.info("[SGP40] Initializing sensor #{state.serial_id}") state = read_and_maybe_put_measurement(state) Process.send_after(self(), :schedule_measurement, @polling_interval_ms) {:noreply, state} end @impl GenServer def handle_info(:schedule_measurement, state) do state = read_and_maybe_put_measurement(state) Process.send_after(self(), :schedule_measurement, @polling_interval_ms) {:noreply, state} end defp read_and_maybe_put_measurement(state) do with {:ok, sraw} <- SGP40.Comm.measure_raw_with_rht( state.transport, state.humidity_rh, state.temperature_c ), {:ok, voc_index} <- SGP40.VocIndex.process(sraw) do timestamp_ms = System.monotonic_time(:millisecond) measurement = %SGP40.Measurement{timestamp_ms: timestamp_ms, voc_index: voc_index} %{state | last_measurement: measurement} else {:error, reason} -> Logger.error("[SGP40] Measurement failed: #{inspect(reason)}") state end end @impl GenServer def 
handle_call(:measure, _from, state) do {:reply, {:ok, state.last_measurement}, state} end @impl GenServer def handle_cast({:update_rht, humidity_rh, temperature_c}, state) do state = %{state | humidity_rh: humidity_rh, temperature_c: temperature_c} {:noreply, state} end defdelegate get_states, to: SGP40.VocIndex defdelegate set_states(args), to: SGP40.VocIndex defdelegate set_tuning_params(args), to: SGP40.VocIndex defp transport_mod() do Application.get_env(:sgp40, :transport_mod, SGP40.Transport.I2C) end end
lib/sgp40.ex
0.899476
0.438845
sgp40.ex
starcoder
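Typical use on a device with the sensor attached (this requires real hardware; the bus name below is an assumption):

```elixir
{:ok, sgp} = SGP40.start_link(bus_name: "i2c-1")

# Feed ambient conditions from another sensor to improve compensation.
:ok = SGP40.update_rht(sgp, 45.2, 22.8)

# May be {:ok, nil} until the first one-second poll has completed.
{:ok, measurement} = SGP40.measure(sgp)
```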
defmodule SpeechMarkdown do
  @moduledoc """
  Elixir implementation for the Speech Markdown format.

  https://www.speechmarkdown.org/

  Speech Markdown is a text format which is akin to regular Markdown, but with
  an alternative syntax and built for the purpose of generating
  platform-specific
  [SSML](https://en.wikipedia.org/wiki/Speech_Synthesis_Markup_Language)
  markup.

  The Speech Markdown transpiler converts the given Speech Markdown text to the
  Speech Synthesis Markup Language (SSML) format. The results are returned as
  an SSML string.

  Currently, SMD → SSML support is available in a general variant, and specific
  SSML variants for the Amazon Alexa and Google Assistant platforms.
  """

  alias SpeechMarkdown.{Grammar, ParseError, Sectionizer, Transpiler, Validator}

  @type options() :: [option()]
  @type option() ::
          {:xml_declaration, boolean()}
          | {:variant, :general | :google | :alexa}
          | {:validate, :strict | :loose}

  @doc """
  Convert the given Speech Markdown into SSML.

  Options:

  - `xml_declaration` - boolean to indicate whether we need the XML declaration
    in the output, default `false`
  - `variant` - Which SSML variant to choose from. Either `:general`, `:alexa`
    or `:google`; defaults to `:general`.
  - `validate` - `:strict` (default) or `:loose`; when `:strict`, return an
    error when encountering invalid syntax, unknown attributes or unknown
    attribute values; when `:loose`, such errors are ignored.
  """
  @spec to_ssml(input :: String.t(), options()) :: {:ok, String.t()} | {:error, term()}
  def to_ssml(input, options \\ []) when is_binary(input) do
    validate = Keyword.get(options, :validate, :strict)

    with {:ok, parsed} <- Grammar.parse(input),
         :ok <- Validator.validate(parsed) |> validate_result(validate) do
      parsed
      |> Sectionizer.sectionize()
      |> Transpiler.transpile(options)
    end
  end

  @spec to_ssml!(input :: String.t(), options()) :: String.t()
  def to_ssml!(input, options \\ []) when is_binary(input) do
    case to_ssml(input, options) do
      {:ok, output} -> output
      {:error, reason} -> raise ParseError.new(reason)
    end
  end

  @doc """
  Convert the given Speech Markdown into plain text.
  """
  @spec to_plaintext(input :: String.t()) :: {:ok, String.t()} | {:error, term()}
  def to_plaintext(input) when is_binary(input) do
    with {:ok, parsed} <- Grammar.parse(input) do
      Transpiler.plaintext(parsed)
    end
  end

  @spec to_plaintext!(input :: String.t()) :: String.t()
  def to_plaintext!(input) when is_binary(input) do
    case to_plaintext(input) do
      {:ok, output} -> output
      {:error, reason} -> raise ParseError.new(reason)
    end
  end

  defp validate_result(_, :loose), do: :ok
  defp validate_result(r, :strict), do: r
end
lib/speech_markdown.ex
0.880309
0.670635
speech_markdown.ex
starcoder
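Example calls into the public API above; `[500ms]` is the standard Speech Markdown break shorthand, though the exact SSML emitted depends on the selected variant:

```elixir
{:ok, ssml} = SpeechMarkdown.to_ssml("Hello [500ms] world", variant: :alexa)

{:ok, text} = SpeechMarkdown.to_plaintext("Hello [500ms] world")
```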
defmodule Ace.Governor do
  @moduledoc """
  A governor maintains servers ready to handle clients.

  A governor process starts with a reference to a supervisor that can start
  servers. It starts a server and waits until that server has accepted a
  connection; once it has, the governor starts a new server.
  """

  use GenServer

  @enforce_keys [:server_supervisor, :listen_socket, :server, :monitor]
  defstruct @enforce_keys

  def start_link(server_supervisor, listen_socket) when is_pid(server_supervisor) do
    initial_state = %__MODULE__{
      server_supervisor: server_supervisor,
      listen_socket: listen_socket,
      server: nil,
      monitor: nil
    }

    GenServer.start_link(__MODULE__, initial_state)
  end

  def child_spec() do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, []},
      type: :worker,
      restart: :transient,
      shutdown: 500
    }
  end

  @impl GenServer
  def init(initial_state) do
    new_state = start_server(initial_state)
    {:ok, new_state}
  end

  @impl GenServer
  # DEBT: should match on the response, i.e. `{:ok, server}` or `{:error, reason}`
  def handle_info({monitor, _response}, state = %{monitor: monitor, server: server}) do
    true = Process.unlink(server)
    true = Process.demonitor(monitor)
    new_state = start_server(%{state | monitor: nil, server: nil})
    {:noreply, new_state}
  end

  def handle_info({:DOWN, monitor, :process, _server, :normal}, state = %{monitor: monitor}) do
    # Server process has terminated so existing references are irrelevant
    new_state = start_server(%{state | monitor: nil, server: nil})
    {:noreply, new_state}
  end

  # Messages from a previously monitored process can arrive when the connection
  # responds quickly and exits normally.
  def handle_info({:DOWN, _, :process, _, :normal}, state) do
    {:noreply, state}
  end

  # function head ensures that only one server is being monitored at a time
  defp start_server(state = %{server: nil, monitor: nil}) do
    # Starting a server process must always succeed; before accepting a
    # connection it has no external influences.
    {:ok, server} =
      DynamicSupervisor.start_child(state.server_supervisor, Ace.HTTP.Server.child_spec())

    # A freshly started server is always alive, therefore linking should always succeed.
    true = Process.link(server)

    # Creates a unique reference that we can also use to correlate the call to
    # accept on a given socket.
    monitor = Process.monitor(server)

    # Simulate a `GenServer.call` but without blocking on a receive loop.
    send(server, {:"$gen_call", {self(), monitor}, {:accept, state.listen_socket}})

    %{state | monitor: monitor, server: server}
  end
end
lib/ace/governor.ex
0.68637
0.401306
governor.ex
starcoder
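A rough sketch of starting a governor; how the `DynamicSupervisor` and the listen socket are obtained is specific to Ace's HTTP service setup, so both are assumptions here:

```elixir
# server_supervisor: a DynamicSupervisor able to start Ace.HTTP.Server workers
# listen_socket: the socket the service is listening on
{:ok, _governor} = Ace.Governor.start_link(server_supervisor, listen_socket)

# The governor now keeps exactly one linked, monitored server accepting on the
# socket, replacing it as soon as a connection is taken.
```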
defmodule MvOpentelemetry.SpanTracer do
  @moduledoc """
  Reusable behaviour for listening to events emitted with the
  `:telemetry.span/3` convention and converting them into OpenTelemetry traces.

  You can define custom tracer modules and then register them at the start of
  your application. You are required to implement at least the
  `c:handle_event/4` callback, which has exactly the same type signature as
  BEAM telemetry handlers.

  ## Example

  ```
  defmodule MyApp.SpanTracer do
    use MvOpentelemetry.SpanTracer,
      events: [[:my_event, :start], [:my_event, :stop], [:my_event, :exception]]

    def handle_event([:my_event, :start], _measurements, meta, opts) do
      event_name = opts[:prefix]
      attributes = ["my_event.name": meta.name]

      OpentelemetryTelemetry.start_telemetry_span(opts[:name], event_name, meta, %{})
      |> Span.set_attributes(attributes)

      :ok
    end

    def handle_event([:my_event, :stop], _measurements, meta, opts) do
      _ctx = OpentelemetryTelemetry.set_current_telemetry_span(opts[:name], meta)
      OpentelemetryTelemetry.end_telemetry_span(opts[:name], meta)
      :ok
    end

    def handle_event([:my_event, :exception], _measurements, meta, opts) do
      ctx = OpentelemetryTelemetry.set_current_telemetry_span(opts[:name], meta)
      attributes = [reason: meta.reason, error: true, stacktrace: meta.stacktrace, kind: meta.kind]
      OpenTelemetry.Span.set_attributes(ctx, attributes)
      OpentelemetryTelemetry.end_telemetry_span(opts[:name], meta)
      :ok
    end
  end

  defmodule MyApp do
    def start(_type, _args) do
      MyApp.SpanTracer.register_tracer()
    end
  end
  ```

  In the example above, `c:register_tracer/1` is generated automatically, and
  when called it will register the telemetry handler under
  `{MyApp.SpanTracer, MyApp.SpanTracer}`.

  The following options will be merged with options provided to
  `c:register_tracer/1` and forwarded to the `c:handle_event/4` callback as
  config (4th argument).

  ```
  [prefix: MyApp.SpanTracer, name: MyApp.SpanTracer]
  ```

  If you want to, you can also completely override the `c:register_tracer/1`
  callback.

  ## Required parameters

  * `:events` - a list of telemetry events you want the span to attach to.

  ## Optional Parameters

  * `:name` - atom to register the handler with. Defaults to the module name,
    but can be changed if needed.
  * `:prefix` - atom or string that can be used to generate the span name.
    Defaults to the current module name.
  All optional parameters can also be provided at the `c:register_tracer/1`
  call site:

  ```
  defmodule MyApp do
    def start(_type, _args) do
      MyApp.SpanTracer.register_tracer(name: :test_span, prefix: "other_tracer")
    end
  end
  ```
  """

  @callback handle_event(
              event :: [atom()],
              measurements :: map(),
              meta :: map(),
              opts :: Access.t()
            ) :: :ok

  @callback register_tracer(opts :: Access.t()) :: :ok | {:error, :already_exists}

  defmacro __using__(opts) do
    events = Access.fetch!(opts, :events)
    name = Access.get(opts, :name, __CALLER__.module)
    prefix = Access.get(opts, :prefix, name)
    default_attributes = Access.get(opts, :default_attributes, [])

    quote location: :keep do
      @behaviour MvOpentelemetry.SpanTracer

      require OpenTelemetry.Tracer
      require OpenTelemetry.Span
      alias OpenTelemetry.Span

      @spec register_tracer(Access.t()) :: :ok | {:error, :already_exists}
      def register_tracer(opts \\ []) do
        prefix = Access.get(opts, :prefix, unquote(prefix))
        name = Access.get(opts, :name, unquote(name))
        default_attributes = Access.get(opts, :default_attributes, unquote(default_attributes))
        tracer_id = :mv_opentelemetry

        opts_with_defaults =
          merge_defaults(opts,
            prefix: prefix,
            name: name,
            tracer_id: tracer_id,
            default_attributes: default_attributes
          )

        :telemetry.attach_many(
          {name, __MODULE__},
          unquote(events),
          &__MODULE__.handle_event/4,
          opts_with_defaults
        )
      end

      defp __opts__ do
        unquote(opts)
      end

      defp merge_defaults(opts, defaults) do
        opts
        |> merge_default(:name, defaults[:name])
        |> merge_default(:prefix, defaults[:prefix])
        |> merge_default(:tracer_id, defaults[:tracer_id])
        |> merge_default(:default_attributes, defaults[:default_attributes])
      end

      def merge_default(opts, key, new_value) do
        {_, new_container} =
          Access.get_and_update(opts, key, fn
            nil -> {nil, new_value}
            some -> {some, some}
          end)

        new_container
      end

      defoverridable register_tracer: 1
    end
  end
end
lib/mv_opentelemetry/span_tracer.ex
0.917048
0.76999
span_tracer.ex
starcoder
defmodule Collidex.Detection.MixedShapes do @moduledoc """ Handles detection of collisions between disparate shapes. (i.e. Rects and Circles, Rects and Polygons, Polygons and Circles) """ alias Collidex.Geometry.Rect alias Collidex.Geometry.Circle alias Collidex.Geometry.Polygon alias Graphmath.Vec2 alias Collidex.Detection.Polygons alias Collidex.Utils @doc """ Check for collisions between any two shapes not of the same type. Return value is truthy if the shapes overlap on the plane. `method` defaults to :accurate and is ignored if any of the shapes are circles. See Collidex.Detection.Polygons for an explanation of `method`. ## Examples ``` iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Rect.make(-1.0, -1.0, 1.0, 1.0), ...> Collidex.Geometry.Polygon.make([{0.9,0}, {2,1}, {2,-1}]) ...> ) { :collision, "todo_provide_vector"} iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Rect.make(-1.0, -1.0, 1.0, 1.0), ...> Collidex.Geometry.Polygon.make([{1.1,0}, {2,1}, {2,-1}]) ...> ) false iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Circle.make(0,0,1.0), ...> Collidex.Geometry.Rect.make(1,-1,2,1) ...> ) { :collision, "todo_provide_vector" } iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Circle.make(0,0,1.0), ...> Collidex.Geometry.Rect.make(1.1,-1,2,1) ...> ) false iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Circle.make(0,0,1.0), ...> Collidex.Geometry.Polygon.make([{0.9,0}, {2,1}, {2,-1}]) ...> ) { :collision, "todo_provide_vector"} iex> Collidex.Detection.MixedShapes.collision?( ...> Collidex.Geometry.Circle.make(0,0,1.0), ...> Collidex.Geometry.Polygon.make([{1.1,0}, {2,1}, {2,-1}]) ...> ) false ``` """ def collision?(shape1, shape2, method \\ :accurate) def collision?(rect = %Rect{}, poly = %Polygon{}, method ) do # TODO: Test if a special case treatment of rects-to-polygons # without promoting the rects has sufficiently better performance # to justify it. Polygons.collision?(Polygon.make(rect), poly, method) end def collision?(poly = %Polygon{}, rect = %Rect{}, method ) do Polygons.collision?(poly, Polygon.make(rect), method) end def collision?(rect = %Rect{}, circle = %Circle{}, method ) do collision?(Polygon.make(rect), circle, method) end def collision?(circle = %Circle{}, rect = %Rect{}, method ) do collision?(circle, Polygon.make(rect), method) end def collision?(poly = %Polygon{}, circle = %Circle{}, _method) do collision?(circle, poly) end def collision?(circle = %Circle{}, poly = %Polygon{}, _method) do center = circle.center closest_vertex = poly.vertices |> Enum.sort_by(&(Vec2.length(Vec2.subtract(&1, center)))) |> List.first close_vertex_axis = Vec2.subtract(center, closest_vertex) all_axes = [close_vertex_axis | Utils.normals_of_edges(poly)] if all_axes |> Enum.find(false, fn({axis_x, axis_y}) -> # Make unit-length projection axes so the circle radius is meaningful unit_axis = Utils.unit_vector({axis_x, axis_y}) polygon_projection = Utils.extent_on_axis(poly, unit_axis) circle_projection = Utils.extent_on_axis(circle, unit_axis) !Utils.overlap?(circle_projection, polygon_projection) end) do false else { :collision, "todo_provide_vector" } end end end
lib/collidex/detection/mixed_shapes.ex
0.835802
0.89115
mixed_shapes.ex
starcoder
defmodule Sandbox do
  @moduledoc """
  #### --- Warning --- ####
  This library is under heavy development and will have breaking changes until
  the 1.0.0 release.

  Sandbox provides restricted, isolated scripting environments for Elixir
  through the use of embedded Lua.

  This project is powered by <NAME>'s amazing
  [Luerl](https://github.com/rvirding/luerl), an Erlang library that lets one
  execute Lua scripts on the BEAM. Luerl executes Lua code _without_ running a
  Lua VM as a separate application! Rather, the state of the VM is used as a
  data structure that can be externally manipulated and processed.

  The `:luerl_sandbox` module is utilized wherever possible. This limits access
  to dangerous core libraries. It also permits Lua scripts to be run with
  enforced CPU reduction limits. To work with Lua's full library, use
  `Sandbox.unsafe_init/0` as opposed to `Sandbox.init/0`.

  Conventions followed in this library:

  - Functions beginning with `eval` return a result from Lua.
  - Functions starting with `play` return a new Lua state.
  - Functions preceded by `run` return a tuple of `{result, new_state}`
  - All functions return ok-error tuples such as `{:ok, value}` or
    `{:error, reason}` unless followed by a bang.
  - Elixir functions exposed to Lua take two arguments: a Lua state and a list
    of Lua arguments. They should return a value corresponding to the `eval`,
    `play` or `run` responses.
  - The `max_reductions` argument defaults to `0`, corresponding to unlimited
    reductions.
  """

  @unlimited_reductions 0
  @timeout 100
  @sandbox_error "Lua Sandbox Error: "
  @reduction_error @sandbox_error <> "exceeded reduction limit!"

  @typedoc """
  Compiled Lua code that can be transferred between Lua states.
  """
  @type lua_chunk :: {:lua_func, any(), any(), any(), any(), any()}

  @typedoc """
  The representation of an entire Lua virtual machine and its current state.
  """
  @type lua_state ::
          {:luerl, any(), any(), any(), any(), any(), any(), any(), any(), any(), any(), any(),
           any(), any(), any()}

  @typedoc """
  Lua code as either a raw string or a compiled chunk.
  """
  @type lua_code :: lua_chunk() | String.t()

  @typedoc """
  Lua values represented as Elixir data structures.
  """
  @type lua_value :: number() | String.t() | [tuple()] | nil

  @typedoc """
  A dot-delimited name or list of names representing a table path in Lua such
  as `math.floor` or `["math", "floor"]`.
  """
  @type lua_path :: String.t() | [String.t()]

  @typedoc """
  An Elixir function that can be invoked through Lua. It takes a Lua state and
  a list of Lua arguments and returns a tuple containing a result and a new
  Lua state.
  """
  @type elixir_run_fun :: (lua_state(), [lua_value()] -> {any(), lua_state()})

  @typedoc """
  An Elixir function that can be invoked through Lua. It takes a Lua state and
  a list of Lua arguments and returns a result. The Lua state acts as a context
  but is not modified.
  """
  @type elixir_eval_fun :: (lua_state(), [lua_value()] -> any())

  @typedoc """
  An Elixir function that can be invoked through Lua. It takes a Lua state and
  a list of Lua arguments and returns a new Lua state. The result of this
  function is not exposed to Lua.
  """
  @type elixir_play_fun :: (lua_state(), [lua_value()] -> lua_state())

  @doc """
  Creates a Lua state with "dangerous" core library features such as file IO
  and networking removed.
  """
  def init() do
    :luerl_sandbox.init()
  end

  @doc """
  Creates a Lua state with access to Lua's standard library. The
  `max_reductions` feature of `Sandbox` is still available, but "dangerous"
  core library features such as file IO and networking are also exposed.
""" def unsafe_init() do :luerl.init() end @doc ~S""" Evaluates a Lua string or chunk against the given Lua state and returns the result in an ok-error tuple. The state itself is not modified. ## Examples iex> Sandbox.init() |> Sandbox.eval("return 3 + 4") {:ok, 7.0} iex> Sandbox.init() |> Sandbox.eval("return math.floor(9.9)") {:ok, 9.0} """ @spec eval(lua_state(), lua_code(), non_neg_integer(), non_neg_integer()) :: {:ok, lua_value()} | {:error, any()} def eval(state, code, max_reductions \\ @unlimited_reductions, timeout \\ @timeout) do case :luerl_sandbox.run(code, state, max_reductions, [], timeout) do {:error, e} -> {:error, e} {[{:tref, _} = table | _], new_state} -> {:ok, :luerl.decode(table, new_state)} {[result | _], _new_state} -> {:ok, result} {[], _} -> {:ok, nil} end end @doc ~S""" Same as `eval/3`, but will return the raw result or raise a `RuntimeError`. ## Examples iex> Sandbox.init() |> Sandbox.eval!("return 3 + 4") 7.0 iex> Sandbox.init() |> Sandbox.eval!("return math.floor(9.9)") 9.0 """ @spec eval!(lua_state(), lua_code(), non_neg_integer(), non_neg_integer()) :: lua_value() def eval!(state, code, max_reductions \\ @unlimited_reductions, timeout \\ @timeout) do case :luerl_sandbox.run(code, state, max_reductions, [], timeout) do {:error, {:reductions, _n}} -> raise(@reduction_error) {:error, reason} -> raise(@sandbox_error <> "#{inspect(reason)}") {[{:tref, _} = table | _], new_state} -> :luerl.decode(table, new_state) {[result | _], _new_state} -> result {[], _new_state} -> nil end end @doc """ Evaluates a Lua file against the given Lua state and returns the result in an ok-error tuple. The state itself is not modified. """ @spec eval_file(lua_state(), String.t(), non_neg_integer()) :: {:ok, lua_value()} | {:error, any()} def eval_file(state, file_path, max_reductions \\ @unlimited_reductions) do with {:ok, code} <- File.read(file_path), {:ok, result} <- eval(state, code, max_reductions) do {:ok, result} else {:error, reason} -> {:error, reason} end end @doc """ Same as `eval_file/3`, but will return the raw result or raise a `RuntimeError`. """ @spec eval_file!(lua_state(), String.t(), non_neg_integer()) :: lua_value() def eval_file!(state, file_path, max_reductions \\ @unlimited_reductions) do code = File.read!(file_path) eval!(state, code, max_reductions) end @doc """ Calls a function defined in the the Lua state and returns only the result. The state itself is not modified. Lua functions in the Lua state can be referenced by their `lua_path`, being a string or list such as `math.floor` or `["math", "floor"]`. """ @spec eval_function!(lua_state(), lua_path(), non_neg_integer()) :: lua_value() def eval_function!(state, path, args \\ [], max_reductions \\ @unlimited_reductions) def eval_function!(state, path, args, max_reductions) when is_list(path) do eval_function!(state, Enum.join(path, "."), args_to_list(args), max_reductions) end def eval_function!(state, path, args, max_reductions) when is_binary(path) do state |> set!("__sandbox_args__", args_to_list(args)) |> eval!("return " <> path <> "(unpack(__sandbox_args__))", max_reductions) end @doc """ Create a compiled chunk of Lua code that can be transferred between Lua states, returned in an ok-error tuple. 
""" @spec chunk(lua_state(), lua_code()) :: {:ok, lua_chunk()} | {:error, any()} def chunk(state, code) do case :luerl.load(code, state) do {:ok, result, _state} -> {:ok, result} {:error, e1, e2} -> {:error, {e1, e2}} # {:error, reason} -> {:error, reason} end end @doc """ Same as `chunk/2`, but will return the raw result or raise a `RuntimeError`. """ @spec chunk!(lua_state(), lua_code()) :: lua_chunk() def chunk!(state, code) do {:ok, result} = chunk(state, code) result end @doc """ Runs a Lua string or chunk against a Lua state and returns a new Lua state in an ok-error tuple. """ @spec play(lua_state(), lua_code(), non_neg_integer()) :: {:ok, lua_state()} | {:error, any()} def play(state, code, max_reductions \\ @unlimited_reductions) do case :luerl_sandbox.run(code, state, max_reductions) do {:error, e} -> {:error, e} {_result, new_state} -> {:ok, new_state} end end @doc """ Same as `play/3`, but will return the raw result or raise a `RuntimeError`. """ @spec play!(lua_state(), lua_code(), non_neg_integer()) :: lua_state() def play!(state, code, max_reductions \\ @unlimited_reductions) do case :luerl_sandbox.run(code, state, max_reductions) do {:error, {:reductions, _n}} -> raise(@reduction_error) {_result, new_state} -> new_state end end @doc """ Runs a Lua file in the context of a Lua state and returns a new Lua state. """ @spec play_file!(lua_state(), String.t(), non_neg_integer()) :: lua_state() def play_file!(state, file_path, max_reductions \\ @unlimited_reductions) when is_binary(file_path) and is_integer(max_reductions) do code = File.read!(file_path) play!(state, code, max_reductions) end @doc """ Runs a Lua function defined in the given Lua state and returns a new Lua state. """ @spec play_function!(lua_state(), lua_path(), non_neg_integer()) :: lua_state() def play_function!(state, path, args \\ [], max_reductions \\ @unlimited_reductions) def play_function!(state, path, args, max_reductions) when is_list(path) do play_function!(state, Enum.join(path, "."), args_to_list(args), max_reductions) end def play_function!(state, path, args, max_reductions) when is_binary(path) do state |> set!("__sandbox_args__", args_to_list(args)) |> play!("return " <> path <> "(unpack(__sandbox_args__))", max_reductions) end @doc """ Runs a Lua string or chunk against the given Lua state and returns the result and the new Lua state in an ok-error tuple. """ @spec run(lua_state(), lua_code(), non_neg_integer()) :: {:ok, lua_state() | {lua_value(), lua_state()}} | {:error, any()} def run(state, code, max_reductions \\ @unlimited_reductions) do case :luerl_sandbox.run(code, state, max_reductions) do {:error, e} -> {:error, e} {[], new_state} -> {:ok, {nil, new_state}} {[{:tref, _} = table | _], new_state} -> {:ok, {:luerl.decode(table, new_state), new_state}} {[result | _], new_state} -> {:ok, {result, new_state}} end end @doc """ Same as `run/3`, but will return the raw `{result, state}` or raise a `RuntimeError`. """ @spec run!(lua_state(), lua_code(), non_neg_integer()) :: {lua_value(), lua_state()} def run!(state, code, max_reductions \\ @unlimited_reductions) do case :luerl_sandbox.run(code, state, max_reductions) do {:error, {:reductions, _n}} -> raise(@reduction_error) {[{:tref, _} = table], new_state} -> {:luerl.decode(table, new_state), new_state} {[result], new_state} -> {result, new_state} {[], new_state} -> {nil, new_state} end end @doc """ Runs a function defined in the the Lua state and returns the result and the new Lua state as `{result, state}`. 
  Lua functions in the Lua state can be referenced by their `lua_path`, a
  string or list such as `math.floor` or `["math", "floor"]`.
  """
  @spec run_function!(lua_state(), lua_path(), any(), non_neg_integer()) ::
          {lua_value(), lua_state()}
  def run_function!(state, path, args \\ [], max_reductions \\ @unlimited_reductions)

  def run_function!(state, path, args, max_reductions) when is_list(path) do
    run_function!(state, Enum.join(path, "."), args_to_list(args), max_reductions)
  end

  def run_function!(state, path, args, max_reductions) when is_binary(path) do
    state
    |> set!("__sandbox_args__", args_to_list(args))
    |> run!("return " <> path <> "(unpack(__sandbox_args__))", max_reductions)
  end

  @doc """
  Sets a value in a Lua state and returns the modified state. If `force` is set
  to true, new tables will be created automatically if they are missing from
  the given `lua_path`.
  """
  @spec set!(lua_state(), lua_path(), any(), boolean()) :: lua_state()
  def set!(state, path, value, force \\ false)

  def set!(state, path, value, force) when is_binary(path) do
    set!(state, String.split(path, "."), value, force)
  end

  def set!(state, path, value, false) when is_list(path) do
    :luerl.set_table(path, value, state)
  end

  def set!(state, path, value, true) when is_list(path) do
    :luerl.set_table(path, value, build_missing_tables(state, path))
  end

  @doc """
  Gets a value from a Lua state.
  """
  @spec get!(lua_state(), lua_path()) :: lua_value()
  def get!(state, path) when is_list(path) do
    {result, _s} = :luerl.get_table(path, state)
    result
  end

  def get!(state, path) when is_binary(path) do
    get!(state, String.split(path, "."))
  end

  @doc """
  Returns a Lua state modified to include an Elixir function,
  `elixir_eval_fun()`, at the given `lua_path()`. The `elixir_eval_fun()` takes
  two arguments, a Lua state and a list of calling arguments from Lua. Its
  return value is passed along to Lua. It will not mutate the Lua state against
  which it executes.
  """
  @spec let_elixir_eval!(lua_state(), lua_path(), elixir_eval_fun()) :: lua_state()
  def let_elixir_eval!(state, name, fun) when is_function(fun) do
    value = lua_wrap_elixir_eval(fun)
    set!(state, name, value)
  end

  @doc """
  Returns a Lua state modified to include an Elixir function,
  `elixir_play_fun()`, at the given `lua_path()`. The `elixir_play_fun()` takes
  two arguments, a Lua state and a list of calling arguments from Lua. It
  should return a new Lua state.

  This can be used to let Lua scripts use something like controlled
  inheritance, dynamically adding external functionality and settings.
  """
  @spec let_elixir_play!(lua_state(), lua_path(), elixir_play_fun()) :: lua_state()
  def let_elixir_play!(state, path, fun) when is_function(fun) do
    value = lua_wrap_elixir_play(fun)
    set!(state, path, value)
  end

  @doc """
  Returns a Lua state modified to include an Elixir function,
  `elixir_run_fun()`, at the given `lua_path()`. The `elixir_run_fun()` takes
  two arguments, a Lua state and a list of calling arguments from Lua. It
  should return a tuple holding the result intended for the calling Lua
  function alongside a new Lua state.
""" @spec let_elixir_run!(lua_state(), lua_path(), elixir_run_fun()) :: lua_state() def let_elixir_run!(state, name, fun) when is_function(fun) do value = lua_wrap_elixir_run(fun) set!(state, name, value) end @doc false def reduction_error(), do: @reduction_error # --- private functions --- # lua state is unchanged, result returned defp lua_wrap_elixir_eval(fun) do fn args, state -> result = fun.(state, args) {[result], state} end end # lua result and state returned defp lua_wrap_elixir_run(fun) do fn args, state -> {result, new_state} = fun.(state, args) {[result], new_state} end end # lua state is changed defp lua_wrap_elixir_play(fun) do fn args, state -> new_state = fun.(state, args) {[], new_state} end end defp args_to_list(args) when is_list(args) do args end defp args_to_list(args) do [args] end defp build_missing_tables(state, path, path_string \\ nil) defp build_missing_tables(state, [], _path_string) do state end defp build_missing_tables(state, [name | path_remaining], path_string) do next_path_string = case path_string do nil -> name _ -> path_string <> "." <> name end next_state = case get!(state, next_path_string) do nil -> set!(state, next_path_string, []) _ -> state end build_missing_tables(next_state, path_remaining, next_path_string) end end
lib/sandbox.ex
0.847968
0.651715
sandbox.ex
starcoder
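Example of exposing an Elixir function to sandboxed Lua with `let_elixir_eval!/3` (note that Luerl hands numbers back as floats):

```elixir
state =
  Sandbox.init()
  |> Sandbox.let_elixir_eval!("double", fn _state, [x] -> x * 2 end)

Sandbox.eval!(state, "return double(21)")
# => 42.0
```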
defmodule Timex.Timezone do @moduledoc """ This module is used for looking up the timezone information for a given point in time, in the desired zone. Timezones are dependent not only on locale, but the date and time for which you are querying. For instance, the timezone offset from UTC for `Europe/Moscow` is different for March 3rd of 2015, than it was in 2013. These differences are important, and as such, all functions in this module are date/time sensitive, and where omitted, the current date/time are assumed. In addition to lookups, this module also does conversion of datetimes from one timezone period to another, and determining the difference between a date in one timezone period and the same date/time in another timezone period. """ alias Timex.AmbiguousDateTime alias Timex.TimezoneInfo alias Timex.PosixTimezone alias Timex.AmbiguousTimezoneInfo alias Timex.Timezone.Local, as: Local alias Timex.Parse.Timezones.Posix alias Timex.Types @doc """ Determines if a given zone name exists """ @spec exists?(String.t()) :: boolean def exists?(zone) when is_binary(zone) do if Tzdata.zone_exists?(zone) do true else case lookup_posix(zone) do %PosixTimezone{} -> true _ -> false end end end @doc """ Gets the local timezone configuration for the current date and time. """ @spec local() :: TimezoneInfo.t() | AmbiguousTimezoneInfo.t() | {:error, term} def local(), do: local(:calendar.universal_time()) @doc """ Gets the local timezone configuration for the provided date and time. The provided date and time can either be an Erlang datetime tuple, or a DateTime struct. """ @spec local(Types.valid_datetime()) :: TimezoneInfo.t() | AmbiguousTimezoneInfo.t() | {:error, term} def local(date) do secs = Timex.to_gregorian_seconds(date) case Local.lookup() do {:error, _} = err -> err tz -> resolve(tz, secs) end end @doc """ This function takes one of the varying timezone representations: - atoms - offset integers - shortcut names (i.e. :utc, :local, "Z", "A") and resolves the full name of the timezone if it's able. If a string is provided which isn't recognized, it is returned untouched, only when `get/2` is called will the timezone lookup fail. 
""" @spec name_of(Types.valid_timezone() | TimezoneInfo.t() | AmbiguousTimezoneInfo.t()) :: String.t() | {:error, :time_zone_not_found} | {:error, term} def name_of(%TimezoneInfo{:full_name => name}), do: name def name_of(:utc), do: "Etc/UTC" def name_of(:local), do: name_of(Local.lookup()) def name_of("Etc/UTC"), do: "Etc/UTC" def name_of("GMT"), do: "Etc/UTC" def name_of("UTC"), do: "Etc/UTC" def name_of("UT"), do: "Etc/UTC" def name_of("Z"), do: "Etc/UTC" def name_of("A"), do: "Etc/UTC+1" def name_of("M"), do: "Etc/UTC+12" def name_of("N"), do: "Etc/UTC-1" def name_of("Y"), do: "Etc/UTC-12" def name_of(0), do: "Etc/UTC" def name_of(offset) when is_integer(offset) do cond do offset <= -100 -> hh = div(offset * -1, 100) mm = rem(offset, 100) * -1 if mm == 0 do "Etc/UTC-#{hh}" else hh = String.pad_leading(to_string(hh), 2, "0") mm = String.pad_leading(to_string(mm), 2, "0") "Etc/UTC-#{hh}:#{mm}" end offset >= 100 -> hh = div(offset, 100) mm = rem(offset, 100) if mm == 0 do "Etc/UTC+#{hh}" else hh = String.pad_leading(to_string(hh), 2, "0") mm = String.pad_leading(to_string(mm), 2, "0") "Etc/UTC+#{hh}:#{mm}" end offset >= 0 and offset < 24 -> "Etc/UTC+#{offset}" offset <= 0 and offset > -24 -> "Etc/UTC-#{offset * -1}" :else -> {:error, :time_zone_not_found} end end def name_of(<<sign::utf8, ?0, ?0, ?:, ?0, ?0>>) when sign in [?+, ?-] do "Etc/UTC" end def name_of(<<sign::utf8, h::binary-size(2)-unit(8), ?:, ?0, ?0>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary>> end def name_of(<<sign::utf8, h::binary-size(1)-unit(8), ?:, ?0, ?0>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary>> end def name_of(<<sign::utf8, h::binary-size(2)-unit(8), ?:, m::binary-size(2)-unit(8)>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary>> end def name_of(<<sign::utf8, h::binary-size(1)-unit(8), ?:, m::binary-size(2)-unit(8)>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary>> end def name_of( <<sign::utf8, h::binary-size(2)-unit(8), ?:, m::binary-size(2)-unit(8), ?:, s::binary-size(2)-unit(8)>> ) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary, ?:, s::binary>> end def name_of( <<sign::utf8, h::binary-size(1)-unit(8), ?:, m::binary-size(2)-unit(8), ?:, s::binary-size(2)-unit(8)>> ) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary, ?:, s::binary>> end def name_of(<<sign::utf8, h::binary-size(2)-unit(8), m::binary-size(2)-unit(8)>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary>> end def name_of(<<sign::utf8, h::binary-size(1)-unit(8), m::binary-size(2)-unit(8)>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary, ?:, m::binary>> end def name_of(<<sign::utf8, h::binary-size(2)-unit(8)>>) when sign in [?+, ?-] do "Etc/UTC" <> <<sign::utf8, h::binary>> end def name_of(<<sign::utf8, h::utf8>>) when sign in [?+, ?-] and h >= ?0 and h <= ?9 do "Etc/UTC" <> <<sign::utf8, h::utf8>> end def name_of("Etc/UTC" <> offset), do: name_of(offset) def name_of("Etc/GMT" <> offset) do case name_of("GMT" <> offset) do {:error, _} = err -> err <<"Etc/UTC", ?+, rest::binary>> -> "Etc/GMT-" <> rest <<"Etc/UTC", ?-, rest::binary>> -> "Etc/GMT+" <> rest other -> other end end def name_of(<<"GMT", sign::utf8, hh::utf8>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::utf8>> def name_of(<<"GMT", sign::utf8, hh::binary-size(2)-unit(8)>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::binary>> def name_of(<<"GMT", sign::utf8, 
hh::binary-size(2)-unit(8), ?:, mm::binary-size(2)-unit(8)>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::binary, ?:, mm::binary>> def name_of(<<"GMT", sign::utf8, hh::binary-size(1)-unit(8), ?:, mm::binary-size(2)-unit(8)>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::binary, ?:, mm::binary>> def name_of(<<"GMT", sign::utf8, hh::binary-size(2)-unit(8), mm::binary-size(2)-unit(8)>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::binary, ?:, mm::binary>> def name_of(<<"GMT", sign::utf8, hh::binary-size(1)-unit(8), mm::binary-size(2)-unit(8)>>) when sign in [?+, ?-], do: "Etc/UTC" <> <<sign::utf8, hh::binary, ?:, mm::binary>> def name_of(""), do: {:error, :time_zone_not_found} def name_of(tz) when is_binary(tz) do if Tzdata.zone_exists?(tz) do tz else case lookup_posix(tz) do %PosixTimezone{name: name} -> name nil -> {:error, :time_zone_not_found} end end end def name_of(_tz), do: {:error, :time_zone_not_found} @doc """ Gets timezone info for a given zone name and date. The date provided can either be an Erlang datetime tuple, or a DateTime struct, and if one is not provided, then the current date and time is returned. """ @spec get(Types.valid_timezone()) :: TimezoneInfo.t() | AmbiguousTimezoneInfo.t() | {:error, term} @spec get(Types.valid_timezone(), Types.valid_datetime()) :: TimezoneInfo.t() | AmbiguousTimezoneInfo.t() | {:error, term} def get(tz, datetime \\ NaiveDateTime.utc_now()) def get(:utc, _datetime), do: %TimezoneInfo{} def get(:local, datetime), do: local(datetime) def get(tz, datetime) do utc_or_wall = if match?(%DateTime{}, datetime), do: :utc, else: :wall case name_of(tz) do {:error, _} = err -> err name -> get_info(name, datetime, utc_or_wall) end end @doc """ Same as `get/2`, but allows specifying whether to obtain the TimezoneInfo based on utc time or wall time manually (`:utc` or `:wall` respectively). 
""" def get(:utc, _, _), do: %TimezoneInfo{} def get(:local, datetime, _), do: local(datetime) def get(tz, datetime, utc_or_wall) do case name_of(tz) do {:error, _} = err -> err name -> get_info(name, datetime, utc_or_wall) end end defp get_info(timezone, datetime, utc_or_wall) defp get_info("Etc/UTC", _datetime, _utc_or_wall), do: %TimezoneInfo{} defp get_info("Etc/GMT", _datetime, _utc_or_wall), do: %TimezoneInfo{} defp get_info(<<"Etc/UTC", sign::utf8, offset::binary>>, datetime, utc_or_wall) when sign in [?+, ?-] do get_utc_info("Etc/UTC", <<sign::utf8, offset::binary>>, datetime, utc_or_wall) end defp get_info(<<"Etc/GMT", sign::utf8, offset::binary>>, datetime, utc_or_wall) when sign in [?+, ?-] do get_utc_info("Etc/GMT", <<sign::utf8, offset::binary>>, datetime, utc_or_wall) end defp get_info(<<"GMT", sign::utf8, offset::binary>>, _datetime, _utc_or_wall) when sign in [?+, ?-] do with offset_secs when is_integer(offset_secs) <- parse_offset(offset) do hours = div(offset_secs, 3600) minutes = div(rem(offset_secs, 3600), 60) if hours != 0 or minutes != 0 do hh = String.pad_leading(to_string(hours), 2, "0") mm = String.pad_leading(to_string(minutes), 2, "0") {suffix, abbr, offset_utc} = cond do sign == ?+ and minutes == 0 -> {"+#{hours}", "-#{hh}", offset_secs * -1} sign == ?+ -> {"+#{hours}:#{mm}", "-#{hh}:#{mm}", offset_secs * -1} sign == ?- and minutes == 0 -> {"-#{hours}", "+#{hh}", offset_secs} sign == ?- -> {"-#{hours}:#{mm}", "+#{hh}:#{mm}", offset_secs} end %TimezoneInfo{ full_name: <<"Etc/GMT", suffix::binary>>, abbreviation: abbr, offset_std: 0, offset_utc: offset_utc, from: :min, until: :max } else %TimezoneInfo{ full_name: "Etc/GMT", abbreviation: "GMT", offset_std: 0, offset_utc: 0, from: :min, until: :max } end end end defp get_info(name, datetime, utc_or_wall) when is_binary(name) do seconds_from_zeroyear = Timex.to_gregorian_seconds(datetime) if Tzdata.zone_exists?(name) do resolve(name, seconds_from_zeroyear, utc_or_wall) else with {:ok, %PosixTimezone{} = posixtz, _} <- Posix.parse(name) do PosixTimezone.to_timezone_info(posixtz, Timex.to_naive_datetime(datetime)) else {:error, _, _} -> {:error, :time_zone_not_found} end end end defp get_info(%TimezoneInfo{} = info, _datetime, _utc_or_wall), do: info defp get_utc_info(prefix, <<sign::utf8, offset::binary>>, _datetime, _utc_or_wall) when sign in [?+, ?-] do with offset_secs when is_integer(offset_secs) <- parse_offset(offset) do hours = div(offset_secs, 3600) minutes = div(rem(offset_secs, 3600), 60) if hours != 0 or minutes != 0 do hh = String.pad_leading(to_string(hours), 2, "0") mm = String.pad_leading(to_string(minutes), 2, "0") {suffix, abbr, offset_utc} = cond do sign == ?+ and minutes == 0 -> {"#{hours}", "+#{hh}", offset_secs} sign == ?+ -> {"#{hours}:#{mm}", "+#{hh}:#{mm}", offset_secs} sign == ?- and minutes == 0 -> {"#{hours}", "-#{hh}", offset_secs * -1} sign == ?- -> {"#{hours}:#{mm}", "-#{hh}:#{mm}", offset_secs * -1} end %TimezoneInfo{ full_name: <<prefix::binary, sign::utf8, suffix::binary>>, abbreviation: abbr, offset_std: 0, offset_utc: offset_utc, from: :min, until: :max } else %TimezoneInfo{} end end end defp parse_offset( <<h::binary-size(2)-unit(8), ?:, m::binary-size(2)-unit(8), ?:, s::binary-size(2)-unit(8)>> ) do with {hh, _} <- Integer.parse(h), {mm, _} <- Integer.parse(m), {ss, _} <- Integer.parse(s) do hh * 3600 + mm * 60 + ss else _ -> {:error, :invalid_offset} end end defp parse_offset( <<h::binary-size(1)-unit(8), ?:, m::binary-size(2)-unit(8), ?:, s::binary-size(2)-unit(8)>> ) do with 
{hh, _} <- Integer.parse(h), {mm, _} <- Integer.parse(m), {ss, _} <- Integer.parse(s) do hh * 3600 + mm * 60 + ss else _ -> {:error, :invalid_offset} end end defp parse_offset(<<h::binary-size(2)-unit(8), ?:, m::binary-size(2)-unit(8)>>) do with {hh, _} <- Integer.parse(h), {mm, _} <- Integer.parse(m) do hh * 3600 + mm * 60 else _ -> {:error, :invalid_offset} end end defp parse_offset(<<h::binary-size(1)-unit(8), ?:, m::binary-size(2)-unit(8)>>) do with {hh, _} <- Integer.parse(h), {mm, _} <- Integer.parse(m) do hh * 3600 + mm * 60 else _ -> {:error, :invalid_offset} end end defp parse_offset(<<h::binary-size(2)-unit(8)>>) do with {hh, _} <- Integer.parse(h) do hh * 3600 else _ -> {:error, :invalid_offset} end end defp parse_offset(<<h::utf8>> = input) when h >= ?0 and h <= ?9 do String.to_integer(input) * 3600 end def total_offset(%TimezoneInfo{offset_std: std, offset_utc: utc}) do utc + std end def total_offset(std_offset, utc_offset) when is_integer(std_offset) and is_integer(utc_offset) do utc_offset + std_offset end @doc """ Given a timezone name as a string, and a date/time in the form of the number of seconds since year zero, attempt to resolve a TimezoneInfo for that date/time. If the time is ambiguous, AmbiguousTimezoneInfo will be returned. If no result is found, an error will be returned. If an invalid zone name is provided, an error will be returned """ @spec resolve(String.t(), non_neg_integer, :utc | :wall) :: TimezoneInfo.t() | AmbiguousTimezoneInfo.t() | {:error, term} def resolve(tzname, datetime, utc_or_wall \\ :wall) def resolve(name, seconds_from_zeroyear, utc_or_wall) when is_binary(name) and is_integer(seconds_from_zeroyear) and utc_or_wall in [:utc, :wall] do case Tzdata.periods_for_time(name, seconds_from_zeroyear, utc_or_wall) do [] -> {:error, {:could_not_resolve_timezone, name, seconds_from_zeroyear, utc_or_wall}} # Resolved [period] -> tzdata_to_timezone(period, name) # This case happens when using wall clock time, we resolve it by using UTC clock time instead [before_period, after_period | _] -> case Tzdata.periods_for_time(name, seconds_from_zeroyear, :utc) do [] -> # We can't resolve it this way, I don't expect this to be possible, but we handle it before_tz = tzdata_to_timezone(before_period, name) after_tz = tzdata_to_timezone(after_period, name) AmbiguousTimezoneInfo.new(before_tz, after_tz) [period] -> tzdata_to_timezone(period, name) _ -> # Still ambiguous, use wall clock time for info passed back to caller before_tz = tzdata_to_timezone(before_period, name) after_tz = tzdata_to_timezone(after_period, name) AmbiguousTimezoneInfo.new(before_tz, after_tz) end end end @doc """ Convert a date to the given timezone (either TimezoneInfo or a timezone name) """ @spec convert(date :: DateTime.t() | NaiveDateTime.t(), tz :: AmbiguousTimezoneInfo.t()) :: AmbiguousDateTime.t() | {:error, term} @spec convert( date :: DateTime.t() | NaiveDateTime.t(), tz :: TimezoneInfo.t() | Types.valid_timezone() ) :: DateTime.t() | AmbiguousDateTime.t() | {:error, term} def convert(%DateTime{} = date, %AmbiguousTimezoneInfo{} = tz) do before_date = convert(date, tz.before) after_date = convert(date, tz.after) %AmbiguousDateTime{:before => before_date, :after => after_date} end def convert(%DateTime{time_zone: name} = date, %TimezoneInfo{full_name: name}) do # Do not convert date when already in destination time zone date end def convert(%DateTime{} = date, %TimezoneInfo{full_name: name}) do with {:ok, datetime} <- DateTime.shift_zone(date, name, Timex.Timezone.Database) do 
      datetime
    else
      {ty, a, b} when ty in [:gap, :ambiguous] ->
        %AmbiguousDateTime{before: a, after: b, type: ty}

      {:error, _} = err ->
        err
    end
  end

  def convert(%DateTime{} = date, tz) do
    case get(tz, date) do
      {:error, _} = err -> err
      timezone -> convert(date, timezone)
    end
  end

  def convert(%NaiveDateTime{} = date, %AmbiguousTimezoneInfo{} = tz) do
    before_date = convert(date, tz.before)
    after_date = convert(date, tz.after)
    %AmbiguousDateTime{:before => before_date, :after => after_date}
  end

  def convert(%NaiveDateTime{} = date, %TimezoneInfo{full_name: name}) do
    with {:ok, datetime} <- DateTime.from_naive(date, name, Timex.Timezone.Database) do
      datetime
    else
      {ty, a, b} when ty in [:gap, :ambiguous] ->
        %AmbiguousDateTime{before: a, after: b, type: ty}

      {:error, _} = err ->
        err
    end
  end

  def convert(%NaiveDateTime{} = date, tz) do
    with %TimezoneInfo{} = tzinfo <- get(tz, date) do
      convert(date, tzinfo)
    end
  end

  def convert(date, tz) do
    case Timex.to_datetime(date, tz) do
      {:error, _} = err -> err
      datetime -> datetime
    end
  end

  @doc """
  Shifts the provided DateTime to the beginning of the day in its timezone
  """
  @spec beginning_of_day(DateTime.t()) :: DateTime.t()
  def beginning_of_day(%DateTime{time_zone: tz, microsecond: {_, precision}} = dt) do
    do_beginning_of_day(dt, tz, {{dt.year, dt.month, dt.day}, {0, 0, 0}}, precision)
  end

  defp do_beginning_of_day(%DateTime{} = dt, tz, {date, {h, _, _}} = day_start, precision) do
    abs_start = :calendar.datetime_to_gregorian_seconds(day_start)

    case Tzdata.zone_exists?(tz) do
      true ->
        case Tzdata.periods_for_time(tz, abs_start, :wall) do
          # This hour does not exist, so move ahead one and try again
          [] ->
            do_beginning_of_day(dt, tz, {date, {h + 1, 0, 0}}, precision)

          # Only one period applies
          [%{utc_off: utc_offset, std_off: std_offset, zone_abbr: abbr}] ->
            %{
              dt
              | :hour => h,
                :minute => 0,
                :second => 0,
                :microsecond => {0, precision},
                :utc_offset => utc_offset,
                :std_offset => std_offset,
                :zone_abbr => abbr
            }

          # Ambiguous, choose the earliest one in the same day which is unambiguous
          [_, _] ->
            do_beginning_of_day(dt, tz, {date, {h + 1, 0, 0}}, precision)
        end

      false ->
        %{dt | :hour => 0, :minute => 0, :second => 0, :microsecond => {0, precision}}
    end
  end

  @doc """
  Shifts the provided DateTime to the end of the day in its timezone
  """
  @spec end_of_day(DateTime.t()) :: DateTime.t()
  def end_of_day(%DateTime{time_zone: tz, microsecond: {_, precision}} = dt) do
    do_end_of_day(dt, tz, {{dt.year, dt.month, dt.day}, {23, 59, 59}}, precision)
  end

  defp do_end_of_day(%DateTime{} = dt, tz, {date, {h, _, _}} = day_end, precision) do
    abs_end = :calendar.datetime_to_gregorian_seconds(day_end)

    case Tzdata.zone_exists?(tz) do
      true ->
        case Tzdata.periods_for_time(tz, abs_end, :wall) do
          # This hour does not exist, so move back one and try again
          [] ->
            do_end_of_day(dt, tz, {date, {h - 1, 59, 59}}, precision)

          # Only one period applies
          [%{utc_off: utc_offset, std_off: std_offset, zone_abbr: abbr}] ->
            %{
              dt
              | :hour => h,
                :minute => 59,
                :second => 59,
                :microsecond => Timex.DateTime.Helpers.construct_microseconds(999_999, precision),
                :utc_offset => utc_offset,
                :std_offset => std_offset,
                :zone_abbr => abbr
            }

          # Ambiguous, choose the latest one in the same day which is unambiguous
          [_, _] ->
            do_end_of_day(dt, tz, {date, {h - 1, 59, 59}}, precision)
        end

      false ->
        us = Timex.DateTime.Helpers.construct_microseconds(999_999, precision)
        %{dt | :hour => 23, :minute => 59, :second => 59, :microsecond => us}
    end
  end

  @spec tzdata_to_timezone(map, String.t()) :: TimezoneInfo.t()
  def tzdata_to_timezone(
        %{
          from:
            %{wall: from},
          std_off: std_off_secs,
          until: %{wall: until},
          utc_off: utc_off_secs,
          zone_abbr: abbr
        } = _tzdata,
        zone
      ) do
    start_bound = boundary_to_erlang_datetime(from)
    end_bound = boundary_to_erlang_datetime(until)

    %TimezoneInfo{
      full_name: zone,
      abbreviation: abbr,
      offset_std: std_off_secs,
      offset_utc: utc_off_secs,
      from: erlang_datetime_to_boundary_date(start_bound),
      until: erlang_datetime_to_boundary_date(end_bound)
    }
  end

  @spec boundary_to_erlang_datetime(:min | :max | integer) :: :min | :max | Types.datetime()
  defp boundary_to_erlang_datetime(:min), do: :min
  defp boundary_to_erlang_datetime(:max), do: :max

  defp boundary_to_erlang_datetime(secs),
    do: :calendar.gregorian_seconds_to_datetime(trunc(secs))

  @spec erlang_datetime_to_boundary_date(:min | :max | Types.datetime()) ::
          :min | :max | {Types.weekday_name(), Types.datetime()}
  defp erlang_datetime_to_boundary_date(:min), do: :min
  defp erlang_datetime_to_boundary_date(:max), do: :max

  defp erlang_datetime_to_boundary_date({{y, m, d}, _} = date) do
    dow =
      case :calendar.day_of_the_week({y, m, d}) do
        1 -> :monday
        2 -> :tuesday
        3 -> :wednesday
        4 -> :thursday
        5 -> :friday
        6 -> :saturday
        7 -> :sunday
      end

    {dow, date}
  end

  @doc false
  @spec lookup_posix(String.t()) :: PosixTimezone.t() | nil
  def lookup_posix(timezone) when is_binary(timezone) do
    with {:ok, %PosixTimezone{} = posixtz, _} <- Posix.parse(timezone) do
      posixtz
    else
      _ -> nil
    end
  end

  def lookup_posix(_), do: nil
end
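# --- Usage sketch (not part of the Timex source) ---
# A minimal, hypothetical example of the lookup/conversion API defined above.
# `TimezoneUsageExample` is an illustrative module name; it assumes the
# :timex and :tzdata applications are available and started.
defmodule TimezoneUsageExample do
  alias Timex.Timezone

  # Resolving the varying timezone representations accepted by `name_of/1`.
  def resolved_names do
    [
      # => "Etc/UTC"
      Timezone.name_of(:utc),
      # => "Etc/UTC+2"
      Timezone.name_of("GMT+2"),
      # => "Etc/UTC-05:30"
      Timezone.name_of("-05:30")
    ]
  end

  # Converting a wall-clock time into a zone. An ambiguous or gapped local
  # time comes back as an %AmbiguousDateTime{} rather than a %DateTime{}.
  def to_moscow(%NaiveDateTime{} = naive) do
    case Timezone.convert(naive, "Europe/Moscow") do
      %DateTime{} = dt -> {:ok, dt}
      %Timex.AmbiguousDateTime{} = ambiguous -> {:ambiguous, ambiguous}
      {:error, _} = err -> err
      other -> {:error, {:unexpected, other}}
    end
  end
end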
defmodule Trunk.Processor do @moduledoc false alias Trunk.State def store(%{versions: versions, async: true} = state), do: store_async(versions, state) def store(%{versions: versions, async: false} = state), do: store_sync(versions, state) defp store_async(versions, state) do process_async(versions, state, fn version, map, state -> with {:ok, map} <- get_version_transform(map, version, state), {:ok, map} <- transform_version(map, version, update_state(state, version, map)), {:ok, map} <- postprocess_version(map, version, update_state(state, version, map)), {:ok, map} <- get_version_storage_dir(map, version, update_state(state, version, map)), {:ok, map} <- get_version_filename(map, version, update_state(state, version, map)), {:ok, map} <- get_version_storage_opts(map, version, update_state(state, version, map)) do save_version(map, version, update_state(state, version, map)) end end) end def store_sync(versions, %{versions: state_versions, timeout: timeout} = state) do task = Task.async(fn -> with {:ok, versions, state} <- map_versions(versions, state, &get_version_transform/3), {:ok, versions, state} <- map_versions(versions, state, &transform_version/3), {:ok, versions, state} <- map_versions(versions, state, &postprocess_version/3), {:ok, versions, state} <- map_versions(versions, state, &get_version_storage_dir/3), {:ok, versions, state} <- map_versions(versions, state, &get_version_filename/3), {:ok, versions, state} <- map_versions(versions, state, &get_version_storage_opts/3), {:ok, versions, state} <- map_versions(versions, state, &save_version/3) do {:ok, %{state | versions: Map.merge(state_versions, Map.new(versions))}} end end) case Task.yield(task, timeout) || Task.shutdown(task, :brutal_kill) do {:ok, result} -> result nil -> {:error, %{state | errors: :timeout}} end end def retrieve( %{versions: versions, storage: storage, storage_opts: storage_opts} = state, version ) do map = versions[version] with {:ok, map} <- get_version_storage_dir(map, version, state), {:ok, %{filename: filename, storage_dir: storage_dir}} <- get_version_filename(map, version, state), {:ok, temp_path} <- Briefly.create(extname: Path.extname(filename)), :ok <- storage.retrieve(storage_dir, filename, temp_path, storage_opts) do {:ok, temp_path} end end def reprocess(_state, :original), do: {:error, "Cannot reprocess :original"} def reprocess(%{versions: versions} = state, version) do versions = version |> List.wrap() |> Enum.map(fn version -> {version, versions[version]} end) |> Map.new() store_async(versions, state) end def delete(%{versions: versions, async: true} = state) do process_async(versions, state, fn version, map, state -> with {:ok, map} <- get_version_storage_dir(map, version, update_state(state, version, map)), {:ok, map} <- get_version_filename(map, version, update_state(state, version, map)) do delete_version(map, version, update_state(state, version, map)) end end) end def delete(%{versions: versions, async: false} = state) do state_versions = versions with {:ok, versions, state} <- map_versions(versions, state, &get_version_storage_dir/3), {:ok, versions, state} <- map_versions(versions, state, &get_version_filename/3), {:ok, versions, state} <- map_versions(versions, state, &delete_version/3) do {:ok, %{state | versions: Map.merge(state_versions, Map.new(versions))}} end end def process_async(versions, %{timeout: timeout} = state, func) do tasks = versions |> Enum.map(fn {version, map} -> task = Task.async(fn -> func.(version, map, state) end) {version, task} end) task_list = 
Enum.map(tasks, fn {_version, task} -> task end) task_list |> Task.yield_many(timeout) |> Enum.map(fn {task, result} -> {task, result || Task.shutdown(task, :brutal_kill)} end) |> Enum.map(fn {task, nil} -> {find_task_version(tasks, task), {:error, :timeout}} {task, {:ok, response}} -> {find_task_version(tasks, task), response} {task, other} -> {find_task_version(tasks, task), other} end) |> Enum.reduce(state, fn {version, {:ok, map}}, %{versions: versions} = state -> versions = Map.put(versions, version, map) %{state | versions: versions} {version, {:error, error}}, state -> State.put_error(state, version, :processing, error) {version, {:error, stage, error}}, state -> State.put_error(state, version, stage, error) end) |> ok end defp find_task_version([], _task), do: nil defp find_task_version([{version, task} | _tail], task), do: version defp find_task_version([_head | tail], task), do: find_task_version(tail, task) defp update_state(%{versions: versions} = state, version, version_map), do: %{state | versions: Map.put(versions, version, version_map)} defp map_versions(versions, state, func) do {versions, state} = versions |> Enum.map_reduce(state, fn {version, map}, state -> case func.(map, version, state) do {:ok, version_map} -> {{version, version_map}, state} {:error, stage, reason} -> {{version, map}, State.put_error(state, version, stage, reason)} end end) ok(versions, %{state | versions: Map.new(versions)}) end def generate_url( %{versions: versions, storage: storage, storage_opts: storage_opts} = state, version ) do map = versions[version] %{filename: filename, storage_dir: storage_dir} = with {:ok, map} = get_version_storage_dir(map, version, state), {:ok, map} = get_version_filename(map, version, state) do map end storage.build_uri(storage_dir, filename, storage_opts) end defp get_version_transform(version_state, version, %{module: module} = state), do: {:ok, Map.put(version_state, :transform, module.transform(state, version))} @doc false def transform_version(%{transform: nil} = version_state, _version, _state), do: ok(version_state) def transform_version(%{transform: transform} = version_state, _version, state) do case perform_transform(transform, state) do {:ok, temp_path} -> version_state |> Map.put(:temp_path, temp_path) |> ok {:error, error} -> {:error, :transform, error} end end defp postprocess_version(version_state, version, %{module: module} = state), do: module.postprocess(version_state, version, state) defp create_temp_path(%{}, {_, _, extname}), do: Briefly.create(extname: ".#{extname}") defp create_temp_path(%{extname: extname}, _), do: Briefly.create(extname: extname) defp perform_transform(transform, %{path: path}) when is_function(transform), do: transform.(path) defp perform_transform(transform, %{path: path} = state) do {:ok, temp_path} = create_temp_path(state, transform) perform_transform(transform, path, temp_path) end defp perform_transform({command, arguments, _ext}, source, destination), do: perform_transform(command, arguments, source, destination) defp perform_transform({command, arguments}, source, destination), do: perform_transform(command, arguments, source, destination) defp perform_transform(command, arguments, source, destination) do args = prepare_transform_arguments(source, destination, arguments) case System.cmd(to_string(command), args, stderr_to_stdout: true) do {_result, 0} -> {:ok, destination} {result, _} -> {:error, result} end end defp prepare_transform_arguments(source, destination, arguments) when is_function(arguments), do: 
      arguments.(source, destination)

  defp prepare_transform_arguments(source, destination, [_ | _] = arguments),
    do: [source | arguments] ++ [destination]

  defp prepare_transform_arguments(source, destination, arguments),
    do: prepare_transform_arguments(source, destination, String.split(arguments, " "))

  defp get_version_storage_dir(version_state, version, %{module: module} = state),
    do: {:ok, %{version_state | storage_dir: module.storage_dir(state, version)}}

  defp get_version_storage_opts(version_state, version, %{module: module} = state),
    do: {:ok, %{version_state | storage_opts: module.storage_opts(state, version)}}

  defp get_version_filename(version_state, version, %{module: module} = state),
    do: {:ok, %{version_state | filename: module.filename(state, version)}}

  defp save_version(
         %{
           filename: filename,
           storage_dir: storage_dir,
           storage_opts: version_storage_opts,
           temp_path: temp_path
         } = version_state,
         _version,
         %{path: path, storage: storage, storage_opts: storage_opts}
       ) do
    storage_opts = Keyword.merge(storage_opts, version_storage_opts)
    :ok = save_files(temp_path || path, filename, storage, storage_dir, storage_opts)
    {:ok, version_state}
  end

  defp save_files([], _filename, _storage, _storage_dir, _storage_opts), do: :ok

  defp save_files([_ | _] = paths, filename, storage, storage_dir, storage_opts) do
    paths
    |> Enum.with_index()
    |> Enum.each(fn {path, i} ->
      :ok = save_files(path, multi_filename(filename, i), storage, storage_dir, storage_opts)
    end)

    :ok
  end

  defp save_files(path, filename, storage, storage_dir, storage_opts),
    do: storage.save(storage_dir, filename, path, storage_opts)

  defp multi_filename(filename, 0), do: filename

  defp multi_filename(filename, number) do
    rootname = Path.rootname(filename)
    extname = Path.extname(filename)
    "#{rootname}-#{number}#{extname}"
  end

  defp delete_version(
         %{filename: filename, storage_dir: storage_dir} = version_state,
         _version,
         %{storage: storage, storage_opts: storage_opts}
       ) do
    :ok = storage.delete(storage_dir, filename, storage_opts)
    {:ok, version_state}
  end

  defp ok(%State{errors: nil} = state), do: {:ok, state}
  defp ok(%State{} = state), do: {:error, state}
  defp ok(other), do: {:ok, other}

  defp ok(versions, %State{errors: nil} = state), do: {:ok, versions, state}
  defp ok(_versions, %State{} = state), do: {:error, state}
end
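# --- Transform shape sketch (not part of the Trunk source) ---
# `Trunk.Processor` accepts a transform either as an anonymous function taking
# the source path, or as a `{command, arguments}` / `{command, arguments, extname}`
# tuple executed through System.cmd/3 (see perform_transform above).
# `ExampleTransforms` is a hypothetical module illustrating both shapes.
defmodule ExampleTransforms do
  # Tuple form: runs `convert <source> -strip -thumbnail 200x200 <destination>`,
  # with the destination created as a temp file carrying a .jpg extension.
  def thumbnail, do: {:convert, "-strip -thumbnail 200x200", :jpg}

  # Function form: receives the source path and must return
  # {:ok, output_path} or {:error, reason}.
  def noop do
    fn source_path -> {:ok, source_path} end
  end
end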
defmodule Zuppler.Utilities.DataConvertor do
  @moduledoc """
  Data Convertor module.
  Used to convert plain maps into `Zuppler.Restaurant` structs.
  """

  @doc """
  Converts a map into its corresponding struct to enforce key validation.

  ## Example

      %{
        name: "demo",
        permalink: "demorestaurant",
        amenities: "Online Orders, Cocktail, Air Condition (A/C), Late Night, Coupons",
        cuisines: "Continental, Pizza, Seafood",
        locations: [
          %{
            id: 1,
            address: %{city: "Norristown", country: nil, id: "685", state: "PA",
                       geo: %{lat: 40.14543, lng: -75.393859}}
          },
          %{
            id: 2,
            address: %{city: "Phoenixville", country: nil, id: "350", state: "PA",
                       geo: %{lat: 40.134154, lng: -75.516085}}
          }
        ]
      }
      =>
      %Zuppler.Restaurant{
        name: "demo",
        permalink: "demorestaurant",
        amenities: "Online Orders, Cocktail, Air Condition (A/C), Late Night, Coupons",
        cuisines: "Continental, Pizza, Seafood",
        locations: [
          %Zuppler.Location{
            id: 1,
            address: %Zuppler.Address{city: "Norristown", country: nil, id: "685", state: "PA",
                                      geo: %Zuppler.Address.Geo{lat: 40.14543, lng: -75.393859}}
          },
          %Zuppler.Location{
            id: 2,
            address: %Zuppler.Address{city: "Phoenixville", country: nil, id: "350", state: "PA",
                                      geo: %Zuppler.Address.Geo{lat: 40.134154, lng: -75.516085}}
          }
        ]
      }
  """
  alias Zuppler.Restaurant
  alias Zuppler.Location
  alias Zuppler.Address
  alias Zuppler.Channel
  alias Zuppler.Integration

  @spec convert(%{optional(any) => any}) :: Restaurant.t
  def convert(map) do
    Restaurant
    |> struct(map)
    |> add_locations
    |> add_services
  end

  defp add_locations(%Restaurant{locations: nil} = restaurant), do: restaurant

  defp add_locations(%Restaurant{locations: locations} = restaurant) do
    Map.put(restaurant, :locations, Enum.map(locations, &location_convert(&1)))
  end

  defp add_services(%Restaurant{services: nil} = restaurant), do: restaurant

  defp add_services(%Restaurant{services: services} = restaurant) do
    Map.put(restaurant, :services, Enum.map(services, &service_convert(&1)))
  end

  @spec location_convert(%{optional(any) => any}) :: Location.t
  defp location_convert(location) do
    new_location =
      case Map.get(location, :address) do
        nil -> location
        _ -> Map.put(location, :address, address_convert(location.address))
      end

    struct(Location, new_location)
  end

  @spec address_convert(%{optional(any) => any}) :: Address.t
  defp address_convert(adr) do
    new_adr = Map.put(adr, :geo, geo_convert(adr.geo))
    struct(Address, new_adr)
  end

  @spec geo_convert(%{lat: float, lng: float}) :: Address.Geo.t
  defp geo_convert(geo) do
    struct(Address.Geo, geo)
  end

  @spec service_convert(%{optional(any) => any}) :: Zuppler.Service.t
  defp service_convert(srv) do
    struct(Zuppler.Service, srv)
  end

  @spec convert_channel(%{optional(any) => any}) :: Channel.t
  def convert_channel(map) do
    Channel
    |> struct(map)
    |> add_integrations
  end

  defp add_integrations(%Channel{integrations: nil} = channel), do: channel

  defp add_integrations(%Channel{integrations: integrations} = channel) do
    Map.put(channel, :integrations, Enum.map(integrations, &struct(Integration, &1)))
  end
end
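# --- Usage sketch (not part of the Zuppler source) ---
# A hypothetical call into the convertor above. The nested maps mirror the
# shape shown in the @doc example; this assumes %Zuppler.Restaurant{} defines
# the :locations and :services fields (the latter defaulting to nil).
defmodule DataConvertorExample do
  def demo_restaurant do
    %{
      name: "demo",
      permalink: "demorestaurant",
      locations: [
        %{id: 1, address: %{city: "Norristown", state: "PA", geo: %{lat: 40.14543, lng: -75.393859}}}
      ]
    }
    |> Zuppler.Utilities.DataConvertor.convert()

    # => %Zuppler.Restaurant{} with %Zuppler.Location{}, %Zuppler.Address{},
    #    and %Zuppler.Address.Geo{} structs in place of the plain maps.
  end
end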
defmodule BtrzWebhooksEmitter do
  @moduledoc """
  BtrzWebhooksEmitter emits webhooks to SQS for the Betterez platform.

  This module has the API to send messages asynchronously to the `BtrzWebhooksEmitter.SQS`.

  You will have to set these ENV vars:

    * AWS_SERVICE_KEY
    * AWS_SERVICE_SECRET
    * SQS_QUEUE_NAME

  You can set `SQS_QUEUE_NAME` in your config:

  ```elixir
  config :btrz_ex_webhooks_emitter, queue_url: "id/name"
  ```

  If one of them is missing the messages will be ignored.

  ## How to use

  You have to send a map with the following required (string) keys:

    * "provider_id"
    * "data"

  Optional keys:

    * "url"

  ```elixir
  message = %{
    "provider_id" => "123",
    "data" => %{"foo" => "bar"}
  }
  BtrzWebhooksEmitter.emit("transaction.created", message)
  ```
  """
  require Logger

  @sqs_server :btrz_webhooks_emitter_sqs

  @doc """
  Builds and sends messages asynchronously to the BtrzWebhooksEmitter.SQS.

  If there is a validation error in your `attrs` it will return `:error` and log the error,
  otherwise always `:ok`.
  """
  @spec emit(binary, map) :: :ok | :error
  def emit(event_name, attrs) do
    case validate_and_build_message(event_name, attrs) do
      {:ok, message} ->
        BtrzWebhooksEmitter.SQS.emit(@sqs_server, message)

      {:error, reason} ->
        Logger.error("cannot emit: #{inspect(reason)}")
        :error
    end
  end

  @doc """
  Builds and sends messages synchronously to the BtrzWebhooksEmitter.SQS.

  This is intended for special cases; prefer `emit/2` whenever possible.

  Returns `{:ok, term}` or `{:error, term}`
  """
  @spec emit_sync(binary, map) :: BtrzWebhooksEmitter.SQS.emit_sync_response()
  def emit_sync(event_name, attrs) do
    case validate_and_build_message(event_name, attrs) do
      {:ok, message} ->
        BtrzWebhooksEmitter.SQS.emit_sync(@sqs_server, message)

      {:error, reason} ->
        Logger.error("cannot emit: #{inspect(reason)}")
        {:error, reason}
    end
  end

  @spec validate_and_build_message(binary, map) :: {:ok, map} | {:error, any}
  defp validate_and_build_message(event_name, attrs) do
    with :ok <- validate_fields(event_name, attrs),
         true <- service_started?(@sqs_server),
         message <- build_message(event_name, attrs) do
      {:ok, message}
    else
      {:error, reason} -> {:error, reason}
      false -> {:error, "BtrzWebhooksEmitter.SQS GenServer is down"}
      e -> {:error, e}
    end
  end

  @spec validate_fields(binary, map) :: :ok | {:error, String.t()}
  defp validate_fields(event_name, _attrs) when not is_binary(event_name) do
    {:error, "event_name is missing"}
  end

  defp validate_fields(_event_name, attrs) do
    cond do
      not is_binary(attrs["provider_id"]) ->
        {:error, "provider_id is missing"}

      true ->
        :ok
    end
  end

  @spec service_started?(GenServer.server()) :: boolean
  defp service_started?(server) do
    case GenServer.whereis(server) do
      nil -> false
      _pid -> true
    end
  end

  @doc """
  Returns the message map.
""" @spec build_message(binary, map) :: map def build_message(event_name, attrs) do %{ id: UUID.uuid4(), ts: DateTime.utc_now() |> DateTime.to_unix(:millisecond), providerId: attrs["provider_id"], event: event_name, data: filter_fields(event_name, attrs["data"]) } |> maybe_put_url(attrs) end @doc false defp maybe_put_url(message, %{"url" => url}) do Map.put_new(message, :url, url) end defp maybe_put_url(message, _), do: message @doc false @spec filter_fields(binary, map | any) :: map | any defp filter_fields(event_name, data) when is_map(data) do denied_fields = get_denied_fields(event_name) denied_found = Map.take(data, denied_fields) Map.drop(data, Map.keys(denied_found)) end defp filter_fields(_, data), do: data @doc false @spec get_denied_fields(binary) :: list defp get_denied_fields(event_name) do all_denied_fields = BtrzWebhooksDeniedFields.get_fields() wildcard_fields = Map.get(all_denied_fields, "*", []) wildcard_key = hd(Regex.run(~r/([^\.]*)/u, event_name)) <> ".*" wildcard_fields ++ Map.get(all_denied_fields, wildcard_key, []) ++ Map.get(all_denied_fields, event_name, []) end end
defmodule Saucexages.IO.SauceBinary do @moduledoc """ Functions for working with SAUCE containing and related binaries. Provides basic building blocks for reading, writing, fixing, and analyzing binaries that may or may not contain SAUCE blocks. ## Notes Because of the way SAUCE works with regard to EOF position + bugs in practice with EOF characters, it may be highly inefficient to use some of these functions with large binaries. As such, several versions of similar functions (ex: split_all vs. split_sauce) exist to allow you to work only with the parts of a binary you need. If you still have issues with large files, prefer to work with the `SauceFile` and `FileReader` modules instead. """ require Saucexages.Sauce alias Saucexages.{Sauce} @type contents_binary :: binary() @type sauce_block_binary :: binary() @type sauce_binary() :: binary() @type comments_binary() :: binary() defguard is_comment_lines(bin) when rem(byte_size(bin), Sauce.comment_line_byte_size()) == 0 #TODO: Cleanup some of these functions that have overlapping handling. It's explicit now which is fine, but there's a lot of boiler plate that exists for binary efficiency that can be deleted with some work. @doc """ Splits a binary into its component parts with respect to SAUCE. A 3-tuple is returned in the form `{contents_bin, sauce_bin, comments_bin}` that consists of the file contents, SAUCE record, if any, and comments, if any. """ @spec split_all(binary()) :: {contents_binary(), sauce_binary(), comments_binary()} def split_all(bin) when is_binary(bin) do case sauce_handle(bin) do {:ok, {sauce_record_bin, comment_lines}} -> case extract_comments(bin, comment_lines) do {:ok, comments_bin} -> contents_offset = byte_size(bin) - Sauce.sauce_byte_size(comment_lines) <<contents_bin::binary-size(contents_offset), _::binary>> = bin {contents_bin, sauce_record_bin, comments_bin} _ -> contents_offset = byte_size(bin) - Sauce.sauce_record_byte_size() <<contents_bin::binary-size(contents_offset), _::binary-size(Sauce.sauce_record_byte_size())>> = bin {contents_bin, sauce_record_bin, <<>>} end _ -> {bin, <<>>, <<>>} end end @doc """ Splits a binary, trimming the file contents and returning the SAUCE and SAUCE comment block if each exists as a 2-tuple in the form of `{sauce_record, comment_block}`. Any element of the tuple that does not exist will be returned as an empty binary. """ @spec split_sauce(binary()) :: {sauce_binary(), comments_binary()} def split_sauce(bin) when is_binary(bin) do with {:ok, {sauce_bin, comments_bin}} <- sauce(bin) do {sauce_bin, comments_bin} else _ -> {<<>>, <<>>} end end @doc """ Splits a binary into its components by SAUCE record. Returns a 2-tuple where the first element is the remaining file binary, if any, and the second element is the SAUCE record binary, if any. Note: The first element may or may not contain SAUCE comments. If you wish to extract all possible components of a file that may have a SAUCE, use `split_all/1` instead. If you wish to obtain the file contents only use `contents/1` instead. 
""" @spec split_record(binary()) :: {contents_binary(), sauce_binary()} def split_record(bin) when is_binary(bin) do with bin_size when bin_size >= Sauce.sauce_record_byte_size() <- byte_size(bin), sauce_offset = bin_size - Sauce.sauce_record_byte_size(), <<contents_bin::binary-size(sauce_offset), sauce_bin::binary-size(Sauce.sauce_record_byte_size())>> = bin, true <- matches_sauce?(sauce_bin) do {contents_bin, sauce_bin} else _ -> {bin, <<>>} end end @doc """ Splits a binary according to a specified number of `comment_lines` and returns a 3-tuple, where the first element is the file binary, if any, the second element is the SAUCE binary, if any, and the third element is the comments binary, if any. If the `comment_lines` do not match a valid comment block, no comments will be returned. """ @spec split_with(binary(), non_neg_integer()) :: {contents_binary(), sauce_binary(), comments_binary()} def split_with(bin, comment_lines) when is_binary(bin) and is_integer(comment_lines) do bin_size = byte_size(bin) with true <- comment_lines > 0, block_size = Sauce.sauce_byte_size(comment_lines), true <- bin_size >= block_size, comment_size = Sauce.comment_block_byte_size(comment_lines), sauce_offset = bin_size - block_size, <<contents_bin::binary-size(sauce_offset), comments_bin::binary-size(comment_size), sauce_bin::binary-size(Sauce.sauce_record_byte_size())>> = bin, true <- matches_sauce?(sauce_bin), true <- matches_comment_block?(comments_bin) do {contents_bin, sauce_bin, comments_bin} else _ -> {contents_bin, sauce_bin} = split_record(bin) {contents_bin, sauce_bin, <<>>} end end @doc """ Returns the contents before any SAUCE and other EOF data. May truncate any other data that might co-exist with a SAUCE block. """ @spec clean_contents(binary()) :: binary() def clean_contents(bin) when is_binary(bin) do [contents_bin | _] = :binary.split(bin, <<Sauce.eof_character()>>) contents_bin end @doc """ Returns the contents before any SAUCE block. An optional `terminate?` flag may be passed which will append an EOF character to the end of the contents if one does not already exist. This is useful in cases such as writing a new SAUCE record where you want to avoid appending extra EOF characters or ensure one is present to ensure SAUCE can be read properly after write. By default, contents is returned *as is*, and therefore you must opt-in to termination. """ @spec contents(binary(), boolean()) :: {:ok, contents_binary()} | {:error, term()} def contents(bin, terminate? \\ false) def contents(bin, false) when is_binary(bin) do extract_contents(bin) end def contents(bin, true) when is_binary(bin) do {:ok, contents} = extract_contents(bin) terminated_bin = eof_terminate(contents) {:ok, terminated_bin} end @doc """ Returns the byte size of the contents in the binary, before any potential SAUCE block. """ @spec contents_size(binary()) :: non_neg_integer() def contents_size(bin) do {:ok, contents_bin} = contents(bin, false) byte_size(contents_bin) end @doc """ Returns SAUCE record binary if the provided binary *is* a SAUCE record, otherwise an empty binary. """ @spec maybe_sauce_record(sauce_binary()) :: sauce_binary() | <<>> def maybe_sauce_record(<<Sauce.sauce_id(), _sauce_data::binary-size(Sauce.sauce_data_byte_size())>> = sauce_bin) do sauce_bin end def maybe_sauce_record(_sauce_bin) do <<>> end @doc """ Returns SAUCE comment block if the provided binary *is* a SAUCE comment block, otherwise an empty binary. 
""" @spec maybe_comments(comments_binary()) :: comments_binary | <<>> def maybe_comments(<<Sauce.comment_id(), _first_comment_line::binary-size(Sauce.comment_line_byte_size()), rest::binary>> = comments_bin) do # The optimizer forces us to check this here instead of via guard if comment_lines?(rest) do comments_bin else <<>> end end def maybe_comments(_comments_bin) do <<>> end @doc """ Checks if a binary *is* a SAUCE record. Note: This does not validate a SAUCE record, nor does it scan an entire binary for the presence of a SAUCE record. Instead, it merely matches if the entire binary *is* a SAUCE record, rather than *has* a SAUCE record. If you wish to check if a binary *has* a SAUCE record, instead use `sauce?/1` """ @spec matches_sauce?(sauce_binary()) :: boolean() def matches_sauce?(sauce_bin) # we may want to tighten this up slightly to check for ascii, but validating is really part of decoding, which is at a higher level than this def matches_sauce?(<<Sauce.sauce_id(), 0, 0, _sauce_data::binary-size(121)>>) do false end def matches_sauce?(<<Sauce.sauce_id(), _sauce_data::binary-size(Sauce.sauce_data_byte_size())>>) do true end def matches_sauce?(_sauce_bin) do false end @doc """ Checks if a binary *is* a SAUCE comment block. Note: This does not validate a comment block, nor does it scan an entire binary for the presence of a comment block. Instead, it merely matches if the entire binary *is* a comment block, rather than *has* a comment block. If you wish to check if a binary *has* a comment block, instead use `comments?/1` """ @spec matches_comment_block?(comments_binary()) :: boolean() def matches_comment_block?(<<Sauce.comment_id(), _first_comment_lines::binary-size(Sauce.comment_line_byte_size()), rest::binary>>) do comment_lines?(rest) end def matches_comment_block?(_comments_bin) do false end @doc """ Checks if a binary *is* a SAUCE record, returning `:ok` if true, and `{:error, :no_sauce}` if false. """ @spec verify_sauce_record(sauce_binary()) :: :ok | {:error, :no_sauce} def verify_sauce_record(<<Sauce.sauce_id(), _sauce_data::binary-size(Sauce.sauce_data_byte_size())>>) do :ok end def verify_sauce_record(_sauce_bin) do {:error, :no_sauce} end @doc """ Checks to see if a binary *is* a SAUCE comment block. """ @spec verify_comment_block(comments_binary()) :: :ok | {:error, :no_sauce} def verify_comment_block(<<Sauce.comment_id(), first_comment_lines::binary-size(Sauce.comment_line_byte_size()), _rest::binary>>) do # optimizer forces us to do this here instead of via guard if comment_lines?(first_comment_lines) do :ok else {:error, :no_comments} end end def verify_comment_block(_comments_bin) do {:error, :no_comments} end @doc """ Reads a given `field_id` from a binary SAUCE record or binary containing a SAUCE, and returns the undecoded result as binary. If the binary does not have a SAUCE record, `:no_sauce` is returned. Returns `{:ok, value}` if the value exists where the value will be raw binary. Returns `{:error, reason}` if there is a problem reading the field. If the binary has no SAUCE record to read, `{:error, :no_sauce}` is returned. 
""" @spec read_field(sauce_binary(), Sauce.field_id()) :: {:ok, binary()} | {:error, :no_sauce} | {:error, term()} def read_field(bin, field_id) def read_field(<<Sauce.sauce_id(), _rest::binary-size(Sauce.sauce_data_byte_size())>> = sauce_bin, field_id) when is_atom(field_id) do case do_read_field(sauce_bin, field_id) do :no_sauce -> {:error, :no_sauce} value -> {:ok, value} end end def read_field(bin, field_id) when is_binary(bin) and is_atom(field_id) do with {:ok, sauce_bin} <- sauce_record(bin), value when is_binary(value) <- do_read_field(sauce_bin, field_id) do {:ok, value} else :no_sauce -> {:error, :no_sauce} {:error, _reason} = err -> err _ -> {:error, "Unable to read SAUCE field #{inspect field_id}"} end end defp do_read_field(sauce_bin, field_id) for %{field_id: field_id, field_size: field_size, position: position} <- Sauce.field_list() do defp do_read_field(sauce_bin, unquote(field_id)) do case sauce_bin do <<_::binary-size(unquote(position)), value::binary-size(unquote(field_size)), _::binary>> -> value _ -> :no_sauce end end end @doc """ Reads a given `field_id` from a binary SAUCE record and returns the undecoded result as binary. Throws if the given binary is not or does not contain a SAUCE record. """ @spec read_field!(sauce_binary(), Sauce.field_id()) :: binary() | :no_sauce def read_field!(bin, field_id) def read_field!(<<Sauce.sauce_id(), _rest::binary-size(Sauce.sauce_data_byte_size())>> = sauce_bin, field_id) when is_atom(field_id) do do_read_field(sauce_bin, field_id) end def read_field!(bin, field_id) when is_binary(bin) and is_atom(field_id) do with {:ok, sauce_bin} <- sauce_record(bin) do read_field!(sauce_bin, field_id) else _ -> raise ArgumentError, "You must supply a valid sauce record binary and field id." end end def read_field!(_sauce_bin, _field_id) do raise ArgumentError, "You must supply a valid sauce record binary and field id." end @doc """ Writes a given `field_id` and a given encoded binary `value` to a binary that already contains a SAUCE record. Writing using this method should be considered "unsafe" unless the values are validated and encoded in advance. The `field_id` must be a valid SAUCE field and the value must be binary of the valid corresponding size according to the SAUCE spec. Can be used for building in-place updates of a SAUCE along with a proper field encoder. """ @spec write_field(binary(), Sauce.field_id(), binary()) :: {:ok, sauce_binary()} | {:error, :no_sauce} | {:error, term()} def write_field(bin, field_id, value) when is_binary(bin) and is_atom(field_id) and is_binary(value) do case sauce_record(bin) do {:ok, sauce_bin} -> {:ok, do_write_field(sauce_bin, field_id, value)} {:error, _reason} = err -> err end end # It's possible for us to do a guard on byte_size here per field id using Sauce.field_size(field_id), but the pattern match pretty much takes care of it anyway. It's just a question if we want a cleaner exception or not, but this call is more of a nice to have for now. 
defp do_write_field(sauce_bin, field_id, value) for %{field_id: field_id, field_size: field_size, position: position} <- Sauce.field_list() do rest_size = Sauce.sauce_record_byte_size() - (field_size + position) defp do_write_field(<<start::binary-size(unquote(position)), _old_value::binary-size(unquote(field_size)), rest::binary-size(unquote(rest_size))>>, unquote(field_id), value) do <<start::binary-size(unquote(position)), value::binary-size(unquote(field_size)), rest::binary-size(unquote(rest_size))>> end end @doc """ Extracts the SAUCE record from a binary and the number of comment lines present. The returned comment lines can be used to properly fetch the comments block later from the same binary such as via `split_with/1`. """ @spec sauce_handle(binary()) :: {:ok, {sauce_binary(), non_neg_integer()}} | {:error, :no_sauce} | {:error, term()} def sauce_handle(bin) when is_binary(bin) do with {:ok, sauce_record_bin} <- extract_sauce_record(bin), {:ok, comment_lines} <- read_sauce_comment_lines(sauce_record_bin) do {:ok, {sauce_record_bin, comment_lines}} else {:error, _reason} = err -> err end end @doc """ Extracts the SAUCE record and comment block from a binary, and returns `{:ok, {sauce_bin, comments_bin}`. If no SAUCE is found, `{:error, :no_sauce}` will be returned. """ @spec sauce(binary()) :: {:ok, {sauce_binary(), comments_binary()}} | {:error, :no_sauce} | {:error, term()} def sauce(bin) when is_binary(bin) do with {:ok, {sauce_record_bin, comment_lines}} <- sauce_handle(bin), {:ok, comments_bin} <- maybe_extract_comments(bin, comment_lines) do {:ok, {sauce_record_bin, comments_bin}} else {:error, _reason} = err -> err _ -> {:error, "Unable to get SAUCE from binary."} end end @doc """ Extracts the SAUCE block as a single binary as it appears within a SAUCE binary - a comments block (if any) followed by a SAUCE record. If no SAUCE is found, `{:error, :no_sauce}` will be returned. """ @spec sauce_block(binary()) :: {:ok, sauce_block_binary()} | {:error, :no_sauce} | {:error, term()} def sauce_block(bin) when is_binary(bin) do case sauce(bin) do {:ok, {sauce_record_bin, comments_bin}} -> {:ok, <<comments_bin::binary, sauce_record_bin::binary-size(Sauce.sauce_record_byte_size())>>} err -> err end end @doc """ Extracts the SAUCE record from a binary and returns `{:ok, sauce_bin}`. If the SAUCE record cannot be found, {:error, `:no_sauce`} is returned. """ @spec sauce_record(binary()) :: {:ok, sauce_binary()} | {:error, :no_sauce} | {:error, term()} def sauce_record(bin) when is_binary(bin) do extract_sauce_record(bin) end @doc """ Extracts the SAUCE record from a binary. If the SAUCE record cannot be found, an empty binary is returned. """ @spec sauce_record!(binary()) :: sauce_binary() def sauce_record!(bin) when is_binary(bin) do case extract_sauce_record(bin) do {:ok, sauce_bin} -> sauce_bin _ -> <<>> end end @doc """ Checks if a binary has a SAUCE record. """ @spec sauce?(binary()) :: boolean() def sauce?(bin) when is_binary(bin) do case sauce(bin) do {:ok, _} -> true _ -> false end #this method avoids the sub-binary but is somewhat inconsistent with how we do things elsewhere # with bin_size when bin_size >= Sauce.sauce_record_byte_size <- byte_size(bin), # {_, _} <- do_match_sauce(bin, false, [{:scope, {bin_size, -Sauce.sauce_record_byte_size()}}]) do # true # else # _ -> false # end end def sauce?(_bin) do false end @doc """ Extracts the SAUCE comment block from a binary and returns the comment block binary with the comment line count as `{:ok, {comments_bin, line_count}}`. 
The line count corresponds to the number of comment lines that have been read based on the `comment_lines` field in the SAUCE record, and may be used for decoding purposes. If the SAUCE comment block cannot be found, {:error, `:no_comments`} is returned. """ @spec comments(binary()) :: {:ok, {comments_binary(), non_neg_integer()}} | {:error, :no_sauce} | {:error, :no_comments} | {:error, term()} def comments(bin) when is_binary(bin) do with {:ok, {_sauce_record_bin, comment_lines}} <- sauce_handle(bin), {:ok, comments_bin} <- extract_comments(bin, comment_lines) do {:ok, {comments_bin, comment_lines}} else {:error, :no_sauce} -> {:error, :no_sauce} _ -> {:error, :no_comments} end end @doc """ Checks if a binary has a SAUCE comment block. """ @spec comments?(binary()) :: boolean() def comments?(bin) when is_binary(bin) do case comments(bin) do {:ok, _} -> true _ -> false end # this method uses matching and avoids creating binaries, but is somewhat inconsistent with how we do things elsewhere # case match_comment_block(bin) do # {_, _} -> true # _ -> false # end end def comments?(_bin) do false end @doc """ Extracts the SAUCE comment block from a binary or returns a comment fragment if the binary is a comment fragment. If a SAUCE record exists after the comment block, it will be trimmed. Because there is no other terminator for a comments fragment other than a SAUCE record, it is possible that additional data may be returned that is not part of comments. You should therefore manually parse the data to see if it is actually comment data or otherwise relevant. If you want to be sure you have a valid comment block, you should instead use `comments/1` which will check for the presence of a SAUCE record as a terminator. Alternatively, you can safely use this function if you have previously split a binary and know that the SAUCE terminator existed previously, for example after calling `split_sauce/1`. Useful for casual introspection of SAUCE files and diagnosing damaged files. """ @spec comments_fragment(binary()) :: {:ok, comments_binary()} def comments_fragment(bin) when is_binary(bin) do if matches_comment_fragment?(bin) do # trim any SAUCE record that might exist after - this trim will always produce a value [comments_bin | _] = :binary.split(bin, Sauce.sauce_id()) {:ok, comments_bin} else case comments(bin) do {:ok, {comments_bin, _lines}} -> {:ok, comments_bin} err -> err end end end # a more relaxed check for comments defp matches_comment_fragment?(<<Sauce.comment_id(), _first_comment_lines::binary-size(Sauce.comment_line_byte_size()), _rest::binary>>) do true end defp matches_comment_fragment?(_bin) do false end @doc """ Checks if a binary has a SAUCE comments fragment. """ @spec comments_fragment?(binary()) :: boolean() def comments_fragment?(bin) when is_binary(bin) do case do_match_comments(bin, false, []) do {_, _} -> true _ -> false end end def comments_fragment?(_bin) do false end @doc """ Dynamically counts the number of comment lines in a SAUCE binary and return it as `{:ok, line_count}`. Note that this number may not match the `comment_line` field found in a SAUCE record. Major reasons for this include: * The SAUCE block itself is corrupted. * The SAUCE record `comment_lines` field was not updated properly by a SAUCE writer. * The SAUCE comment block was not updated properly by a SAUCE writer. Useful for finding and fixing damaged SAUCE files. Use `comment_lines/1` if you want to read the comment lines stored in the SAUCE record directly. 
""" @spec count_comment_lines(binary()) :: {:ok, non_neg_integer()} | {:error, :no_sauce} | {:error, :no_comments} | {:error, term()} def count_comment_lines(bin) when is_binary(bin) do with {:ok, comments_bin} <- comments_fragment(bin), block_size = byte_size(comments_bin) - Sauce.comment_id_byte_size(), line_count when rem(block_size, Sauce.comment_line_byte_size()) == 0 <- div(block_size, Sauce.comment_line_byte_size()) do {:ok, line_count} else {:error, _reason} = err -> err _ -> {:error, "Invalid comment block."} end end def comment_lines?(<<>>) do true end def comment_lines?(<<bin::binary>>) when is_binary(bin) do #rem(byte_size(bin), Sauce.comment_line_byte_size()) == 0 do_comment_lines?(bin) end defp do_comment_lines?(<<>>) do true end defp do_comment_lines?(<<_line::binary-size(Sauce.comment_line_byte_size()), rest::binary>>) do comment_lines?(rest) end defp do_comment_lines?(_bin) do false end @doc """ Returns the number of comment lines stored in the SAUCE record as `{:ok, line_count}`. This value can serve as a pointer for helping you locate a SAUCE comment block in a binary and can be used for reading the comments a variety of ways, such as via `split_with/1`. Note: This value may or may not match the number of lines stored in the comment block. As such, you should exhibit caution when relying on either. See the reasons for this in `count_comment_lines/1`. """ @spec comment_lines(binary()) :: {:ok, non_neg_integer()} | {:error, :no_sauce} | {:error, term()} def comment_lines(bin) when is_binary(bin) do # first verify it's actually a SAUCE record or grab it so we don't read garbage with {:ok, sauce_bin} <- sauce_record(bin) do read_sauce_comment_lines(sauce_bin) else {:error, _reason} = err -> err _ -> {:error, "Unable to read comment lines field."} end end @doc """ Returns a list of comment block line undecoded comment line binaries, if any. """ @spec comment_block_lines(binary()) :: {:ok, [binary()]} | {:error, :no_sauce} | {:error, :no_comments} | {:error, term()} def comment_block_lines(bin) when is_binary(bin) do with {:ok, comments_bin} <- comments_fragment(bin), lines when is_list(lines) <- do_comment_block_lines(comments_bin) do {:ok, lines} end end defp do_comment_block_lines(<<Sauce.comment_id(), comment_lines::binary>>) do do_comment_block_lines(comment_lines, []) end defp do_comment_block_lines(_bin) do [] end defp do_comment_block_lines(<<comment_line::binary-size(Sauce.comment_line_byte_size()), rest::binary>>, lines) do do_comment_block_lines(rest, [comment_line | lines]) end defp do_comment_block_lines(_bin, lines) do lines end @doc """ Checks if a binary most likely has a valid SAUCE using a relaxed set of constraints that avoid a full decode. Useful for deciding whether or not a SAUCE is damaged or worth further actions. 
""" @spec valid_sauce?(binary()) :: boolean() def valid_sauce?(bin) when is_binary(bin) and byte_size(bin) >= Sauce.sauce_record_byte_size() do with {:ok, sauce_record_bin} <- sauce_record(bin), <<Sauce.sauce_id(), version::binary-size(Sauce.field_size(:version)), _title::binary-size(Sauce.field_size(:title)), _author::binary-size(Sauce.field_size(:author)), _group::binary-size(Sauce.field_size(:group)), _date::binary-size(Sauce.field_size(:date)), _file_size::binary-size(Sauce.field_size(:file_size)), data_type::little-unsigned-integer-unit(8)-size(Sauce.field_size(:data_type)), file_type::little-unsigned-integer-unit(8)-size(Sauce.field_size(:file_type)), _t_info_1::binary-size(Sauce.field_size(:t_info_1)), _t_info_2::binary-size(Sauce.field_size(:t_info_2)), _t_info_3::binary-size(Sauce.field_size(:t_info_3)), _t_info_4::binary-size(Sauce.field_size(:t_info_4)), _comment_lines::binary-size(Sauce.field_size(:comment_lines)), _t_flags::binary-size(Sauce.field_size(:t_flags)), _t_info_s::binary-size(Sauce.field_size(:t_info_s)), >> = sauce_record_bin, false <- <<0, 0>> == version, true <- file_type >= 0, true <- data_type >= 0 do true else _ -> false end end def valid_sauce?(_bin) do false end @doc """ Scans a binary for a comment block and returns it if found. """ @spec discover_comments(binary()) :: comments_binary() def discover_comments(bin) when is_binary(bin) do #TODO: refactor case split_record(bin) do {<<>>, _} -> <<>> {_, <<>>} -> <<>> {contents_bin, sauce_bin} when is_binary(contents_bin) and is_binary(sauce_bin) -> bin_size = byte_size(contents_bin) if bin_size >= Sauce.minimum_comment_block_byte_size() do comment_offset = bin_size - Sauce.minimum_comment_block_byte_size() <<remaining_bin::binary-size(comment_offset), comments_bin::binary-size(Sauce.minimum_comment_block_byte_size())>> = contents_bin do_discover_comments(comments_bin, remaining_bin) else <<>> end end end defp do_discover_comments(<<Sauce.comment_id(), _comment_lines::binary-size(Sauce.minimum_comment_block_byte_size())>> = comments_bin, <<>>) do # 1 comment line comments_bin end defp do_discover_comments(<<Sauce.comment_id(), _comment_lines::binary>> = comments_bin, _remaining) do # multiple comment lines comments_bin end defp do_discover_comments(comments_bin, remaining) do # COMNT + 64 + LAST bin_size = byte_size(remaining) if bin_size >= Sauce.minimum_comment_block_byte_size() do offset = bin_size - Sauce.comment_line_byte_size() <<rest_bin::binary-size(offset), comments_start_bin::binary-size(Sauce.comment_line_byte_size())>> = remaining do_discover_comments(<<comments_start_bin::binary-size(Sauce.comment_line_byte_size()), comments_bin::binary>>, rest_bin) else <<>> end end @doc """ Determines if a binary is terminated by an EOF character. """ @spec eof_terminated?(binary()) :: boolean() def eof_terminated?(bin) when is_binary(bin) do if :binary.last(bin) == Sauce.eof_character() do true else false end end @doc """ Terminates a binary with an EOF character. """ @spec eof_terminate(binary()) :: binary() def eof_terminate(bin) when is_binary(bin) do if eof_terminated?(bin) do bin else bin_size = byte_size(bin) <<bin::binary-size(bin_size), Sauce.eof_character()>> end end @doc """ Searches for the first occurrence of a SAUCE comment block in a binary and returns the `{position, length}` corresponding to the SAUCE comment block. If no comment block is found, :nomatch is returned. Options may be provided according to `:binary.match`. 
Additionally, the `:eof?` boolean option may be specified to search for a match against the end-of-file character (EOF) that is required in a SAUCE file. When searching sub-binaries or damaged files, you may wish to avoid this requirement by specifying `[eof?: false]` as an option, while conversely you may wish to set it true to explicitly check for a correct comment block. The returned `{position, length}` will always match that of the comment block itself and does not include the EOF character as part of the position or length. Note: A comment block cannot exist by definition if no SAUCE record exists. If the binary has no SAUCE, the comments data will be ignored as it is invalid and not guaranteed to be part of a SAUCE comment block. If you wish to manage the presence of an erroneous comment block for fixing a SAUCE, cleaning a file, or other purposes, use `match_comments_fragment` which removes this requirement. """ @spec match_comment_block(binary(), Enum.t()) :: {non_neg_integer(), pos_integer()} | :nomatch def match_comment_block(bin, opts \\ [eof?: false]) def match_comment_block(bin, opts) when is_binary(bin) do with bin_size when bin_size >= Sauce.minimum_commented_sauce_size() <- byte_size(bin), # first we check if there is a SAUCE because we need to know the comment block is valid at all {sauce_pos, Sauce.sauce_record_byte_size()} <- match_sauce_record(bin, [{:scope, {bin_size, -Sauce.sauce_record_byte_size()}}]), {eof?, match_opts} = Keyword.pop(opts, :eof?, false), # because we know the SAUCE exists, we can reduce the scope to search if the scope was already not passed # we can probably compress this scope some, but walking backward negatively without knowing the size has some issues unless using EOF character scoped_opts = Keyword.put_new_lazy(match_opts, :scope, fn -> {0, (bin_size - 128)} end), {pos, _len} <- do_match_comments(bin, eof?, scoped_opts), # comments must actually be at least 69 bytes long. We need to ensure we didn't match some random COMNT data that is truncated or otherwise invalid. true <- (bin_size - pos - Sauce.minimum_comment_block_byte_size()) >= 0 do {pos, sauce_pos - pos} else _ -> :nomatch end end @doc """ Searches for the first occurrence of a SAUCE comment block fragment in a binary and returns the `{position, length}` corresponding to the SAUCE comment block. If no comment block is found, :nomatch is returned. Options may be provided according to `:binary.match`. Additionally, the `:eof?` boolean option may be specified to search for a match against the end-of-file character (EOF) that is required in a SAUCE file. When searching sub-binaries or damaged files, you may wish to avoid this requirement by specifying `[eof?: false]` as an option, while conversely you may wish to set it true to explicitly check for a correct comment block. The returned `{position, length}` will always match that of the comment block itself and does not include the EOF character as part of the position or length. Note: A comment block cannot exist by definition if no SAUCE record exists. If the binary has no SAUCE, the comments data will still be returned. If you wish to manage the presence of an erroneous comment block for fixing a SAUCE, cleaning a file, or other purposes, use `match_comment_block` which adds this requirement. 
""" @spec match_comments_fragment(binary(), Enum.t()) :: {non_neg_integer(), pos_integer()} | :nomatch def match_comments_fragment(bin, opts \\ [eof?: false]) def match_comments_fragment(bin, opts) when is_binary(bin) do with bin_size when bin_size >= Sauce.minimum_comment_block_byte_size() <- byte_size(bin), {eof?, match_opts} = Keyword.pop(opts, :eof?, false), {pos, _len} <- do_match_comments(bin, eof?, match_opts), # comments must actually be at least 69 bytes long. We need to ensure we didn't match some random COMNT data that is truncated or otherwise invalid. true <- (bin_size - pos - Sauce.minimum_comment_block_byte_size()) >= 0 do case match_sauce_record(bin, [{:scope, {pos, bin_size - pos}}]) do :nomatch -> {pos, bin_size - pos} {sauce_position, _len} -> {pos, sauce_position - pos} end else _ -> :nomatch end end defp do_match_comments(bin, true, match_opts) do case :binary.match(bin, <<Sauce.eof_character(), Sauce.comment_id()>>, match_opts) do :nomatch -> :nomatch {pos, length} -> {pos + 1, length - 1} end end defp do_match_comments(bin, _eof?, match_opts) do :binary.match(bin, <<Sauce.comment_id()>>, match_opts) end @doc """ Searches for the first occurrence of a SAUCE record in a binary and returns the `{pos, length}` corresponding to the start of the SAUCE block. If no comment block is found, :nomatch is returned. Options may be provided according to `:binary.match`. Additionally, the `:eof?` boolean option may be specified to search for a match against the end-of-file character (EOF) that is required in a SAUCE file. This can be used to search for a SAUCE record without comments, a sub-binary, or damaged files. Note that when comments are present, specifying `[eof?: true]` will result in never matching a SAUCE record. If you need to verify that the EOF character is present before SAUCE data, it is suggested that you first call `match_comment_block(bin, [eof?: true])` or read the SAUCE comment lines field to get the length and position of the comment block, and check the result to verify the EOF character's correct position with respect to the SAUCE data. The returned `{position, length}` will always match that of the comment block itself and does not include the EOF character as part of the position or length. """ @spec match_sauce_record(binary(), Enum.t()) :: {non_neg_integer(), pos_integer()} | :nomatch def match_sauce_record(bin, opts \\ [eof?: false]) def match_sauce_record(bin, opts) when is_binary(bin) do # we check to see if the binary is at least big enough to accommodate the rest of the SAUCE. with bin_size when bin_size >= Sauce.sauce_record_byte_size() <- byte_size(bin), {eof?, match_opts} = Keyword.pop(opts, :eof?, false), {pos, _len} <- do_match_sauce(bin, eof?, match_opts), # SAUCE must actually be at least 128 bytes long. we need to ensure we didn't match some random SAUCE data that is truncated or otherwise invalid. 
0 <- (bin_size - pos - Sauce.sauce_record_byte_size()) do {pos, Sauce.sauce_record_byte_size()} else _ -> :nomatch end end defp do_match_sauce(bin, true, match_opts) do case :binary.match(bin, <<Sauce.eof_character(), Sauce.sauce_id()>>, match_opts) do :nomatch -> :nomatch {pos, length} -> {pos + 1, length - 1} end end defp do_match_sauce(bin, _eof?, match_opts) do :binary.match(bin, <<Sauce.sauce_id()>>, match_opts) end defp read_sauce_comment_lines(sauce_bin) do with {:ok, comment_lines} <- read_field(sauce_bin, :comment_lines), lines <- comment_lines |> :binary.decode_unsigned(:little) do {:ok, lines} end end defp extract_comments(bin, comment_lines) when is_binary(bin) and is_integer(comment_lines) and comment_lines > 0 do with sauce_byte_size = Sauce.sauce_byte_size(comment_lines), bin_size when bin_size >= sauce_byte_size <- byte_size(bin), comment_block_size = Sauce.comment_block_byte_size(comment_lines), comments_offset = bin_size - sauce_byte_size, <<_file_contents_bin::binary-size(comments_offset), comments_bin::binary-size(comment_block_size), _sauce_record::binary-size(Sauce.sauce_record_byte_size())>> = bin, true <- matches_comment_block?(comments_bin) do {:ok, comments_bin} else {:error, _reason} = err -> err _ -> {:error, :no_comments} end end defp extract_comments(bin, _comment_lines) when is_binary(bin) do # no comments present but we tried to grab them anyway {:error, :no_comments} end defp extract_sauce_record(bin) do with bin_size when bin_size >= Sauce.sauce_record_byte_size() <- byte_size(bin), sauce_offset = bin_size - Sauce.sauce_record_byte_size(), <<_ ::binary-size(sauce_offset), sauce_record_bin::binary-size(Sauce.sauce_record_byte_size())>> = bin, true <- matches_sauce?(sauce_record_bin) do {:ok, sauce_record_bin} else {:error, _reason} = err -> err _ -> {:error, :no_sauce} end end defp extract_contents(bin) when is_binary(bin) do with {:ok, {_sauce_record_bin, comment_lines}} <- sauce_handle(bin) do bin_size = byte_size(bin) offset = case extract_comments(bin, comment_lines) do {:ok, _comments} -> bin_size - Sauce.sauce_byte_size(comment_lines) {:error, :no_comments} -> bin_size - Sauce.sauce_record_byte_size() end <<contents_bin::binary-size(offset), _rest::binary>> = bin {:ok, contents_bin} else {:error, :no_sauce} -> {:ok, bin} end end defp maybe_extract_comments(bin, comment_lines) do case extract_comments(bin, comment_lines) do {:ok, _comments_bin} = result -> result {:error, :no_comments} -> {:ok, <<>>} end end end
lib/saucexages/io/sauce_binary.ex
0.800068
0.498413
sauce_binary.ex
starcoder
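The excerpt above never shows a top-level entry point, so here is a minimal usage sketch. The module name `Saucexages.IO.SauceBinary` is inferred from the file path and the file name is made up; everything else uses only functions visible above:

```elixir
# Minimal sketch, assuming the module is named after its path
# (lib/saucexages/io/sauce_binary.ex); "artwork.ans" is a made-up file.
alias Saucexages.IO.SauceBinary

bin = File.read!("artwork.ans")

case SauceBinary.match_sauce_record(bin) do
  {pos, _len} -> IO.puts("SAUCE record found at byte offset #{pos}")
  :nomatch -> IO.puts("no SAUCE record present")
end
```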
defmodule IVCU do @moduledoc """ Provides an API to validate, convert, and save files to abstract storages. ## Usage Suppose you have [a definition](`IVCU.Definition`) `MyApp.Image`. Then you can save a file from `Plug.Upload` struct like this. def save(%Plug.Upload{path: path}) do path |> IVCU.File.from_path() |> IVCU.save(MyApp.Image) end See [Getting Started](./guides/getting_started.md) guide for more information. """ alias IVCU.File @default_collection_traverser IVCU.CollectionTraverser.SyncTraverser @doc """ Save the file with provided definition. ## Algorithm - validate the original file; - generate new filenames; - apply transformations to the file providing new filenames; - put the new files in the storage. > #### Note {: .info} > > `definition` must implement [Definition](`IVCU.Definition`) > behaviour. > #### Note {: .info} > > Expected order of files is the same as order in which versions > were provided in the `definition`. """ @spec save(File.t(), module) :: {:ok, [File.t()]} | {:error, term} def save(file, definition) when is_atom(definition) do with :ok <- definition.validate(file) do do_save(file, definition) end end defp do_save(file, definition) do collection_traverser = Application.get_env( :ivcu, :collection_traverser, @default_collection_traverser ) collection_traverser.traverse(definition.versions(), fn version -> new_filename = definition.filename(version, file) with {:ok, converted} <- definition.converter().convert(file, version, new_filename), :ok <- definition.storage().put(converted) do definition.converter().clean!(converted) {:ok, converted} end end) end @doc """ Delete files from the storage. ## Algorithm - generate filenames for versions specified in the `definition` module; - delete the files from the storage. > #### Note {: .info} > > `definition` must implement [Definition](`IVCU.Definition`) > behaviour. """ @spec delete(File.t(), module) :: :ok | {:error, term} def delete(file, definition) when is_atom(definition) do with {:ok, _} <- do_delete(file, definition) do :ok end end defp do_delete(file, definition) do collection_traverser = Application.get_env( :ivcu, :collection_traverser, @default_collection_traverser ) collection_traverser.traverse(definition.versions(), fn version -> filename = definition.filename(version, file) with :ok <- definition.storage().delete(%{file | filename: filename}) do {:ok, nil} end end) end @doc """ Return urls to access all the versions of the file in the storage. > #### Note {: .info} > > `definition` must implement [Definition](`IVCU.Definition`) > behaviour. """ @spec urls(File.t(), module) :: %{required(atom) => url} when url: String.t() def urls(file, definition) when is_atom(definition) do for version <- definition.versions(), into: %{} do filename = definition.filename(version, file) {version, definition.storage().url(%{file | filename: filename})} end end end
lib/ivcu.ex
0.848722
0.422683
ivcu.ex
starcoder
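The moduledoc above assumes a `MyApp.Image` definition but never shows one. Below is a hypothetical sketch derived only from the callbacks that `save/2`, `delete/2`, and `urls/2` actually invoke (`validate/1`, `versions/0`, `filename/2`, `converter/0`, `storage/0]); every `MyApp.*` name is illustrative, and it assumes `IVCU.File` exposes a `:filename` field, as `delete/2` above suggests:

```elixir
defmodule MyApp.Image do
  # Hypothetical definition module; MyApp.Converter and MyApp.Storage are
  # assumed to implement the converter/storage contracts used by do_save/2.
  def versions, do: [:original, :thumb]

  def validate(%IVCU.File{filename: filename}) do
    if Path.extname(filename) in ~w(.jpg .png), do: :ok, else: {:error, :invalid_format}
  end

  def filename(version, %IVCU.File{filename: filename}), do: "#{version}_#{filename}"

  def converter, do: MyApp.Converter
  def storage, do: MyApp.Storage
end
```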
defmodule Statistics do
  alias Statistics.Math

  @moduledoc """
  Descriptive statistics functions
  """

  @doc """
  Sum the contents of a list

  Equivalent to `Enum.sum/1`, implemented as a tail-recursive accumulation
  """
  @spec sum(list) :: number
  def sum(list) when is_list(list), do: do_sum(list, 0)

  defp do_sum([], t), do: t
  defp do_sum([x | xs], t), do: do_sum(xs, t + x)

  @doc """
  Calculate the mean from a list of numbers

  ## Examples

      iex> Statistics.mean([])
      nil

      iex> Statistics.mean([1,2,3])
      2.0

  """
  @spec mean(list(number)) :: float() | nil
  def mean(list) when is_list(list), do: do_mean(list, 0, 0)

  defp do_mean([], 0, 0), do: nil
  defp do_mean([], t, l), do: t / l

  defp do_mean([x | xs], t, l) do
    do_mean(xs, t + x, l + 1)
  end

  @doc """
  Get the median value from a list.

  ## Examples

      iex> Statistics.median([])
      nil

      iex> Statistics.median([1,2,3])
      2

      iex> Statistics.median([1,2,3,4])
      2.5

  """
  @spec median(list) :: number
  def median([]), do: nil

  def median(list) when is_list(list) do
    midpoint =
      (length(list) / 2)
      |> Float.floor()
      |> round

    {l1, l2} =
      Enum.sort(list)
      |> Enum.split(midpoint)

    case length(l2) > length(l1) do
      true ->
        [med | _] = l2
        med

      false ->
        [m1 | _] = l2
        [m2 | _] = Enum.reverse(l1)
        mean([m1, m2])
    end
  end

  @doc """
  Get the most frequently occurring value

  ## Examples

      iex> Statistics.mode([])
      nil

      iex> Statistics.mode([1,2,3,2,4,5,2,6,7,2,8,9])
      2

  """
  @spec mode(list) :: number
  def mode([]), do: nil

  def mode(list) when is_list(list) do
    h = hist(list)
    max = Map.values(h) |> Enum.max()

    h
    |> Enum.find(fn {_, val} -> val == max end)
    |> elem(0)
  end

  @doc """
  Get a frequency count of the values in a list

  ## Examples

      iex> Statistics.hist([])
      nil

      iex> Statistics.hist([1,2,3,2,4,5,2,5,1,2,5,5])
      %{1 => 2, 2 => 4, 3 => 1, 4 => 1, 5 => 4}

  """
  @spec hist(list) :: map
  def hist([]), do: nil

  def hist(list) when is_list(list) do
    list
    |> Enum.reduce(%{}, fn tag, acc -> Map.update(acc, tag, 1, &(&1 + 1)) end)
  end

  @doc """
  Get the minimum value from a list

      iex> Statistics.min([])
      nil

      iex> Statistics.min([1,2,3])
      1

  If a non-empty list is provided, it is a call to Enum.min/1
  """
  @spec min(list) :: number
  def min([]), do: nil

  def min(list) do
    Enum.min(list)
  end

  @doc """
  Get the maximum value from a list

      iex> Statistics.max([])
      nil

      iex> Statistics.max([1,2,3])
      3

  If a non-empty list is provided, it is a call to Enum.max/1
  """
  @spec max(list) :: number
  def max([]), do: nil

  def max(list) do
    Enum.max(list)
  end

  @doc """
  Get the quartile cutoff value from a list.

  Only the first and third quartiles are supported.
## Examples iex> Statistics.quartile([1,2,3,4,5,6,7,8,9],:first) 3 iex> Statistics.quartile([1,2,3,4,5,6,7,8,9],:third) 7 """ # TODO change these to call `percentile/2` @spec quartile(list, atom) :: number def quartile(list, :first) do list |> split |> elem(0) |> median end def quartile(list, :third) do list |> split |> elem(1) |> median end @doc """ Get the nth percentile cutoff from a list ## Examples iex> Statistics.percentile([], 50) nil iex> Statistics.percentile([1], 50) 1 iex> Statistics.percentile([1,2,3,4,5,6,7,8,9],80) 7.4 iex> Statistics.percentile([1,2,3,4,5,6,7,8,9],100) 9 """ @spec percentile(list, number) :: number def percentile([], _), do: nil def percentile([x], _), do: x def percentile(list, 0), do: min(list) def percentile(list, 100), do: max(list) def percentile(list, n) when is_list(list) and is_number(n) do s = Enum.sort(list) r = n / 100.0 * (length(list) - 1) f = :erlang.trunc(r) lower = Enum.at(s, f) upper = Enum.at(s, f + 1) lower + (upper - lower) * (r - f) end @doc """ Get range of data ## Examples iex> Statistics.range([1,2,3,4,5,6]) 5 """ @spec range(list) :: number def range([]), do: nil def range(list) when is_list(list) do max(list) - min(list) end @doc """ Calculate the inter-quartile range ## Examples iex> Statistics.iqr([]) nil iex> Statistics.iqr([1,2,3,4,5,6,7,8,9]) 4 """ @spec iqr(list) :: number def iqr([]), do: nil def iqr(list) when is_list(list) do {first, second} = split(list) median(second) - median(first) end @doc """ Calculate variance from a list of numbers ## Examples iex> Statistics.variance([]) nil iex> Statistics.variance([1,2,3,4]) 1.25 iex> Statistics.variance([55,56,60,65,54,51,39]) 56.48979591836735 """ @spec variance(list) :: number def variance([]), do: nil def variance(list) when is_list(list) do list_mean = mean(list) list |> Enum.map(fn x -> (list_mean - x) * (list_mean - x) end) |> mean end @doc """ Calculate the standard deviation of a list ## Examples iex> Statistics.stdev([]) nil iex> Statistics.stdev([1,2]) 0.5 """ @spec stdev(list) :: number def stdev([]), do: nil def stdev(list) do list |> variance |> Math.sqrt() end @doc """ Calculate the trimmed mean of a list. Can specify cutoff values as a tuple, or simply choose the IQR min/max as the cutoffs ## Examples iex> Statistics.trimmed_mean([], :iqr) nil iex> Statistics.trimmed_mean([1,2,3], {1,3}) 2.0 iex> Statistics.trimmed_mean([1,2,3,4,5,5,6,6,7,7,8,8,10,11,12,13,14,15], :iqr) 7.3 """ @spec trimmed_mean(list, atom) :: number @spec trimmed_mean(list, tuple) :: number def trimmed_mean([], _), do: nil def trimmed_mean(list, :iqr) do {first, second} = split(list) trimmed_mean(list, {median(first), median(second)}) end def trimmed_mean(list, {low, high}) do list |> Enum.reject(fn x -> x < low or x > high end) |> mean end @doc """ Calculates the harmonic mean from a list Harmonic mean is the number of values divided by the sum of the reciprocal of all the values. 
  ## Examples

      iex> Statistics.harmonic_mean([])
      nil

      iex> Statistics.harmonic_mean([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15])
      4.5204836768674568

  """
  @spec harmonic_mean(list) :: number
  def harmonic_mean([]), do: nil

  def harmonic_mean(list) when is_list(list) do
    do_harmonic_mean(list, 0, 0)
  end

  defp do_harmonic_mean([], t, l), do: l / t

  defp do_harmonic_mean([x | xs], t, l) do
    do_harmonic_mean(xs, t + 1 / x, l + 1)
  end

  @doc """
  Calculate the geometric mean of a list

  Geometric mean is the nth root of the product of n values

  ## Examples

      iex> Statistics.geometric_mean([])
      nil

      iex> Statistics.geometric_mean([1,2,3])
      1.8171205928321397

  """
  @spec geometric_mean(list) :: number
  def geometric_mean([]), do: nil

  def geometric_mean(list) when is_list(list) do
    do_geometric_mean(list, 1, 0)
  end

  defp do_geometric_mean([], p, l), do: Math.pow(p, 1 / l)

  defp do_geometric_mean([x | xs], p, l) do
    do_geometric_mean(xs, p * x, l + 1)
  end

  @doc """
  Calculates the nth moment about the mean for a sample.

  Generally used to calculate coefficients of skewness and kurtosis.
  Returns the n-th central moment as a float.

  The denominator for the moment calculation is the number of observations,
  no degrees of freedom correction is done.

  ## Examples

      iex> Statistics.moment([1,2,3,4,5,6,7,8,9,8,7,6,5,4,3],3)
      -1.3440000000000025

      iex> Statistics.moment([], 2)
      nil

  """
  @spec moment(list, pos_integer) :: number
  def moment(list, n \\ 1)
  # empty list has no moment
  def moment([], _), do: nil
  # By definition the first moment about the mean is 0.
  def moment(_, 1), do: 0.0
  # Otherwise
  def moment(list, n) when is_list(list) and is_number(n) do
    lmean = mean(list)

    list
    |> Enum.map(&Math.pow(&1 - lmean, n))
    |> mean
  end

  @doc """
  Computes the skewness of a data set.

  For normally distributed data, the skewness should be about 0.
  A skewness value > 0 means that there is more weight in the right tail
  of the distribution.

  ## Examples

      iex> Statistics.skew([])
      nil

      iex> Statistics.skew([1,2,3,2,1])
      0.3436215967445454

  """
  @spec skew(list) :: number
  def skew([]), do: nil

  def skew(list) do
    m2 = moment(list, 2)
    m3 = moment(list, 3)
    m3 / Math.pow(m2, 1.5)
  end

  @doc """
  Computes the kurtosis (Fisher) of a list.

  Kurtosis is the fourth central moment divided by the square of the
  variance, minus 3 (the Fisher correction, so a normal distribution
  scores about 0).

  ## Examples

      iex> Statistics.kurtosis([])
      nil

      iex> Statistics.kurtosis([1,2,3,2,1])
      -1.1530612244897964

  """
  @spec kurtosis(list) :: number
  def kurtosis([]), do: nil

  def kurtosis(list) do
    m2 = moment(list, 2)
    m4 = moment(list, 4)
    # pearson
    p = m4 / Math.pow(m2, 2.0)
    # fisher
    p - 3
  end

  @doc """
  Calculate a standard `z` score for each item in a list

  ## Examples

      iex> Statistics.zscore([3,2,3,4,5,6,5,4,3])
      [-0.7427813527082074, -1.5784103745049407, -0.7427813527082074, 0.09284766908852597, 0.9284766908852594, 1.7641057126819928, 0.9284766908852594, 0.09284766908852597, -0.7427813527082074]

  """
  @spec zscore(list) :: list
  def zscore(list) when is_list(list) do
    lmean = mean(list)
    lstdev = stdev(list)
    for n <- list, do: (n - lmean) / lstdev
  end

  @doc """
  Calculate the Pearson product-moment correlation coefficient of two lists.

  The two lists are presumed to represent matched pairs of observations,
  the `x` and `y` of a simple regression.
## Examples iex> Statistics.correlation([1,2,3,4], [1,3,5,6]) 0.9897782665572894 """ @spec correlation(list, list) :: number def correlation(x, y) when length(x) == length(y) do xmean = mean(x) ymean = mean(y) numer = Enum.zip(x, y) |> Enum.map(fn {xi, yi} -> (xi - xmean) * (yi - ymean) end) |> sum denom_x = x |> Enum.map(fn xi -> (xi - xmean) * (xi - xmean) end) |> sum denom_y = y |> Enum.map(fn yi -> (yi - ymean) * (yi - ymean) end) |> sum numer / Math.sqrt(denom_x * denom_y) end @doc """ Calculate the covariance of two lists. Covariance is a measure of how much two random variables change together. The two lists are presumed to represent matched pairs of observations, such as the `x` and `y` of a simple regression. ## Examples iex> Statistics.covariance([1,2,3,2,1], [1,4,5.2,7,99]) -17.89 """ @spec covariance(list, list) :: number def covariance(x, y) when length(x) == length(y) do xmean = mean(x) ymean = mean(y) size = length(x) Enum.zip(x, y) |> Enum.map(fn {xi, yi} -> (xi - xmean) * (yi - ymean) end) |> Enum.map(fn i -> i / (size - 1) end) |> sum end ## helpers and other flotsam import Integer, only: [is_even: 1, is_odd: 1] # Split a list into two equal lists. # Needed for getting the quartiles. defp split(list) when is_list(list) do do_split(Enum.sort(list), length(list)) end defp do_split(sorted_list, l) when is_even(l) do m = :erlang.trunc(l / 2) {Enum.take(sorted_list, m), Enum.drop(sorted_list, m)} end defp do_split(sorted_list, l) when is_odd(l) do m = :erlang.trunc((l + 1) / 2) {Enum.take(sorted_list, m), Enum.drop(sorted_list, m - 1)} end end
lib/statistics.ex
0.822866
0.485173
statistics.ex
starcoder
defmodule Comet.Supervisor do
  use Supervisor

  @moduledoc """
  Primary Supervisor for Comet

  This Supervisor will manage the `ChromeLauncher` and `:poolboy`. You can opt in to the
  Supervisor managing `Comet.CacheWorker`.

  This Supervisor should be used directly in your own app's supervisor tree. You must pass in
  the options for configuring the workers. All of the options available:

  * `pool:` - the keyword list of pool options passed to `:poolboy.child_spec/3` as the 2nd
    argument. Please refer to `:poolboy`'s documentation for greater detail.
    * `name:` - defaults to `{:local, :comet_pool}` (you probably shouldn't change this one)
    * `size:` - defaults to `1`
    * `max_overflow:` - defaults to `0`
    * `strategy:` - defaults to `:fifo` (you probably shouldn't change this one)
    * `worker_module:` **(required)** the module in your app that used `Comet.TabWorker`
  * `worker:` - the keyword list of worker options passed to `:poolboy.child_spec/3` as the 3rd
    argument and used by your `TabWorker` module.
    * `launch_url:` - **(required)** the url a new tab will navigate to to ready itself for
      inbound requests. *Note: `launch_url:`, while required for usage, is currently flagged as
      non-public API and subject to change in the future. We will do our best to manage the
      debt.*
  * `cache_worker:` - allows you to opt in to having `Comet.Supervisor` manage a cache. The
    advantage here is the cache itself will be tied to the lifecycle of the Supervisor and can
    be restarted at the appropriate time if necessary. Valid options:
    * `Comet.CacheWorker` - you can supply the module directly
    * Your own custom caching module. If you go this route, please refer to
      `Comet.CacheWorker`'s documentation.

  ## Example config

      config :my_app, :comet,
        pool: [
          size: 5,
          worker_module: MyApp.TabWorker
        ],
        worker: [
          launch_url: "https://example.com"
        ],
        cache_worker: MyApp.CustomCacheWorker

  In environments you do not want to run Comet just pass `:ignore` as the opts value. A simple
  way to do this is to only define the config you want to use in the environments that will use
  Comet, with a default config value of `:ignore`

  ## Example supervisor setup

  **Note that this example uses the [Elixir 1.5 supervisor syntax](https://github.com/elixir-lang/elixir/blob/v1.5/CHANGELOG.md#streamlined-child-specs).**

      children = [
        ...
        {Comet.Supervisor, Application.get_env(:my_app, :comet, :ignore)}
      ]
  """

  @default_pool_opts [
    name: {:local, :comet_pool},
    size: 1,
    max_overflow: 0,
    strategy: :fifo
  ]

  @doc false
  def start_link(opts) do
    Supervisor.start_link(__MODULE__, opts, name: :comet_supervisor)
  end

  @doc false
  def child_spec(opts) do
    %{
      id: __MODULE__,
      start: {__MODULE__, :start_link, [opts]},
      restart: :permanent,
      type: :supervisor
    }
  end

  @doc false
  def init(:ignore), do: :ignore

  def init(opts) do
    left_merge = fn(_, v, _) -> v end

    pool_opts =
      opts
      |> Keyword.get(:pool, [])
      |> Keyword.merge(@default_pool_opts, left_merge)

    worker_opts = Keyword.get(opts, :worker, [])

    children = [
      ChromeLauncher,
      :poolboy.child_spec(:comet_pool, pool_opts, worker_opts)
    ]

    Keyword.get(opts, :cache_worker)
    |> case do
      nil -> children
      false -> children
      mod -> List.insert_at(children, 1, mod)
    end
    |> Supervisor.init(strategy: :rest_for_one)
  end
end
lib/comet/supervisor.ex
0.76947
0.462291
supervisor.ex
starcoder
defmodule RDF.Serialization.Format do
  @moduledoc """
  A behaviour for RDF serialization formats.

  A `RDF.Serialization` for a format can be implemented like this:

      defmodule SomeFormat do
        use RDF.Serialization.Format
        import RDF.Sigils

        @id ~I<http://example.com/some_format>
        @name :some_format
        @extension "ext"
        @media_type "application/some-format"
      end

  When the `@id`, `@name`, `@extension` and `@media_type` module attributes are defined, the
  respective behaviour functions are generated automatically and return these values.

  Then you'll have to do the main work by implementing a `RDF.Serialization.Encoder` and a
  `RDF.Serialization.Decoder` for the format.

  By default it is assumed that these are defined in `Encoder` and `Decoder` modules under the
  format's `RDF.Serialization.Format` module, i.e. in the example above in `SomeFormat.Encoder`
  and `SomeFormat.Decoder`. If you want them in another module, you'll have to override the
  `encoder/0` and/or `decoder/0` functions in your `RDF.Serialization.Format` module.
  """

  alias RDF.{Dataset, Graph}

  @doc """
  An IRI of the serialization format.
  """
  @callback id :: RDF.IRI.t

  @doc """
  The name atom of the serialization format.
  """
  @callback name :: atom

  @doc """
  The usual file extension for the serialization format.
  """
  @callback extension :: String.t

  @doc """
  The MIME type of the serialization format.
  """
  @callback media_type :: String.t

  @doc """
  A map with the supported options of the `Encoder` and `Decoder` for the serialization format.
  """
  @callback options :: map

  @doc """
  The `RDF.Serialization.Decoder` module for the serialization format.
  """
  @callback decoder :: module

  @doc """
  The `RDF.Serialization.Encoder` module for the serialization format.
  """
  @callback encoder :: module

  defmacro __using__(_) do
    quote bind_quoted: [], unquote: true do
      @behaviour unquote(__MODULE__)

      @decoder __MODULE__.Decoder
      @encoder __MODULE__.Encoder

      @impl unquote(__MODULE__)
      def decoder, do: @decoder

      @impl unquote(__MODULE__)
      def encoder, do: @encoder

      @impl unquote(__MODULE__)
      def options, do: %{}

      defoverridable [decoder: 0, encoder: 0, options: 0]

      @spec read_string(String.t, keyword) :: {:ok, Graph.t | Dataset.t} | {:error, any}
      def read_string(content, opts \\ []),
        do: RDF.Serialization.Reader.read_string(decoder(), content, opts)

      @spec read_string!(String.t, keyword) :: Graph.t | Dataset.t
      def read_string!(content, opts \\ []),
        do: RDF.Serialization.Reader.read_string!(decoder(), content, opts)

      @spec read_file(Path.t, keyword) :: {:ok, Graph.t | Dataset.t} | {:error, any}
      def read_file(file, opts \\ []),
        do: RDF.Serialization.Reader.read_file(decoder(), file, opts)

      @spec read_file!(Path.t, keyword) :: Graph.t | Dataset.t
      def read_file!(file, opts \\ []),
        do: RDF.Serialization.Reader.read_file!(decoder(), file, opts)

      @spec write_string(Graph.t | Dataset.t, keyword) :: {:ok, String.t} | {:error, any}
      def write_string(data, opts \\ []),
        do: RDF.Serialization.Writer.write_string(encoder(), data, opts)

      @spec write_string!(Graph.t | Dataset.t, keyword) :: String.t
      def write_string!(data, opts \\ []),
        do: RDF.Serialization.Writer.write_string!(encoder(), data, opts)

      @spec write_file(Graph.t | Dataset.t, Path.t, keyword) :: :ok | {:error, any}
      def write_file(data, path, opts \\ []),
        do: RDF.Serialization.Writer.write_file(encoder(), data, path, opts)

      @spec write_file!(Graph.t | Dataset.t, Path.t, keyword) :: :ok
      def write_file!(data, path, opts \\ []),
        do: RDF.Serialization.Writer.write_file!(encoder(), data, path, opts)

      @before_compile unquote(__MODULE__)
    end
  end

  defmacro
__before_compile__(_env) do quote do if !Module.defines?(__MODULE__, {:id, 0}) && Module.get_attribute(__MODULE__, :id) do @impl unquote(__MODULE__) def id, do: @id end if !Module.defines?(__MODULE__, {:name, 0}) && Module.get_attribute(__MODULE__, :name) do @impl unquote(__MODULE__) def name, do: @name end if !Module.defines?(__MODULE__, {:extension, 0}) && Module.get_attribute(__MODULE__, :extension) do @impl unquote(__MODULE__) def extension, do: @extension end if !Module.defines?(__MODULE__, {:media_type, 0}) && Module.get_attribute(__MODULE__, :media_type) do @impl unquote(__MODULE__) def media_type, do: @media_type end end end end
lib/rdf/serialization/format.ex
0.850562
0.517815
format.ex
starcoder
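Once `SomeFormat` also defines its `SomeFormat.Encoder` and `SomeFormat.Decoder`, the functions injected by `use RDF.Serialization.Format` above can be exercised directly; a small sketch (the `.ext` paths are made up):

```elixir
# Illustrative only: read_file/2, write_file!/3 and write_string/2 are all
# generated by the __using__/1 macro shown above.
{:ok, graph} = SomeFormat.read_file("input.ext")
:ok = SomeFormat.write_file!(graph, "output.ext")
{:ok, string} = SomeFormat.write_string(graph)
```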
defmodule Flux.Websocket.Frame do
  @moduledoc """
  Convenience functions for building websocket frames.
  """
  defstruct fin: false,
            reserved: %{},
            opcode: nil,
            mask?: false,
            payload_length: nil,
            mask: nil,
            payload: nil,
            close_code: nil,
            ref: nil

  opcodes = [
    continue: 0x0,
    text: 0x1,
    binary: 0x2,
    close: 0x8,
    ping: 0x9,
    pong: 0xA
  ]

  @typedoc """
  A parsed frame received from a client.
  """
  @type t :: %__MODULE__{
          fin: boolean,
          reserved: map,
          opcode: atom,
          mask?: boolean,
          payload_length: non_neg_integer,
          mask: integer,
          payload: binary | iodata,
          close_code: pos_integer | nil
        }

  @typedoc """
  Atom representations of the various opcodes that are available
  when building a websocket frame.
  """
  @type opcode :: :continue | :text | :binary | :close | :ping | :pong

  @doc """
  Builds a server websocket frame with the given opcode and payload.

  Supports binaries and iodata as payloads. Because this is a server frame,
  it does NOT mask the payload.
  """
  @spec build_frame(opcode, iodata | binary) :: iodata
  def build_frame(type, payload) do
    [
      frame_header(type, payload),
      payload
    ]
  end

  defp frame_header(type, payload) do
    # FIN bit set, the three reserved bits clear, then opcode,
    # the (unset) mask bit, and the payload length.
    <<
      1::1,
      0::1,
      0::1,
      0::1,
      opcode_from_atom(type)::4,
      0::1,
      payload_length(payload)::bitstring
    >>
  end

  defp payload_length(payload) when is_list(payload) do
    len = IO.iodata_length(payload)
    len_size = integer_bit_size(len)
    do_payload_length(len, len_size)
  end

  defp payload_length(payload) do
    len = byte_size(payload)
    len_size = integer_bit_size(len)
    do_payload_length(len, len_size)
  end

  defp do_payload_length(len, len_size) when len_size > 16 do
    <<127::7, len::64>>
  end

  defp do_payload_length(len, len_size) when len_size > 7 do
    <<126::7, len::16>>
  end

  defp do_payload_length(len, _) do
    <<len::7>>
  end

  defp integer_bit_size(int) when int > 0 do
    :math.log2(int)
  end

  defp integer_bit_size(_), do: 1

  @doc """
  Gives the corresponding atomic representation of an opcode

  For example:
  ```
  iex(1)> opcode_to_atom(1)
  :text
  ```
  """
  @spec opcode_to_atom(non_neg_integer) :: opcode
  for {atom, code} <- opcodes do
    def opcode_to_atom(unquote(code)), do: unquote(atom)
  end

  def opcode_to_atom(code) when code in 0x3..0x7 or code in 0xB..0xF, do: :reserved
  def opcode_to_atom(_), do: :error

  @doc """
  Gives the integer that an opcode atom represents

  For example:
  ```
  iex(1)> opcode_from_atom(:text)
  1
  ```
  """
  @spec opcode_from_atom(opcode) :: non_neg_integer
  for {atom, code} <- opcodes do
    def opcode_from_atom(unquote(atom)), do: unquote(code)
  end

  def opcode_from_atom(_), do: :error
end
lib/flux/websocket/frame.ex
0.854354
0.718767
frame.ex
starcoder
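Since `build_frame/2` returns iodata, a quick way to check the wire bytes is to flatten them; for a short unmasked text frame the header is the FIN/opcode byte `0x81` followed by the 7-bit payload length:

```elixir
iex> Flux.Websocket.Frame.build_frame(:text, "hi") |> IO.iodata_to_binary()
<<0x81, 0x02, 104, 105>>
```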
defmodule UberMulti do @moduledoc """ Provides the wrapper function for `Ecto.Multi`. There are two differences when using UberMulti compared to the normal Multi methods. The first is the addition of the 'keys' argument. The keys provided in this argument are used in two ways. First, each key is used to try and extract a result from the list of previous changes in the execution of the Multi. If a result is not found, the key is instead used as-is as the parameter. In this way you can mix and match between providing your own arguments and automatically fetching them from previous Multi results. The second difference is in the function that will be run when the Multi is executed. Given that the changes list is being preprocessed into a list of parameters to pass to the function, it will not need to take in the list of changes as normal, but simply take in the parameters it expects to perform its function. So an UberMulti call which uses the following function: ``` def add_things(thing1, thing2) do {:ok, thing1 + thing2} end ``` Might look like: ``` UberMulti.run(multi, :add_things, [:get_thing1, :get_thing2], &add_things/2) ``` Obviously this is very contrived, but shows how you can easily combine non-multi and multi-designed functions without having to wrap all the calls yourself and manually extract the results for the next one in the chain. Another change is that the 'run' method will also automatically wrap the responses from any methods called that do not conform to the two element tuple response pattern. If a method returns '{:ok, term()}' or '{:error, term()}' the response will be used as is. By default any non-conforming response will be wrapped in a success tuple, see `run/4` for configuration options. """ alias Ecto.Multi @typedoc "An '%Ecto.Multi{}' struct. See 'Ecto.Multi' for detailed specs." @type multi :: Ecto.Multi.t() @typedoc "A name to tag the multi function with. Must be unique within a single multi chain." @type name :: any() @typedoc "One or more keys, with a single key optionally being in a list." @type keys :: key | [key] @typedoc "The key to extract the parameters from using the changes map, or a value to pass directly to the callback." @type key :: any() @typedoc "A callback function to call with the parameters extracted from the changes map." @type run :: function() @typedoc "Whether or not to trust the result by default if it does not come wrapped in a result tuple." @type trust_result :: boolean() | fun() @doc """ Adds a function to run as part of the Multi. By default, functions which do not wrap their results in success tuples have their results returned as a successful call. By providing `false` as the last argument this behaviour is changed and results will be wrapped in an error tuple instead. Optionally, for finer grained control, a function which takes the result and returns a tagged value can be passed in as the last parameter instead of a boolean value. ## Examples UberMulti.run(multi, :send_email, [:get_hot_news_items, :get_top_forum_post_headlines], fn(news, posts) -> ...do stuff... 
      end)
      #Ecto.Multi{}

      Multi.new()
      |> UberMulti.run(:list_stars, ["Milky Way"], &Astronomy.list_stars/1)
      |> UberMulti.run(:reverse, [:list_stars], &Enum.reverse/1)
      |> UberMulti.run(:classify, [:reverse], &Astronomy.classify_stars/1,
           # Assume result is ok for brevity
           &{:ok, Enum.all?(&1, fn star -> star.class in ~w"O B A F G K M" end)})
      |> Repo.transaction()
  """
  @spec run(multi, name, keys, run, trust_result) :: multi
  def run(multi, name, keys, run, trust_result \\ true) do
    Multi.run(multi, name, fn _, changes ->
      extracted_args = extract_args(keys, changes)

      apply(run, extracted_args)
      |> maybe_wrap_response(trust_result)
    end)
  end

  @spec maybe_wrap_response(response :: term, trust_result) ::
          {:ok, term()} | {:error, term()}
  defp maybe_wrap_response(response, fun) when is_function(fun), do: fun.(response)
  defp maybe_wrap_response({:ok, _} = response, _), do: response
  defp maybe_wrap_response({:error, _} = response, _), do: response
  defp maybe_wrap_response(response, true), do: {:ok, response}
  defp maybe_wrap_response(response, false), do: {:error, response}

  @spec extract_args(keys :: term() | [term()], changes :: map()) :: [term()]
  defp extract_args(keys, changes) do
    keys
    |> List.wrap()
    |> Enum.map(fn key -> Map.get(changes, key, key) end)
  end
end
lib/uber_multi.ex
0.879082
0.937153
uber_multi.ex
starcoder
defmodule EVM.MachineState do
  @moduledoc """
  Module for tracking the current machine state, which is roughly
  equivalent to the VM state for an executing contract.

  This is most often seen as µ in the Yellow Paper.
  """

  alias EVM.Gas
  alias EVM.Stack
  alias EVM.MachineState
  alias EVM.ProgramCounter
  alias EVM.ExecEnv
  alias EVM.Operation.Metadata

  # g
  defstruct gas: nil,
            # pc
            program_counter: 0,
            # m
            memory: <<>>,
            # i
            active_words: 0,
            previously_active_words: 0,
            # s
            stack: []

  @type program_counter :: integer()
  @type memory :: binary()

  @type t :: %__MODULE__{
          gas: Gas.t(),
          program_counter: program_counter,
          memory: memory,
          active_words: integer(),
          stack: Stack.t()
        }

  @doc """
  Returns a new execution environment less the amount of gas specified.

  ## Examples

      iex> machine_state = %EVM.MachineState{gas: 10, stack: [1, 1], program_counter: 0}
      iex> exec_env = %EVM.ExecEnv{machine_code: <<EVM.Operation.metadata(:add).id>>}
      iex> EVM.MachineState.subtract_gas(machine_state, exec_env)
      %EVM.MachineState{gas: 7, stack: [1, 1]}
  """
  @spec subtract_gas(MachineState.t(), ExecEnv.t()) :: MachineState.t()
  def subtract_gas(machine_state, exec_env) do
    cost = Gas.cost(machine_state, exec_env)

    %{machine_state | gas: machine_state.gas - cost}
  end

  @doc """
  After a memory operation, we may have incremented the total number of
  active words. This function takes a memory offset accessed and updates
  the machine state accordingly.

  ## Examples

      iex> %EVM.MachineState{active_words: 2} |> EVM.MachineState.maybe_set_active_words(1)
      %EVM.MachineState{active_words: 2}

      iex> %EVM.MachineState{active_words: 2} |> EVM.MachineState.maybe_set_active_words(3)
      %EVM.MachineState{active_words: 3}
  """
  @spec maybe_set_active_words(t, EVM.val()) :: t
  def maybe_set_active_words(machine_state, last_word) do
    %{machine_state | active_words: max(machine_state.active_words, last_word)}
  end

  @doc """
  Pops n values off the stack

  ## Examples

      iex> EVM.MachineState.pop_n(%EVM.MachineState{stack: [1, 2, 3]}, 2)
      {[1, 2], %EVM.MachineState{stack: [3]}}
  """
  @spec pop_n(MachineState.t(), integer()) :: {list(EVM.val()), MachineState.t()}
  def pop_n(machine_state, n) do
    {values, stack} = Stack.pop_n(machine_state.stack, n)
    machine_state = %{machine_state | stack: stack}
    {values, machine_state}
  end

  @doc """
  Pushes a value onto the stack

  ## Examples

      iex> EVM.MachineState.push(%EVM.MachineState{stack: [2, 3]}, 1)
      %EVM.MachineState{stack: [1, 2, 3]}
  """
  @spec push(MachineState.t(), EVM.val()) :: MachineState.t()
  def push(machine_state, value) do
    %{machine_state | stack: Stack.push(machine_state.stack, value)}
  end

  @doc """
  Increments the program counter

  ## Examples

      iex> EVM.MachineState.move_program_counter(%EVM.MachineState{program_counter: 9}, EVM.Operation.metadata(:add), [1, 1])
      %EVM.MachineState{program_counter: 10}
  """
  @spec move_program_counter(MachineState.t(), Metadata.t(), list(EVM.val())) :: MachineState.t()
  def move_program_counter(machine_state, operation_metadata, inputs) do
    next_position = ProgramCounter.next(machine_state.program_counter, operation_metadata, inputs)

    %{machine_state | program_counter: next_position}
  end
end
apps/evm/lib/evm/machine_state.ex
0.829734
0.536434
machine_state.ex
starcoder
defmodule ExSDP.RepeatTimes do @moduledoc """ This module represents the field of SDP that specifies rebroadcasts of a session. Works directly in conjunction with timing `t` parameter. - active_duration - how long session will last - repeat_interval - interval of session rebroadcast - offsets - offset between scheduled rebroadcast If `start_time` of `t` is set to today 3 pm, `active_duration` is set to `3h`, `repeat_interval` is set to `14d` and `offsets` are `0 4d` then the session will be rebroadcasted today at 3 pm and on Thursday 3 pm every two week until `end_time` of param `t`. For more details please see [RFC4566 Section 5.10](https://tools.ietf.org/html/rfc4566#section-5.10). """ use Bunch.Access @enforce_keys [:repeat_interval, :active_duration, :offsets] defstruct @enforce_keys @type t :: %__MODULE__{ repeat_interval: non_neg_integer(), active_duration: non_neg_integer(), offsets: [non_neg_integer()] } @unit_mappings %{ "d" => 86_400, "h" => 3600, "m" => 60, "s" => 1 } @valid_keys Map.keys(@unit_mappings) @type reason :: :duration_nan | :interval_nan | :no_offsets | :malformed_repeat | {:invalid_offset | :invalid_unit, binary()} @spec parse(binary()) :: {:ok, t()} | {:error, reason} def parse(repeat) do case String.split(repeat, " ") do [interval, duration | offsets] = as_list -> if compact?(as_list) do parse_compact(as_list) else parse_explicit(interval, duration, offsets) end _malformed_repeat -> {:error, :malformed_repeat} end end defp compact?(parts) do Enum.any?(parts, fn time -> Enum.any?(@valid_keys, fn unit -> String.ends_with?(time, unit) end) end) end defp parse_explicit(_interval, _duration, []), do: {:error, :no_offsets} defp parse_explicit(interval, duration, offsets) do with {interval, ""} <- Integer.parse(interval), {duration, ""} <- Integer.parse(duration), {:ok, offsets} <- process_offsets(offsets) do explicit_repeat = %__MODULE__{ repeat_interval: interval, active_duration: duration, offsets: offsets } {:ok, explicit_repeat} else {:error, _} = error -> error _other_error -> {:error, :malformed_repeat} end end defp process_offsets(offsets, acc \\ []) defp process_offsets([], acc), do: {:ok, Enum.reverse(acc)} defp process_offsets([offset | rest], acc) do case Integer.parse(offset) do {offset, ""} when offset >= 0 -> process_offsets(rest, [offset | acc]) {_, _} -> {:error, {:invalid_offset, offset}} end end defp parse_compact(list) do with [_ | _] = result <- decode_compact(list) do result |> Enum.reverse() |> build_compact() end end defp decode_compact(list) do Enum.reduce_while(list, [], fn "0", acc -> {:cont, [0 | acc]} elem, acc -> case Integer.parse(elem) do {value, unit} when unit in @valid_keys -> time = value * @unit_mappings[unit] {:cont, [time | acc]} {_, invalid_unit} -> {:halt, {:error, {:invalid_unit, invalid_unit}}} end end) end defp build_compact(list) defp build_compact([_, _]), do: {:error, :no_offsets} defp build_compact([interval, duration | offsets]) do compact = %__MODULE__{ repeat_interval: interval, active_duration: duration, offsets: offsets } {:ok, compact} end end defimpl String.Chars, for: ExSDP.RepeatTimes do @impl true def to_string(repeat_times) do serialized_fields = [ Integer.to_string(repeat_times.repeat_interval), Integer.to_string(repeat_times.active_duration) ] ++ Enum.map(repeat_times.offsets, &Integer.to_string/1) Enum.join(serialized_fields, " ") end end
lib/ex_sdp/repeat_times.ex
0.848863
0.716318
repeat_times.ex
starcoder
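Both the explicit and the compact forms described in the moduledoc parse to the same struct; the values below are the `r=` example from RFC 4566 (weekly for one hour, at offsets 0 and 25 hours):

```elixir
iex> ExSDP.RepeatTimes.parse("604800 3600 0 90000")
{:ok, %ExSDP.RepeatTimes{repeat_interval: 604800, active_duration: 3600, offsets: [0, 90000]}}

iex> ExSDP.RepeatTimes.parse("7d 1h 0 25h")
{:ok, %ExSDP.RepeatTimes{repeat_interval: 604800, active_duration: 3600, offsets: [0, 90000]}}
```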
defmodule HashSet do @moduledoc """ A set store. The `HashSet` is meant to work well with both small and large sets. It is an implementation of the `Set` behaviour. For more information about the functions and their APIs, please consult the `Set` module. """ @behaviour Set # The ordered record contains a single bucket. @ordered_threshold 8 defrecordp :ordered, HashSet, size: 0, bucket: [] # The bucketed record contains a series of buckets. @expand_load 5 @contract_load 2 @node_bitmap 0b1111 @node_shift 4 @node_size 16 @node_template :erlang.make_tuple(@node_size, []) @expand_default (@node_size * @expand_load) @contract_default @contract_load defrecordp :trie, HashSet, size: 0, depth: 0, expand_on: @expand_default, contract_on: @contract_default, root: @node_template import Bitwise @compile :inline_list_funcs @compile { :inline, bucket_hash: 1, bucket_index: 1, bucket_nth_index: 2, bucket_next: 1 } @doc """ Creates a new empty set. """ def new() do ordered() end @doc """ Creates a new set from the given enumerable. ## Examples iex> HashSet.new [1, 1, 2, 3, 3] |> HashSet.to_list [1,2,3] """ def new(members) do Enum.reduce members, ordered(), fn member, set -> put(set, member) end end def union(set1, set2) when is_record(set1, HashSet) and is_record(set2, HashSet) and elem(set1, 1) <= elem(set2, 1) do set_fold set1, set2, fn v1, acc -> put(acc, v1) end end def union(set1, set2) when is_record(set1, HashSet) and is_record(set2, HashSet) do set_fold set2, set1, fn v, acc -> put(acc, v) end end def intersection(set1, set2) when is_record(set1, HashSet) and is_record(set2, HashSet) and elem(set1, 1) <= elem(set2, 1) do set_filter set1, fn e -> set_member?(set2, e) end end def intersection(set1, set2) when is_record(set1, HashSet) and is_record(set2, HashSet) do set_filter set2, fn e -> set_member?(set1, e) end end def difference(set1, set2) when is_record(set1, HashSet) and is_record(set2, HashSet) do set_filter set1, fn m -> not set_member?(set2, m) end end def member?(set, member) when is_record(set, HashSet) do set_member?(set, member) end def empty(_) do ordered() end def size(set) do elem(set, 1) end def to_list(ordered(bucket: bucket)) do bucket end def to_list(set) do set_fold(set, [], &[&1|&2]) |> :lists.reverse end def put(set, member) do { set, _ } = set_put(set, member) set end def delete(set, member) do { set, _ } = set_delete(set, member) set end def equal?(set1, set2) do size = elem(set1, 1) case elem(set2, 1) do ^size -> set_equal?(set1, set2) _ -> false end end def subset?(set1, set2) do set_equal?(set1, set2) end def disjoint?(set1, set2) do set_disjoint?(set1, set2) end @doc false def reduce(ordered(bucket: bucket), acc, fun) do :lists.foldl(fun, acc, bucket) end def reduce(trie() = set, acc, fun) do set_fold(set, acc, fun) end ## HashSet-wide functions defp set_filter(ordered(bucket: bucket, size: size), fun) do { new, removed_count } = bucket_filter(bucket, fun, [], 0) ordered(bucket: new, size: size - removed_count) end defp set_filter(trie(root: root, depth: depth, size: size) = set, fun) do { new, removed_count } = node_filter(root, depth, fun, @node_size) if depth > 0 and trie(set, :contract_on) >= (size - removed_count) do contract_trie(trie(root: new, size: size - removed_count, depth: depth, contract_on: trie(set, :contract_on), expand_on: trie(set, :expand_on))) else trie(size: size - removed_count, root: new, depth: depth) end end defp set_put(ordered(size: @ordered_threshold, bucket: bucket), member) do root = node_relocate(bucket, 0) set_put(trie(size: 
@ordered_threshold, root: root), member) end defp set_put(ordered(size: size, bucket: bucket) = set, member) do { new, count } = bucket_put(bucket, member) { ordered(set, size: size + count, bucket: new), count } end defp set_put(trie(root: root, depth: depth, size: size, expand_on: size, contract_on: contract_on) = set, member) do root = node_expand(root, depth, depth + 1) set = trie(set, root: root, depth: depth + 1, expand_on: size * @node_size, contract_on: contract_on * @node_size) set_put(set, member) end defp set_put(trie(root: root, size: size, depth: depth) = set, member) do pos = bucket_hash(member) { root, count } = node_put(root, depth, pos, member) { trie(set, size: size + count, root: root), count } end defp set_member?(ordered(bucket: bucket), member) do :lists.member(member, bucket) end defp set_member?(trie(root: root, depth: depth), member) do :lists.member(member, node_bucket(root, depth, bucket_hash(member))) end defp set_delete(ordered(bucket: bucket, size: size) = set, member) do case bucket_delete(bucket, member) do { _, 0 } -> { set, 0 } { new_bucket, -1 } -> { ordered(set, size: size - 1, bucket: new_bucket), -1 } end end defp set_delete(trie(root: root, size: size, depth: depth) = set, member) do pos = bucket_hash(member) case node_delete(root, depth, pos, member) do { _, 0 } -> { set, 0 } { root, -1 } -> { if depth > 0 and trie(set, :contract_on) == size do root = node_contract(root, depth) trie(set, root: root, size: size - 1, depth: depth - 1, contract_on: div(size, @node_size), expand_on: div(trie(set, :expand_on), @node_size)) else trie(set, size: size - 1, root: root) end, -1 } end end defp set_equal?(set1, set2) do try do reduce(set1, true, fn member, acc -> case member?(set2, member) do true -> acc _ -> throw(:error) end end) catch :error -> false end end defp set_disjoint?(set1, set2) do try do reduce(set1, true, fn member, acc -> case member?(set2, member) do false -> acc _ -> throw(:error) end end) catch :error -> false end end defp set_fold(ordered(bucket: bucket), acc, fun) do bucket_fold(bucket, acc, fun) end defp set_fold(trie(root: root, depth: depth), acc, fun) do node_fold(root, depth, acc, fun, @node_size) end ## Bucket helpers defp bucket_filter([e|bucket], fun, acc, count) do case fun.(e) do true -> bucket_filter(bucket, fun, [e|acc], count) false -> bucket_filter(bucket, fun, acc, count + 1) end end defp bucket_filter([], _fun, acc, count) do { :lists.reverse(acc), count } end defp bucket_put([m|_]=bucket, member) when m > member do { [member|bucket], 1 } end defp bucket_put([member|bucket], member) do { [member|bucket], 0 } end defp bucket_put([e|bucket], member) do { rest, count } = bucket_put(bucket, member) { [e|rest], count } end defp bucket_put([], member) do { [member], 1 } end defp bucket_put!([m|_]=bucket, member) when m > member, do: [member|bucket] defp bucket_put!([member|bucket], member), do: [member|bucket] defp bucket_put!([e|bucket], member), do: [e|bucket_put!(bucket, member)] defp bucket_put!([], member), do: [member] # Deletes a key from the bucket defp bucket_delete([m,_|_]=bucket, member) when m > member do { bucket, 0 } end defp bucket_delete([member|bucket], member) do { bucket, -1 } end defp bucket_delete([e|bucket], member) do { rest, count } = bucket_delete(bucket, member) { [e|rest], count } end defp bucket_delete([], _member) do { [], 0 } end defp bucket_fold(bucket, acc, fun) do :lists.foldl(fun, acc, bucket) end defp bucket_hash(key) do :erlang.phash2(key) end defp bucket_nth_index(hash, n) do (hash >>> 
(@node_shift * n)) &&& @node_bitmap end defp bucket_index(hash) do hash &&& @node_bitmap end defp bucket_next(hash) do hash >>> @node_shift end # Trie resizing defp contract_trie(trie(depth: 0) = set) do set end defp contract_trie(trie(root: root, depth: depth, size: size, contract_on: contract_on, expand_on: expand_on) = set) when size <= contract_on do new_contract_on = div(contract_on, @node_size) new_expand_on = div(expand_on, @node_size) if new_contract_on == 0, do: new_contract_on = @contract_default if new_expand_on == 0, do: new_expand_on = @expand_default contract_trie(trie(set, root: node_contract(root, depth), size: size, depth: depth - 1, contract_on: new_contract_on, expand_on: new_expand_on)) end defp contract_trie(set) do set end # Node helpers # Gets a bucket from the node defp node_bucket(node, 0, hash) do elem(node, bucket_index(hash)) end defp node_bucket(node, depth, hash) do child = elem(node, bucket_index(hash)) node_bucket(child, depth - 1, bucket_next(hash)) end defp node_put(node, 0, hash, member) do pos = bucket_index(hash) { new, count } = bucket_put(elem(node, pos), member) { set_elem(node, pos, new), count } end defp node_put(node, depth, hash, member) do pos = bucket_index(hash) { new, count } = node_put(elem(node, pos), depth - 1, bucket_next(hash), member) { set_elem(node, pos, new), count } end # Deletes a key from the bucket defp node_delete(node, 0, hash, member) do pos = bucket_index(hash) case bucket_delete(elem(node, pos), member) do { _, 0 } -> { node, 0 } { new, -1 } -> { set_elem(node, pos, new), -1 } end end defp node_delete(node, depth, hash, member) do pos = bucket_index(hash) case node_delete(elem(node, pos), depth - 1, bucket_next(hash), member) do { _, 0 } -> { node, 0 } { new, -1 } -> { set_elem(node, pos, new), -1 } end end defp node_fold(bucket, -1, acc, fun, _) do bucket_fold(bucket, acc, fun) end defp node_fold(node, depth, acc, fun, count) when count >= 1 do acc = node_fold(:erlang.element(count, node), depth - 1, acc, fun, @node_size) node_fold(node, depth, acc, fun, count - 1) end defp node_fold(_node, _, acc, _fun, 0) do acc end defp node_filter(bucket, -1, fun, _) do bucket_filter(bucket, fun, [], 0) end defp node_filter(node, depth, fun, count) when count >= 1 do case node_filter(:erlang.element(count, node), depth - 1, fun, @node_size) do { _, 0 } -> node_filter(node, depth, fun, count - 1) { new_element, count1 } -> { new_node, count2 } = node_filter(:erlang.setelement(count, node, new_element), depth, fun, count - 1) { new_node, count1 + count2 } end end defp node_filter(node, _, _fun, 0) do { node, 0 } end defp node_relocate(node // @node_template, bucket, n) do :lists.foldl fn member, acc -> pos = member |> bucket_hash() |> bucket_nth_index(n) set_elem(acc, pos, bucket_put!(elem(acc, pos), member)) end, node, bucket end # Node resizing defp node_expand({ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16 }, 0, n) do { node_relocate(b1, n), node_relocate(b2, n), node_relocate(b3, n), node_relocate(b4, n), node_relocate(b5, n), node_relocate(b6, n), node_relocate(b7, n), node_relocate(b8, n), node_relocate(b9, n), node_relocate(b10, n), node_relocate(b11, n), node_relocate(b12, n), node_relocate(b13, n), node_relocate(b14, n), node_relocate(b15, n), node_relocate(b16, n) } end defp node_expand({ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16 }, depth, n) do depth = depth - 1 { node_expand(b1, depth, n), node_expand(b2, depth, n), node_expand(b3, depth, n), node_expand(b4, depth, n), 
node_expand(b5, depth, n), node_expand(b6, depth, n), node_expand(b7, depth, n), node_expand(b8, depth, n), node_expand(b9, depth, n), node_expand(b10, depth, n), node_expand(b11, depth, n), node_expand(b12, depth, n), node_expand(b13, depth, n), node_expand(b14, depth, n), node_expand(b15, depth, n), node_expand(b16, depth, n) } end defp node_contract({ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16 }, depth) when depth > 0 do depth = depth - 1 { node_contract(b1, depth), node_contract(b2, depth), node_contract(b3, depth), node_contract(b4, depth), node_contract(b5, depth), node_contract(b6, depth), node_contract(b7, depth), node_contract(b8, depth), node_contract(b9, depth), node_contract(b10, depth), node_contract(b11, depth), node_contract(b12, depth), node_contract(b13, depth), node_contract(b14, depth), node_contract(b15, depth), node_contract(b16, depth) } end defp node_contract({ b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16 }, 0) do b1 |> each_contract(b2) |> each_contract(b3) |> each_contract(b4) |> each_contract(b5) |> each_contract(b6) |> each_contract(b7) |> each_contract(b8) |> each_contract(b9) |> each_contract(b10) |> each_contract(b11) |> each_contract(b12) |> each_contract(b13) |> each_contract(b14) |> each_contract(b15) |> each_contract(b16) end defp each_contract([m1|acc], [m2|_]=bucket) when m1 < m2, do: [m1|each_contract(acc, bucket)] defp each_contract(acc, [m|bucket]), do: [m|each_contract(acc, bucket)] defp each_contract([], bucket), do: bucket defp each_contract(acc, []), do: acc @doc false def inspect_depth(trie(depth: d)), do: d @doc false def inspect_contract(trie(contract_on: c)), do: c @doc false def inspect_expand(trie(expand_on: e)), do: e end defimpl Enumerable, for: HashSet do def reduce(set, acc, fun), do: HashSet.reduce(set, acc, fun) def member?(set, v), do: HashSet.member?(set, v) def count(set), do: HashSet.size(set) end
lib/elixir/lib/hash_set.ex
0.789274
0.664241
hash_set.ex
starcoder
defmodule Inky.PixelUtil do @moduledoc """ PixelUtil maps pixels to bitstrings to be sent to an Inky screen """ def pixels_to_bits(pixels, width, height, rotation_degrees, color_map) do {outer_axis, dimension_vectors} = rotation_degrees |> normalised_rotation() |> rotation_opts() dimension_vectors |> rotated_ranges(width, height) |> do_pixels_to_bits( &pixels[pixel_key(outer_axis, &1, &2)], &(color_map[&1] || color_map.miss) ) end @doc """ Only exposed for testing purposes. Do not use. iex> Enum.map( ...> [-360, -270, -180, -90, 0, 90, 180, 270, 360, 450], ...> &Inky.PixelUtil.normalised_rotation/1 ...> ) [0, 1, 2, 3, 0, 1, 2, 3, 0, 1] """ def normalised_rotation(degrees) do r = degrees |> div(90) |> rem(4) if(r < 0, do: r + 4, else: r) end @doc """ Only exposed for testing purposes. Do not use. iex> Enum.map([3, 1, 2, 0], &Inky.PixelUtil.rotation_opts/1) [{:x, {{:x, -1}, {:y, 1}}}, {:x, {{:x, 1}, {:y, -1}}}, {:y, {{:y, -1}, {:x, -1}}}, {:y, {{:y, 1}, {:x, 1}}}] """ def rotation_opts(rotation) do case rotation do 3 -> {:x, {{:x, -1}, {:y, 1}}} 1 -> {:x, {{:x, 1}, {:y, -1}}} 2 -> {:y, {{:y, -1}, {:x, -1}}} 0 -> {:y, {{:y, 1}, {:x, 1}}} end end defp rotated_ranges({i_spec, j_spec}, i_n, j_m) do { rotated_dimension(i_n, j_m, i_spec), rotated_dimension(i_n, j_m, j_spec) } end defp rotated_dimension(width, _height, {:x, 1}), do: 0..(width - 1) defp rotated_dimension(width, _height, {:x, -1}), do: (width - 1)..0 defp rotated_dimension(_width, height, {:y, 1}), do: 0..(height - 1) defp rotated_dimension(_width, height, {:y, -1}), do: (height - 1)..0 defp do_pixels_to_bits({i_range, j_range}, pixel_picker, cmap) do for i <- i_range, j <- j_range, do: <<cmap.(pixel_picker.(i, j))::size(1)>>, into: <<>> end defp pixel_key(:x, i, j), do: {i, j} defp pixel_key(:y, i, j), do: {j, i} end
lib/display/pixelutil.ex
0.861247
0.580263
pixelutil.ex
starcoder
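A minimal sketch of `pixels_to_bits/5` on a 2x2 buffer: pixels are keyed by `{x, y}`, and the color map (whose keys, apart from the required `:miss` fallback, are chosen by the caller and are illustrative here) turns each color into a single bit:

```elixir
pixels = %{{0, 0} => :black, {1, 0} => :white, {0, 1} => :white, {1, 1} => :black}
color_map = %{black: 0, white: 1, miss: 0}

Inky.PixelUtil.pixels_to_bits(pixels, 2, 2, 0, color_map)
#=> <<0::1, 1::1, 1::1, 0::1>>
```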
defmodule Day5 do
  def from_file(path) do
    File.read!(path)
    |> String.split(",")
    |> Enum.map(&Integer.parse/1)
    |> Enum.map(&(elem(&1, 0)))
  end

  # Replace the value stored at `address` with `value`.
  def modify(memory, address, value) do
    List.replace_at(memory, address, value)
  end

  def read_instruction(value) do
    {params, inst} =
      Integer.digits(value)
      |> Enum.split((value |> Integer.digits() |> length) - 2)

    {Enum.reverse(params), Integer.undigits(inst)}
  end

  def execute(%{:memory => memory, :pc => pc} = runtime) do
    {modes, inst} = read_instruction(Enum.at(memory, pc))

    if inst == 99 do
      runtime
    else
      case exec_inst(runtime, inst, modes) do
        %{} = runtime -> execute(runtime)
        :error -> runtime
      end
    end
  end

  def value(memory, address, mode) do
    if mode == 0 do
      Enum.at(memory, Enum.at(memory, address))
    else
      Enum.at(memory, address)
    end
  end

  def mode(modes, param) do
    case Enum.fetch(modes, param) do
      {:ok, mode} -> mode
      :error -> 0
    end
  end

  # add
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 1, modes) do
    x = value(memory, pc + 1, mode(modes, 0))
    y = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)
    %{runtime | :memory => memory |> modify(address, x + y), :pc => pc + 4}
  end

  # multiply
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 2, modes) do
    x = value(memory, pc + 1, mode(modes, 0))
    y = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)
    %{runtime | :memory => memory |> modify(address, x * y), :pc => pc + 4}
  end

  # input
  def exec_inst(%{:memory => memory, :pc => pc, :input => input} = runtime, 3, _) do
    address = Enum.at(memory, pc + 1)
    %{runtime | :memory => memory |> modify(address, input), :pc => pc + 2}
  end

  # output
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 4, _) do
    address = Enum.at(memory, pc + 1)
    %{runtime | :memory => memory, :pc => pc + 2, :output => Enum.at(memory, address)}
  end

  # jump-if-true
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 5, modes) do
    jump_if_true = value(memory, pc + 1, mode(modes, 0))
    jump_to = value(memory, pc + 2, mode(modes, 1))

    if jump_if_true != 0 do
      %{runtime | :pc => jump_to}
    else
      %{runtime | :pc => pc + 3}
    end
  end

  # jump-if-false
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 6, modes) do
    jump_if_false = value(memory, pc + 1, mode(modes, 0))
    jump_to = value(memory, pc + 2, mode(modes, 1))

    if jump_if_false == 0 do
      %{runtime | :pc => jump_to}
    else
      %{runtime | :pc => pc + 3}
    end
  end

  # less than
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 7, modes) do
    first = value(memory, pc + 1, mode(modes, 0))
    second = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)

    if first < second do
      %{runtime | :memory => memory |> modify(address, 1), :pc => pc + 4}
    else
      %{runtime | :memory => memory |> modify(address, 0), :pc => pc + 4}
    end
  end

  # equals
  def exec_inst(%{:memory => memory, :pc => pc} = runtime, 8, modes) do
    first = value(memory, pc + 1, mode(modes, 0))
    second = value(memory, pc + 2, mode(modes, 1))
    address = Enum.at(memory, pc + 3)

    if first == second do
      %{runtime | :memory => memory |> modify(address, 1), :pc => pc + 4}
    else
      %{runtime | :memory => memory |> modify(address, 0), :pc => pc + 4}
    end
  end

  # unknown opcode: report and halt
  def exec_inst(_runtime, inst, _modes) do
    IO.puts("invalid instruction #{inst}")
    :error
  end

  def run(program, input) do
    execute(%{:memory => program, :pc => 0, :input => input, :output => nil})
  end

  def solution do
    IO.puts("#{from_file("day5_input.txt") |> run(1) |> Map.get(:output)}")
    IO.puts("#{from_file("day5_input.txt") |> run(5) |> Map.get(:output)}")
  end
end
lib/day5.ex
0.552781
0.511595
day5.ex
starcoder
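For intuition, a tiny Intcode program exercises the run loop in `Day5` above. The program is mine, not puzzle input: `3,0,4,0,99` stores the input at address 0, outputs it, and halts.

```elixir
Day5.run([3, 0, 4, 0, 99], 42) |> Map.get(:output)
# => 42  (opcode 3 stores the input, opcode 4 echoes it, 99 halts)
```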
defmodule ExIbus.Message do
  use Bitwise

  @moduledoc """
  A struct that keeps information about an Ibus message.
  It contains 3 main fields:

  * `:src` - message source
  * `:dst` - message destination (receiver)
  * `:msg` - message content

  The module also contains several functions for operating on a message.
  """

  @enforce_keys [:src, :dst, :msg]
  defstruct src: <<0x00>>, dst: <<0x00>>, msg: <<0x00>>
  @type t :: %__MODULE__{src: binary, dst: binary, msg: binary}

  defimpl Inspect do
    def inspect(%ExIbus.Message{} = message, _) do
      message
      |> ExIbus.Message.raw()
      |> String.codepoints()
      |> Enum.map(&"0x#{Base.encode16(&1)}")
      |> Enum.join(" ")
    end
  end

  @doc """
  A helper function for creating message structs quickly.

  ## Example:
  ```elixir
  iex(1)> ExIbus.Message.create(<<0x18>>, <<0xFF>>, <<0x02, 0x01>>)
  %ExIbus.Message{src: <<0x18>>, dst: <<0xFF>>, msg: <<0x02, 0x01>>}
  ```
  """
  @spec create(binary, binary, binary) :: ExIbus.Message.t()
  def create(src, dst, msg), do: %__MODULE__{src: src, dst: dst, msg: msg}

  @doc """
  Create a raw binary message with the length byte and the trailing XOR byte.

  ## Example:
  ```elixir
  iex(1)> ExIbus.Message.raw(%ExIbus.Message{src: <<0x68>>, dst: <<0x18>>, msg: <<0x0A, 0x00>>})
  <<104, 4, 24, 10, 0, 126>>
  ```

  This message can be sent onto the Ibus of a car and will be received normally.
  """
  @spec raw(ExIbus.Message.t()) :: binary
  def raw(%__MODULE__{src: src, dst: dst, msg: msg} = message) do
    full = src <> <<len(message)>> <> dst <> msg
    full <> <<xor(full)>>
  end

  @doc """
  Check if a given raw message is a valid Ibus message.
  This function is really useful when scanning the Ibus of a car.
  """
  @spec valid?(binary) :: boolean
  def valid?(<<src::size(8), lng::size(8), dst::size(8), msg::binary>> = raw_msg)
      when is_binary(raw_msg) do
    case byte_size(<<dst>> <> msg) == lng && byte_size(msg) > 0 do
      false ->
        false

      true ->
        # msg will contain the xor byte as well and we have to remove it
        msg = :binary.part(msg, 0, byte_size(msg) - 1)
        raw_msg == %__MODULE__{src: <<src>>, dst: <<dst>>, msg: msg} |> raw()
    end
  end

  def valid?(_), do: false

  @doc """
  Will try to create a new `ExIbus.Message.t` from the given raw binary message
  """
  @spec parse(binary) :: {:ok, ExIbus.Message.t()} | {:error, term}
  def parse(<<src::size(8), _::size(8), dst::size(8), msg::binary>> = raw) do
    case valid?(raw) do
      false ->
        {:error, "Wrong message passed for parsing"}

      true ->
        # msg will contain the xor byte as well and we have to remove it
        msg = :binary.part(msg, 0, byte_size(msg) - 1)
        {:ok, %__MODULE__{src: <<src>>, dst: <<dst>>, msg: msg}}
    end
  end

  def parse(_), do: {:error, "Wrong message passed"}

  # Calculate the xor (checksum) of a message.
  # It's the last byte of an Ibus message.
  defp xor(msg) when is_binary(msg) do
    msg
    |> :binary.bin_to_list()
    |> Enum.reduce(0, fn x, acc -> Bitwise.bxor(acc, x) end)
  end

  # Fallback returns an integer so the result can always be used in <<xor(...)>>.
  defp xor(_), do: 0x00

  # Calculate the length of an Ibus message.
  # Note that in the Ibus protocol the source of the message is not counted in the length.
  defp len(%__MODULE__{dst: dst, msg: msg}) do
    byte_size(dst <> msg) + 1
  end

  defp len(_), do: 0
end
lib/ex_ibus/message.ex
0.879509
0.722551
message.ex
starcoder
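A small round-trip sketch using only the public `ExIbus.Message` API above: encoding a message to raw bytes and parsing those bytes back yields the original struct.

```elixir
msg = ExIbus.Message.create(<<0x68>>, <<0x18>>, <<0x0A, 0x00>>)
raw = ExIbus.Message.raw(msg)
# raw == <<0x68, 0x04, 0x18, 0x0A, 0x00, 0x7E>>
true = ExIbus.Message.valid?(raw)
{:ok, ^msg} = ExIbus.Message.parse(raw)
```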
defmodule ExBreak.Breaker do
  @moduledoc """
  A server that serves as a circuit breaker for a single function
  """

  @typedoc """
  A struct representing the state of a circuit breaker

  - `break_count` The number of breaks that have occurred
  - `tripped` Whether the circuit breaker is tripped
  - `tripped_at` The time at which the circuit breaker was tripped (or `nil`, if un-tripped)
  """
  @type t :: %__MODULE__{
          break_count: non_neg_integer,
          tripped: boolean,
          tripped_at: DateTime.t() | nil
        }

  defstruct break_count: 0, tripped: false, tripped_at: nil

  use Agent, restart: :temporary

  @doc """
  Start a new breaker.
  """
  @spec start_link(Keyword.t()) :: Agent.on_start()
  def start_link(_opts) do
    Agent.start_link(fn -> new() end)
  end

  @doc """
  Create a new circuit breaker.
  """
  @spec new :: t
  def new do
    %__MODULE__{}
  end

  @doc """
  Increment a circuit breaker.

  If the new break count meets or exceeds the given threshold, the breaker is
  also marked as tripped. An optional `on_trip` callback may be passed; it is
  invoked (in a spawned, linked process) with the breaker when it trips.

      iex> {:ok, pid} = ExBreak.Breaker.start_link([])
      iex> ExBreak.Breaker.increment(pid, 10)
      iex> ExBreak.Breaker.is_tripped(pid, 60)
      false
  """
  @spec increment(pid, pos_integer, (t -> any) | nil) :: :ok
  def increment(pid, threshold, on_trip \\ nil) do
    Agent.update(pid, fn breaker ->
      break_count = breaker.break_count + 1
      tripped = break_count >= threshold
      tripped_at = if tripped, do: DateTime.utc_now()

      breaker =
        Map.merge(breaker, %{break_count: break_count, tripped: tripped, tripped_at: tripped_at})

      if tripped && on_trip do
        spawn_link(fn -> on_trip.(breaker) end)
      end

      breaker
    end)
  end

  @doc """
  Determine whether the given circuit breaker is tripped.

  The second argument, timeout_sec, is the time that must pass for a tripped
  circuit breaker to re-open.

      iex> {:ok, pid} = ExBreak.Breaker.start_link([])
      iex> ExBreak.Breaker.increment(pid, 10)
      iex> ExBreak.Breaker.is_tripped(pid, 10)
      false

      iex> {:ok, pid} = ExBreak.Breaker.start_link([])
      iex> ExBreak.Breaker.increment(pid, 1)
      iex> ExBreak.Breaker.is_tripped(pid, 10)
      true

      iex> {:ok, pid} = ExBreak.Breaker.start_link([])
      iex> ExBreak.Breaker.increment(pid, 1)
      iex> ExBreak.Breaker.is_tripped(pid, 0)
      false
  """
  @spec is_tripped(pid, non_neg_integer) :: boolean
  def is_tripped(pid, timeout_sec) do
    Agent.get(pid, fn breaker ->
      if breaker.tripped do
        DateTime.diff(DateTime.utc_now(), breaker.tripped_at, :second) < timeout_sec
      else
        false
      end
    end)
  end

  @doc """
  Reset a tripped circuit breaker by creating a new circuit breaker.

  If the circuit breaker is not tripped, it is simply returned.

      iex> {:ok, pid} = ExBreak.Breaker.start_link([])
      iex> ExBreak.Breaker.increment(pid, 1)
      iex> ExBreak.Breaker.is_tripped(pid, 10)
      true
      iex> ExBreak.Breaker.reset_tripped(pid)
      iex> ExBreak.Breaker.is_tripped(pid, 10)
      false
  """
  @spec reset_tripped(pid) :: :ok
  def reset_tripped(pid) do
    Agent.update(pid, fn breaker ->
      if breaker.tripped, do: new(), else: breaker
    end)
  end
end
lib/ex_break/breaker.ex
0.855081
0.505127
breaker.ex
starcoder
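Putting the `ExBreak.Breaker` lifecycle together (threshold and timeout values chosen for illustration): three failures against a threshold of 3 trip the breaker, and an explicit reset re-opens it immediately.

```elixir
{:ok, pid} = ExBreak.Breaker.start_link([])
for _ <- 1..3, do: ExBreak.Breaker.increment(pid, 3)
true = ExBreak.Breaker.is_tripped(pid, 60)
:ok = ExBreak.Breaker.reset_tripped(pid)
false = ExBreak.Breaker.is_tripped(pid, 60)
```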
defmodule Grizzly.CommandClass.ThermostatFanMode.Set do
  @moduledoc """
  Command module for the ThermostatFanMode command class SET command

  Command Options:

    * `:mode` - The mode of the fan: `:auto_low`, `:low`, `:auto_high`, `:high`,
      `:auto_medium`, `:medium`, `:circulation`, `:humidity_circulation`,
      `:left_right`, `:up_down`, `:quiet`
    * `:seq_number` - The sequence number for the packet
    * `:retries` - The number of times the command should be retried (default 2)
  """
  @behaviour Grizzly.Command

  alias Grizzly.Packet
  alias Grizzly.Command.{EncodeError, Encoding}
  alias Grizzly.CommandClass.ThermostatFanMode

  @type t :: %__MODULE__{
          mode: ThermostatFanMode.thermostat_fan_mode(),
          seq_number: Grizzly.seq_number(),
          retries: non_neg_integer()
        }

  @type opt ::
          {:mode, ThermostatFanMode.thermostat_fan_mode()}
          | {:seq_number, Grizzly.seq_number()}
          | {:retries, non_neg_integer()}

  defstruct mode: nil, seq_number: nil, retries: 2

  @spec init([opt]) :: {:ok, t}
  def init(args) do
    {:ok, struct(__MODULE__, args)}
  end

  @spec encode(t) :: {:ok, binary} | {:error, EncodeError.t()}
  def encode(%__MODULE__{mode: _mode, seq_number: seq_number} = command) do
    with {:ok, encoded} <-
           Encoding.encode_and_validate_args(command, %{
             mode: {:encode_with, ThermostatFanMode, :encode_thermostat_fan_mode}
           }) do
      binary = Packet.header(seq_number) <> <<0x44, 0x01, encoded.mode>>
      {:ok, binary}
    end
  end

  @spec handle_response(t, Packet.t()) ::
          {:continue, t} | {:done, {:error, :nack_response}} | {:done, :ok} | {:retry, t}
  def handle_response(%__MODULE__{seq_number: seq_number}, %Packet{
        seq_number: seq_number,
        types: [:ack_response]
      }) do
    {:done, :ok}
  end

  def handle_response(%__MODULE__{seq_number: seq_number, retries: 0}, %Packet{
        seq_number: seq_number,
        types: [:nack_response]
      }) do
    {:done, {:error, :nack_response}}
  end

  def handle_response(%__MODULE__{seq_number: seq_number, retries: n} = command, %Packet{
        seq_number: seq_number,
        types: [:nack_response]
      }) do
    {:retry, %{command | retries: n - 1}}
  end

  def handle_response(command, _), do: {:continue, command}
end
lib/grizzly/command_class/thermostat_fan_mode/set.ex
0.884981
0.468791
set.ex
starcoder
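A minimal sketch of the command life cycle for the `Set` module above, assuming `:low` is one of the modes the (not shown) `ThermostatFanMode.encode_thermostat_fan_mode/1` encoder accepts:

```elixir
alias Grizzly.CommandClass.ThermostatFanMode.Set

{:ok, command} = Set.init(mode: :low, seq_number: 0x01)
{:ok, binary} = Set.encode(command)
# binary is the Z/IP packet header followed by <<0x44, 0x01, mode_byte>>
```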
defmodule Freddy.Connection do
  @moduledoc """
  Stable AMQP connection.
  """

  alias Freddy.Utils.Backoff
  alias Freddy.Utils.MultikeyMap
  alias Freddy.Adapter
  alias Freddy.Core.Channel

  @params_docs [
    adapter: """
    Freddy adapter. Can be any module, but also can be passed as an alias
    `:amqp` or `:sandbox`
    """,
    backoff: """
    Backoff can be specified either as a 1-arity function that accepts the
    attempt number (starting from `1`), or as a tuple `{module, function, arguments}`
    (in this case the attempt number will be appended to the arguments), or as
    a backoff config.
    """,
    host: "The hostname of the broker (defaults to \"localhost\")",
    port: "The port the broker is listening on (defaults to `5672`)",
    username: "The name of a user registered with the broker (defaults to \"guest\")",
    password: "<PASSWORD> (defaults to \"<PASSWORD>\")",
    virtual_host: "The name of a virtual host in the broker (defaults to \"/\")",
    channel_max: "The channel_max handshake parameter (defaults to `0`)",
    frame_max: "The frame_max handshake parameter (defaults to `0`)",
    heartbeat: "The heartbeat interval in seconds (defaults to `10`)",
    connection_timeout: "The connection timeout in milliseconds (defaults to `5000`)",
    ssl_options: "Enable SSL by setting the location to cert files (defaults to `none`)",
    client_properties:
      "A list of extra client properties to be sent to the server, defaults to `[]`",
    socket_options: """
    Extra socket options. These are appended to the default options.
    See `:inet.setopts/2` and `:gen_tcp.connect/4` for descriptions of the
    available options.
    """
  ]

  @options_doc @params_docs
               |> Enum.map(fn {param, value} -> " * `:#{param}` - #{value}" end)
               |> Enum.join("\n")

  @type connection :: GenServer.server()
  @type connection_spec :: connection_params | connection_uri
  @type connection_uri :: String.t()

  @typedoc """
  Keyword list of AMQP connection params.

  ## Options

  #{@options_doc}
  """
  @type connection_params :: [
          adapter: atom,
          backoff: Backoff.spec(),
          host: String.t(),
          port: integer,
          username: String.t(),
          password: String.t(),
          virtual_host: String.t(),
          channel_max: non_neg_integer,
          frame_max: non_neg_integer,
          heartbeat: non_neg_integer,
          connection_timeout: timeout,
          client_properties: [{String.t(), atom, String.t()}],
          ssl_options: term,
          socket_options: [any],
          auth_mechanisms: [function]
        ]

  @typedoc @params_docs[:adapter]
  @type adapter :: :amqp | :sandbox | module

  use Connection

  @doc """
  Start a new AMQP connection.

  `connection_opts` can be supplied either as a keyword list - in this case a
  connection will be established to one RabbitMQ server - or as a list of
  keyword lists - in this case `Freddy.Connection` will first attempt to
  establish a connection to the host specified by the first element of the
  list, then to the second, if the first one has failed, and so on.

  ## Options

  #{@options_doc}

  ## Backoff configuration

  Backoff config specifies how intervals should be calculated between reconnection attempts.

  ### Available options

  * `:type` - should be `:constant`, `:normal` or `:jitter`. When type is set
    to `:constant`, the interval between all reconnection attempts is the same,
    defined by the option `:start`. When type is set to `:normal`, intervals
    between reconnection attempts are incremented exponentially. When type is
    set to `:jitter`, intervals are also incremented exponentially, but with
    randomness or jitter (see `:backoff.rand_increment/2`). Defaults to `:jitter`.

  * `:start` - an initial backoff interval in milliseconds. Defaults to `1000`.

  * `:max` - specifies maximum backoff interval in milliseconds. Defaults to `10000`.
""" @spec start_link(connection_spec | [connection_spec, ...], GenServer.options()) :: GenServer.on_start() def start_link(connection_opts \\ [], gen_server_opts \\ []) do Connection.start_link(__MODULE__, connection_opts, gen_server_opts) end @doc """ Closes an AMQP connection. This will cause process to reconnect. """ @spec close(connection, timeout) :: :ok | {:error, reason :: term} def close(connection, timeout \\ 5000) do Connection.call(connection, {:close, timeout}) end @doc """ Stops the connection process """ def stop(connection) do GenServer.stop(connection) end @doc """ Opens a new AMQP channel """ @spec open_channel(connection, timeout) :: {:ok, Channel.t()} | {:error, reason :: term} def open_channel(connection, timeout \\ 5000) do Connection.call(connection, :open_channel, timeout) end @doc """ Returns underlying connection PID """ @spec get_connection(connection) :: {:ok, Freddy.Adapter.connection()} | {:error, :closed} def get_connection(connection) do Connection.call(connection, :get) end @doc false @spec child_spec(term) :: Supervisor.child_spec() def child_spec(opts) do %{ id: __MODULE__, start: {__MODULE__, :start_link, [opts]} } end # Connection callbacks import Record defrecordp :state, adapter: nil, hosts: nil, connection: nil, channels: MultikeyMap.new(), backoff: Backoff.new([]) @impl true def init(opts) do Process.flag(:trap_exit, true) {adapter, opts} = Keyword.pop(opts, :adapter, :amqp) {backoff, opts} = Keyword.pop(opts, :backoff, []) state = state( hosts: prepare_connection_hosts(opts), adapter: Adapter.get(adapter), backoff: Backoff.new(backoff) ) {:connect, :init, state} end defp prepare_connection_hosts(opts) when is_list(opts) do if Keyword.keyword?(opts) do [opts] else if Enum.all?(opts, &Keyword.keyword?/1) do opts else raise "Connection options must be supplied either as keywords or as a list of keywords" end end end @impl true def connect(_info, state(adapter: adapter, hosts: hosts, backoff: backoff) = state) do case do_connect(hosts, adapter, nil) do {:ok, connection} -> adapter.link_connection(connection) new_backoff = Backoff.succeed(backoff) {:ok, state(state, connection: connection, backoff: new_backoff)} _error -> {interval, new_backoff} = Backoff.fail(backoff) {:backoff, interval, state(state, backoff: new_backoff)} end end defp do_connect([], _adapter, acc) do acc end defp do_connect([host_opts | rest], adapter, _acc) do case adapter.open_connection(host_opts) do {:ok, connection} -> {:ok, connection} error -> do_connect(rest, adapter, error) end end @impl true def disconnect(_info, state) do {:connect, :reconnect, state(state, connection: nil)} end @impl true def handle_call(_, _, state(connection: nil) = state) do {:reply, {:error, :closed}, state} end def handle_call(:get, _from, state(connection: connection) = state) do {:reply, {:ok, connection}, state} end def handle_call( :open_channel, {from, _ref}, state(adapter: adapter, connection: connection, channels: channels) = state ) do try do case Channel.open(adapter, connection) do {:ok, %{chan: pid} = chan} -> monitor_ref = Process.monitor(from) channel_ref = Channel.monitor(chan) channels = MultikeyMap.put(channels, [monitor_ref, channel_ref, pid], chan) {:reply, {:ok, chan}, state(state, channels: channels)} {:error, _reason} = reply -> {:reply, reply, state} end catch :exit, {:noproc, _} -> {:reply, {:error, :closed}, state} _, _ -> {:reply, {:error, :closed}, state} end end def handle_call({:close, timeout}, _from, state(adapter: adapter, connection: connection) = state) do 
{:disconnect, :close, close_connection(adapter, connection, timeout), state} end @impl true def handle_info( {:EXIT, connection, {:shutdown, :normal}}, state(connection: connection) = state ) do {:noreply, state(state, connection: nil)} end def handle_info({:EXIT, connection, reason}, state(connection: connection) = state) do {:disconnect, {:error, reason}, state} end def handle_info({:EXIT, pid, reason}, state(channels: channels) = state) do case MultikeyMap.pop(channels, pid) do {nil, ^channels} -> {:stop, reason, state} {_channel, new_channels} -> {:noreply, state(state, channels: new_channels)} end end def handle_info({:DOWN, ref, _, _pid, _reason}, state) do {:noreply, close_channel(ref, state)} end def handle_info(_info, state) do {:noreply, state} end @impl true def terminate(_reason, state(adapter: adapter, connection: connection)) do if connection do adapter.close_connection(connection) end end defp close_channel(ref, state(channels: channels) = state) do case MultikeyMap.pop(channels, ref) do {nil, ^channels} -> state {channel, new_channels} -> Channel.close(channel) state(state, channels: new_channels) end end defp close_connection(adapter, connection, timeout) do try do adapter.close_connection(connection) receive do {:EXIT, ^connection, _reason} -> :ok after timeout -> Process.exit(connection, :kill) receive do {:EXIT, ^connection, _reason} -> :ok end end catch :exit, {:noproc, _} -> {:error, :closed} end end end
lib/freddy/connection.ex
0.892146
0.498108
connection.ex
starcoder
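Hypothetical `Freddy.Connection` usage (the host names are made up): connect with a fallback broker, then open a channel. Per `prepare_connection_hosts/1` above, a list of keyword lists is tried in order.

```elixir
{:ok, conn} =
  Freddy.Connection.start_link([
    [host: "rabbit-1.example.com", port: 5672],
    [host: "rabbit-2.example.com", port: 5672]
  ])

{:ok, chan} = Freddy.Connection.open_channel(conn)
```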
defmodule Earmark.Transform do
  import Earmark.Helpers, only: [replace: 3]

  alias Earmark.Options
  alias Earmark.TagSpecificProcessors, as: TSP
  alias Earmark.EarmarkParserProxy, as: Proxy

  @compact_tags ~w[a code em strong del]

  # https://www.w3.org/TR/2011/WD-html-markup-20110113/syntax.html#void-element
  @void_elements ~W(area base br col command embed hr img input keygen link meta param source track wbr)

  @moduledoc ~S"""
  #### Structure Conserving Transformers

  For the convenience of processing the output of `EarmarkParser.as_ast` we expose two structure
  conserving mappers.

  ##### `map_ast`

  takes a function that will be called for each node of the AST, where a leaf node is either a quadruple
  like `{"code", [{"class", "inline"}], ["some code"], %{}}` or a text leaf like `"some code"`

  The result of the function call must be

  - for nodes → a quadruple of which the third element will be ignored -- that might change in the
    future, and will therefore classically be `nil`. The other elements replace the node

  - for strings → strings

  A third parameter `ignore_strings` which defaults to `false` can be used to avoid invocation of
  the mapper function for text nodes

  As an example let us transform an ast to have symbol keys

      iex(1)> input = [
      ...(1)> {"h1", [], ["Hello"], %{title: true}},
      ...(1)> {"ul", [], [{"li", [], ["alpha"], %{}}, {"li", [], ["beta"], %{}}], %{}}]
      ...(1)> map_ast(input, fn {t, a, _, m} -> {String.to_atom(t), a, nil, m} end, true)
      [ {:h1, [], ["Hello"], %{title: true}},
        {:ul, [], [{:li, [], ["alpha"], %{}}, {:li, [], ["beta"], %{}}], %{}} ]

  **N.B.** If this returning convention is not respected `map_ast` might not complain, but the
  resulting transformation might not be suitable for `Earmark.Transform.transform` anymore. From
  this follows that any function passed in as value of the `postprocessor:` option must obey to
  these conventions.

  ##### `map_ast_with`

  this is like `map_ast` but, like a reducer, an accumulator can also be passed through.

  For that reason the function is called with two arguments, the first element being the same value
  as in `map_ast` and the second the accumulator. The return values need to be equally augmented
  tuples.

  A simple example, annotating traversal order in the meta map's `:count` key, as we are not
  interested in text nodes we use the fourth parameter `ignore_strings` which defaults to `false`

      iex(2)> input = [
      ...(2)> {"ul", [], [{"li", [], ["one"], %{}}, {"li", [], ["two"], %{}}], %{}},
      ...(2)> {"p", [], ["hello"], %{}}]
      ...(2)> counter = fn {t, a, _, m}, c -> {{t, a, nil, Map.put(m, :count, c)}, c+1} end
      ...(2)> map_ast_with(input, 0, counter, true)
      {[ {"ul", [], [{"li", [], ["one"], %{count: 1}}, {"li", [], ["two"], %{count: 2}}], %{count: 0}},
         {"p", [], ["hello"], %{count: 3}}], 4}

  #### Postprocessors and Convenience Functions

  These can be declared in the fields `postprocessor` and `registered_processors` in the `Options`
  struct, `postprocessor` is prepended to `registered_processors` and they are all applied to
  non-string nodes (that is the quadruples of the AST which are of the form
  `{tag, atts, content, meta}`)

  All postprocessors can just be functions on nodes or a `TagSpecificProcessors` struct which will
  group function applications depending on tags; as a convenience, tuples of the form
  `{tag, function}` will be transformed into a `TagSpecificProcessors` struct.
      iex(3)> add_class1 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class1")
      ...(3)> m1 = Earmark.Options.make_options!(postprocessor: add_class1) |> make_postprocessor()
      ...(3)> m1.({"a", [], nil, nil})
      {"a", [{"class", "class1"}], nil, nil}

  We can also use the `registered_processors` field:

      iex(4)> add_class1 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class1")
      ...(4)> m2 = Earmark.Options.make_options!(registered_processors: add_class1) |> make_postprocessor()
      ...(4)> m2.({"a", [], nil, nil})
      {"a", [{"class", "class1"}], nil, nil}

  Knowing that values on the same attributes are added onto the front, the following doctest
  demonstrates the order in which the processors are executed

      iex(5)> add_class1 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class1")
      ...(5)> add_class2 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class2")
      ...(5)> add_class3 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class3")
      ...(5)> m = Earmark.Options.make_options!(postprocessor: add_class1, registered_processors: [add_class2, {"a", add_class3}])
      ...(5)> |> make_postprocessor()
      ...(5)> [{"a", [{"class", "link"}], nil, nil}, {"b", [], nil, nil}]
      ...(5)> |> Enum.map(m)
      [{"a", [{"class", "class3 class2 class1 link"}], nil, nil}, {"b", [{"class", "class2 class1"}], nil, nil}]

  We can see that the tuple form has been transformed into a tag specific transformation **only**.
  As a matter of fact, the explicit definition would be:

      iex(6)> m = make_postprocessor(
      ...(6)>   %Earmark.Options{
      ...(6)>     registered_processors:
      ...(6)>       [Earmark.TagSpecificProcessors.new({"a", &Earmark.AstTools.merge_atts_in_node(&1, target: "_blank")})]})
      ...(6)> [{"a", [{"href", "url"}], nil, nil}, {"b", [], nil, nil}]
      ...(6)> |> Enum.map(m)
      [{"a", [{"href", "url"}, {"target", "_blank"}], nil, nil}, {"b", [], nil, nil}]

  We can also define a tag specific transformer in one step, which might (or might not) solve
  potential performance issues when running too many processors

      iex(7)> add_class4 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class4")
      ...(7)> add_class5 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class5")
      ...(7)> add_class6 = &Earmark.AstTools.merge_atts_in_node(&1, class: "class6")
      ...(7)> tsp = Earmark.TagSpecificProcessors.new([{"a", add_class5}, {"b", add_class5}])
      ...(7)> m = Earmark.Options.make_options!(
      ...(7)>       postprocessor: add_class4,
      ...(7)>       registered_processors: [tsp, add_class6])
      ...(7)> |> make_postprocessor()
      ...(7)> [{"a", [], nil, nil}, {"c", [], nil, nil}, {"b", [], nil, nil}]
      ...(7)> |> Enum.map(m)
      [{"a", [{"class", "class6 class5 class4"}], nil, nil}, {"c", [{"class", "class6 class4"}], nil, nil}, {"b", [{"class", "class6 class5 class4"}], nil, nil}]

  Of course the mechanics shown above are hidden if all we want is to trigger the postprocessor
  chain in `Earmark.as_html`, here goes a typical example

      iex(8)> add_target = fn node -> # This will only be applied to nodes as it will become a TagSpecificProcessors
      ...(8)>   if Regex.match?(~r{\.x\.com\z}, Earmark.AstTools.find_att_in_node(node, "href", "")), do:
      ...(8)>     Earmark.AstTools.merge_atts_in_node(node, target: "_blank"), else: node end
      ...(8)> options = [
      ...(8)>   registered_processors: [{"a", add_target}, {"p", &Earmark.AstTools.merge_atts_in_node(&1, class: "example")}]]
      ...(8)> markdown = [
      ...(8)>   "http://hello.x.com",
      ...(8)>   "",
      ...(8)>   "[some](url)",
      ...(8)> ]
      ...(8)> Earmark.as_html!(markdown, options)
      "<p class=\"example\">\n<a href=\"http://hello.x.com\" target=\"_blank\">http://hello.x.com</a></p>\n<p class=\"example\">\n<a href=\"url\">some</a></p>\n"
href=\"url\">some</a></p>\n" ##### Use case: Modification of Link Attributes depending on the URL This would be done as follows ```elixir Earmark.as_html!(markdown, registered_processors: {"a", my_function_that_is_invoked_only_with_a_nodes}) ``` ##### Use case: Modification of the AST according to Annotations **N.B.** Annotation are an _experimental_ feature in 1.4.16-pre and are documented [here](https://github.com/RobertDober/earmark_parser/#annotations) By annotating our markdown source we can then influence the rendering. In this example we will just add some decoration iex(9)> markdown = [ "A joke %% smile", "", "Charming %% in_love" ] ...(9)> add_smiley = fn {_, _, _, meta} = quad, _acc -> ...(9)> case Map.get(meta, :annotation) do ...(9)> "%% smile" -> {quad, "\u1F601"} ...(9)> "%% in_love" -> {quad, "\u1F60d"} ...(9)> _ -> {quad, nil} ...(9)> end ...(9)> text, nil -> {text, nil} ...(9)> text, ann -> {"#{text} #{ann}", nil} ...(9)> end ...(9)> Earmark.as_ast!(markdown, annotations: "%%") |> Earmark.Transform.map_ast_with(nil, add_smiley) |> Earmark.transform "<p>\nA joke ὠ1</p>\n<p>\nCharming ὠd</p>\n" #### Structure Modifying Transformers For structure modifications a tree traversal is needed and no clear pattern of how to assist this task with tools has emerged yet. """ def make_postprocessor(options) def make_postprocessor(%{postprocessor: nil, registered_processors: rps}), do: _make_postprocessor(rps) def make_postprocessor(%{postprocessor: pp, registered_processors: rps}), do: _make_postprocessor([pp|rps]) @line_end ~r{\n\r?} @doc false def postprocessed_ast(lines, options) def postprocessed_ast(lines, options) when is_binary(lines), do: lines |> String.split(@line_end) |> postprocessed_ast(options) # This is an optimisation (buuuuuh) but we want a minimal impact of postprocessing code when it is not required # It is also a case of the mantra "Handle the simple case first" (yeeeeah) def postprocessed_ast(lines, %Options{registered_processors: [], postprocessor: nil}=options), do: Proxy.as_ast(lines, options) def postprocessed_ast(lines, %Options{}=options) do {status, ast, messages} = Proxy.as_ast(lines, options) prep = make_postprocessor(options) ast1 = map_ast(ast, prep, Map.get(options, :ignore_strings)) {status, ast1, messages} end def postprocessed_ast(lines, options), do: postprocessed_ast(lines, Options.make_options!(options)) @doc """ Transforms an AST to html, also accepts the result of `map_ast_with` for convenience """ def transform(ast, options \\ %{initial_indent: 0, indent: 2, compact_output: false}) def transform({ast, _}, options), do: transform(ast, options) def transform(ast, options) when is_list(options) do transform(ast, options|>Enum.into(%{initial_indent: 0, indent: 2, compact_output: false})) end def transform(ast, options) when is_map(options) do options1 = options |> Map.put_new(:indent, 2) |> Map.put_new(:compact_output, false) ast # |> IO.inspect |> _maybe_remove_paras(options1) |> to_html(options1) end @doc ~S""" This is a structure conserving transformation iex(11)> {:ok, ast, _} = EarmarkParser.as_ast("- one\n- two\n") ...(11)> map_ast(ast, &(&1)) [{"ul", [], [{"li", [], ["one"], %{}}, {"li", [], ["two"], %{}}], %{}}] A more useful transformation iex(12)> {:ok, ast, _} = EarmarkParser.as_ast("- one\n- two\n") ...(12)> fun = fn {_, _, _, _}=n -> Earmark.AstTools.merge_atts_in_node(n, class: "private") ...(12)> string -> string end ...(12)> map_ast(ast, fun) [{"ul", [{"class", "private"}], [{"li", [{"class", "private"}], ["one"], %{}}, {"li", 
[{"class", "private"}], ["two"], %{}}], %{}}] However the usage of the `ignore_strings` option renders the code much simpler iex(13)> {:ok, ast, _} = EarmarkParser.as_ast("- one\n- two\n") ...(13)> map_ast(ast, &Earmark.AstTools.merge_atts_in_node(&1, class: "private"), true) [{"ul", [{"class", "private"}], [{"li", [{"class", "private"}], ["one"], %{}}, {"li", [{"class", "private"}], ["two"], %{}}], %{}}] """ def map_ast(ast, fun, ignore_strings \\ false) do _walk_ast(ast, fun, ignore_strings, []) end @doc ~S""" This too is a structure perserving transformation but a value is passed to the mapping function as an accumulator, and the mapping function needs to return the new node and the accumulator as a tuple, here is a simple example iex(14)> {:ok, ast, _} = EarmarkParser.as_ast("- 1\n\n2\n- 3\n") ...(14)> summer = fn {"li", _, [v], _}=n, s -> {v_, _} = Integer.parse(v); {n, s + v_} ...(14)> n, s -> {n, s} end ...(14)> map_ast_with(ast, 0, summer, true) {[{"ul", [], [{"li", [], ["1"], %{}}], %{}}, {"p", [], ["2"], %{}}, {"ul", [], [{"li", [], ["3"], %{}}], %{}}], 4} or summing all numbers iex(15)> {:ok, ast, _} = EarmarkParser.as_ast("- 1\n\n2\n- 3\n") ...(15)> summer = fn {_, _, _, _}=n, s -> {n, s} ...(15)> n, s -> {n_, _} = Integer.parse(n); {"*", s+n_} end ...(15)> map_ast_with(ast, 0, summer) {[{"ul", [], [{"li", [], ["*"], %{}}], %{}}, {"p", [], ["*"], %{}}, {"ul", [], [{"li", [], ["*"], %{}}], %{}}], 6} """ def map_ast_with(ast, value, fun, ignore_strings \\ false) do _walk_ast_with(ast, value, fun, ignore_strings, []) end defp _make_postprocessor(processors) do processors_ = processors |> Enum.map( fn %TSP{}=tsp -> TSP.make_postprocessor(tsp) just_a_fun -> just_a_fun end) fn node -> processors_ |> Enum.reduce(node, fn processor, node -> processor.(node) end) end end defp _maybe_add_newline1(options) defp _maybe_add_newline1(%Options{compact_output: true}), do: [] defp _maybe_add_newline1(_), do: ?\n @crlf_rgx ~r{(?:\n\r?)+} defp _maybe_compact(element, options) defp _maybe_compact(element, %{compact_output: false}), do: element defp _maybe_compact(element, _options) do String.replace(element, @crlf_rgx, " ") end defp to_html(ast, options) do _to_html(ast, options, Map.get(options, :initial_indent, 0)) |> IO.iodata_to_binary end defp _to_html(ast, options, level, verbatim \\ false) defp _to_html({:comment, _, content, _}, options, _level, _verbatim) do ["<!--", Enum.intersperse(content, ?\n), "-->", _maybe_add_newline1(options)] end defp _to_html({"code", atts, children, meta}, options, level, _verbatim) do verbatim = meta |> Map.get(:verbatim, false) [ _open_tag1("code", atts), _to_html(children, Map.put(options, :smartypants, false), level, verbatim), "</code>"] end defp _to_html({tag, atts, children, _}, options, level, verbatim) when tag in @compact_tags do [_open_tag1(tag, atts), children |> Enum.map(&_to_html(&1, options, level, verbatim)), "</", tag, ?>] end defp _to_html({tag, atts, _, _}, options, level, _verbatim) when tag in @void_elements do [ make_indent(options, level), _open_tag1(tag, atts), _maybe_add_newline1(options) ] end defp _to_html(elements, options, level, verbatim) when is_list(elements) do elements |> Enum.map(&_to_html(&1, options, level, verbatim)) end defp _to_html(element, options, _level, false) when is_binary(element) do element |> _maybe_compact(options) |> escape(options) end defp _to_html(element, options, level, true) when is_binary(element) do [make_indent(options, level), element] end defp _to_html({"pre", atts, children, meta}, options, level, 
  defp _to_html({"pre", atts, children, meta}, options, level, _verbatim) do
    verbatim = meta |> Map.get(:verbatim, false)

    [
      make_indent(options, level),
      _open_tag1("pre", atts),
      _to_html(children, Map.merge(options, %{smartypants: false, compact_output: false}), level, verbatim),
      "</pre>",
      _maybe_add_newline1(options)
    ]
  end

  defp _to_html({tag, atts, children, meta}, options, level, _verbatim) do
    verbatim = meta |> Map.get(:verbatim, false)

    [
      make_indent(options, level),
      _open_tag1(tag, atts),
      _maybe_add_newline1(options),
      _to_html(children, options, level + 1, verbatim),
      _close_tag1(tag, options, level)
    ]
  end

  defp _add_trailing_nl(node)
  defp _add_trailing_nl(text) when is_binary(text), do: [text, "\n"]
  defp _add_trailing_nl(node), do: node

  defp _close_tag1(tag, options, level) do
    [make_indent(options, level), "</", tag, ?>, _maybe_add_newline1(options)]
  end

  defp escape(element, options)

  defp escape("", _options) do
    []
  end

  @dbl1_rgx ~r{(^|[-–—/\(\[\{"”“\s])'}
  @dbl2_rgx ~r{(^|[-–—/\(\[\{‘\s])\"}
  defp escape(element, %{smartypants: true} = options) do
    # Unfortunately these regexes still have to be left.
    # It doesn't seem possible to make _escape_to_iodata1
    # transform, for example, "--'" to "–‘" without
    # significantly complicating the code to the point
    # it outweighs the performance benefit.
    element =
      element
      |> replace(@dbl1_rgx, "\\1‘")
      |> replace(@dbl2_rgx, "\\1“")

    escape = Map.get(options, :escape, true)
    _escape_to_iodata1(element, 0, element, [], true, escape, 0)
  end

  defp escape(element, %{escape: escape}) do
    _escape_to_iodata1(element, 0, element, [], false, escape, 0)
  end

  defp escape(element, _options) do
    _escape_to_iodata1(element, 0, element, [], false, true, 0)
  end

  defp _make_att1(name_value_pair, tag)

  defp _make_att1({name, value}, _) do
    [" ", name, "=\"", value, "\""]
  end

  defp make_indent(options, level)

  defp make_indent(%Options{compact_output: true}, _level) do
    ""
  end

  defp make_indent(%{indent: indent}, level) do
    Stream.cycle([" "]) |> Enum.take(level * indent)
  end

  # Optimized HTML escaping + smartypants, inspired by Plug.HTML
  # https://github.com/elixir-plug/plug/blob/v1.11.0/lib/plug/html.ex

  # Do not escape HTML entities
  defp _escape_to_iodata1("&#x" <> rest, skip, original, acc, smartypants, escape, len) do
    _escape_to_iodata1(rest, skip, original, acc, smartypants, escape, len + 3)
  end

  escapes = [
    {?<, "&lt;"},
    {?>, "&gt;"},
    {?&, "&amp;"},
    {?", "&quot;"},
    {?', "&#39;"}
  ]

  # Can't use character codes for multibyte unicode characters
  smartypants_escapes = [
    {"---", "—"},
    {"--", "–"},
    {?', "’"},
    {?", "”"},
    {"...", "…"}
  ]

  # These match only if `smartypants` is true
  for {match, insert} <- smartypants_escapes do
    # Unlike HTML escape matches, smartypants matches may contain more than one character
    match_length = if is_binary(match), do: byte_size(match), else: 1

    defp _escape_to_iodata1(<<unquote(match), rest::bits>>, skip, original, acc, true, escape, 0) do
      _escape_to_iodata1(rest, skip + unquote(match_length), original, [acc | unquote(insert)], true, escape, 0)
    end

    defp _escape_to_iodata1(<<unquote(match), rest::bits>>, skip, original, acc, true, escape, len) do
      part = binary_part(original, skip, len)
      _escape_to_iodata1(rest, skip + len + unquote(match_length), original, [acc, part | unquote(insert)], true, escape, 0)
    end
  end

  for {match, insert} <- escapes do
    defp _escape_to_iodata1(<<unquote(match), rest::bits>>, skip, original, acc, smartypants, true, 0) do
      _escape_to_iodata1(rest, skip + 1, original, [acc | unquote(insert)], smartypants, true, 0)
    end

    defp _escape_to_iodata1(<<unquote(match), rest::bits>>, skip, original, acc, smartypants, true,
len) do part = binary_part(original, skip, len) _escape_to_iodata1(rest, skip + len + 1, original, [acc, part | unquote(insert)], smartypants, true, 0) end end defp _escape_to_iodata1(<<_char, rest::bits>>, skip, original, acc, smartypants, escape, len) do _escape_to_iodata1(rest, skip, original, acc, smartypants, escape, len + 1) end defp _escape_to_iodata1(<<>>, 0, original, _acc, _smartypants, _escape, _len) do original end defp _escape_to_iodata1(<<>>, skip, original, acc, _smartypants, _escape, len) do [acc | binary_part(original, skip, len)] end defp _maybe_remove_paras(ast, options) defp _maybe_remove_paras(ast, %Options{inner_html: true}) do Enum.map(ast, &_remove_para/1) end defp _maybe_remove_paras(ast, _), do: ast defp _open_tag1(tag, atts) defp _open_tag1(tag, atts) when tag in @void_elements do [?<, tag, Enum.map(atts, &_make_att1(&1, tag)), " />"] end defp _open_tag1(tag, atts) do [?<, tag, Enum.map(atts, &_make_att1(&1, tag)), ?>] end @pop {:__end__} defp _pop_to_pop(result, intermediate \\ []) defp _pop_to_pop([@pop, {tag, atts, _, meta}|rest], intermediate) do [{tag, atts, intermediate, meta}|rest] end defp _pop_to_pop([continue|rest], intermediate) do _pop_to_pop(rest, [continue|intermediate]) end defp _remove_para(ele_or_string) defp _remove_para({"p", _, content, _}), do: content |> Enum.map(&_add_trailing_nl/1) defp _remove_para(whatever), do: whatever defp _walk_ast(ast, fun, ignore_strings, result) defp _walk_ast([], _fun, _ignore_strings, result), do: Enum.reverse(result) defp _walk_ast([[]|rest], fun, ignore_strings, result) do _walk_ast(rest, fun, ignore_strings, _pop_to_pop(result)) end defp _walk_ast([string|rest], fun, ignore_strings, result) when is_binary(string) do new = if ignore_strings, do: string, else: fun.(string) _walk_ast(rest, fun, ignore_strings, [new|result]) end defp _walk_ast([{_, _, content, _}=tuple|rest], fun, ignore_strings, result) do {new_tag, new_atts, _, new_meta} = fun.(tuple) _walk_ast([content|rest], fun, ignore_strings, [@pop, {new_tag, new_atts, [], new_meta}|result]) end defp _walk_ast([[h|t]|rest], fun, ignore_strings, result) do _walk_ast([h, t|rest], fun, ignore_strings, result) end defp _walk_ast_with(ast, value, fun, ignore_strings, result) defp _walk_ast_with([], value, _fun, _ignore_strings, result), do: {Enum.reverse(result), value} defp _walk_ast_with([[]|rest], value, fun, ignore_strings, result) do _walk_ast_with(rest, value, fun, ignore_strings, _pop_to_pop(result)) end defp _walk_ast_with([string|rest], value, fun, ignore_strings, result) when is_binary(string) do if ignore_strings do _walk_ast_with(rest, value, fun, ignore_strings, [string|result]) else {news, newv} = fun.(string, value) _walk_ast_with(rest, newv, fun, ignore_strings, [news|result]) end end defp _walk_ast_with([{_, _, content, _}=tuple|rest], value, fun, ignore_strings, result) do {{new_tag, new_atts, _, new_meta}, new_value} = fun.(tuple, value) _walk_ast_with([content|rest], new_value, fun, ignore_strings, [@pop, {new_tag, new_atts, [], new_meta}|result]) end defp _walk_ast_with([[h|t]|rest], value, fun, ignore_strings, result) do _walk_ast_with([h, t|rest], value, fun, ignore_strings, result) end end # SPDX-License-Identifier: Apache-2.0
lib/earmark/transform.ex
0.758511
0.734239
transform.ex
starcoder
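One rendering knob worth showing next to `Earmark.Transform.transform/2` above is `:compact_output`: it drops the indentation newlines while leaving the HTML structure alone. A sketch (the exact whitespace follows from `_maybe_add_newline1/1` and `make_indent/2`):

```elixir
{:ok, ast, _} = EarmarkParser.as_ast("*hello*")

Earmark.Transform.transform(ast)
# => "<p>\n<em>hello</em></p>\n"

Earmark.Transform.transform(ast, compact_output: true)
# => "<p><em>hello</em></p>"
```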
require Utils defmodule D9 do @moduledoc """ --- Day 9: Encoding Error --- With your neighbor happily enjoying their video game, you turn your attention to an open data port on the little screen in the seat in front of you. Though the port is non-standard, you manage to connect it to your computer through the clever use of several paperclips. Upon connection, the port outputs a series of numbers (your puzzle input). The data appears to be encrypted with the eXchange-Masking Addition System (XMAS) which, conveniently for you, is an old cypher with an important weakness. XMAS starts by transmitting a preamble of 25 numbers. After that, each number you receive should be the sum of any two of the 25 immediately previous numbers. The two numbers will have different values, and there might be more than one such pair. The first step of attacking the weakness in the XMAS data is to find the first number in the list (after the preamble) which is not the sum of two of the 25 numbers before it. What is the first number that does not have this property? --- Part Two --- The final step in breaking the XMAS encryption relies on the invalid number you just found: you must find a contiguous set of at least two numbers in your list which sum to the invalid number from step 1. Again consider the above example: To find the encryption weakness, add together the smallest and largest number in this contiguous range; in this example, these are 15 and 47, producing 62. """ @behaviour Day defp find(sum, nums, target, [next | remaining] = list) do case sum + next do new_sum when new_sum < target -> find(new_sum, nums ++ [next], target, remaining) new_sum when new_sum == target -> nums = [next | nums] Enum.min(nums) + Enum.max(nums) _ -> [first | rest] = nums find(sum - first, rest, target, list) end end defp scan(sums, [_ | new_window], [next | remaining]) do if next in sums do {_, sums} = Enum.split(sums, 24) new_sums = for i <- new_window, do: i + next scan(sums ++ new_sums, new_window ++ [next], remaining) else next end end defp scan(preamble, remaining) do sums = for i <- preamble, j <- preamble, i != j, do: i + j scan(sums, preamble, remaining) end @impl true def solve(input) do input = input |> Utils.to_ints() {preamble, remaining} = Enum.split(input, 25) part_1 = scan(preamble, remaining) part_2 = find(0, [], part_1, input) { part_1, part_2 } end end
lib/days/09.ex
0.642096
0.724724
09.ex
starcoder
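A synthetic input (mine, not the puzzle's) makes `D9.solve/1` concrete, assuming the unshown `Utils.to_ints/1` accepts a list of numeric strings. With a preamble of 1..25, the value 100 is the first invalid number (no two distinct preamble values sum to it), and the contiguous run 9..16 sums to 100, so the weakness is 9 + 16 = 25.

```elixir
input = Enum.map(1..25, &Integer.to_string/1) ++ ["100"]
D9.solve(input)
# => {100, 25}
```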
defmodule Toolshed.Top do @default_n 10 @moduledoc """ Find the top processes """ @spec top_reductions(any()) :: :"do not show this result in output" def top_reductions(n \\ @default_n), do: top(order: :reductions, n: n) @spec top_mailbox(any()) :: :"do not show this result in output" def top_mailbox(n \\ @default_n), do: top(order: :mailbox, n: n) @spec top_total_heap_size(any()) :: :"do not show this result in output" def top_total_heap_size(n \\ @default_n), do: top(order: :total_heap_size, n: n) @spec top_heap_size(any()) :: :"do not show this result in output" def top_heap_size(n \\ @default_n), do: top(order: :heap_size, n: n) @spec top_stack_size(any()) :: :"do not show this result in output" def top_stack_size(n \\ @default_n), do: top(order: :stack_size, n: n) @doc """ List the top processes Options: * `:order` - the sort order for the results (`:reductions`, `:delta_reductions`, `:mailbox`, `:delta_mailbox`, `:total_heap_size`, `:delta_total_heap_size`, `:heap_size`, `:delta_heap_size`, `:stack_size`, `:delta_stack_size`) * `:n` - the max number of processes to list """ def top(opts \\ []) do order = Keyword.get(opts, :order, :delta_reductions) n = Keyword.get(opts, :n, @default_n) tid = toolshed_top_tid() Process.list() |> Enum.map(&process_info/1) |> Enum.filter(fn info -> info != %{} end) |> Enum.map(fn info -> previous_info = :ets.lookup(tid, info.pid) d = add_deltas(info, previous_info) :ets.insert(tid, {info.pid, info}) d end) |> Enum.sort(sort(order)) |> print_summary() |> Enum.take(n) |> format_header() |> Enum.each(&format/1) IEx.dont_display_result() end defp toolshed_top_tid() do case Process.get(:toolshed_top) do nil -> tid = :ets.new(:toolshed_top, []) Process.put(:toolshed_top, tid) tid tid -> tid end end defp sort(:reductions), do: fn x, y -> x.reductions > y.reductions end defp sort(:delta_reductions), do: fn x, y -> x.delta_reductions > y.delta_reductions end defp sort(:mailbox), do: fn x, y -> x.message_queue_len > y.message_queue_len end defp sort(:delta_mailbox), do: fn x, y -> x.delta_message_queue_len > y.delta_message_queue_len end defp sort(:total_heap_size), do: fn x, y -> x.total_heap_size > y.total_heap_size end defp sort(:delta_total_heap_size), do: fn x, y -> x.delta_total_heap_size > y.delta_total_heap_size end defp sort(:heap_size), do: fn x, y -> x.heap_size > y.heap_size end defp sort(:delta_heap_size), do: fn x, y -> x.delta_heap_size > y.delta_heap_size end defp sort(:stack_size), do: fn x, y -> x.stack_size > y.stack_size end defp sort(:delta_stack_size), do: fn x, y -> x.delta_stack_size > y.delta_stack_size end defp sort(_other), do: sort(:delta_reductions) defp process_info(pid) do organize_info(pid, Process.info(pid)) end # Ignore deceased processes defp organize_info(_pid, nil), do: %{} defp organize_info(pid, info) do %{ pid: pid, application: get_application(pid), total_heap_size: Keyword.get(info, :total_heap_size), heap_size: Keyword.get(info, :heap_size), stack_size: Keyword.get(info, :stack_size), reductions: Keyword.get(info, :reductions), message_queue_len: Keyword.get(info, :message_queue_len), name: process_name(pid, info) } end defp process_name(pid, info) do registered_name(info) || initial_call_name(pid, info) || short_pid_to_string(pid) end defp registered_name(info) do case Keyword.get(info, :registered_name) do nil -> nil name -> to_string(name) end end defp initial_call_name(pid, info) do case get_in(info, [:dictionary, :"$initial_call"]) do {m, f, a} -> IO.iodata_to_binary([ :erlang.pid_to_list(pid), "=", to_string(m), 
".", to_string(f), "/", to_string(a) ]) _ -> nil end end defp short_pid_to_string(pid) do IO.iodata_to_binary(:erlang.pid_to_list(pid)) end defp add_deltas(info, []) do %{ delta_total_heap_size: info.total_heap_size, delta_heap_size: info.heap_size, delta_stack_size: info.stack_size, delta_reductions: info.reductions, delta_message_queue_len: info.message_queue_len } |> Map.merge(info) end defp add_deltas(info, [{_pid, previous_info}]) do %{ delta_total_heap_size: info.total_heap_size - previous_info.total_heap_size, delta_heap_size: info.heap_size - previous_info.heap_size, delta_stack_size: info.stack_size - previous_info.stack_size, delta_reductions: info.reductions - previous_info.reductions, delta_message_queue_len: info.message_queue_len - previous_info.message_queue_len } |> Map.merge(info) end defp get_application(pid) do case :application.get_application(pid) do {:ok, app} -> app :undefined -> :undefined end end defp print_summary(infos) do cnt = Enum.count(infos) IO.puts("Total processes: #{cnt}\n") infos end defp format_header(infos) do :io.format( IO.ANSI.cyan() <> "~-12ts ~-28ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts~n" <> IO.ANSI.white(), [ "Application", "Name or PID", "Reds", "Δ", "Mbox", "Δ", "Total", "Δ", "Heap", "Δ", "Stack", "Δ" ] ) infos end defp format(info) do :io.format( "~-12ts ~-28ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts ~5ts/~-5ts~n", [ String.slice(to_string(info.application), 0, 12), String.slice(info.name, 0, 28), format_num(info.reductions), format_num(info.delta_reductions), format_num(info.message_queue_len), format_num(info.delta_message_queue_len), format_num(info.total_heap_size), format_num(info.delta_total_heap_size), format_num(info.heap_size), format_num(info.delta_heap_size), format_num(info.stack_size), format_num(info.delta_stack_size) ] ) end defp format_num(x) when x < 10 * 1024, do: Integer.to_string(x) defp format_num(x) when x < 10 * 1024 * 1024, do: Integer.to_string(div(x, 1024)) <> "K" defp format_num(x), do: Integer.to_string(div(x, 1024 * 1024)) <> "M" end
lib/toolshed/top.ex
0.748076
0.565599
top.ex
starcoder
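Typical IEx usage of `Toolshed.Top` above; the printed columns come from `format_header/1` (the current value and the delta since the previous call for each metric).

```elixir
# Show the five processes that gained the most reductions since the last call.
Toolshed.Top.top(order: :delta_reductions, n: 5)

# Convenience wrapper: sort by absolute mailbox length instead.
Toolshed.Top.top_mailbox(5)
```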
defmodule RailwayIpc.Publisher do @moduledoc """ Publishes Protobuf messages to the configured message bus. You will define one publisher for each message bus exchange to which you want to publish. Define a module that "uses" `RailwayIpc.Publisher`, specifying the name of the exchange. Add a function that calls `publish` to publish your protobuf message. For example: ``` defmodule MyApp.MyPublisher do use RailwayIpc.Publisher, exchange: "my:exchange" def publish_something(params) do proto = # initialize a Protobuf using params case publish(proto, "json_protobuf") do {:ok, _info} -> :ok {:error, error} -> IO.puts(error) end end end ``` #### A Note on Exchange Types The RailwayIpc package enforces an exchange type of "fanout" for ALL consumers and publishers. At this time, you cannot override this option. """ alias RailwayIpc.Core.Payload alias RailwayIpc.Publisher.Server alias RailwayIpc.Publisher.Telemetry alias RailwayIpc.Storage.OutgoingMessage defmacro __using__(opts) do quote do @doc """ Publish a message to the configured exchange. Optionally, provide a `format`. Supported formats are `"binary_protobuf"` and `"json_protobuf"`. If `format` is not provided, `"binary_protobuf"` is used. `publish` will return a `{:ok, info}` tuple if successful. `info` is a map containing information about the published message (i.e. exchange, encoded message, format, etc.). The shape of the `info` map may change in future versions. If the message fails to publish, a `{:error, msg}` tuple will be returned. `msg` will be a string containing the error message. """ def publish(message, format \\ "binary_protobuf") do exchange = unquote(Keyword.get(opts, :exchange)) RailwayIpc.Publisher.publish(exchange, message, format) end end end @doc """ Starts the publisher GenServer. """ def start_link(options \\ []) do config = %Server{adapter: Application.fetch_env!(:railway_ipc, :message_bus)} GenServer.start_link(Server, config, options) end @doc false def child_spec(opts) do %{ id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :worker, restart: :permanent, shutdown: 500 } end @doc """ Publishes a protobuf message to an exchange in the given format. 
""" def publish(exchange, proto, format) do %{exchange: exchange, protobuf: proto, format: format} |> emit_telemetry_start() |> encode_message() |> store_message() |> publish_to_message_bus() |> emit_telemetry_finish() end defp emit_telemetry_start(info) do %{exchange: exchange, protobuf: proto} = info start_time = Telemetry.emit_publish_start(__MODULE__, exchange, proto) Map.put(info, :start_time, start_time) end defp encode_message(info) do %{protobuf: proto, format: format} = info case Payload.encode(proto, format) do {:ok, encoded, type} -> {:ok, Map.merge(info, %{encoded: encoded, type: type})} {:error, error} -> {:error, Map.put(info, :error, error)} end end defp store_message({:ok, info}) do %{protobuf: proto, encoded: encoded, exchange: exchange, type: type} = info msg = %OutgoingMessage{ protobuf: proto, encoded: encoded, exchange: exchange, type: type } case message_store().insert(msg) do {:ok, _} -> {:ok, info} {:error, error} -> {:error, Map.put(info, :error, error)} end end defp store_message({:error, error}), do: {:error, error} defp publish_to_message_bus({:ok, info}) do %{exchange: exchange, encoded: encoded, format: format} = info case GenServer.call(__MODULE__, {:publish, exchange, encoded, format}) do :ok -> {:ok, info} {:error, reason} -> {:error, Map.put(info, :error, reason)} end end defp publish_to_message_bus({:error, error}), do: {:error, error} defp emit_telemetry_finish({status, info}) do %{start_time: start, exchange: exchange, protobuf: proto} = info case status do :ok -> Telemetry.emit_publish_stop(__MODULE__, start, exchange, proto, info.encoded) {:ok, info} :error -> Telemetry.emit_publish_error(__MODULE__, start, exchange, proto, info.error) {:error, info.error} end end defp message_store do Application.fetch_env!(:railway_ipc, :storage) end end
lib/railway_ipc/publisher.ex
0.914171
0.737442
publisher.ex
starcoder
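Because `RailwayIpc.Publisher.publish/3` above goes through `GenServer.call(__MODULE__, ...)`, the publisher process must be registered under the module name. An illustrative supervision-tree entry:

```elixir
# In your application's supervision tree (illustrative):
children = [
  {RailwayIpc.Publisher, name: RailwayIpc.Publisher}
]

Supervisor.start_link(children, strategy: :one_for_one)
```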
defmodule Arguments.Parser do
  @moduledoc """
  Parses arguments into a map for easy pattern matching
  """

  @doc """
  Parse will take a list of incoming arguments and a list of built arguments
  to generate a map
  """
  @spec parse([String.t()], [map]) :: map
  def parse(incoming, arguments) do
    incoming
    |> OptionParser.parse(
      strict: get_switches(arguments),
      aliases: get_aliases(arguments)
    )
    |> flags_to_map()
    |> apply_commands(arguments)
    |> just_flags()
    |> apply_defaults(arguments)
  end

  @spec get_switches([map]) :: keyword
  defp get_switches(arguments) do
    arguments
    |> Enum.filter(&Map.get(&1, :type))
    |> Enum.map(&{&1.name, &1.type})
  end

  @spec get_aliases([map]) :: keyword
  defp get_aliases(arguments) do
    arguments
    |> Enum.filter(&Map.get(&1, :alias))
    |> Enum.map(&{&1.alias, &1.name})
  end

  @spec just_flags({map, any}) :: map
  defp just_flags({flags, _}), do: flags

  @spec flags_to_map({[{any, any}], [String.t()], any}) :: {map, [String.t()]}
  defp flags_to_map({kw, incoming_args, _bad_args}) do
    {Map.new(kw), incoming_args}
  end

  @spec apply_commands({map, [String.t()]}, [map]) :: {map, [String.t()]}
  defp apply_commands({flags, []}, _), do: {flags, []}

  defp apply_commands({flags, incoming_args}, arguments) do
    arguments
    |> Enum.reduce({flags, incoming_args}, &apply_command/2)
  end

  @spec apply_command(map, {map, [String.t()]}) :: {map, list}
  defp apply_command(
         %{argument_type: :command, string_name: str_name, name: cmd_name, arguments: args},
         {flags, [str_name | incoming_args]}
       ) do
    arguments = Enum.zip(args, incoming_args)
    used = Enum.count(arguments) + 1

    flags =
      arguments
      |> Enum.reduce(flags, &add_new_flags/2)
      # Tag command
      |> Map.put(cmd_name, true)

    {flags, Enum.drop(incoming_args, used)}
  end

  @spec apply_command(any, term) :: term
  defp apply_command(_, acc), do: acc

  @spec apply_defaults(map, [map]) :: map
  defp apply_defaults(flags, arguments) do
    arguments
    |> Enum.reduce(flags, &apply_default/2)
  end

  # Only apply the default if it exists in the list of flags
  @spec apply_default(map, map) :: map
  defp apply_default(%{name: name, defaults: defaults}, flags) when is_list(defaults) do
    case Map.get(flags, name) do
      nil -> flags
      _ -> Enum.reduce(defaults, flags, &add_new_flags/2)
    end
  end

  defp apply_default(%{name: name, defaults: default_fn}, flags) do
    case Map.get(flags, name) do
      nil -> flags
      value -> Enum.reduce(apply_fn(default_fn, value), flags, &add_new_flags/2)
    end
  end

  defp apply_default(_, flags), do: flags

  @spec apply_fn(any, any) :: Enum.t()
  defp apply_fn(f, value) do
    {func, _} = Code.eval_quoted(f, [])
    func.(value)
  end

  # Apply flag IF not already flagged
  @spec add_new_flags({atom, any}, map) :: map
  defp add_new_flags({k, v}, flags) do
    Map.put_new(flags, k, v)
  end
end
lib/parser.ex
0.784567
0.411643
parser.ex
starcoder
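The argument metadata shape below is inferred from the private helpers in `Arguments.Parser` above (`:name`/`:type` for switches, an optional `:alias`); it is not from any official docs.

```elixir
arguments = [
  %{name: :verbose, type: :boolean, alias: :v},
  %{name: :output, type: :string}
]

Arguments.Parser.parse(["-v", "--output", "out.txt"], arguments)
# => %{output: "out.txt", verbose: true}
```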
defmodule AdventOfCode.Y2020.Day11 do def run() do AdventOfCode.Helpers.Data.read_from_file("2020/day11.txt") |> to_room() |> Stream.iterate(&iterate/1) |> Stream.chunk_every(2, 1) |> Stream.filter(fn [new, last] -> new == last end) |> Enum.take(1) |> (fn [[x, _]] -> x end).() |> occupied_seats() end def occupied_seats(%{seats: seats}) do seats |> Enum.filter(fn {_, state} -> state == "#" end) |> Enum.count() end def to_room(lines) do seats = lines |> Enum.with_index() |> Enum.map(&create_row/1) |> Enum.reduce(&Map.merge/2) %{width: String.graphemes(hd(lines)) |> Enum.count(), height: Enum.count(lines), seats: seats} end def create_row({line, row_nr}) do line |> String.graphemes() |> Enum.with_index() |> Enum.filter(fn {spot, _} -> spot != "." end) |> Enum.reduce(%{}, fn {spot, col}, acc -> Map.put(acc, {row_nr, col}, spot) end) end def iterate(%{seats: seats} = room) do new_seats = seats |> Enum.map(fn {{row, col} = pos, state} -> {pos, update_room(room, row, col, state)} end) |> Map.new() %{room | seats: new_seats} end def update_room(room, row, col, state) do update_room(occupied(room, row, col), state) end def update_room(occupied, state) when occupied == 0 and state == "L", do: "#" def update_room(occupied, state) when occupied >= 5 and state == "#", do: "L" def update_room(_, state), do: state def occupied(room, row, col) do ["N", "E", "S", "W", "NW", "NE", "SE", "SW"] |> Enum.map(fn direction -> occupied(room, direction, row, col) end) |> Enum.reduce(&(&1 + &2)) end def check_dir(%{height: height, width: width}, _, row, col) when row < 0 or row >= height or col < 0 or col >= width do 0 end def check_dir(%{seats: seats} = room, direction, row, col) do case Map.get(seats, {row, col}) do nil -> occupied(room, direction, row, col) "#" -> 1 "L" -> 0 end end def occupied(room, "N", row, col), do: check_dir(room, "N", row - 1, col) def occupied(room, "E", row, col), do: check_dir(room, "E", row, col + 1) def occupied(room, "S", row, col), do: check_dir(room, "S", row + 1, col) def occupied(room, "W", row, col), do: check_dir(room, "W", row, col - 1) def occupied(room, "NW", row, col), do: check_dir(room, "NW", row - 1, col - 1) def occupied(room, "NE", row, col), do: check_dir(room, "NE", row - 1, col + 1) def occupied(room, "SE", row, col), do: check_dir(room, "SE", row + 1, col + 1) def occupied(room, "SW", row, col), do: check_dir(room, "SW", row + 1, col - 1) end
lib/2020/day11.ex
0.567218
0.545346
day11.ex
starcoder
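A tiny grid makes the seating rule in `Day11` above concrete: a lone empty seat sees zero occupied seats in every direction, so it fills on the first iteration.

```elixir
alias AdventOfCode.Y2020.Day11

room = Day11.to_room(["L"])
Day11.iterate(room).seats
# => %{{0, 0} => "#"}
```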
defmodule Axon do @moduledoc """ A high-level interface for creating neural network models. Axon is built entirely on top of Nx numerical definitions, so every neural network can be JIT or AOT compiled using any Nx compiler, or even transformed into high-level neural network formats like TensorFlow Lite and ONNX. All Axon models start with an input layer, specifying the expected input shape of the training data: input = Axon.input({nil, 784}) Notice you can specify the batch dimension as `nil`. You can then compose inputs with other layers: model = input |> Axon.dense(128, activation: :relu) |> Axon.batch_norm() |> Axon.dropout(rate: 0.8) |> Axon.dense(64) |> Axon.tanh() |> Axon.dense(10) |> Axon.activation(:softmax) You can inspect the model for a nice summary: IO.inspect(model) ----------------------------------------------------- Model ===================================================== Layer Shape Parameters ===================================================== input_1 (input) {nil, 784} 0 dense_2 (dense) {nil, 128} 100480 relu_3 (relu) {nil, 128} 0 batch_norm_4 (batch_norm) {nil, 128} 256 dropout_5 (dropout) {nil, 128} 0 dense_6 (dense) {nil, 64} 8256 tanh_7 (tanh) {nil, 64} 0 dense_8 (dense) {nil, 10} 650 softmax_9 (softmax) {nil, 10} 0 ----------------------------------------------------- Under the hood, Axon models are represented as Elixir structs. You can initialize and apply models using the macros `Axon.init/2` and `Axon.predict/4`: params = Axon.init(model, compiler: EXLA) Axon.predict(model, params, inputs, compiler: EXLA) Both `Axon.init/2` and `Axon.predict/4` can be used from within Nx defn or outside. Combining the Axon model creation API with the optimization and training APIs, you can create and train neural networks with ease: model = Axon.input({nil, 784}) |> Axon.dense(128, activation: :relu) |> Axon.layer_norm() |> Axon.dropout() |> Axon.dense(10, activation: :softmax) IO.inspect model final_params = model |> Axon.Training.step(:categorical_cross_entropy, Axon.Optimizers.adamw(0.005)) |> Axon.Training.train(train_images, train_labels, epochs: 10, compiler: EXLA) """ alias __MODULE__, as: Axon @type t :: %__MODULE__{} @doc false defstruct [:id, :name, :output_shape, :parent, :op, :params, :opts] @doc """ Adds an input layer to the network. Input layers specify a model's inputs. Input layers are always the root layers of the neural network. ## Options * `name` - Layer name. """ @doc type: :layer def input(input_shape, opts \\ []) do {id, name} = unique_identifiers(:input, opts[:name]) %Axon{id: id, name: name, output_shape: input_shape, parent: nil, op: :input, params: []} end @doc """ Adds a dense layer to the network. The dense layer implements: output = activation(dot(input, kernel) + bias) where `activation` is given by the `:activation` option and both `kernel` and `bias` are layer parameters. `units` specifies the number of output units. Compiles to `Axon.Layers.dense/3`. ## Options * `name` - Layer name. * `kernel_initializer` - Initializer for `kernel` weights. * `bias_initializer` - Initializer for `bias` weights. * `activation` - Element-wise activation function. 
""" @doc type: :layer def dense(%Axon{output_shape: parent_shape} = x, units, opts \\ []) when is_integer(units) and units > 0 do {id, name} = unique_identifiers(:dense, opts[:name]) weight_init = opts[:kernel_initializer] || :glorot_uniform bias_init = opts[:bias_initializer] || :zeros activation = opts[:activation] kernel_shape = Axon.Shape.dense_kernel(parent_shape, units) bias_shape = Axon.Shape.dense_bias(parent_shape, units) output_shape = Axon.Shape.dense(parent_shape, units) weight = param(name <> "_weight", kernel_shape, weight_init) bias = param(name <> "_bias", bias_shape, bias_init) node = %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: :dense, params: [bias, weight], opts: [] } if activation do node |> activation(activation) else node end end @doc """ Adds a convolution layer to the network. The convolution layer implements a general dimensional convolutional layer - which convolves a kernel over the input to produce an output. Compiles to `Axon.Layers.conv/4`. ## Options * `name` - Layer name. * `kernel_initializer` - Initializer for `kernel` weights. * `bias_initializer` - Initializer for `bias` weights. * `activation` - Element-wise activation function. * `kernel_size` - Size of the kernel spatial dimensions. * `strides` - Stride during convolution. * `padding` - Padding to the spatial dimensions of the input. * `input_dilation` - Dilation to apply to input. * `kernel_dilation` - Dilation to apply to kernel. """ @doc type: :layer def conv(%Axon{output_shape: parent_shape} = x, units, opts \\ []) when is_integer(units) and units > 0 do {id, name} = unique_identifiers(:conv, opts[:name]) kernel_init = opts[:kernel_initializer] || :glorot_uniform bias_init = opts[:bias_initializer] || :zeros activation = opts[:activation] kernel_size = opts[:kernel_size] || 1 strides = opts[:strides] || 1 padding = opts[:padding] || :valid input_dilation = opts[:input_dilation] || 1 kernel_dilation = opts[:kernel_dilation] || 1 inner_rank = Nx.rank(parent_shape) - 2 kernel_size = tuple_or_duplicate(:kernel_size, kernel_size, inner_rank) strides = list_or_duplicate(:strides, strides, inner_rank) input_dilation = list_or_duplicate(:input_dilation, input_dilation, inner_rank) kernel_dilation = list_or_duplicate(:kernel_dilation, kernel_dilation, inner_rank) kernel_shape = Axon.Shape.conv_kernel(parent_shape, units, kernel_size) bias_shape = Axon.Shape.conv_bias(parent_shape, units, kernel_size) output_shape = Axon.Shape.conv( parent_shape, kernel_shape, strides, padding, input_dilation, kernel_dilation ) kernel = param(name <> "_kernel", kernel_shape, kernel_init) bias = param(name <> "_bias", bias_shape, bias_init) node = %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: :conv, params: [bias, kernel], opts: [ strides: strides, padding: padding, input_dilation: input_dilation, kernel_dilation: kernel_dilation ] } if activation do node |> activation(activation) else node end end @doc """ Adds a depthwise convolution layer to the network. The depthwise convolution layer implements a general dimensional depthwise convolution - which is a convolution where the feature group size is equal to the number of input channels. Channel multiplier grows the input channels by the given factor. An input factor of 1 means the output channels are the same as the input channels. Compiles to `Axon.Layers.depthwise_conv`/4. ## Options * `name` - Layer name. * `kernel_initializer` - Initializer for `kernel` weights. * `bias_initializer` - Initializer for `bias` weights. 
* `activation` - Element-wise activation function. * `kernel_size` - Size of the kernel spatial dimensions. * `strides` - Stride during convolution. * `padding` - Padding to the spatial dimensions of the input. * `input_dilation` - Dilation to apply to input. * `kernel_dilation` - Dilation to apply to kernel. """ @doc type: :layer def depthwise_conv(%Axon{output_shape: parent_shape} = x, channel_multiplier, opts \\ []) when is_integer(channel_multiplier) and channel_multiplier >= 1 do {id, name} = unique_identifiers(:depthwise_conv, opts[:name]) kernel_init = opts[:kernel_initializer] || :glorot_uniform bias_init = opts[:bias_initializer] || :zeros activation = opts[:activation] kernel_size = opts[:kernel_size] || 1 strides = opts[:strides] || 1 padding = opts[:padding] || :valid input_dilation = opts[:input_dilation] || 1 kernel_dilation = opts[:kernel_dilation] || 1 inner_rank = Nx.rank(parent_shape) - 2 kernel_size = tuple_or_duplicate(:kernel_size, kernel_size, inner_rank) strides = list_or_duplicate(:strides, strides, inner_rank) input_dilation = list_or_duplicate(:input_dilation, input_dilation, inner_rank) kernel_dilation = list_or_duplicate(:kernel_dilation, kernel_dilation, inner_rank) kernel_shape = Axon.Shape.depthwise_conv_kernel(parent_shape, channel_multiplier, kernel_size) bias_shape = Axon.Shape.depthwise_conv_bias(parent_shape, channel_multiplier, kernel_size) output_shape = Axon.Shape.depthwise_conv( parent_shape, kernel_shape, strides, padding, input_dilation, kernel_dilation ) kernel = param(name <> "_kernel", kernel_shape, kernel_init) bias = param(name <> "_bias", bias_shape, bias_init) node = %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: :depthwise_conv, params: [bias, kernel], opts: [ strides: strides, padding: padding, input_dilation: input_dilation, kernel_dilation: kernel_dilation ] } if activation do node |> activation(activation) else node end end @doc """ Adds a depthwise separable 2-dimensional convolution to the network. Depthwise separable convolutions break the kernel into kernels for each dimension of the input and perform a depthwise conv over the input with each kernel. Compiles to `Axon.Layers.separable_conv2d/6`. ## Options * `name` - Layer name. * `kernel_initializer` - Initializer for `kernel` weights. * `bias_initializer` - Initializer for `bias` weights. * `activation` - Element-wise activation function. * `kernel_size` - Size of the kernel spatial dimensions. * `strides` - Stride during convolution. * `padding` - Padding to the spatial dimensions of the input. * `input_dilation` - Dilation to apply to input. * `kernel_dilation` - Dilation to apply to kernel. 
""" @doc type: :layer def separable_conv2d(%Axon{output_shape: parent_shape} = x, channel_multiplier, opts \\ []) when is_integer(channel_multiplier) and channel_multiplier >= 1 do {id, name} = unique_identifiers(:separable_conv2d, opts[:name]) kernel_init = opts[:kernel_initializer] || :glorot_uniform bias_init = opts[:bias_initializer] || :zeros activation = opts[:activation] kernel_size = opts[:kernel_size] || 1 strides = opts[:strides] || 1 padding = opts[:padding] || :valid input_dilation = opts[:input_dilation] || 1 kernel_dilation = opts[:kernel_dilation] || 1 inner_rank = Nx.rank(parent_shape) - 2 kernel_size = tuple_or_duplicate(:kernel_size, kernel_size, inner_rank) strides = list_or_duplicate(:strides, strides, inner_rank) input_dilation = list_or_duplicate(:input_dilation, input_dilation, inner_rank) kernel_dilation = list_or_duplicate(:kernel_dilation, kernel_dilation, inner_rank) k1_shape = Axon.Shape.separable_conv2d_kernel(parent_shape, channel_multiplier, kernel_size, 1) k2_shape = Axon.Shape.separable_conv2d_kernel(parent_shape, channel_multiplier, kernel_size, 2) b1_shape = Axon.Shape.separable_conv2d_bias(parent_shape, channel_multiplier, kernel_size) b2_shape = Axon.Shape.separable_conv2d_bias(parent_shape, channel_multiplier, kernel_size) output_shape = Axon.Shape.depthwise_conv( parent_shape, Axon.Shape.depthwise_conv_kernel(parent_shape, channel_multiplier, kernel_size), strides, padding, input_dilation, kernel_dilation ) k1 = param(name <> "_kernel_1", k1_shape, kernel_init) b1 = param(name <> "_bias_1", b1_shape, bias_init) k2 = param(name <> "_kernel_2", k2_shape, kernel_init) b2 = param(name <> "_bias_2", b2_shape, bias_init) node = %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: :separable_conv2d, params: [b1, k1, b2, k2], opts: [ strides: strides, padding: padding, input_dilation: input_dilation, kernel_dilation: kernel_dilation ] } if activation do node |> activation(activation) else node end end @doc """ Adds a depthwise separable 3-dimensional convolution to the network. Depthwise separable convolutions break the kernel into kernels for each dimension of the input and perform a depthwise conv over the input with each kernel. Compiles to `Axon.Layers.separable_conv3d/8`. ## Options * `name` - Layer name. * `kernel_initializer` - Initializer for `kernel` weights. * `bias_initializer` - Initializer for `bias` weights. * `activation` - Element-wise activation function. * `kernel_size` - Size of the kernel spatial dimensions. * `strides` - Stride during convolution. * `padding` - Padding to the spatial dimensions of the input. * `input_dilation` - Dilation to apply to input. * `kernel_dilation` - Dilation to apply to kernel. 
""" @doc type: :layer def separable_conv3d(%Axon{output_shape: parent_shape} = x, channel_multiplier, opts \\ []) when is_integer(channel_multiplier) and channel_multiplier >= 1 do {id, name} = unique_identifiers(:separable_conv3d, opts[:name]) kernel_init = opts[:kernel_initializer] || :glorot_uniform bias_init = opts[:bias_initializer] || :zeros activation = opts[:activation] kernel_size = opts[:kernel_size] || 1 strides = opts[:strides] || 1 padding = opts[:padding] || :valid input_dilation = opts[:input_dilation] || 1 kernel_dilation = opts[:kernel_dilation] || 1 inner_rank = Nx.rank(parent_shape) - 2 kernel_size = tuple_or_duplicate(:kernel_size, kernel_size, inner_rank) strides = list_or_duplicate(:strides, strides, inner_rank) input_dilation = list_or_duplicate(:input_dilation, input_dilation, inner_rank) kernel_dilation = list_or_duplicate(:kernel_dilation, kernel_dilation, inner_rank) k1_shape = Axon.Shape.separable_conv3d_kernel(parent_shape, channel_multiplier, kernel_size, 1) k2_shape = Axon.Shape.separable_conv3d_kernel(parent_shape, channel_multiplier, kernel_size, 2) k3_shape = Axon.Shape.separable_conv3d_kernel(parent_shape, channel_multiplier, kernel_size, 3) b1_shape = Axon.Shape.separable_conv3d_bias(parent_shape, channel_multiplier, kernel_size) b2_shape = Axon.Shape.separable_conv3d_bias(parent_shape, channel_multiplier, kernel_size) b3_shape = Axon.Shape.separable_conv3d_bias(parent_shape, channel_multiplier, kernel_size) output_shape = Axon.Shape.depthwise_conv( parent_shape, Axon.Shape.depthwise_conv_kernel(parent_shape, channel_multiplier, kernel_size), strides, padding, input_dilation, kernel_dilation ) k1 = param(name <> "_kernel_1", k1_shape, kernel_init) b1 = param(name <> "_bias_1", b1_shape, bias_init) k2 = param(name <> "_kernel_2", k2_shape, kernel_init) b2 = param(name <> "_bias_2", b2_shape, bias_init) k3 = param(name <> "_kernel_3", k3_shape, kernel_init) b3 = param(name <> "_bias_3", b3_shape, bias_init) node = %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: :separable_conv3d, params: [b1, k1, b2, k2, b3, k3], opts: [ strides: strides, padding: padding, input_dilation: input_dilation, kernel_dilation: kernel_dilation ] } if activation do node |> activation(activation) else node end end @doc """ Adds an activation layer to the network. Activation layers are element-wise functions typically called after the output of another layer. ## Options - `name` - Layer name. """ @doc type: :activation def activation(%Axon{output_shape: shape} = x, activation, opts \\ []) when is_atom(activation) do id = System.unique_integer([:positive, :monotonic]) name = opts[:name] || "#{Atom.to_string(activation)}_#{id}" %Axon{id: id, name: name, output_shape: shape, parent: x, op: activation, params: []} end ## Activation @activation_layers [:celu, :elu, :exp, :gelu, :hard_sigmoid, :hard_silu, :hard_tanh] ++ [:leaky_relu, :linear, :log_sigmoid, :relu, :relu6] ++ [:sigmoid, :silu, :selu, :softmax, :softplus, :softsign, :tanh] for activation <- @activation_layers do @doc """ Adds #{Atom.to_string(activation)} activation layer to the network. See `Axon.Activations.#{Atom.to_string(activation)}/1` for more details. ## Options - `name` - Layer name. 
""" @doc type: :activation def unquote(activation)(%Axon{} = x, opts \\ []) do activation(x, unquote(activation), opts) end end ## Dropout @dropout_layers [:dropout, :feature_alpha_dropout, :spatial_dropout, :alpha_dropout] for dropout <- @dropout_layers do @doc """ Adds #{Atom.to_string(dropout)} layer to the network. See `Axon.Layers.#{Atom.to_string(dropout)}` for more details. ## Options * `:name` - Layer name. * `:rate` - Dropout rate. """ @doc type: :layer def unquote(dropout)(%Axon{} = x, opts \\ []) do dropout(x, unquote(dropout), opts) end end defp dropout(%Axon{output_shape: parent_shape} = x, dropout, opts) do {id, name} = unique_identifiers(dropout, opts[:name]) rate = opts[:rate] || 0.5 %Axon{ id: id, name: name, op: dropout, output_shape: parent_shape, parent: x, params: [], opts: [ rate: rate ] } end ## Pooling @pooling_layers [:max_pool, :avg_pool, :lp_pool] for pool <- @pooling_layers do @doc """ Adds #{Atom.to_string(pool)} layer to the network. See `Axon.Layers.#{Atom.to_string(pool)}` for more details. ## Options * `:name` - Layer name. * `:kernel_size` - Pooling kernel size. * `:strides` - Pooling strides. """ @doc type: :layer def unquote(pool)(%Axon{} = x, opts \\ []) do pool(x, unquote(pool), opts) end end defp pool(%Axon{output_shape: parent_shape} = x, pool, opts) do {id, name} = unique_identifiers(pool, opts[:name]) kernel_size = opts[:kernel_size] || 1 strides = opts[:strides] || 1 padding = opts[:padding] || :valid inner_rank = Nx.rank(parent_shape) - 2 kernel_size = tuple_or_duplicate(:kernel_size, kernel_size, inner_rank) strides = list_or_duplicate(:strides, strides, inner_rank) output_shape = Axon.Shape.pool(parent_shape, kernel_size, strides, padding) %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: pool, params: [], opts: [ kernel_size: kernel_size, strides: strides, padding: padding ] } end ## Adaptive Pooling @adaptive_pooling_layers [:adaptive_avg_pool, :adaptive_max_pool] for pool <- @adaptive_pooling_layers do @doc """ Adds #{Atom.to_string(pool)} layer to the network. See `Axon.Layers.#{Atom.to_string(pool)}` for more details. ## Options * `:name` - Layer name. * `:output_size` - Layer output size. """ @doc type: :layer def unquote(pool)(%Axon{} = x, opts \\ []) do adaptative_pool(x, unquote(pool), opts) end end defp adaptative_pool(%Axon{output_shape: parent_shape} = x, pool, opts) do {id, name} = unique_identifiers(pool, opts[:name]) inner_rank = Nx.rank(parent_shape) - 2 output_size = tuple_or_duplicate(:output_size, opts[:output_size], inner_rank) output_shape = Axon.Shape.adaptive_pool(parent_shape, output_size) %Axon{ id: id, name: name, output_shape: output_shape, parent: x, op: pool, params: [], opts: [ output_size: output_size ] } end ## Normalization @normalization_layers [:batch_norm, :layer_norm, :instance_norm] for norm <- @normalization_layers do @doc """ Adds #{Atom.to_string(norm)} layer to the network. See `Axon.Layers.#{Atom.to_string(norm)}` for more details. ## Options * `:name` - Layer name. * `:gamma_initializer` - Gamma parameter initializer. * `:beta_initializer` - Beta parameter initializer. * `:channel_index` - Input feature index used for calculating mean and variance. * `:epsilon` - Numerical stability term. 
""" @doc type: :layer def unquote(norm)(%Axon{} = x, opts \\ []) do norm(x, unquote(norm), opts) end end defp norm(%Axon{output_shape: shape} = x, norm, opts) do {id, name} = unique_identifiers(norm, opts[:name]) gamma_init = opts[:gamma_initializer] || :glorot_uniform beta_init = opts[:beta_initializer] || :zeros channel_index = opts[:channel_index] || 1 epsilon = opts[:epsilon] || 1.0e-5 gamma_shape = Axon.Shape.norm_param(shape, channel_index) beta_shape = Axon.Shape.norm_param(shape, channel_index) gamma = param(name <> "_gamma", gamma_shape, gamma_init) beta = param(name <> "_beta", beta_shape, beta_init) %Axon{ id: id, name: name, output_shape: shape, parent: x, op: norm, params: [beta, gamma], opts: [ epsilon: epsilon, channel_index: channel_index ] } end @doc """ Adds a group normalization layer to the network. See `Axon.Layers.group_norm` for more details. ## Options * `:name` - Layer name. * `:gamma_initializer` - Gamma parameter initializer. * `:beta_initializer` - Beta parameter initializer. * `:channel_index` - Input feature index used for calculating mean and variance. * `:epsilon` - Numerical stability term. """ @doc type: :layer def group_norm(%Axon{output_shape: shape} = x, group_size, opts \\ []) when is_integer(group_size) and group_size >= 1 do {id, name} = unique_identifiers(:group_norm, opts[:name]) gamma_init = opts[:gamma_initializer] || :glorot_uniform beta_init = opts[:beta_initializer] || :zeros channel_index = opts[:channel_index] || 1 epsilon = opts[:epsilon] || 1.0e-5 gamma_shape = Axon.Shape.norm_param(shape, channel_index) beta_shape = Axon.Shape.norm_param(shape, channel_index) gamma = param(name <> "_gamma", gamma_shape, gamma_init) beta = param(name <> "_beta", beta_shape, beta_init) node = %Axon{ id: id, name: name, output_shape: shape, parent: x, op: :group_norm, params: [beta, gamma], opts: [ epsilon: epsilon, channel_index: channel_index, group_size: group_size ] } node end @doc """ Applies the given `Nx` expression to the input. ## Options * `name` - Layer name. """ @doc type: :composition def nx(%Axon{output_shape: shape} = x, fun, opts \\ []) when is_function(fun, 1) do {id, name} = unique_identifiers(:nx, opts[:name]) param = Nx.Defn.Expr.parameter(:nx, {:f, 32}, shape, 0) expr = if Nx.Defn.Compiler.current() do fun.(param) else Nx.Defn.jit(fun, [param], compiler: Axon.Defn) end node = %Axon{ id: id, name: name, output_shape: expr.shape, parent: x, op: :nx, params: [], opts: [ fun: fun ] } node end @doc """ Adds a flatten layer to the network. This layer will flatten all but the batch dimensions of the input into a single layer. Typically called to flatten the output of a convolution for use with a dense layer. ## Options * `:name` - Layer name. """ @doc type: :composition def flatten(%Axon{output_shape: shape} = x, opts \\ []) do {id, name} = unique_identifiers(:flatten, opts[:name]) new_shape = Axon.Shape.flatten(shape) %Axon{id: id, name: name, output_shape: new_shape, parent: x, op: :flatten, params: []} end @doc """ Adds a concatenate layer to the network. This layer will concatenate inputs along the last dimension unless specified otherwise. ## Options * `:name` - Layer name. * `:axis` - Concatenate axis. 
""" @doc type: :composition def concatenate(%Axon{output_shape: x_shape} = x, %Axon{output_shape: y_shape} = y) do {id, name} = unique_identifiers(:concatenate, nil) axis = Nx.rank(x_shape) - 1 output_shape = Axon.Shape.concatenate([x_shape, y_shape], axis) %Axon{ id: id, name: name, output_shape: output_shape, parent: [x, y], op: :concatenate, params: [], opts: [axis: axis] } end @doc type: :composition def concatenate([%Axon{output_shape: shape} | _] = inputs) when is_list(inputs) do {id, name} = unique_identifiers(:concatenate, nil) axis = Nx.rank(shape) - 1 input_shapes = inputs |> Enum.map(fn %Axon{output_shape: shape} -> shape end) output_shape = Axon.Shape.concatenate(input_shapes, axis) %Axon{ id: id, name: name, output_shape: output_shape, parent: inputs, op: :concatenate, params: [], opts: [axis: axis] } end @element_wise_layers [:add, :subtract, :multiply] for op <- @element_wise_layers do @doc """ Adds a #{op} layer to the network. This layer performs an element-wise #{Atom.to_string(op)} operation on input layers. All input layers must be the same shape. ## Options * `:name` - Layer name. """ @doc type: :layer def unquote(op)(%Axon{output_shape: shape} = x, %Axon{output_shape: shape} = y) do {id, name} = unique_identifiers(unquote(op), nil) %Axon{id: id, name: name, output_shape: shape, parent: [x, y], op: unquote(op), params: []} end @doc type: :layer def unquote(op)([%Axon{output_shape: shape} | rest] = inputs) do {id, name} = unique_identifiers(unquote(op), nil) output_shape = Enum.reduce(rest, shape, fn %Axon{output_shape: shape}, acc -> unless shape == acc do raise ArgumentError, "all input shapes must match" end end) %Axon{ id: id, name: name, output_shape: output_shape, parent: inputs, op: unquote(op), params: [] } end end @doc """ Compiles the given model to `{init_fn, predict_fn}`. """ def compile(model) do Axon.Compiler.__compile__(model) end @doc """ Compiles and runs the given models initialization function with the given compiler options. """ @doc type: :execution defmacro init(model, opts \\ []) do define_init(model, :init, [], opts) end @doc """ Compiles and runs the given Axon model with `params` on `input` with the given compiler options. 
""" @doc type: :execution defmacro predict(model, params, input, opts \\ []) do define_predict(model, :predict, [params, input], opts) end ## Implementation defp define_init(model, caller, args, opts \\ []) do quote do Nx.Defn.Kernel.transform(unquote(args), fn args -> model = unquote(model) opts = unquote(opts) caller = unquote(caller) Axon.Compiler.__jit_init__(model, caller, args, opts) end) end end defp define_predict(model, caller, args, opts \\ []) do quote do Nx.Defn.Kernel.transform(unquote(args), fn args -> model = unquote(model) opts = unquote(opts) caller = unquote(caller) Axon.Compiler.__jit_predict__(model, caller, args, opts) end) end end ## Inspection defimpl Inspect do import Inspect.Algebra def inspect(axon, _opts) do title = "Model" header = ["Layer", "Shape", "Parameters"] {_, cache} = axon_to_rows(axon, %{}) rows = cache |> Enum.sort() |> Enum.unzip() |> Kernel.elem(1) rows |> TableRex.Table.new(header, title) |> TableRex.Table.render!( header_separator_symbol: "=", title_separator_symbol: "=", vertical_style: :off ) |> string() end defp axon_to_rows(%{id: id} = graph, cache) do case cache do %{^id => row} -> {row, cache} %{} -> {row, cache} = do_axon_to_rows(graph, cache) cache = Map.put(cache, id, row) {row, cache} end end defp do_axon_to_rows(%Axon{op: op, parent: parents, name: name, output_shape: shape}, cache) when is_list(parents) do {names, cache} = Enum.map_reduce(parents, cache, fn %Axon{name: name} = graph, cache -> {_, cache} = axon_to_rows(graph, cache) {name, cache} end) row = [name <> " ( #{Atom.to_string(op)} #{inspect(names)} )", "#{inspect(shape)}", 0] {row, cache} end defp do_axon_to_rows( %Axon{op: op, params: params, parent: parent, name: name, output_shape: shape}, cache ) do cache = if parent do {_, cache} = axon_to_rows(parent, cache) cache else cache end num_params = params |> Enum.reduce(0, fn %Axon.Parameter{shape: shape}, acc -> acc + Nx.size(shape) end) row = [name <> " ( #{Atom.to_string(op)} )", "#{inspect(shape)}", "#{num_params}"] {row, cache} end end ## Helpers defp tuple_or_duplicate(key, tuple_or_integer, rank) do cond do is_tuple(tuple_or_integer) -> if tuple_size(tuple_or_integer) != rank do raise ArgumentError, "expected #{inspect(key)} to be a #{rank}-element tuple, " <> "got: #{inspect(tuple_or_integer)}" end is_integer(tuple_or_integer) -> Tuple.duplicate(tuple_or_integer, rank) true -> raise ArgumentError, "expected #{inspect(key)} to be an integer or a tuple, " <> "got: #{inspect(tuple_or_integer)}" end end defp list_or_duplicate(key, list_or_integer, rank) do cond do is_list(list_or_integer) -> if length(list_or_integer) != rank do raise ArgumentError, "expected #{inspect(key)} to be a #{rank}-element list, " <> "got: #{inspect(list_or_integer)}" end is_integer(list_or_integer) -> List.duplicate(list_or_integer, rank) true -> raise ArgumentError, "expected #{inspect(key)} to be an integer or a list, " <> "got: #{inspect(list_or_integer)}" end end defp unique_identifiers(type, nil) do id = System.unique_integer([:positive, :monotonic]) {id, Atom.to_string(type) <> "_#{id}"} end defp unique_identifiers(_type, name), do: {System.unique_integer([:positive, :monotonic]), name} defp param(name, shape, initializer, _opts \\ []) do id = System.unique_integer([:positive, :monotonic]) %Axon.Parameter{id: id, name: name, shape: shape, initializer: initializer} end end
lib/axon.ex
0.953373
0.737454
axon.ex
starcoder
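To make the construction above concrete, here is what `dense/3` builds when an `:activation` option is given. The shapes assume `Axon.Shape.dense/2` yields `{nil, units}`; everything else follows directly from the code above:

input = Axon.input({nil, 8})
model = Axon.dense(input, 4, activation: :relu)

# dense/3 pipes its node through activation/3, so the outermost node is the activation
model.op                  #=> :relu
model.parent.op           #=> :dense
model.parent.output_shape #=> {nil, 4}
model.parent.params       # [bias, weight] as Axon.Parameter structs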
defmodule RulEx.Fixtures.Eval do def test_cases, do: valid_expressions() ++ invalid_expressions() ++ value_expressions() def valid_expressions do comparison_test_cases() ++ comparison_operands_type_mismatch() ++ [ # By default we reject unknown operands %{ expr: ["custom", [:var, "boolean", false]], db: %{}, expected: {:error, "unsupported operand 'custom' provided"}, message: "by default we reject any unknown operand" } ] end def value_expressions do # Eval on `val` and `var` is done based on the [ # truthiness of the resolved value %{ expr: [:val, "number", 10], db: %{}, expected: {:ok, true}, message: "value `10` is truthy and must yield true" }, %{ expr: [:val, "number", 0], db: %{}, expected: {:ok, true}, message: "value `0` is truthy and must yield true" }, %{ expr: [:val, "string", ""], db: %{}, expected: {:ok, true}, message: "value `` (empty string) is truthy and must yield true" }, %{ expr: [:val, "date", "2021-01-01"], db: %{}, expected: {:ok, true}, message: "value `2021-01-01` (typed as date) is truthy and must yield true" }, %{ expr: [:val, "any", nil], db: %{}, expected: {:ok, false}, message: "value `nil` is falsy and must yield false" }, %{ expr: [:val, "boolean", false], db: %{}, expected: {:ok, false}, message: "value `false` is falsy and must yield false" }, %{ expr: [:var, "number", "x"], db: %{"x" => 10}, expected: {:ok, true}, message: "value `10` is truthy and must yield true" }, %{ expr: [:var, "number", "x"], db: %{"x" => 0}, expected: {:ok, true}, message: "value `0` is truthy and must yield true" }, %{ expr: [:var, "string", "x"], db: %{"x" => ""}, expected: {:ok, true}, message: "value `` (empty string) is truthy and must yield true" }, %{ expr: [:var, "date", "x"], db: %{"x" => "2021-01-01"}, expected: {:ok, true}, message: "value `2021-01-01` (typed as date) is truthy and must yield true" }, %{ expr: [:var, "any", "x"], db: %{"x" => nil}, expected: {:error, "invalid value 'nil' given for type 'any'"}, message: "`var` does not allow for value `nil` to be yielded back" }, %{ expr: [:var, "boolean", "x"], db: %{"x" => false}, expected: {:ok, false}, message: "value `false` is falsy and must yield false" } ] end def invalid_expressions do [ %{ expr: 10, db: %{}, expected: {:error, "invalid expression given"}, message: "rejects non valid expression `10`" }, %{ expr: [], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects empty list as an expression" }, %{ expr: [:var], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects incomplete `var` expression" }, %{ expr: [:val], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects incomplete `val` expression" }, %{ expr: [:val, "any"], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects incomplete `val` expression" }, %{ expr: [:val, "any", "value", "extra"], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects incorrect `val` expression" }, %{ expr: [:var, "any", "value", "default", "extra"], db: %{}, expected: {:error, "invalid expression given"}, message: "rejects incorrect `var` expression" }, %{ expr: [:>], db: %{}, expected: {:error, nil}, message: "rejects empty list as an expression" }, %{ expr: [:>, 10], db: %{}, expected: {:error, nil}, message: "rejects empty list as an expression" }, %{ expr: [:>, 10, 20, 30], db: %{}, expected: {:error, nil}, message: "rejects empty list as an expression" }, %{ expr: [:=, 10, 20, 30], db: %{}, expected: {:error, nil}, message: "rejects empty list as an expression" }, 
      %{
        expr: [:=, 10],
        db: %{},
        expected: {:error, nil},
        message: "rejects incomplete `=` expression"
      },
      %{
        expr: [:!],
        db: %{},
        expected: {:error, nil},
        message: "rejects incomplete `!` expression"
      },
      %{
        expr: [:!, 10],
        db: %{},
        expected: {:error, "invalid expression given"},
        message: "rejects incorrect `!` expression"
      },
      %{
        expr: [:&, 10, 20],
        db: %{},
        expected: {:error, "non expressions provided"},
        message: "rejects incorrect `&` expression"
      },
      %{
        expr: [:|, 10, 20],
        db: %{},
        expected: {:error, "non expressions provided"},
        message: "rejects incorrect `|` expression"
      }
    ]
  end

  defp comparison_test_cases do
    [
      # When data type is number
      %{
        expr: [:>, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, true},
        message: "comparison greater than on numbers"
      },
      %{
        expr: [:>, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison greater than on numbers"
      },
      %{
        expr: [:>, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison greater than on numbers"
      },
      %{
        expr: [:>=, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, true},
        message: "comparison greater than or equals on numbers"
      },
      %{
        expr: [:>=, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison greater than or equals on numbers"
      },
      %{
        expr: [:>=, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison greater than or equals on numbers"
      },
      %{
        expr: [:<, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, false},
        message: "comparison less than on numbers"
      },
      %{
        expr: [:<, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison less than on numbers"
      },
      %{
        expr: [:<, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison less than on numbers"
      },
      %{
        expr: [:<=, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, false},
        message: "comparison less than or equals on numbers"
      },
      %{
        expr: [:<=, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison less than or equals on numbers"
      },
      %{
        expr: [:<=, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison less than or equals on numbers"
      },
      %{
        expr: [:=, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, false},
        message: "comparison equals on numbers"
      },
      %{
        expr: [:=, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison equals on numbers"
      },
      %{
        expr: [:=, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison equals on numbers"
      },
      %{
        expr: [:!=, [:val, "number", 10], [:val, "number", 9]],
        db: %{},
        expected: {:ok, true},
        message: "comparison not equals on numbers"
      },
      %{
        expr: [:!=, [:val, "number", 9], [:val, "number", 10]],
        db: %{},
        expected: {:ok, true},
        message: "comparison not equals on numbers"
      },
      %{
        expr: [:!=, [:val, "number", 10], [:val, "number", 10]],
        db: %{},
        expected: {:ok, false},
        message: "comparison not equals on numbers"
      }
      # TODO: other datatype
      # TODO: edge cases like dates and times
    ]
  end

  defp comparison_operands_type_mismatch do
    Enum.flat_map(RulEx.Operands.comparison(), fn op ->
      [
        %{
          expr: [op, [:val, "string", 10], [:val, "number", 9]],
          db: %{},
          expected: {:error, "type mismatch in `#{op}` operand"},
          message: "comparison operands must reject input if arguments have mismatching types"
        }
      ]
    end)
  end
end
test/fixtures/eval.ex
0.805517
0.523177
eval.ex
starcoder
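One hedged way these fixtures might be consumed from ExUnit. `RulEx.eval/2` is an assumption here (the function under test is not shown above); only the fixture module itself is given:

defmodule RulEx.EvalTest do
  use ExUnit.Case, async: true

  # Generate one test per fixture; the index keeps duplicate messages unique.
  for {tc, i} <- Enum.with_index(RulEx.Fixtures.Eval.test_cases()) do
    @tc tc
    test "eval case #{i}: #{tc.message}" do
      # Each fixture carries the expression, the variable db, and the expected result.
      assert RulEx.eval(@tc.expr, @tc.db) == @tc.expected
    end
  end
end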
defmodule Himamo.Logzero do
  @type ext_float :: float | :logzero

  @doc ~S"""
  Returns the `:logzero` constant.
  """
  @spec const() :: :logzero
  def const, do: :logzero

  @doc ~S"""
  Extended exponential function.

  Standard exponential function `e^x`, extended to handle the input `0`:

    * `e^LOGZERO = 0`
  """
  @spec ext_exp(ext_float) :: float
  def ext_exp(:logzero), do: 0.0
  def ext_exp(x), do: :math.exp(x)

  @doc ~S"""
  Extended (natural) logarithm function.

  Standard natural logarithm function `ln(x)`, extended to handle the input `0`:

    * `log(0) = LOGZERO`

  Function is named `log` instead of `ln` to be consistent with erlang's
  `:math.log`.
  """
  @spec ext_log(float) :: ext_float
  def ext_log(0.0), do: :logzero
  def ext_log(x) when is_float(x), do: :math.log(x)

  @doc ~S"""
  Extended (natural) logarithm sum function.

  Computes the extended natural logarithm of the sum of `x` and `y` (inputs
  are given as extended logarithms):

    * `ext_log_sum(ext_log(x), ext_log(y)) = ext_log(x + y)`
    * `ext_log_sum(LOGZERO, ext_log(y)) = ext_log(y)`
    * `ext_log_sum(ext_log(x), LOGZERO) = ext_log(x)`
  """
  @spec ext_log_sum(ext_float, ext_float) :: ext_float
  def ext_log_sum(log_x, :logzero), do: log_x
  def ext_log_sum(:logzero, log_y), do: log_y

  def ext_log_sum(log_x, log_y) do
    if log_x > log_y do
      log_x + ext_log(1 + :math.exp(log_y - log_x))
    else
      log_y + ext_log(1 + :math.exp(log_x - log_y))
    end
  end

  @doc ~S"""
  Extended (natural) logarithm product function.

  Computes the extended natural logarithm of the product of x and y:

    * `ext_log_product(ext_log(x), ext_log(y)) = ext_log(x) + ext_log(y)`
    * `ext_log_product(LOGZERO, ext_log(y)) = LOGZERO`
    * `ext_log_product(ext_log(x), LOGZERO) = LOGZERO`
  """
  @spec ext_log_product(ext_float, ext_float) :: ext_float
  def ext_log_product(_log_x, :logzero), do: :logzero
  def ext_log_product(:logzero, _log_y), do: :logzero

  def ext_log_product(log_x, log_y) when is_float(log_x) and is_float(log_y) do
    log_x + log_y
  end

  @doc false
  def sum_log_values(enum) do
    Enum.reduce(enum, const(), fn element, sum ->
      ext_log_sum(sum, element)
    end)
  end
end
lib/himamo/logzero.ex
0.8874
0.652926
logzero.ex
starcoder
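The extended-log identities documented above can be checked numerically in iex; values are exact up to floating-point error:

alias Himamo.Logzero

log_half = Logzero.ext_log(0.5)
log_quarter = Logzero.ext_log(0.25)

Logzero.ext_exp(Logzero.ext_log_sum(log_half, log_quarter))     #=> 0.75
Logzero.ext_exp(Logzero.ext_log_product(log_half, log_quarter)) #=> 0.125
Logzero.ext_log_sum(:logzero, log_half) == log_half             #=> true
Logzero.ext_log_product(:logzero, log_half)                     #=> :logzero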
defmodule Andy.Profiles.Rover.GMDefs.ObservingOther do @moduledoc "The GM definition for :observing_other" alias Andy.GM.{GenerativeModelDef, Intention, Conjecture, ConjectureActivation} import Andy.GM.Utils import Andy.Utils, only: [now: 0] def gm_def() do %GenerativeModelDef{ name: :observing_other, min_round_duration: 1_000, conjectures: [ conjecture(:observed) ], contradictions: [], priors: %{ observed: %{ about: :other, values: %{ is: false, direction: :unknown, proximity: :unknown, duration: 0, recently_believed_or_tried?: false } } }, intentions: %{ face: [ %Intention{ intent_name: :turn, valuator: face_turning_valuator(), repeatable: true }, %Intention{ intent_name: :say, valuator: face_saying_valuator(), repeatable: false } ] } } end # Conjectures # goal defp conjecture(:observed) do %Conjecture{ name: :observed, activator: goal_activator( # until observing the other succeeded or failed for at least 5 consecutive seconds fn %{duration: duration} -> duration >= 5_000 end, :other ), predictors: [ no_change_predictor("*:*:proximity_mod", default: %{detected: :unknown}), no_change_predictor("*:*:direction_mod", default: %{detected: :unknown}) ], valuator: observed_belief_valuator(), intention_domain: [:face] } end # Conjecture belief valuators defp observed_belief_valuator() do fn conjecture_activation, [_round | previous_rounds] = rounds -> about = conjecture_activation.about conjecture_name = ConjectureActivation.conjecture_name(conjecture_activation) proximity = latest_perceived_value( rounds, about, "*:*:proximity_mod", :detected, default: :unknown ) direction = latest_perceived_value(rounds, about, "*:*:direction_mod", :detected, default: :unknown) facing? = less_than?(absolute(direction), 181) now = now() duration = case believed_since(previous_rounds, about, :observed, :is, facing?) do nil -> 0 since -> now - since end recently_observed_or_tried? = recent_believed_values(rounds, about, conjecture_name, matching: %{}, since: now - 10_000) |> Enum.any?(&(&1.duration >= 5_000)) %{ is: facing?, direction: direction, proximity: proximity, duration: duration, recently_observed_or_tried: recently_observed_or_tried? } end end # Intention valuators defp face_turning_valuator() do fn %{direction: direction, recently_observed_or_tried: recently_observed_or_tried?} -> cond do # don't bother if in the last 20 secs we observed the other, or failed to, for at least 5 consecutive secs recently_observed_or_tried? -> nil direction == :unknown -> turn_direction = Enum.random([:right, :left]) %{value: %{turn_direction: turn_direction, turn_time: 1}, duration: 1} abs(direction) < 181 -> nil true -> turn_direction = if direction < 0, do: :left, else: :right %{value: %{turn_direction: turn_direction, turn_time: 0.5}, duration: 0.5} end end end defp face_saying_valuator() do fn %{direction: direction, recently_observed_or_tried: recently_observed_or_tried?} -> name_of_other = Andy.name_of_other() cond do name_of_other == nil -> nil # don't bother if in the last 20 secs we observed the other, or failed to, for at least 5 consecutive secs recently_observed_or_tried? -> nil direction == :unknown -> saying(" #{name_of_other}, where are you?") abs(direction) < 181 -> saying("I'm watching you, #{name_of_other}") true -> saying("There you are, #{name_of_other}") end end end end
lib/andy/profiles/rover/gm_defs/observing_other.ex
0.842378
0.574693
observing_other.ex
starcoder
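The generative model definition above is plain data, so its pieces can be read back directly, which is handy for sanity checks:

gm = Andy.Profiles.Rover.GMDefs.ObservingOther.gm_def()

gm.priors.observed.values.is #=> false
gm.min_round_duration        #=> 1000
length(gm.intentions.face)   #=> 2 (one :turn and one :say intention)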
defmodule Mayo.Map do
  @doc """
  Checks the minimum number of the keys in the map.

      iex> Mayo.Map.min(%{foo: "bar"}, 1)
      %{foo: "bar"}

      iex> Mayo.Map.min(%{}, 1)
      {:error, %Mayo.Error{type: "map.min"}}
  """
  def min(value, limit) when is_map(value) do
    if map_size(value) < limit do
      {:error, %Mayo.Error{type: "map.min"}}
    else
      value
    end
  end

  def min(value, _), do: value

  @doc """
  Checks the maximum number of the keys in the map.

      iex> Mayo.Map.max(%{foo: "bar"}, 1)
      %{foo: "bar"}

      iex> Mayo.Map.max(%{foo: "bar", baz: "boo"}, 1)
      {:error, %Mayo.Error{type: "map.max"}}
  """
  def max(value, limit) when is_map(value) do
    if map_size(value) > limit do
      {:error, %Mayo.Error{type: "map.max"}}
    else
      value
    end
  end

  def max(value, _), do: value

  @doc """
  Checks the number of the keys in the map.

      iex> Mayo.Map.length(%{foo: "bar"}, 1)
      %{foo: "bar"}

      iex> Mayo.Map.length(%{foo: "bar"}, 1..3)
      %{foo: "bar"}

      iex> Mayo.Map.length(%{}, 1)
      {:error, %Mayo.Error{type: "map.length"}}
  """
  def length(value, limit) when is_map(value) do
    if test_length(value, limit) do
      value
    else
      {:error, %Mayo.Error{type: "map.length"}}
    end
  end

  def length(value, _), do: value

  defp test_length(value, limit) when is_number(limit), do: map_size(value) == limit
  defp test_length(value, limit), do: Enum.member?(limit, map_size(value))

  @doc """
  Checks the presence of keys.

      iex> Mayo.Map.with(%{foo: "bar", baz: "boo"}, [:foo, :baz])
      %{foo: "bar", baz: "boo"}

      iex> Mayo.Map.with(%{foo: "bar"}, [:foo, :baz])
      {:error, %Mayo.Error{type: "map.with", paths: [:baz]}}
  """
  def with(value, peers) when is_map(value) do
    Enum.reduce peers, value, fn key, acc ->
      case acc do
        {:error, _} = err ->
          err

        _ ->
          case Map.fetch(acc, key) do
            :error ->
              {:error, %Mayo.Error{paths: [key], type: "map.with"}}

            _ ->
              acc
          end
      end
    end
  end

  def with(value, _), do: value

  @doc """
  Forbids the presence of keys.

      iex> Mayo.Map.without(%{foo: "bar"}, [:baz, :boo])
      %{foo: "bar"}

      iex> Mayo.Map.without(%{foo: "bar", baz: "boo"}, [:baz, :boo])
      {:error, %Mayo.Error{type: "map.without", paths: [:baz]}}
  """
  def without(value, peers) when is_map(value) do
    Enum.reduce peers, value, fn key, acc ->
      case acc do
        {:error, _} = err ->
          err

        _ ->
          case Map.fetch(acc, key) do
            :error ->
              acc

            _ ->
              {:error, %Mayo.Error{paths: [key], type: "map.without"}}
          end
      end
    end
  end

  def without(value, _), do: value

  @doc """
  Rename a key to another name.

      iex> Mayo.Map.rename(%{foo: "bar"}, :foo, :baz)
      %{baz: "bar"}

      iex> Mayo.Map.rename(%{foo: "bar"}, :bar, :baz)
      %{foo: "bar"}
  """
  def rename(value, from, to) when is_map(value) do
    case Map.fetch(value, from) do
      {:ok, result} ->
        Map.put(value, to, result)
        |> Map.delete(from)

      :error ->
        value
    end
  end

  def rename(value, _, _), do: value
end
lib/mayo/map.ex
0.779154
0.440529
map.ex
starcoder
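Because every check returns either the map itself or an `{:error, %Mayo.Error{}}` tuple, validations chain naturally; a minimal hand-rolled pipeline over the functions above:

with %{} = v <- Mayo.Map.min(%{foo: 1, bar: 2}, 1),
     %{} = v <- Mayo.Map.max(v, 3),
     %{} = v <- Mayo.Map.without(v, [:admin]) do
  Mayo.Map.rename(v, :foo, :baz)
end
#=> %{baz: 1, bar: 2}
# Any failing check returns its error tuple, which falls through the `with`.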
defmodule Observable.Repo do
  @moduledoc """
  Defines observable functionality for an `Ecto.Repo`.

  Observable functionality is defined as the ability to hook into the
  lifecycle of a struct to perform some kind of work based on the repo action
  performed.

  ## Setup

  Let's say we have a `Post` schema. Each post can have many topics. Users can
  subscribe to topics. Whenever a post is created, we are responsible for
  informing the subscribed users.

  ### Repo

  Given the above, let's set up our new "observable" repo.

      defmodule Repo do
        use Ecto.Repo, otp_app: :my_app
        use Observable.Repo
      end

  We have defined our repo as normal - but with the addition of
  `use Observable.Repo` to bring in the required observable functionality.

  ### Observer

  Let's create our new observer now.

      defmodule SubscribersObserver do
        use Observable, :observer

        # Let's ignore posts that don't have any topics.
        def handle_notify(:insert, {_repo, _old, %Post{topics: []}}) do
          :ok
        end

        def handle_notify(:insert, {_repo, _old, %Post{topics: topics}}) do
          # Do work required to inform subscribed users
        end
      end

  The response given by an observer must be one of three formats:

    * `:ok` - typically used when ignoring a notification.
    * `{:ok, result}` - a valid operation.
    * `{:error, result}` - an invalid operation that will trigger a transaction rollback.

  ### Schema

  Now that we have our observer set up, let's modify our Post schema to
  support notifying our observers.

      defmodule Post do
        use Ecto.Schema
        use Observable, :notifier

        schema "posts" do
          field(:title, :string)
          field(:body, :string)
          field(:topics, {:array, :string}, default: [])
        end

        observations do
          action(:insert, [SubscribersObserver])
        end
      end

  The actions must be either `:insert`, `:update`, or `:delete`. We can add as
  many observers to a given action as needed. Simply add them to the list. For
  example, we can define an observation for a `:delete` action - which will
  notify 2 observers:

      action(:delete, [ObserverOne, ObserverTwo])

  ### Usage

  Now that we are starting to use "observable" behaviour, we must modify the
  way in which we insert posts with our repo.

      def create_post(params \\ %{}) do
        %Post{}
        |> Post.changeset(params)
        |> Repo.insert_and_notify()
      end

  Instead of calling the normal `c:Ecto.Repo.insert/2` function, we use
  `c:insert_and_notify/3`. This performs the exact same action as
  `c:Ecto.Repo.insert/2` (and returns the same results). The only change is
  that the insert operation is done using an `Ecto.Multi` operation. Each
  observer is then added to the multi. The final multi is passed to a
  transaction. Any observer that fails will fail the entire transaction.

  Let's say we want to let our users know when a post's topic changes to
  something they have subscribed to. We must modify our observer for this
  functionality.

      def handle_notify(:update, {_repo, %Post{topics: old_topics}, %Post{topics: new_topics}})
          when old_topics != new_topics do
        # Get any additional topics and inform subscribed users.
      end

      # Define a "catch all"
      def handle_notify(_action, _data) do
        :ok
      end

  Now, let's modify our schema to reflect the updates to our observer.

      observations do
        action(:insert, [SubscribersObserver])
        action(:update, [SubscribersObserver])
      end

  Given the above, we can now notify users during updates.

      def update_post(post, params \\ %{}) do
        post
        |> Post.changeset(params)
        |> Repo.update_and_notify()
      end

  All of the functionality above can be carried over with an
  `action(:delete, [SubscribersObserver])` observation and the
  `c:delete_and_notify/3` function being invoked.
""" alias Ecto.Multi @doc """ Inserts a struct defined via `Ecto.Schema` or a changeset and informs observers. Upon success, the repo, old struct and new struct are passed in the form - `{repo, old, new}` - to any observer that was assigned to observe the `:insert` action for the schema. This will return whatever response that `c:Ecto.Repo.insert/2` returns. Please see its documentation for further details. The `update_opts` are the options passed to the `Ecto.Multi.insert/2` operation. Since the entire operation is wrapped in a transaction, we can also pass `transaction_opts` which will be used with `c:Ecto.Repo.transaction/2`. Any observer must return a valid response as detailed in the "Observer" section above. """ @callback insert_and_notify( struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), insert_opts :: Keyword.t(), transaction_opts :: Keyword.t() ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} @doc """ Updates a changeset using its primary key and informs observers. Upon success, the repo, old struct and new struct are passed in the form - `{repo, old, new}` - to any observer that was assigned to observe the `:update` action for the schema. This will return whatever response that `c:Ecto.Repo.update/2` returns. Please see its documentation for further details. The `update_opts` are the options passed to the `Ecto.Multi.update/3` operation. Since the entire operation is wrapped in a transaction, we can also pass `transaction_opts` which will be used with `c:Ecto.Repo.transaction/2`. Any observer must return a valid response as detailed in the "Observer" section above. """ @callback update_and_notify( changeset :: Ecto.Changeset.t(), update_opts :: Keyword.t(), transaction_opts :: Keyword.t() ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} @doc """ Deletes a struct using its primary key and informs observers. Upon success, the repo, old struct and new struct are passed in the form - `{repo, old, new}` - to any observer that was assigned to observe the `:delete` action for the schema. This will return whatever response that `c:Ecto.Repo.delete/2` returns. Please see its documentation for further details. The `delete_opts` are the options passed to the `Ecto.Multi.delete/2` operation. Since the entire operation is wrapped in a transaction, we can also pass `transaction_opts` which will be used with `c:Ecto.Repo.transaction/2`. Any observer must return a valid response as detailed in the "Observer" section above. 
""" @callback delete_and_notify( struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), delete_opts :: Keyword.t(), transaction_opts :: Keyword.t() ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} defmacro __using__(_opts) do quote do @behaviour Observable.Repo def insert_and_notify(changeset_or_schema, insert_opts \\ [], transaction_opts \\ []) do multi = Multi.new() |> Multi.insert(:data, changeset_or_schema, insert_opts) Observable.Repo.notify(__MODULE__, :insert, changeset_or_schema, multi, transaction_opts) end def update_and_notify(changeset, update_opts \\ [], transaction_opts \\ []) do multi = Multi.new() |> Multi.update(:data, changeset, update_opts) Observable.Repo.notify(__MODULE__, :update, changeset, multi, transaction_opts) end def delete_and_notify(changeset_or_schema, delete_opts \\ [], transaction_opts \\ []) do multi = Multi.new() |> Multi.delete(:data, changeset_or_schema, delete_opts) Observable.Repo.notify(__MODULE__, :delete, changeset_or_schema, multi, transaction_opts) end end end @doc false def notify(repo, action, changeset_or_schema, multi, opts \\ []) do old_data = case changeset_or_schema do %Ecto.Changeset{data: data} -> data other -> other end multi = old_data.__struct__ |> Observable.observers(action) |> Enum.reduce(multi, fn observer, multi -> Multi.run(multi, :"#{observer}", fn repo, %{data: new_data} -> case Observable.notify_one(observer, action, {repo, old_data, new_data}) do :ok -> {:ok, nil} result -> result end end) end) case repo.transaction(multi, opts) do {:ok, %{data: data}} -> {:ok, data} {:error, _, error_value, _} -> {:error, error_value} end end end
lib/observable/repo.ex
0.93315
0.573559
repo.ex
starcoder
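Observers can also veto a write: per the response contract above, returning `{:error, reason}` from `handle_notify/2` fails the corresponding `Multi.run/3` step, which rolls back the whole transaction. A hypothetical guard-style observer (the module name and `:locked` field are illustrative only):

defmodule LockedRecordObserver do
  use Observable, :observer

  # Refuse deletion of anything flagged as locked; this aborts the transaction.
  def handle_notify(:delete, {_repo, %{locked: true}, _new}) do
    {:error, :record_locked}
  end

  def handle_notify(_action, _data), do: :ok
end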
defmodule Elsa.Wrapper do @moduledoc """ Provides a supervisable wrapper for the Elsa supervision tree to manage brod producers and consumers. Provides convenience functions for starting producer and consumer processes directly without the default supervisors brod interposes between them and the application. By taking over supervision of the processes directly interacting with Kafka by way of a brod client, these processes can be registered independently from the client process so in the event of the client dropping its connection, the record of what processes should be producing or consuming from what topics and partitions can be immediately reconstructed instead of being dropped. """ use GenServer require Logger @default_delay 5_000 @default_kill_timeout 5_000 def start_link(args) do GenServer.start_link(__MODULE__, args) end def init(args) do Process.flag(:trap_exit, true) state = %{ mfa: Keyword.fetch!(args, :mfa), started: :erlang.system_time(:milli_seconds), delay: Keyword.get(args, :delay, @default_delay), kill_timeout: Keyword.get(args, :kill_timeout, @default_kill_timeout), register: Keyword.get(args, :register, :no_register) } case start(state) do {:ok, pid} -> unless state.register == :no_register do Elsa.Registry.register_name(state.register, pid) end {:ok, Map.put(state, :pid, pid)} {:error, reason} -> Logger.error( "#{__MODULE__}:#{inspect(self())} : wrapped process #{inspect(state.mfa)} failed to init for reason #{ inspect(reason) }" ) Process.sleep(state.delay) {:stop, reason} end end def handle_info({:EXIT, pid, reason}, %{pid: pid, delay: delay, started: started} = state) do lifetime = :erlang.system_time(:milli_seconds) - started max(delay - lifetime, 0) |> Process.sleep() {:stop, reason, state} end def handle_info(message, state) do Logger.info( "#{__MODULE__}:#{inspect(self())} : received invalid message #{inspect(message)}, current state: #{inspect(state)}" ) {:noreply, state} end def terminate(reason, %{pid: pid} = state) do if Process.alive?(pid), do: kill(pid, reason, state.kill_timeout) reason end def terminate(reason, _state), do: reason defp kill(pid, reason, timeout) do Process.exit(pid, reason) receive do {:EXIT, ^pid, _reason} -> :ok after timeout -> Process.exit(pid, :kill) end end defp start(%{mfa: {module, function, args}}) do apply(module, function, args) end end
lib/elsa/wrapper.ex
0.595963
0.415373
wrapper.ex
starcoder
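A sketch of placing the wrapper under an application supervisor; `MyWorker` and its arguments are hypothetical, while `:mfa`, `:delay`, and `:register` are the option keys read by `init/1` above:

children = [
  %{
    id: :wrapped_worker,
    start:
      {Elsa.Wrapper, :start_link,
       [[mfa: {MyWorker, :start_link, [[]]}, delay: 2_000, register: :no_register]]}
  }
]

Supervisor.start_link(children, strategy: :one_for_one)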
defprotocol InvoiceTracker.TimeEntry do @doc """ Returns the time for the entry """ alias Timex.Duration @spec time(t) :: Duration.t() def time(entry) end defmodule InvoiceTracker.TimeSummary do @moduledoc """ A struct that summarizes time entries for an invoice period. """ alias InvoiceTracker.{ProjectTimeSummary, Rounding, TimeEntry} alias Timex.Duration defstruct total: Duration.zero(), projects: [] @type t :: %__MODULE__{total: Duration.t(), projects: [ProjectTimeSummary.t()]} defimpl TimeEntry do def time(summary), do: summary.total end @doc """ Rounds all of the times in the summary to the nearest tenth of an hour. Also reconciles project and detail entries so that, when rounded, they add up to the total (rounded) time. A TimeSummary should be rounded before reporting on it or generating an invoice for it. """ @spec rounded(t) :: t def rounded(summary) do summary |> Map.update!(:total, &Rounding.round_time/1) |> reconcile_projects end defp reconcile_projects(summary) do Map.update!(summary, :projects, &ProjectTimeSummary.reconciled(&1, summary.total)) end end defmodule InvoiceTracker.ProjectTimeSummary do @moduledoc """ A struct that summarizes time entries for a single project for an invoice period. """ alias InvoiceTracker.{Detail, Rounding, TimeEntry} alias Timex.Duration defstruct name: "", time: Duration.zero(), details: [] @type t :: %__MODULE__{ name: String.t(), time: Duration.t(), details: [Detail.t()] } defimpl TimeEntry do def time(summary), do: summary.time end @doc """ Reconciles a list of projects with a rounded total time. Times are rounded to the nearest tenth of an hour and then adjusted so that, when rounded, they add up to the total (rounded) time. Each project's details are also reconciled and rounded in the same way once the projects themselves have been reconciled and rounded. """ @spec reconciled([t], Duration.t()) :: [t] def reconciled(projects, total) do projects |> Rounding.reconcile(total) |> Enum.map(&reconcile_details/1) end defp reconcile_details(project) do Map.update!(project, :details, &Detail.reconciled(&1, project.time)) end end defmodule InvoiceTracker.Detail do @moduledoc """ A struct that represents a project activity detail entry for an invoice period. """ alias InvoiceTracker.{Rounding, TimeEntry} # credo:disable-for-next-line Credo.Check.Consistency.MultiAliasImportRequireUse alias Timex.Duration defstruct activity: "", time: Duration.zero() @type t :: %__MODULE__{activity: String.t(), time: Duration.t()} defimpl TimeEntry do def time(detail), do: detail.time end @doc """ Reconciles a list of detail entries with a rounded total time. Times are rounded to the nearest tenth of an hour and then adjusted so that, when rounded, they add up to the total (rounded) time. """ @spec reconciled([t], Duration.t()) :: [t] def reconciled(details, total), do: Rounding.reconcile(details, total) end
lib/invoice_tracker/time_summary.ex
0.848235
0.491212
time_summary.ex
starcoder
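An illustrative round trip through `rounded/1`; `Timex.Duration.from_minutes/1` is standard Timex, and the exact adjusted values depend on the `Rounding` module, which is not shown here:

alias InvoiceTracker.{TimeSummary, ProjectTimeSummary, Detail}
alias Timex.Duration

summary = %TimeSummary{
  total: Duration.from_minutes(95),
  projects: [
    %ProjectTimeSummary{
      name: "Client A",
      time: Duration.from_minutes(95),
      details: [%Detail{activity: "Coding", time: Duration.from_minutes(95)}]
    }
  ]
}

# 95 minutes rounds to tenths of an hour; project and detail times are
# reconciled so they still add up to the rounded total.
TimeSummary.rounded(summary)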
defmodule Cocktail.Span do @moduledoc """ Struct used to represent a span of time. It is composed of the following fields: * from: the start time of the span * until: the end time of the span When expanding a `t:Cocktail.Schedule.t/0`, if it has a duration it will produce a list of `t:t/0` instead of a list of `t:Cocktail.time/0`. """ @type t :: %__MODULE__{from: Cocktail.time(), until: Cocktail.time()} @type span_compat :: %{:from => Cocktail.time(), :until => Cocktail.time(), optional(atom) => any} @type overlap_mode :: :contains | :is_inside | :is_before | :is_after | :is_equal_to | :overlaps_the_start_of | :overlaps_the_end_of @enforce_keys [:from, :until] defstruct from: nil, until: nil @doc """ Creates a new `t:t/0` from the given start time and end time. ## Examples iex> new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) %Cocktail.Span{from: ~N[2017-01-01 06:00:00], until: ~N[2017-01-01 10:00:00]} """ @spec new(Cocktail.time(), Cocktail.time()) :: t def new(from, until), do: %__MODULE__{from: from, until: until} @doc """ Uses `Timex.compare/2` to determine which span comes first. Compares `from` first, then, if equal, compares `until`. ## Examples iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> span2 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> compare(span1, span2) 0 iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> span2 = new(~N[2017-01-01 07:00:00], ~N[2017-01-01 12:00:00]) ...> compare(span1, span2) -1 iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> span2 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 07:00:00]) ...> compare(span1, span2) 1 """ @spec compare(span_compat, span_compat) :: Timex.Comparable.compare_result() def compare(%{from: t, until: until1}, %{from: t, until: until2}), do: Timex.compare(until1, until2) def compare(%{from: from1}, %{from: from2}), do: Timex.compare(from1, from2) @doc """ Returns an `t:overlap_mode/0` to describe how the first span overlaps the second. ## Examples iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> span2 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> overlap_mode(span1, span2) :is_equal_to iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> span2 = new(~N[2017-01-01 07:00:00], ~N[2017-01-01 09:00:00]) ...> overlap_mode(span1, span2) :contains iex> span1 = new(~N[2017-01-01 07:00:00], ~N[2017-01-01 09:00:00]) ...> span2 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 10:00:00]) ...> overlap_mode(span1, span2) :is_inside iex> span1 = new(~N[2017-01-01 06:00:00], ~N[2017-01-01 07:00:00]) ...> span2 = new(~N[2017-01-01 09:00:00], ~N[2017-01-01 10:00:00]) ...> overlap_mode(span1, span2) :is_before """ @spec overlap_mode(span_compat, span_compat) :: overlap_mode def overlap_mode(%{from: from, until: until}, %{from: from, until: until}), do: :is_equal_to # credo:disable-for-next-line def overlap_mode(%{from: from1, until: until1}, %{from: from2, until: until2}) do from_comp = Timex.compare(from1, from2) until_comp = Timex.compare(until1, until2) cond do from_comp <= 0 && until_comp >= 0 -> :contains from_comp >= 0 && until_comp <= 0 -> :is_inside Timex.compare(until1, from2) <= 0 -> :is_before Timex.compare(from1, until2) >= 0 -> :is_after from_comp < 0 && until_comp < 0 -> :overlaps_the_start_of from_comp > 0 && until_comp > 0 -> :overlaps_the_end_of end end end
lib/cocktail/span.ex
0.890644
0.777891
span.ex
starcoder
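Since `compare/2` returns `-1 | 0 | 1`, a list of spans sorts chronologically with a plain `Enum.sort/2`:

spans = [
  Cocktail.Span.new(~N[2017-01-01 08:00:00], ~N[2017-01-01 09:00:00]),
  Cocktail.Span.new(~N[2017-01-01 06:00:00], ~N[2017-01-01 07:00:00])
]

Enum.sort(spans, &(Cocktail.Span.compare(&1, &2) <= 0))
# the 06:00..07:00 span now comes first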
defmodule BPXE.BPMN.Interpolation do @moduledoc """ This is a generalized approach to interpolation in BPXE/BPMN, allowing to pass runtime-derived values into content and attributes of various nodes. The interpolation syntax is simple: anything enclosed between `${{` and `}}` will be considered an expression. Currently, there is no escaping. If the entire string is one interpolation, the type of the expression will be preserved. Otherwise, it'll be encoded into a string. """ @doc """ Interpolates a string if it has any interpolations in it. If it does, it returns a function that takes one argument, which is a callback that resolves the value of the given expression. If it doesn't, it'll return a string as is. """ def interpolate(string) when is_binary(string) do string = string |> String.trim() if String.contains?(string, "${{") do fn cb -> interpolate(string |> String.to_charlist(), [], cb) end else string end end defp interpolate([?$, ?{, ?{ | rest], acc, cb) do {expr, rest} = expression(rest, []) evaluated = cb.(expr) {evaluated, to_string_f} = case evaluated do {s, f} when is_function(f, 1) -> {s, f} s -> {s, &to_string/1} end case {evaluated, to_string_f} do {result, _} when acc == [] and rest == [] -> result {result, f} -> case f.(result) do {:ok, result} -> interpolate(rest, [result | acc], cb) {:error, reason} -> interpolate(rest, ["(error: #{inspect(reason)})" | acc], cb) result -> interpolate(rest, [result | acc], cb) end end end defp interpolate([c | rest], acc, cb) do interpolate(rest, [c | acc], cb) end defp interpolate([], acc, _cb) do acc |> Enum.reverse() |> :erlang.iolist_to_binary() end defp expression([?}, ?} | rest], collected) do {collected |> Enum.reverse() |> :erlang.iolist_to_binary(), rest} end defp expression([c | rest], collected) do expression(rest, [c | collected]) end end
lib/bpxe/bpmn/interpolation.ex
0.852522
0.729664
interpolation.ex
starcoder
defmodule Numeracy.Polynomial do
  @moduledoc """
  Operations on polynomials
  """
  import Numeracy.Precision

  @typedoc """
  A representation of a polynomial, where the list is the coefficients of the polynomial,
  starting with the constant term
  """
  @type polynomial :: list(number)

  @doc """
  Add two polynomials
  """
  @spec add(polynomial, polynomial) :: polynomial
  def add(first, second) do
    first_length = Enum.count(first)
    second_length = Enum.count(second)
    summed = for {a, b} <- Enum.zip(first, second), do: a + b
    summed ++ Enum.drop(first, second_length) ++ Enum.drop(second, first_length)
  end

  @doc """
  Return the coefficient for the specified degree, returning 0 for undefined degrees
  """
  @spec coefficient_of_degree(polynomial, non_neg_integer) :: number
  def coefficient_of_degree(polynomial, degree), do: Enum.at(polynomial, degree, 0)

  @doc """
  Given a root of the polynomial, deflates it and returns the deflated polynomial
  in an `{:ok, polynomial}` tuple. If the given value is not a root, returns
  `{:error, polynomial}` with the original polynomial.
  """
  @spec deflate(polynomial, number) :: {:error, polynomial} | {:ok, polynomial}
  def deflate(polynomial, root) do
    if is_root(polynomial, root) do
      degree = degree(polynomial)
      {:ok, deflate(polynomial, [], root, coefficient_of_degree(polynomial, degree), degree - 1)}
    else
      {:error, polynomial}
    end
  end

  @spec deflate(polynomial, polynomial, number, number, integer) :: polynomial
  defp deflate(_, result, _, _remainder, degree) when degree < 0, do: result

  defp deflate(target, result, root, remainder, degree) do
    deflate(
      target,
      [remainder | result],
      root,
      remainder * root + coefficient_of_degree(target, degree),
      degree - 1
    )
  end

  @doc """
  Determines whether the given value is a root of the polynomial, to within a specified precision
  """
  @spec is_root(polynomial, number, float) :: boolean
  def is_root(polynomial, root, precision \\ 0.000001) when precision > 0 and precision < 1 do
    polynomial |> value_at(root) |> equal(0, precision)
  end

  @doc """
  Returns the degree of the polynomial
  """
  @spec degree(polynomial) :: non_neg_integer
  def degree(polynomial), do: Enum.count(polynomial) - 1

  @doc """
  Evaluates the polynomial at a specified value
  """
  @spec value_at(polynomial, number) :: number
  def value_at(polynomial, value) do
    degree = degree(polynomial)
    evaluate(polynomial, value, degree, coefficient_of_degree(polynomial, degree))
  end

  @spec evaluate(polynomial, number, integer, number) :: number
  defp evaluate(_, _, degree, answer) when degree == 0, do: answer

  defp evaluate(polynomial, value, degree, answer) when degree > 0 do
    evaluate(polynomial, value, degree - 1, answer * value + coefficient_of_degree(polynomial, degree - 1))
  end
end
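# Illustrative usage sketch, not part of the module above: deflating
# x^3 - 6x^2 + 11x - 6 (roots 1, 2 and 3) by the root 1. Coefficients are
# listed constant-term first, per the polynomial type above. Assumes
# Numeracy.Precision.equal/3 treats an exact zero as equal to 0.
defmodule Numeracy.Polynomial.Example do
  alias Numeracy.Polynomial

  def deflate_cubic do
    poly = [-6, 11, -6, 1]

    # value_at(poly, 1) is 0, so deflation succeeds and yields
    # x^2 - 5x + 6, i.e. [6, -5, 1]:
    {:ok, [6, -5, 1]} = Polynomial.deflate(poly, 1)
  end
end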
lib/numeracy/polynomial.ex
0.94252
0.880797
polynomial.ex
starcoder
defmodule SecureX do
  alias SecureX.Context

  @moduledoc """
  SecureX (an advancement to ACL) is a Role-Based Access Control (RBAC) and
  Access Control List (ACL) library for handling user roles and permissions.
  You can manage the full list of permissions attached to a specific object for
  certain users, or grant limited or full access to a specific module.

  It has 4 basic modules: `SecureX.Roles`, `SecureX.Res`, `SecureX.Permissions`
  and `SecureX.UserRoles`. All modules have CRUD to maintain your RBAC. The
  `SecureX` module has validation for users.

  ## Installation

  If installing from Hex, use the latest version from there:

  ```elixir
  # mix.ex
  def deps do
    [
      {:securex, "~> 1.0.5"}
    ]
  end
  ```

  Now you need to add configuration for `securex` in your `config/config.exs`.
  You need to add your Repo and User schema to the config. If you are using the
  `binary_id` type as your project's default for `@primary_keys`, you can pass
  `type: :binary_id`.

  ```elixir
  # config/config.exs
  config :securex,
    repo: MyApp.Repo, #required
    schema: MyApp.Schema.User, #required
    type: :binary_id #optional
  ```

  SecureX comes with built-in support for apps. Just create the migrations with
  `mix securex.gen.migration`.

  ```elixir
  iex> mix securex.gen.migration
  * creating priv/repo/migrations
  * creating priv/repo/migrations/20211112222439_create_table_roles.exs
  * creating priv/repo/migrations/20211112222440_create_table_resources.exs
  * creating priv/repo/migrations/20211112222441_create_table_permissions.exs
  * creating priv/repo/migrations/20211112222442_create_table_user_roles.exs
  ```

  The migrations are now added to your project.

  ```elixir
  iex> "Do you want to run this migration?"
  iex> mix ecto.migrate
  ```

  You are now up and running!!!

  ## Pagination with Scrivener

  SecureX supports pagination with `Scrivener` & `ScrivenerEcto`. Please read
  the following documentation:

  -> https://hexdocs.pm/scrivener_ecto/readme.html
  -> https://hexdocs.pm/scrivener/readme.html

  Please add this to your project's Repo:

  ```
  use Scrivener, page_size: 10
  ```

  ## Guide

  You can also use SecureX as a middleware. Valid inputs for permissions are
  "POST", "GET", "PUT", "DELETE", "read", "write", "edit" and "delete".
  Permissions flow downward, i.e. if you have defined a permission for a higher
  operation, it automatically grants permission for the lower operations; e.g.
  "edit" also grants "read" and "write". The hierarchy is in this order:

  ```
  "read" < "write" < "edit" < "delete"
  "GET" < "POST" < "PUT" < "DELETE"
  1 < 2 < 3 < 4
  ```

  ## Middlewares

  In a REST API or GraphQL setup, all you have to do is add a `Plug`.

  ## Examples

  ```elixir
  #lib/plugs/securex_plug.ex
  defmodule MyApp.Plugs.SecureXPlug do
    @behaviour Plug

    import Plug.Conn

    def init(default), do: default

    def call(conn, _) do
      with ["Bearer " <> token] <- get_req_header(conn, "authorization"),
           {:ok, claims} <- MyApp.Auth.Guardian.decode_and_verify(token),
           {:ok, user} <- MyApp.Auth.Guardian.resource_from_claims(claims),
           {:ok, %Plug.Conn{}} <- check_permissions(conn, user) do
        conn
      else
        {:error, error} ->
          conn
          |> put_resp_content_type("application/json")
          |> send_resp(403, Jason.encode!(%{errors: error}))
          |> Plug.Conn.halt()

        _ ->
          conn
          |> put_resp_content_type("application/json")
          |> send_resp(403, Jason.encode!(%{errors: ["Permission Denied"]}))
          |> Plug.Conn.halt()
      end
    end

    defp check_permissions(%{method: method, path_info: path_info} = conn, %{id: user_id}) do
      res = List.last(path_info)

      case SecureX.has_access?(user_id, res, method) do
        false -> {:error, false}
        true -> {:ok, conn}
      end
    end

    defp check_permissions(_, _), do: {:error, ["Invalid Request"]}
  end
  ```

  You are all set. Please let us know about any issues by opening an issue at
  https://github.com/DevWasi/secruex/issues.
  Looking forward to it :D.

  Happy coding!!!!!
  """

  @doc """
  Check if user has access.

  ## Examples

      iex> has_access?(1, "users", "write")
      true

      iex> has_access?(1, "Gibberish", "bad_input")
      false
  """
  @spec has_access?(any(), String.t(), any()) :: boolean()
  def has_access?(user_id, res_id, permission)
      when not is_nil(user_id) and not is_nil(res_id) and not is_nil(permission) do
    with value when is_integer(value) <- translate_permission(permission),
         %{id: res_id} <- Context.get_resource(res_id),
         roles <- Context.get_user_roles_by_user_id(user_id),
         %{permission: per} <- Context.get_permission_by(res_id, roles),
         true <- value <= per do
      true
    else
      _ -> false
    end
  end

  defp translate_permission(permission) do
    cond do
      permission in ["GET", "get", "READ", "read", "1", 1] ->
        1

      permission in [
        "GET", "get", "READ", "read", "1", 1,
        "POST", "post", "write", "WRITE", "2", 2
      ] ->
        2

      permission in [
        "GET", "get", "READ", "read", "1", 1,
        "POST", "post", "write", "WRITE", "2", 2,
        "UPDATE", "update", "PUT", "put", "edit", "EDIT", "3", 3
      ] ->
        3

      permission in [
        "GET", "get", "READ", "read", "1", 1,
        "POST", "post", "write", "WRITE", "2", 2,
        "UPDATE", "update", "PUT", "put", "edit", "EDIT", "3", 3,
        "DELETE", "delete", "DROP", "drop", "REMOVE", "remove", "4", 4
      ] ->
        4

      true ->
        nil
    end
  end
end
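# Illustrative sketch, not part of the module above: because permissions flow
# downward, a user who passes the "edit" (level 3) check for a resource also
# passes "write" and "read" for it, but not "delete" (level 4). Results depend
# on the roles and permissions stored in your configured repo.
defmodule SecureX.HierarchyExample do
  def summary(user_id, resource) do
    %{
      read: SecureX.has_access?(user_id, resource, "read"),
      write: SecureX.has_access?(user_id, resource, "write"),
      edit: SecureX.has_access?(user_id, resource, "edit"),
      delete: SecureX.has_access?(user_id, resource, "delete")
    }
  end
end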
lib/securex.ex
0.790369
0.759091
securex.ex
starcoder
defmodule Tournament do @doc """ Given `input` lines representing two teams and whether the first of them won, lost, or reached a draw, separated by semicolons, calculate the statistics for each team's number of games played, won, drawn, lost, and total points for the season, and return a nicely-formatted string table. A win earns a team 3 points, a draw earns 1 point, and a loss earns nothing. Order the outcome by most total points for the season, and settle ties by listing the teams in alphabetical order. """ @spec tally(input :: list(String.t())) :: String.t() def tally(input) do tally(%{}, input) |> Enum.sort(&compare_record/2) |> report() end defp tally(table, []), do: table defp tally(table, [head | tail]) do case Regex.run(~R/^([[:alnum:] ]+);([[:alnum:] ]+);(win|draw|loss)$/u, head) do [_match, lhs_team, rhs_team, result] -> update_table(table, lhs_team, result) |> update_table(rhs_team, toggle_result(result)) |> tally(tail) _ -> tally(table, tail) end end defp update_table(table, team, result) do Map.put_new(table, team, {0, 0, 0, 0, 0}) |> Map.update!(team, &update_record(&1, result)) end defp toggle_result("win"), do: "loss" defp toggle_result("draw"), do: "draw" defp toggle_result("loss"), do: "win" defp update_record({mp, w, d, l, p}, "win"), do: {mp + 1, w + 1, d, l, p + 3} defp update_record({mp, w, d, l, p}, "draw"), do: {mp + 1, w, d + 1, l, p + 1} defp update_record({mp, w, d, l, p}, "loss"), do: {mp + 1, w, d, l + 1, p} defp compare_record({lhs_team, {_, _, _, _, lhs_point}}, {rhs_team, {_, _, _, _, rhs_point}}) do case {lhs_point, rhs_point} do {point, point} -> lhs_team <= rhs_team _ -> lhs_point >= rhs_point end end defp report(table) do [ "Team | MP | W | D | L | P" | Enum.map(table, &interpolate/1) ] |> Enum.join("\n") end defp interpolate({team, {mp, w, d, l, p}}) do [ String.pad_trailing(team, 30), " | ", pad_number(mp), " | ", pad_number(w), " | ", pad_number(d), " | ", pad_number(l), " | ", pad_number(p) ] end defp pad_number(number), do: to_string(number) |> String.pad_leading(2) end
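# Illustrative usage sketch, not part of the module above: tallying a tiny
# season. Lines that don't match the expected `team;team;result` shape are
# silently skipped by tally/1.
defmodule Tournament.Example do
  def demo do
    [
      "Allegoric Alaskans;Blithering Badgers;win",
      "Blithering Badgers;Courageous Californians;draw",
      "not a valid line"
    ]
    |> Tournament.tally()
    # Allegoric Alaskans lead with 3 points; the draw gives the other
    # two teams 1 point each, with ties broken alphabetically.
    |> IO.puts()
  end
end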
tournament/lib/tournament.ex
0.846451
0.614741
tournament.ex
starcoder
defmodule PlugBodyDigest do @moduledoc """ Plug to verify the request body against the digest value sent in the HTTP 'Digest' header, as defined in [RFC3230, section 4.3.2](https://tools.ietf.org/html/rfc3230#section-4.3.2). Supported digests are "sha-512", "sha-256" and "sha". ## Options * `:on_success` - an optional callback for updating the `Plug.Conn` state upon success; possible values include: * `nil` (the default) - do nothing * `{m, f, a}` - call the function identified by the atom `f` in module `m`; the function receives the current `Plug.Conn` struct along with any additional parameters in the list `a`, and is expected to return the updated `Plug.Conn` struct; see the example below * `:on_failure` - an optional callback for updating the `Plug.Conn` state upon failure; possible values include: * `{PlugBodyDigest, :failure, []}` (the default) - halt the connection with an appropriate response; see `failure/3` below * `{PlugBodyDigest, :optional, []}` - make the 'Digest' header optional; see `optional/3` below * `{m, f, a}` - call the function identified by the atom `f` in module `m`; the function receives the current `Plug.Conn` struct, the error reason (see `t:error_reason/0`) and the algorithm list (a string, for possible use in a 'Want-Digest' response header) along with any additional parameters in the list `a`, and is expected to return the updated `Plug.Conn` struct * `nil` - do nothing ## Example # Update the Plug.Parsers configuration, adding the `:body_reader` # option: plug Plug.Parsers, parsers: [:urlencoded, :json], body_reader: {PlugBodyDigest, :digest_body_reader, []}, json_decoder: Jason # Invoke PlugBodyDigest after Plug.Parsers plug PlugBodyDigest, on_success: {Plug.Conn, :assign, [:valid_digest, true]}, on_failure: {PlugBodyDigest, :optional, []} """ import Plug.Conn require Logger alias PlugBodyDigest.Crypto @behaviour Plug @algorithms [:sha512, :sha256, :sha] @default_on_success nil @default_on_failure {__MODULE__, :failure, []} @typedoc """ Error reasons, passed to the failure callback. Server errors: * `:body_not_read` - the request body was not read, because the request's 'Content-Type' is not handled by `Plug.Parsers`; see `digest_body_reader/3` * `:multipart` - the request contained a multipart content-type, which is not supported by `PlugBodyDigest`; see `digest_body_reader/3` * `:bad_algorithm` - the digest function invocation failed for the selected algorithm; verify that the `:crypto` application was started and that it supports the necessary algorithms Client errors: * `:no_digest_header` - no 'Digest' header was included in the request * `:algorithm_mismatch` - none of the supported digest algorithms was included in the 'Digest' request header * `:malformed_digest_value` - the digest value in the 'Digest' request header could not be decoded * `:digest_mismatch` - the calculated digest value for the request body does not match the expected value specified in the 'Digest' request header """ @type error_reason :: :body_not_read | :multipart | :bad_algorithm | :no_digest_header | :algorithm_mismatch | :malformed_digest_value | :digest_mismatch @impl true @spec init(Keyword.t()) :: Keyword.t() def init(opts), do: opts @impl true @spec call(Plug.Conn.t(), Keyword.t()) :: Plug.Conn.t() def call(conn, opts) do # The `algorithms` option is currently undocumented: it is a bit awkward to # use, since the configuration of the Plug needs to match that of the # `body_reader` function... 
algorithms = Keyword.get(opts, :algorithms, @algorithms) case verify(conn, algorithms) do :ok -> opts |> Keyword.get(:on_success, @default_on_success) |> on_success(conn) {:error, reason} -> want_digest = algorithms |> Enum.map(&Crypto.algorithm_name/1) |> Enum.join(",") opts |> Keyword.get(:on_failure, @default_on_failure) |> on_failure(conn, reason, want_digest) end end defp verify(%{private: %{plug_body_digest: {:error, _reason} = error}}, _algorithms) do error end defp verify(%{private: %{plug_body_digest: state}}, _algorithms) do Crypto.verify(state) end defp verify(%{body_params: %Plug.Conn.Unfetched{aspect: :body_params}}, _algorithms) do {:error, :body_not_read} end defp verify(%{private: %{plug_multipart: :done}}, _algorithms) do {:error, :multipart} end defp verify(%{body_params: empty} = conn, algorithms) when empty == %{} do conn |> update_digest("", algorithms: algorithms) |> verify(algorithms) end defp on_success(nil, conn), do: conn defp on_success({m, f, a}, conn), do: apply(m, f, [conn | a]) defp on_success(fun, conn) when is_function(fun, 1), do: fun.(conn) defp on_failure(nil, conn, _reason, _want_digest), do: conn defp on_failure({m, f, a}, conn, reason, want_digest), do: apply(m, f, [conn, reason, want_digest | a]) defp on_failure(fun, conn, reason, want_digest) when is_function(fun, 3), do: fun.(conn, reason, want_digest) defp on_failure(fun, conn, reason, _want_digest) when is_function(fun, 2), do: fun.(conn, reason) @doc """ The default failure function. It logs an error, returns a 500 'Server Error' response and halts the connection in the following scenarios: * If the request body was not read, because the request's 'Content-Type' is not handled by `Plug.Parsers`; see `digest_body_reader/3` * If the digest function invocation failed for the selected algorithm Otherwise logs the failure, returns a 403 'Forbidden' response with a 'Want-Digest' response header listing the supported algorithms, and halts the connection. """ @spec failure(Plug.Conn.t(), error_reason(), String.t()) :: Plug.Conn.t() def failure(conn, :body_not_read, _want_digest) do Logger.error("Cannot verify digest: content type not handled by Plug.Parsers") conn |> send_resp(500, "") |> halt() end def failure(conn, :multipart, _want_digest) do Logger.error("Cannot verify digest: multipart content types are not supported") conn |> send_resp(500, "") |> halt() end def failure(conn, :bad_algorithm, want_digest) do Logger.error("Invalid algorithm configuration: #{want_digest}") conn |> send_resp(500, "") |> halt() end def failure(conn, reason, want_digest) do Logger.info("Digest failure: #{reason}") conn |> put_resp_header("want-digest", want_digest) |> send_resp(403, "") |> halt() end @doc """ An alternative failure handler function, allowing requests without a 'Digest' request header. All other errors are handled as described for `failure/3`. """ @spec optional(Plug.Conn.t(), error_reason(), String.t()) :: Plug.Conn.t() def optional(conn, :no_digest_header, _want_digest), do: conn def optional(conn, reason, want_digest), do: failure(conn, reason, want_digest) @doc """ Custom request body reader for `Plug.Parsers`, updating the digest value(s) while the request body is being read. Add or update `Plug.Parsers` (e.g. 
in the application's Phoenix endpoint) with the `:body_reader` option: plug Plug.Parsers, parsers: [:urlencoded, :json], body_reader: {PlugBodyDigest, :digest_body_reader, []}, json_decoder: Jason Only works for parsers that respect the `:body_reader` option, including `Plug.Parsers.URLENCODED` and `Plug.Parsers.JSON`. Not supported are `Plug.Parsers.MULTIPART` and content types that are ignored by `Plug.Parsers` through the `:pass` option. """ @spec digest_body_reader(Plug.Conn.t(), Keyword.t(), Keyword.t()) :: {:ok, binary(), Plug.Conn.t()} | {:more, binary(), Plug.Conn.t()} | {:error, term()} def digest_body_reader(conn, read_opts, digest_opts \\ []) do case Plug.Conn.read_body(conn, read_opts) do {status, body, conn} -> {status, body, update_digest(conn, body, digest_opts)} error -> error end end # Error condition, no need to do anything defp update_digest(%{private: %{plug_body_digest: {:error, _}}} = conn, _data, _opts), do: conn # This is not the first pass: update with new data defp update_digest(%{private: %{plug_body_digest: state}} = conn, data, _opts) do put_private(conn, :plug_body_digest, Crypto.update(state, data)) end # First pass, look for Digest header and select one algorithm defp update_digest(conn, data, opts) do algorithms = Keyword.get(opts, :algorithms, @algorithms) with {:ok, digest_header} <- get_digest_header(conn), {:ok, algorithm, expected} <- select_algorithm(digest_header, algorithms), {:ok, initial} <- Crypto.init(algorithm, expected) do put_private(conn, :plug_body_digest, Crypto.update(initial, data)) else error -> put_private(conn, :plug_body_digest, error) end end defp get_digest_header(conn) do case get_req_header(conn, "digest") do [] -> {:error, :no_digest_header} digest_headers -> {:ok, parse_digest(digest_headers)} end end defp parse_digest(digest_headers) do digest_headers |> Enum.flat_map(&:binary.split(&1, ",", [:global])) |> Enum.map(&String.trim/1) |> Enum.map(fn instance -> case :binary.split(instance, "=") do [algorithm, digest] -> {String.downcase(algorithm), digest} _otherwise -> nil end end) |> Enum.into(%{}) end defp select_algorithm(_digests, []), do: {:error, :algorithm_mismatch} defp select_algorithm(digests, [algorithm | more]) do case Map.get(digests, Crypto.algorithm_name(algorithm)) do nil -> select_algorithm(digests, more) expected_b64 -> case Base.decode64(expected_b64) do :error -> {:error, :malformed_digest_value} {:ok, expected} -> {:ok, algorithm, expected} end end end end
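# Illustrative sketch, not part of the module above: a custom :on_failure
# callback that records the failure reason on the connection instead of
# halting. `MyApp.DigestFailureHandler` is a hypothetical module name.
defmodule MyApp.DigestFailureHandler do
  @doc """
  Matches the 3-arity callback shape expected by the :on_failure option:
  it receives the conn, the error reason and the want-digest string.
  """
  def record(conn, reason, _want_digest) do
    Plug.Conn.assign(conn, :digest_failure, reason)
  end
end

# Hypothetical pipeline usage:
#
#     plug PlugBodyDigest, on_failure: {MyApp.DigestFailureHandler, :record, []}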
lib/plug_body_digest.ex
0.878092
0.786848
plug_body_digest.ex
starcoder
defmodule RssWatcher do
  @moduledoc """
  A small worker that watches a single RSS feed, parses the changes, and dispatches updates.

  ## Installation

  ### Dependencies

  Add the following to your dependencies:

  ```elixir
  {:rss_watcher, "~> 0.1.0"}
  ```

  For _easy mode_, you can use the default adapters to fetch and parse RSS feeds.
  Just add the following to your dependencies, and you should be good to go.

  ``` elixir
  {:tesla, "~> 1.2.1"}, # For HTTP requests
  {:fiet, "~> 0.2.1"}, # For RSS parsing
  {:timex, "~> 3.0"}, # For timestamp parsing
  ```

  And add `Timex` to your list of applications.

  ``` elixir
  extra_applications: [ ..., :timex]
  ```

  ### Adapters

  `RssWatcher.HTTP.Tesla` is provided by default. To use it, add the following
  dependencies to your dependency list. See that module's documentation for
  configuration around middleware and additional adapter options.

  ``` elixir
  {:tesla, "~> 1.2.1"}
  ```

  For RSS parsing, `RssWatcher.Feed.Fiet` is provided by default, and handles
  parsing XML and timestamps. To use it, add the following dependencies to your
  dependency list.

  ``` elixir
  {:fiet, "~> 0.2.1"},
  {:timex, "~> 3.0"}
  ```

  And add timex to your list of applications.

  ``` elixir
  extra_applications: [ ... :timex]
  ```
  """
  @moduledoc since: "0.1.0"

  use GenServer
  alias RssWatcher.{Subscription}
  require Logger

  @type url :: String.t()
  @type callback :: {module, atom, [any]} | function
  @type options :: [refresh_interval: integer]

  @doc since: "0.1.0"
  @doc """
  RssWatcher is a worker, so the recommended usage is to add it as a child to your supervisor.

  ### API Example

  ``` elixir
  children = [
    {RssWatcher,
      url: "http://example.com/rss",
      callback: {Notifications, :broadcast, ["#channel_id"]}
    }
  ]

  Supervisor.start_link(children, strategy: :one_for_one)
  ```

  Or, with a dynamic supervisor:

  ``` elixir
  children = [
    {DynamicSupervisor, strategy: :one_for_one, name: MyApp.RssSupervisor}
  ]

  Supervisor.start_link(children, strategy: :one_for_one)

  ...

  DynamicSupervisor.start_child(
    MyApp.RssSupervisor,
    {
      RssWatcher,
      url: "http://example.com/rss",
      callback: {Notifications, :broadcast, ["#channel_id"]}
    }
  )
  ```

  Each `RssWatcher` worker requires at least a _url_ and a _callback_.
  Additional configuration can be provided to use alternate adapters.

  ### Url (required)

  The url should be a string, which resolves to an RSS feed.

  ### Callback (required)

  The callback can be in the form of `{module, function, arguments}`, or an
  anonymous/captured function. If in `{module, function, arguments}` format,
  the callback will be dispatched with an additional argument - the parsed XML.
  Otherwise, the parsed XML will be the only argument provided. See below for
  examples.

  ### Additional Configuration

  The configuration is provided as a keyword list. The available options (and
  their defaults) are listed below:

  - `:refresh_interval` - integer. How often the feed is checked, in seconds. Defaults to `60`.
  - `:rss_parser` - Atom/RSS 2.0 parser module. Defaults to `RssWatcher.Feed.Fiet`,
  - `:rss_parser_options`- options for the above parser. Defaults to `[]`,
  - `:http_client` - HTTP client for fetching updates. Defaults to `RssWatcher.HTTP.Tesla`,
  - `:http_client_options` - options for the above client. Defaults to `[]`. See adapter module for configuration options.

  ### Examples

  ``` elixir
  {RssWatcher,
    url: "http://example.com/rss",
    callback: {Notifications, :broadcast, ["#channel_id"]},
    refresh_interval: 60
  }

  {RssWatcher,
    url: "http://example.com/rss",
    callback: fn xml -> Notifications.broadcast(xml) end,
    refresh_interval: 60
  }

  {RssWatcher,
    url: "http://example.com/rss",
    callback: &Notifications.broadcast/1,
    refresh_interval: 60
  }
  ```
  """
  @spec start_link(Keyword.t()) :: :ignore | {:error, any} | {:ok, pid}
  def start_link(options) do
    with {url, options} when not is_nil(url) <- Keyword.pop(options, :url),
         {fun, options} when not is_nil(fun) <- Keyword.pop(options, :callback) do
      state = %{subscription: Subscription.new(url, fun, options)}
      # No `:name` is registered here, so several watchers (one per feed)
      # can run under the same supervisor without clashing.
      GenServer.start_link(__MODULE__, state)
    else
      _ ->
        raise ArgumentError,
          message:
            ":url and :callback options are required. Provided options: #{inspect(options)}"
    end
  end

  @impl true
  @doc false
  def init(state) do
    schedule_poll(state.subscription.refresh_interval * 1000)
    {:ok, state}
  end

  @impl true
  @doc false
  def handle_info(:poll, state) do
    subscription =
      case Subscription.find_updates(state.subscription) do
        {:ok, updated_subscription} ->
          Subscription.dispatch_pending(updated_subscription)

        {:error, reason} ->
          Logger.warn(fn ->
            "Unable to find updates for #{state.subscription.url}: #{inspect(reason)}"
          end)

          state.subscription
      end

    schedule_poll(subscription.refresh_interval * 1000)
    {:noreply, %{state | subscription: subscription}}
  end

  defp schedule_poll(time), do: Process.send_after(self(), :poll, time)
end
lib/rss_watcher.ex
0.905723
0.883387
rss_watcher.ex
starcoder
defmodule Goth.Client do alias Goth.Config alias Goth.Token @moduledoc """ `Goth.Client` is the module through which all interaction with Google's APIs flows. For the most part, you probably don't want to use this module directly, but instead use the other modules that cache and wrap the underlying API calls. ## Available Options The first parameter is either the token scopes or a tuple of the service account client email and its scopes. Additional token attributes are controlled through options. Available values: - `iat` - The time the assertion was issued, default to now. - `sub` - The email address of the user for which the application is requesting delegated access. Default values is taken from the config `:actor_email`. See [Google's Documentation](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#authorizingrequests) for more details. """ @doc """ *Note:* Most often, you'll want to use `Goth.Token.for_scope/1` instead of this method. As the docs for `Goth.Token.for_scope/1` note, it will return a cached token if one already exists, thus saving you the cost of a round-trip to the server to generate a new token. `Goth.Client.get_access_token/1`, on the other hand will always hit the server to retrieve a new token. """ def get_access_token(scope), do: get_access_token({:default, scope}, []) def get_access_token(scope, opts) when is_binary(scope) and is_list(opts) do get_access_token({:default, scope}, opts) end def get_access_token({account, scope}, opts) when is_binary(scope) and is_list(opts) do {:ok, token_source} = Config.get(account, :token_source) get_access_token(token_source, {account, scope}, opts) end @doc false def get_access_token(source, info, opts \\ []) # Fetch an access token from Google's metadata service for applications running # on Google's Cloud platform. 
def get_access_token(type, scope, opts) when is_atom(type) and is_binary(scope) do get_access_token(type, {:default, scope}, opts) end def get_access_token(:metadata, {service_account, scope}, _opts) do headers = [{"Metadata-Flavor", "Google"}] account = Application.get_env(:goth, :metadata_account, "default") metadata = Application.get_env(:goth, :metadata_url, "http://metadata.google.internal") endpoint = "computeMetadata/v1/instance/service-accounts" url_base = "#{metadata}/#{endpoint}/#{account}" url = "#{url_base}/token" {:ok, token} = HTTPoison.get(url, headers) {:ok, Token.from_response_json({service_account, scope}, token.body)} end # Fetch an access token from Google's OAuth service using a JWT def get_access_token(:oauth_jwt, {account, scope}, opts) do %{sub: sub} = destruct_opts(opts) endpoint = Application.get_env(:goth, :endpoint, "https://www.googleapis.com") url = "#{endpoint}/oauth2/v4/token" body = {:form, [ grant_type: "urn:ietf:params:oauth:grant-type:jwt-bearer", assertion: jwt({account, scope}, opts) ]} headers = [{"Content-Type", "application/x-www-form-urlencoded"}] {:ok, response} = HTTPoison.post(url, body, headers) if response.status_code >= 200 && response.status_code < 300 do {:ok, Token.from_response_json({account, scope}, sub, response.body)} else {:error, "Could not retrieve token, response: #{response.body}"} end end # Fetch an access token from Google's OAuth service using a refresh token def get_access_token(:oauth_refresh, {account, scope}, _opts) do {:ok, refresh_token} = Config.get(:refresh_token) {:ok, client_id} = Config.get(:client_id) {:ok, client_secret} = Config.get(:client_secret) endpoint = Application.get_env(:goth, :endpoint, "https://www.googleapis.com") url = "#{endpoint}/oauth2/v4/token" body = {:form, [ grant_type: "refresh_token", refresh_token: refresh_token, client_id: client_id, client_secret: client_secret ]} headers = [{"Content-Type", "application/x-www-form-urlencoded"}] {:ok, response} = HTTPoison.post(url, body, headers) if response.status_code >= 200 && response.status_code < 300 do {:ok, Token.from_response_json({account, scope}, response.body)} else {:error, "Could not retrieve token, response: #{response.body}"} end end def claims(scope, opts \\ []) def claims(scope, iat) when is_integer(iat), do: claims(scope, iat: iat) def claims(scope, opts) when is_binary(scope), do: claims({:default, scope}, opts) def claims({account, scope}, opts) when is_list(opts) do %{iat: iat, sub: sub} = destruct_opts(opts) {:ok, email} = Config.get(account, :client_email) c = %{ "iss" => email, "scope" => scope, "aud" => "https://www.googleapis.com/oauth2/v4/token", "iat" => iat, "exp" => iat + 10 } if sub do Map.put(c, "sub", sub) else c end end def json(scope, opts \\ []) def json(scope, iat) when is_integer(iat), do: json(scope, iat: iat) def json(scope, opts) when is_binary(scope), do: json({:default, scope}, opts) def json({account, scope}, opts) when is_list(opts) do claims({account, scope}, opts) |> Poison.encode!() end def jwt(info, opts \\ []) def jwt(scope, iat) when is_integer(iat), do: jwt(scope, iat: iat) def jwt(scope, opts) when is_binary(scope), do: jwt({:default, scope}, opts) def jwt({account, scope}, opts) when is_list(opts) do {:ok, key} = Config.get(account, :private_key) claims({account, scope}, opts) |> JsonWebToken.sign(%{alg: "RS256", key: JsonWebToken.Algorithm.RsaUtil.private_key(key)}) end @doc "Retrieves the project ID from Google's metadata service" def retrieve_metadata_project do headers = [{"Metadata-Flavor", 
"Google"}] endpoint = "computeMetadata/v1/project/project-id" metadata = Application.get_env(:goth, :metadata_url, "http://metadata.google.internal") url = "#{metadata}/#{endpoint}" HTTPoison.get!(url, headers).body end defp destruct_opts(opts) do defaults = [ iat: :os.system_time(:seconds), sub: case Config.get(:actor_email) do {:ok, sub} -> sub _ -> nil end ] defaults |> Keyword.merge(opts) |> Enum.into(%{}) end end
lib/goth/client.ex
0.77137
0.417271
client.ex
starcoder
if Code.ensure_loaded?(Sidewalk) do defmodule ActiveJorb.QueueAdapter.Sidekiq do @moduledoc """ Uses the [sidewalk](https://hex.pm/packages/sidewalk) library to enqueue jobs with Sidekiq. ## Example ``` iex> job = %ActiveJorb.Job{job_class: "MyJob", arguments: [1, 2, 3], queue_name: "high"} iex> ActiveJorb.QueueAdapter.Sidekiq.enqueue(job) {:ok, "some-job-id"} ``` """ @behaviour ActiveJorb.QueueAdapter @doc """ Used to normalize an `%ActiveJorb.Job{}` into a `%Sidewalk.Job{}`. You shouldn't use this directly. """ @spec normalize(ActiveJorb.Job.t()) :: Sidewalk.Job.t() def normalize(job) do %Sidewalk.Job{ class: "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper", wrapped: job.job_class, queue: job.queue_name, args: [job] } end @spec add_job_id(ActiveJorb.Job.t()) :: ActiveJorb.Job.t() defp add_job_id(job) do %{job | job_id: UUID.uuid4()} end @doc """ Enqueues a job at some future date. """ @spec enqueue_at(ActiveJorb.Job.t(), NaiveDateTime.t()) :: ActiveJorb.QueueAdapter.response() def enqueue_at(job = %ActiveJorb.Job{}, timestamp = %NaiveDateTime{}) do normalized_timestamp = normalize_timestamp(timestamp) job |> add_job_id() |> normalize() |> Sidewalk.Client.enqueue_at(normalized_timestamp) end def enqueue_at(_, _) do {:error, "you must provide both an %ActiveJorb.Job{} and a NaiveDateTime."} end @spec normalize_timestamp(NaiveDateTime.t()) :: float() defp normalize_timestamp(timestamp) do ts = timestamp |> DateTime.from_naive!("Etc/UTC") |> DateTime.to_unix() ts / 1 end @doc """ Enqueues a job that will be executed immediately. """ @spec enqueue(ActiveJorb.Job.t()) :: ActiveJorb.QueueAdapter.response() def enqueue(job = %ActiveJorb.Job{}) do job |> add_job_id() |> normalize() |> Sidewalk.Client.enqueue() end def enqueue(_) do {:error, "you must provide an %ActiveJorb.Job{}"} end end end
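# Illustrative usage sketch, not part of the module above: scheduling a job
# one hour from now. The job fields mirror the %ActiveJorb.Job{} example in
# the moduledoc; "MyJob" and the queue name are placeholders.
defmodule ActiveJorb.QueueAdapter.Sidekiq.Example do
  def schedule_in_an_hour do
    job = %ActiveJorb.Job{job_class: "MyJob", arguments: [1, 2, 3], queue_name: "default"}
    run_at = NaiveDateTime.add(NaiveDateTime.utc_now(), 3600, :second)

    # Returns {:ok, job_id} on success, per the adapter's moduledoc.
    ActiveJorb.QueueAdapter.Sidekiq.enqueue_at(job, run_at)
  end
end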
lib/active_jorb/queue_adapter/sidekiq.ex
0.809427
0.657195
sidekiq.ex
starcoder
defmodule GenSpoxy.Cache do
  @moduledoc """
  This behaviour is responsible for implementing a caching layer on top of the prerender.
  """

  defmacro __using__(opts) do
    quote bind_quoted: [opts: opts] do
      alias Spoxy.Cache
      alias GenSpoxy.Stores.Ets

      @behaviour Spoxy.Cache.Behaviour

      @store_module Keyword.get(opts, :store_module, Ets)
      @prerender_module Keyword.get(opts, :prerender_module)

      cache_module = __MODULE__
      tasks_executor_mod = String.to_atom("#{cache_module}.TasksExecutor")
      @tasks_executor_mod tasks_executor_mod

      config = Keyword.get(opts, :config, [])
      executor_opts = Keyword.merge(config, cache_module: __MODULE__)

      defmodule @tasks_executor_mod do
        use GenSpoxy.Prerender.PeriodicTasksExecutor, executor_opts
      end

      tasks_executor_sup_mod = String.to_atom("#{tasks_executor_mod}.Supervisor")

      defmodule tasks_executor_sup_mod do
        use GenSpoxy.Prerender.Supervisor, supervised_module: tasks_executor_mod
      end

      def async_get_or_fetch(req, opts \\ []) do
        req_key = calc_req_key(req)
        mods = {@prerender_module, @store_module, @tasks_executor_mod}
        Cache.async_get_or_fetch(mods, req, req_key, opts)
      end

      def get_or_fetch(req, opts \\ []) do
        req_key = calc_req_key(req)
        mods = {@prerender_module, @store_module, @tasks_executor_mod}
        Cache.get_or_fetch(mods, req, req_key, opts)
      end

      @doc """
      receives a request `req`, determines its signature (a.k.a `req_key`),
      then it fetches the local cache. it returns `nil` in case there is nothing in cache.
      if the cache is empty or the data is stale, a background fetch task is issued
      """
      def get_and_trigger_async_fetch(req, opts \\ []) do
        req_key = calc_req_key(req)
        mods = {@prerender_module, @store_module, @tasks_executor_mod}
        Cache.get_and_trigger_async_fetch(mods, req, req_key, opts)
      end

      @doc """
      receives a request `req`, determines its signature (a.k.a `req_key`),
      then it fetches the local cache. it returns `nil` in case there is nothing in cache,
      else returns the cached entry
      """
      def get(req, opts \\ []) do
        req_key = calc_req_key(req)
        Cache.get(@store_module, req_key, opts)
      end

      def refresh_req!(req, opts) do
        req_key = calc_req_key(req)
        mods = {@prerender_module, @store_module}
        Cache.refresh_req!(mods, req, req_key, opts)
      end

      def await(task) do
        {:ok, _resp, _total} = Task.await(task)
      end

      def do_req(req) do
        Cache.do_req(@prerender_module, req)
      end

      def store_req!(opts) do
        Cache.store_req!(@store_module, opts)
      end

      def lookup_req(table_name, req_key) do
        Cache.lookup_req(@store_module, table_name, req_key)
      end

      @impl true
      def should_invalidate?(req, resp, metadata) do
        Cache.should_invalidate?(req, resp, metadata)
      end

      # defoverridable [should_invalidate?: 3]

      defp calc_req_key(req) do
        apply(@prerender_module, :calc_req_key, [req])
      end
    end
  end
end
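# Illustrative sketch, not part of the module above: defining a concrete cache
# with the __using__ macro. `MyApp.PagePrerender` is a placeholder for a module
# implementing the prerender behaviour expected via :prerender_module.
defmodule MyApp.PageCache do
  use GenSpoxy.Cache, prerender_module: MyApp.PagePrerender
end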
lib/cache/gen_cache.ex
0.826151
0.427217
gen_cache.ex
starcoder
defmodule Timber.Plug.HTTPContext do
  @moduledoc """
  Automatically captures the HTTP method, path, and request_id in
  Plug-based frameworks like Phoenix and adds it to the context.

  By adding this data to the context, you'll be able to associate
  all the log statements that occur while processing that HTTP request.

  ## Adding the Plug

  `Timber.Plug.HTTPContext` can be added to your plug pipeline using the
  standard `Plug.Builder.plug/2` macro. The point at which you place it
  determines what state Timber will receive the connection in, therefore
  it's recommended you place it as close to the origin of the request as
  possible.

  ### Plug (Standalone or Plug.Router)

  If you are using Plug without a framework, your setup will vary depending
  on your architecture. The call to `plug Timber.Plug.HTTPContext` should be
  grouped with any other plugs you call prior to performing business logic.

  Timber expects query parameters to have already been fetched on the
  connection using `Plug.Conn.fetch_query_params/2`.

  ### Phoenix

  Phoenix's flexibility means there are multiple points in the plug pipeline
  where the `Timber.Plug.HTTPContext` can be inserted. The recommended place
  is in `endpoint.ex`. Make sure that you insert this plug immediately before
  your `Router` plug.

  ## Request ID

  Timber does its best to track the request ID for every HTTP request
  in order to help you filter your logs easily.

  If you are calling the `Plug.RequestId` plug in your pipeline, you
  should make sure that `Timber.Plug.HTTPContext` appears _after_ that
  plug so that it can pick up the correct ID.

  By default, Timber expects your request ID to be stored using the
  header name "X-Request-ID" (casing irrelevant), but that may not
  fit all needs. If you use a custom header name for your request ID,
  you can pass that name as an option to the plug:

  ```
  plug Timber.Plug.HTTPContext, request_id_header: "req-id"
  ```
  """

  @behaviour Plug

  alias Timber.Contexts.HTTPContext

  @doc false
  @impl true
  def init(opts) do
    opts
  end

  @doc false
  @impl true
  def call(%{method: method, request_path: request_path} = conn, opts) do
    request_id_header = Keyword.get(opts, :request_id_header, "x-request-id")
    remote_addr = Timber.Plug.get_client_ip(conn)

    request_id =
      case Timber.Plug.get_request_id(conn, request_id_header) do
        [{_, request_id}] -> request_id
        [] -> nil
      end

    %HTTPContext{
      method: method,
      path: request_path,
      request_id: request_id,
      remote_addr: remote_addr
    }
    |> Timber.add_context()

    conn
  end
end
lib/timber_plug/http_context.ex
0.851814
0.651985
http_context.ex
starcoder
defprotocol RDF.Data do
  @moduledoc """
  An abstraction over the different data structures for collections of RDF statements.
  """

  @type t :: RDF.Description.t() | RDF.Graph.t() | RDF.Dataset.t()

  @doc """
  Adds statements to an RDF data structure.

  As opposed to the specific `add` functions on the RDF data structures, which
  always return the same structure type as the first argument, `merge` might
  result in another RDF data structure, e.g. merging two `RDF.Description`s
  with different subjects results in a `RDF.Graph`, and adding a quad to a
  `RDF.Graph` with a different name than the graph context of the quad results
  in a `RDF.Dataset`. But it is always guaranteed that the resulting structure
  has a `RDF.Data` implementation.
  """
  def merge(data, input, opts \\ [])

  @doc """
  Deletes statements from an RDF data structure.

  As opposed to the `delete` functions on RDF data structures directly, this
  function only deletes exactly matching structures.
  """
  def delete(data, input, opts \\ [])

  @doc """
  Deletes one statement from an RDF data structure and returns a tuple with the
  deleted statement and the changed data structure.
  """
  def pop(data)

  @doc """
  Checks if the given statement exists within an RDF data structure.
  """
  def include?(data, input, opts \\ [])

  @doc """
  Checks if an RDF data structure contains statements about the given resource.
  """
  def describes?(data, subject)

  @doc """
  Returns a `RDF.Description` of the given subject.

  Note: On a `RDF.Dataset` this will return an aggregated `RDF.Description`
  with the statements about this subject from all graphs.
  """
  def description(data, subject)

  @doc """
  Returns all `RDF.Description`s within an RDF data structure.

  Note: On a `RDF.Dataset` this will return aggregated `RDF.Description`s
  about the same subject from all graphs.
  """
  def descriptions(data)

  @doc """
  Returns the list of all statements of an RDF data structure.
  """
  def statements(data)

  @doc """
  Returns the set of all resources which are subject of the statements of an RDF data structure.
  """
  def subjects(data)

  @doc """
  Returns the set of all properties used within the statements of an RDF data structure.
  """
  def predicates(data)

  @doc """
  Returns the set of all resources used in the objects within the statements of an RDF data structure.
  """
  def objects(data)

  @doc """
  Returns the set of all resources used within the statements of an RDF data structure.
  """
  def resources(data)

  @doc """
  Returns the count of all resources which are subject of the statements of an RDF data structure.
  """
  def subject_count(data)

  @doc """
  Returns the count of all statements of an RDF data structure.
  """
  def statement_count(data)

  @doc """
  Returns a nested map of the native Elixir values of an RDF data structure.

  When a `:context` option is given with a `RDF.PropertyMap`, predicates will
  be mapped to the terms defined in the `RDF.PropertyMap`, if present.
  """
  def values(data, opts \\ [])

  @doc """
  Returns a map representation of an RDF data structure where each element from
  its statements is mapped with the given function.
  """
  def map(data, fun)

  @doc """
  Checks if two RDF data structures are equal.

  Two RDF data structures are considered to be equal if they contain the same triples.
- comparing two `RDF.Description`s it's just the same as `RDF.Description.equal?/2` - comparing two `RDF.Graph`s differs in `RDF.Graph.equal?/2` in that the graph name is ignored - comparing two `RDF.Dataset`s differs in `RDF.Dataset.equal?/2` in that the dataset name is ignored - a `RDF.Description` is equal to a `RDF.Graph`, if the graph has just one description which equals the given description - a `RDF.Description` is equal to a `RDF.Dataset`, if the dataset has just one graph which contains only the given description - a `RDF.Graph` is equal to a `RDF.Dataset`, if the dataset has just one graph which equals the given graph; note that in this case the graph names must match """ def equal?(data1, data2) end defimpl RDF.Data, for: RDF.Description do alias RDF.{Description, Graph, Dataset, Statement} def merge(description, input, opts \\ []) def merge(%Description{subject: subject} = description, {s, _, _} = triple, opts) do with ^subject <- Statement.coerce_subject(s) do Description.add(description, triple, opts) else _ -> Graph.new(description) |> Graph.add(triple, opts) end end def merge(description, {_, _, _, _} = quad, opts), do: Dataset.new(description) |> Dataset.add(quad, opts) def merge( %Description{subject: subject} = description, %Description{subject: other_subject} = other_description, opts ) when other_subject == subject, do: Description.add(description, other_description, opts) def merge(description, %Description{} = other_description, opts), do: Graph.new(description) |> Graph.add(other_description, opts) def merge(description, %Graph{} = graph, opts), do: RDF.Data.merge(graph, description, opts) def merge(description, %Dataset{} = dataset, opts), do: RDF.Data.merge(dataset, description, opts) def delete(description, input, opts \\ []) def delete( %Description{subject: subject} = description, %Description{subject: other_subject}, _opts ) when subject != other_subject, do: description def delete(description, input, opts), do: Description.delete(description, input, opts) def pop(description), do: Description.pop(description) def include?(description, input, opts \\ []), do: Description.include?(description, input, opts) def describes?(description, subject), do: Description.describes?(description, subject) def description(%Description{subject: subject} = description, s) do with ^subject <- Statement.coerce_subject(s) do description else _ -> Description.new(s) end end def descriptions(description), do: [description] def statements(description), do: Description.statements(description) def subjects(%Description{subject: subject}), do: MapSet.new([subject]) def predicates(description), do: Description.predicates(description) def objects(description), do: Description.objects(description) def resources(%Description{subject: subject} = description), do: Description.resources(description) |> MapSet.put(subject) def subject_count(_), do: 1 def statement_count(description), do: Description.count(description) def values(description, opts \\ []), do: Description.values(description, opts) def map(description, fun), do: Description.map(description, fun) def equal?(description, %Description{} = other_description) do Description.equal?(description, other_description) end def equal?(description, %Graph{} = graph) do with [single_description] <- Graph.descriptions(graph) do Description.equal?(description, single_description) else _ -> false end end def equal?(description, %Dataset{} = dataset) do RDF.Data.equal?(dataset, description) end def equal?(_, _), do: false end defimpl RDF.Data, for: 
RDF.Graph do alias RDF.{Description, Graph, Dataset, Statement} def merge(graph, input, opts \\ []) def merge(%Graph{name: name} = graph, {_, _, _, graph_context} = quad, opts) do with ^name <- Statement.coerce_graph_name(graph_context) do Graph.add(graph, quad, opts) else _ -> Dataset.new(graph) |> Dataset.add(quad, opts) end end def merge(graph, {_, _, _} = triple, opts), do: Graph.add(graph, triple, opts) def merge(description, {_, _, _, _} = quad, opts), do: Dataset.new(description) |> Dataset.add(quad, opts) def merge(graph, %Description{} = description, opts), do: Graph.add(graph, description, opts) def merge( %Graph{name: name} = graph, %Graph{name: other_name} = other_graph, opts ) when other_name == name, do: Graph.add(graph, other_graph, opts) def merge(graph, %Graph{} = other_graph, opts), do: Dataset.new(graph) |> Dataset.add(other_graph, opts) def merge(graph, %Dataset{} = dataset, opts), do: RDF.Data.merge(dataset, graph, opts) def delete(graph, input, opts \\ []) def delete(%Graph{name: name} = graph, %Graph{name: other_name}, _opts) when name != other_name, do: graph def delete(graph, input, opts), do: Graph.delete(graph, input, opts) def pop(graph), do: Graph.pop(graph) def include?(graph, input, opts \\ []), do: Graph.include?(graph, input, opts) def describes?(graph, subject), do: Graph.describes?(graph, subject) def description(graph, subject), do: Graph.description(graph, subject) || Description.new(subject) def descriptions(graph), do: Graph.descriptions(graph) def statements(graph), do: Graph.statements(graph) def subjects(graph), do: Graph.subjects(graph) def predicates(graph), do: Graph.predicates(graph) def objects(graph), do: Graph.objects(graph) def resources(graph), do: Graph.resources(graph) def subject_count(graph), do: Graph.subject_count(graph) def statement_count(graph), do: Graph.triple_count(graph) def values(graph, opts \\ []), do: Graph.values(graph, opts) def map(graph, fun), do: Graph.map(graph, fun) def equal?(graph, %Description{} = description), do: RDF.Data.equal?(description, graph) def equal?(graph, %Graph{} = other_graph), do: Graph.equal?( %Graph{graph | name: nil}, %Graph{other_graph | name: nil} ) def equal?(graph, %Dataset{} = dataset), do: RDF.Data.equal?(dataset, graph) def equal?(_, _), do: false end defimpl RDF.Data, for: RDF.Dataset do alias RDF.{Description, Graph, Dataset, Statement} def merge(dataset, input, opts \\ []) def merge(dataset, {_, _, _} = triple, opts), do: Dataset.add(dataset, triple, opts) def merge(dataset, {_, _, _, _} = quad, opts), do: Dataset.add(dataset, quad, opts) def merge(dataset, %Description{} = description, opts), do: Dataset.add(dataset, description, opts) def merge(dataset, %Graph{} = graph, opts), do: Dataset.add(dataset, graph, opts) def merge(dataset, %Dataset{} = other_dataset, opts), do: Dataset.add(dataset, other_dataset, opts) def delete(dataset, input, opts \\ []) def delete(%Dataset{name: name} = dataset, %Dataset{name: other_name}, _opts) when name != other_name, do: dataset def delete(dataset, input, opts), do: Dataset.delete(dataset, input, opts) def pop(dataset), do: Dataset.pop(dataset) def include?(dataset, input, opts), do: Dataset.include?(dataset, input, opts) def describes?(dataset, subject), do: Dataset.who_describes(dataset, subject) != [] def description(dataset, subject) do subject = Statement.coerce_subject(subject) Enum.reduce(Dataset.graphs(dataset), Description.new(subject), fn %Graph{descriptions: %{^subject => graph_description}}, description -> Description.add(description, 
graph_description) _, description -> description end) end def descriptions(dataset) do dataset |> subjects |> Enum.map(&description(dataset, &1)) end def statements(dataset), do: Dataset.statements(dataset) def subjects(dataset), do: Dataset.subjects(dataset) def predicates(dataset), do: Dataset.predicates(dataset) def objects(dataset), do: Dataset.objects(dataset) def resources(dataset), do: Dataset.resources(dataset) def subject_count(dataset), do: dataset |> subjects |> Enum.count() def statement_count(dataset), do: Dataset.statement_count(dataset) def values(dataset, opts \\ []), do: Dataset.values(dataset, opts) def map(dataset, fun), do: Dataset.map(dataset, fun) def equal?(dataset, %Description{} = description) do with [graph] <- Dataset.graphs(dataset) do RDF.Data.equal?(description, graph) else _ -> false end end def equal?(dataset, %Graph{} = graph) do with [single_graph] <- Dataset.graphs(dataset) do Graph.equal?(graph, single_graph) else _ -> false end end def equal?(dataset, %Dataset{} = other_dataset) do Dataset.equal?( %Dataset{dataset | name: nil}, %Dataset{other_dataset | name: nil} ) end def equal?(_, _), do: false end
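# Illustrative sketch, not part of the protocol above: merging two descriptions
# with different subjects, which (per the protocol moduledoc) promotes the
# result from RDF.Description to RDF.Graph. The example IRIs are placeholders.
defmodule RDF.Data.MergeExample do
  def promote_to_graph do
    alice = RDF.Description.new(RDF.iri("http://example.com/alice"))
    bob = RDF.Description.new(RDF.iri("http://example.com/bob"))

    # The result is guaranteed to have a RDF.Data implementation;
    # here it is a graph containing both descriptions:
    %RDF.Graph{} = RDF.Data.merge(alice, bob)
  end
end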
lib/rdf/data.ex
0.933975
0.960212
data.ex
starcoder
defmodule MazesWeb.RectangularMazeView do use MazesWeb, :view import MazesWeb.MazeHelper alias Mazes.Maze def square_size(maze) do Integer.floor_div(max_svg_width(), Enum.max([maze.width, maze.height])) end def svg_width(maze) do 2 * svg_padding() + square_size(maze) * maze.width end def svg_height(maze) do 2 * svg_padding() + square_size(maze) * maze.height end def square_center(maze, {x, y} = _vertex) do { svg_padding() + square_size(maze) * (x - 0.5), svg_padding() + square_size(maze) * (y - 0.5) } end def square(maze, {x, y} = vertex, settings, colors) do content_tag(:rect, "", width: format_number(square_size(maze) + 1), height: format_number(square_size(maze) + 1), style: "fill: #{ vertex_color( maze, vertex, colors, settings.show_colors, settings.hue, settings.saturation ) }", x: format_number(svg_padding() + square_size(maze) * (x - 1)), y: format_number(svg_padding() + square_size(maze) * (y - 1)) ) end def north_wall(maze, {x, y} = _vertex) do content_tag(:line, "", style: line_style(maze), x1: format_number(svg_padding() + square_size(maze) * (x - 1)), y1: format_number(svg_padding() + square_size(maze) * (y - 1)), x2: format_number(svg_padding() + square_size(maze) * x), y2: format_number(svg_padding() + square_size(maze) * (y - 1)) ) end def south_wall(maze, {x, y} = _vertex) do content_tag(:line, "", style: line_style(maze), x1: format_number(svg_padding() + square_size(maze) * (x - 1)), y1: format_number(svg_padding() + square_size(maze) * y), x2: format_number(svg_padding() + square_size(maze) * x), y2: format_number(svg_padding() + square_size(maze) * y) ) end def east_wall(maze, {x, y} = _vertex) do content_tag(:line, "", style: line_style(maze), x1: format_number(svg_padding() + square_size(maze) * x), y1: format_number(svg_padding() + square_size(maze) * (y - 1)), x2: format_number(svg_padding() + square_size(maze) * x), y2: format_number(svg_padding() + square_size(maze) * y) ) end def west_wall(maze, {x, y} = _vertex) do content_tag(:line, "", style: line_style(maze), x1: format_number(svg_padding() + square_size(maze) * (x - 1)), y1: format_number(svg_padding() + square_size(maze) * (y - 1)), x2: format_number(svg_padding() + square_size(maze) * (x - 1)), y2: format_number(svg_padding() + square_size(maze) * y) ) end end
lib/mazes_web/views/rectangular_maze_view.ex
0.557966
0.425665
rectangular_maze_view.ex
starcoder
defmodule Faker.Airports do import Faker, only: [sampler: 2, localize: 1] @moduledoc """ Functions for generating airports related data """ @doc """ Returns a random ICAO ## Examples iex> Faker.Airports.icao() "SNOS" iex> Faker.Airports.icao() "UNBG" iex> Faker.Airports.icao() "KLOM" iex> Faker.Airports.icao() "HCME" """ @spec icao() :: String.t() sampler(:icao, [ "KMSY", "OIHR", "HTMA", "SSAQ", "GQPP", "KLBL", "VEBG", "SWII", "EVVA", "UHKM", "RPVD", "LSZL", "FTTY", "SNCP", "VOAT", "KBVU", "HEGR", "DIGL", "SNJB", "RORH", "FXMM", "DRRM", "YTMY", "ESNK", "WIDE", "HUTO", "VRMG", "SKPS", "NZRO", "AYWK", "LSGN", "VARG", "ULDD", "PAKY", "HUJI", "NZGM", "GVMT", "LIEE", "CYYG", "FALK", "VYHH", "USCC", "PABE", "SPHZ", "RKSW", "SPLN", "LGNX", "FMNC", "CYLC", "VNSK", "KEWN", "YGTE", "NVSZ", "LIBP", "YELD", "VNLK", "KAXA", "FNUE", "BITE", "LICJ", "YGIA", "OATN", "HANJ", "LFBP", "RPVM", "VOMM", "CYCO", "FKKD", "PFKK", "KBKL", "YHPN", "WMKM", "FMMH", "OIBL", "FMNL", "LEPA", "WBKN", "YMGT", "LHUD", "YBTL", "VCCW", "EGPD", "KDEC", "ENOL", "FASS", "FXTK", "RCQC", "SADL", "EBZR", "FLBA", "MMTN", "LFLM", "PGWT", "OPGT", "VNMA", "EDTD", "KGAB", "LFRB", "PAEH", "OIBJ", "UAAT", "BIBL", "OITK", "VIGG", "KTVC", "SGEN", "PTPN", "DAAZ", "LFKJ", "YTKY", "SNOS", "SETH", "MRNC", "SSLT", "VNDT", "SDKK", "PAYA", "ESUP", "EGHJ", "YRTP", "YRYH", "MMMX", "KBBG", "HABD", "FYOE", "TFFR", "EGET", "RKPC", "KBIS", "SEST", "SNGA", "CYOJ", "WIOG", "HEMA", "DXXX", "DRZA", "SBCN", "MMPA", "OINZ", "19AK", "ENAL", "SBBW", "VNDH", "LPSJ", "LLEY", "ZYTL", "HLLQ", "LPPS", "SATM", "ZSRG", "SKCL", "AGGH", "SUME", "SVCU", "USKK", "CYJT", "SUVO", "KPLN", "FAGI", "NGTU", "VVCM", "EPGL", "FQVL", "LELN", "PAOO", "LBIA", "ZYJM", "SMWS", "LLSD", "FVWT", "LHPR", "OMAA", "RPMF", "FCPA", "FAKZ", "FPPR", "UTAT", "YDBI", "CYEY", "KPWM", "EGJJ", "LFTW", "XBRO", "KEWK", "PAFA", "ZSGS", "CYQG", "WALJ", "SBLN", "FAAG", "LTCR", "VLOS", "LQTZ", "OPOR", "SVVP", "EGJB", "FMSF", "LTAN", "VLTK", "GMMB", "BGSS", "VVDH", "RCFG", "RJOM", "NTGU", "MMMD", "FMNQ", "GLGE", "ZWTN", "LIMW", "SUMO", "EGPI", "LFBT", "YBRU", "FZGV", "LRCS", "SAMM", "SAZC", "KPWT", "VTBK", "UELL", "EDAC", "SAOS", "SCBE", "UHBB", "LFBX", "FBSN", "KEND", "EGMC", "GBYD", "MNRT", "FIMP", "OIFS", "ZSGZ", "CYBQ", "KTMT", "SAWM", "NZGB", "KAHN", "CYGM", "USDS", "HUMA", "NZTG", "FYTM", "LFMH", "YSMR", "NVVI", "KHVR", "KDYL", "ENVD", "SNCE", "FBSP", "LYBE", "DFEL", "CYCS", "RJCO", "HCMA", "OPSK", "CKQ3", "YGBI", "YMMB", "CYYL", "SCGZ", "CYWK", "ZGSZ", "VYPU", "UNAA", "MMLC", "SNGI", "UKDD", "LELC", "AYNU", "SNLB", "SWTY", "SBFL", "KDVL", "ZPBS", "VIST", "HEOW", "HTSY", "OEWD", "LECO", "ZGBH", "LFSR", "SAOU", "CYMM", "EGFH", "SAZP", "YBRM", "FCOM", "PAHX", "SNJK", "SMST", "MYRP", "PHDH", "SLJE", "YTAA", "EDLM", "NWWO", "WPMN", "AYML", "FQBR", "CYZE", "OMFJ", "HCME", "KLBE", "UIAA", "KLFT", "AYOG", "HTTG", "LFMR", "FZRB", "CYID", "ENHK", "AYLU", "CYDM", "WIBT", "PTYA", "KBNL", "SVPE", "YGDI", "VTSR", "SCSE", "HSKI", "SYOR", "RJSN", "HSSM", "DTKA", "HKMB", "LBBG", "VOTV", "AYMC", "TFFJ", "ZGHY", "HSPN", "KMOT", "LTAW", "LUKK", "UUYH", "YSGT", "OITT", "OIBI", "KBYS", "OPDI", "OYHD", "EGDL", "FOOH", "LIQN", "KTEX", "OPFA", "FZAM", "HBBO", "OAHR", "LIRH", "ZBCF", "LPLA", "CYNL", "SAEM", "EFMI", "WATT", "SADP", "AGGS", "KARR", "VTSP", "MGTK", "FCBS", "LGKL", "FAUT", "FASZ", "YLHR", "ZGMX", "MBNC", "SNMB", "ESPA", "FMNV", "UKLL", "TFFB", "YSPK", "SBCF", "UKDE", "AYMH", "GAKO", "WMKA", "SWBR", "KAIA", "LIBR", "LTFD", "ZPPP", "SMCA", "MMIA", "YLEC", "RPMJ", "FEFI", "DNAA", "PAFV", "ENKR", "YFST", "FMCZ", 
"NGKT", "FAKP", "NFNA", "UNBG", "KJST", "CYSN", "UNKL", "SWTS", "HUKS", "MMPE", "ESGT", "RPLC", "ESSD", "WAMJ", "VVVH", "WMKP", "HESC", "DFEE", "SWNS", "SVON", "MHCA", "KALS", "GQNT", "DBBK", "LFRK", "YPMP", "YCCT", "UKKG", "EDVE", "CYRQ", "KGLS", "CYXP", "CYPQ", "PHSF", "KDRT", "SWSI", "KPBI", "UMMG", "HKGA", "DIBK", "PAMY", "ZYJZ", "BIRF", "YGTN", "LBWN", "EFET", "OLKA", "NTGQ", "EDDL", "HCMK", "VTSS", "SARS", "SPHI", "YORB", "YLEO", "USNN", "SEMA", "LKZA", "EKEB", "DISP", "NTGV", "RJKA", "VVCI", "SPAO", "EFKS", "CYHK", "SKIG", "SKTL", "ENDU", "UWSS", "EILT", "SEAM", "LSZS", "YMTG", "YCHK", "OPCL", "SYKM", "SVPM", "CYFT", "KTUL", "HSHG", "FOGI", "KLYH", "LGKV", "PASX", "YRSB", "SEMH", "CYCB", "LDOS", "DFOU", "GUNZ", "FASB", "KRXE", "VNSR", "EDDP", "GQNA", "ORBI", "VTCT", "MUCF", "SWMW", "PAKP", "KMKL", "KMEM", "WMKD", "FXMF", "LIML", "UJAP", "LILQ", "YOEN", "SNNU", "KDYS", "SBVG", "LYTV", "RPMG", "KEUG", "PAWI", "LTAF", "YLOV", "MDPC", "UNKY", "FMSC", "HALL", "FACD", "ZLLL", "WADS", "RCKH", "YERN", "OPBN", "KO43", "SNGJ", "NZKE", "LGKO", "EGPR", "NZTK", "KLIT", "VIJR", "SBCZ", "KDAA", "MMTP", "SBBG", "SKAM", "WAMH", "LSZB", "CYHM", "TVSV", "VHHH", "KCCB", "MUMJ", "SWKO", "ZHNY", "MMTG", "GLTN", "OSLK", "FAVR", "KALW", "PAWS", "SEGU", "FXNK", "YSHK", "NZRA", "RKJJ", "SLCA", "YKMP", "ZMBN", "KDTW", "UTNU", "YVRD", "KACK", "PASH", "HLLM", "RKNY", "SDRS", "KBFI", "EDTL", "MHBL", "CYQB", "EHTW", "HLLB", "AYBU", "WBGQ", "LFCK", "LFRJ", "CZBD", "EFOU", "WBKP", "ZUTC", "SAZY", "UARR", "NFFA", "TNCS", "SBTR", "KCMI", "CYQK", "PFNO", "KLAX", "KGSO", "GOTT", "ORBM", "AYFI", "YARA", "HESX", "SKFL", "UWLL", "MMLM", "ZSYW", "MBGT", "CYCZ", "KABY", "BGGH", "LIPB", "LFCE", "AYSE", "AYTI", "ENNA", "LEMD", "LELO", "SBCA", "SNBU", "WITM", "CYER", "LFMO", "YNRC", "FEFS", "MTPP", "KSLK", "SVSA", "LCPH", "YDPD", "SKEJ", "WMKE", "ENEV", "MBMC", "CYKA", "EGPU", "KDEH", "LFLS", "FXSK", "EBBR", "SPAY", "OISS", "KE13", "LIDT", "KSDF", "PHNY", "SBIZ", "CYIF", "NFTE", "SAVJ", "SNMU", "SBLJ", "MRDK", "LLBS", "KABR", "FKKW", "ZSSS", "DRZR", "HESN", "OPLA", "DNCA", "LFBH", "SNJN", "LPBJ", "SACO", "YDDF", "KCSG", "OEPS", "SBKG", "KAUN", "EDFE", "VLPS", "YSNB", "VVPR", "GMFO", "WIOK", "SBHT", "EBKT", "HEBL", "SKCN", "LIMJ", "WBKM", "EFLA", "KRUT", "DNKA", "ZMMN", "VYNS", "YDLK", "PAUM", "ZLZW", "MMLO", "WATM", "URMO", "KSFO", "VEDZ", "ZYAS", "SNVB", "FATH", "LPPI", "HCMD", "WRLB", "LFOB", "UUEE", "AGGN", "LTFC", "ZSLQ", "LFRT", "ZSBB", "ZMUL", "CNM5", "UNWW", "KHXD", "FAGM", "LGKJ", "ROMY", "YDBY", "OERK", "FZFP", "SDNH", "FADQ", "SGCO", "CAL4", "SUCM", "TNCC", "OPTA", "WMBA", "SOOM", "DAOO", "OAMN", "EIIM", "UUYY", "OPGD", "KSHD", "LTAQ", "KBNO", "KINT", "NCMR", "LTFE", "ZLXY", "GQNS", "GMMN", "FCOS", "OPTH", "CYRT", "YBOK", "FAMD", "CZEM", "SGAY", "ROYN", "CYSJ", "CCK4", "ENKB", "LFKC", "KIYK", "ZYTN", "FMMS", "ZGHZ", "EFHV", "PACM", "HTMI", "VLSV", "WAWW", "LLMZ", "NSFI", "KBUB", "NWWU", "EGPO", "NVVQ", "NWWM", "SKAD", "SDBY", "ULKK", "KAIK", "NTTP", "PACZ", "MYAW", "OPTT", "AYBA", "EYKA", "GOBD", "FCBB", "ENGM", "EBLG", "SACC", "YCNM", "KADW", "RPVE", "RPVA", "ETNG", "SAZG", "GQNI", "VYSW", "MRCA", "LFLL", "CYFR", "GMMZ", "ETSI", "VIBN", "YPIR", "KORF", "OIAM", "BGAA", "HLZW", "VAKS", "UTNN", "SCTN", "KADS", "HCMU", "HAKL", "SVSR", "MMBT", "ENLK", "DATM", "VNRT", "LFGW", "SBUG", "SAWT", "SKNQ", "ZSSH", "AYKB", "HKWJ", "SBTG", "EGQS", "LIPT", "YSMP", "SATU", "CYWY", "SAVB", "SNTK", "MUBY", "SWCW", "WALV", "URFB", "NTGT", "VEMH", "YKBR", "CYND", "OIBB", "SKAP", "KLOM", "SBCO", "YSWG", "USRR", "RJTT", "LTBE", "WAKP", 
"KSPI", "EGXU", "LEJR", "KGCN", "CYPE", "YPXM", "YFNE", "VLSB", "CYGT", "MTJA", "OODQ", "SSGY", "GQNE", "WIOM", "YMUG", "USTO", "YGLG", "NSMA", "KIGM", "MYCC", "SYMB", "LFOO", "KGTR", "AYAT", "LFMI", "DTTJ", "EDGS", "YEIN", "EPPO", "SBQV", "KIPT", "UAFO", "UBBN", "KGFK", "KAMW", "KMHK", "SNBR", "EGDY", "EGGP", "ZBUL", "LIRF", "VYMK", "CDK2", "SKAN", "GASK", "UHOO", "URRP", "FZIR", "MMDA", "KPHH", "K0B8", "GMAZ", "SBGU", "LSZM", "LYVA", "RPLU", "VYLS", "ZYTX", "PAKU", "UAOO", "FCPD", "SAVS", "DBBN", "YMLT", "HSNN", "NCMH", "ZLSN", "DAAT", "AYGG", "EHBK", "3IS8", "SCFT", "OPNK", "HEPS", "SDJC", "MBSY", "OPCH", "GCXO", "FZUG", "UMMS", "YBNA", "FLCP", "KVCT", "NZTS", "FBJW", "ZLDH", "LIER", "SBPL", "SOOR", "KICT", "FKKR", "CYEV", "CYHO", "KDGW", "VGSY", "DIDL", "KAMN", "PAFE", "SKBU", "ROIG", "CYLU", "KBNA", "VTED", "VVDN", "NZWK", "SYKR", "SGES", "OITM", "YBIL", "PATG", "LFPO", "LFBG", "UATT", "FAMH", "EDXO", "SDTO", "DAUI", "KVLD", "ZSJJ", "BISI", "VVCS", "FKKB", "YMWA", "KBBD", "KPIH", "LFLU", "CYBL", "LTBS", "KOAK", "RPMV", "KJMS", "RJOT", "CTU5", "NIUE", "HKKG", "VARK", "SPJJ", "KBPT", "SKTA", "RPMM", "EPBK", "YPAM", "FZQA", "YMJM", "YMEN", "SBMC", "CYXC", "WICT", "CYFC", "AGGY", "VYPT", "SKMR", "ZMBR", "AGOK", "KGYY", "FZRQ", "EGQB", "YBLA" ]) @doc """ Returns a random IATA ## Examples iex> Faker.Airports.iata() "BFU" iex> Faker.Airports.iata() "FMM" iex> Faker.Airports.iata() "YUS" iex> Faker.Airports.iata() "YPH" """ @spec iata() :: String.t() sampler(:iata, [ "MKE", "CBY", "PDX", "THQ", "DRT", "HDF", "CAX", "YQS", "ERZ", "EYR", "MOM", "ACN", "AKL", "CVT", "BGA", "AGS", "KSO", "MQA", "FIH", "USI", "GBK", "NON", "BNJ", "DUD", "SHO", "STB", "ANI", "AFT", "WBD", "MJF", "PSD", "BXT", "PZY", "WEF", "DPE", "ZAC", "KNG", "KVX", "IXN", "CAF", "PBQ", "KBV", "CCJ", "GNN", "VRL", "DBD", "XAP", "URY", "RJK", "OKH", "ATY", "SAZ", "LNX", "YRG", "NYN", "YEY", "HGU", "DYA", "OAZ", "AUO", "HVD", "LSA", "RTM", "ODB", "VCP", "IGO", "ILP", "TRC", "YQC", "BWM", "USM", "NHS", "ANX", "SLA", "SZS", "ORK", "NJC", "HRS", "MEN", "KSK", "YOJ", "SQX", "GRR", "CHO", "MEH", "ILI", "WBQ", "BGN", "IPL", "YWP", "YGK", "NCR", "ZAR", "OVL", "CSB", "IIL", "ZAG", "BIS", "MYR", "DHT", "USK", "BDS", "YVP", "YKA", "ZYL", "WTN", "LTN", "RVV", "BOZ", "UTW", "BFU", "BYK", "AHE", "KCA", "AQJ", "YGV", "YSM", "AGU", "ALB", "SEW", "BME", "VOT", "TCP", "CKY", "BSJ", "XCH", "KUH", "LVB", "WYA", "JUT", "SCC", "NAT", "LEF", "GIU", "EHM", "ASH", "TJH", "LRD", "DOM", "SDY", "SHW", "GRW", "VLC", "VOH", "HKK", "TUF", "CEK", "XYA", "TCX", "ZHA", "YKL", "ZWA", "GES", "RMQ", "BTM", "PQC", "YVM", "BVR", "IVR", "OAR", "GON", "KNI", "APC", "RIG", "PQE", "OKD", "QCR", "MDH", "JMO", "SKX", "ODA", "TUB", "DLX", "ADA", "VYD", "IIA", "PBU", "WMN", "SMW", "SJC", "PPW", "QYY", "FAU", "VHC", "BRL", "OOR", "ADT", "VLV", "MAK", "ANU", "NDM", "MDP", "KHE", "CNB", "VPY", "URT", "KEV", "ZOJ", "GMA", "BSY", "TOM", "FLZ", "COS", "UPG", "BLB", "ALN", "VSG", "MMJ", "PTK", "BEH", "GGW", "RHG", "YWJ", "BEQ", "MSL", "VTL", "YHT", "DDC", "POZ", "CBM", "JLN", "ODJ", "NOR", "VDE", "BCO", "QHR", "RBO", "TNR", "QOB", "KWL", "KOS", "CYI", "NWI", "TIU", "YQL", "PDZ", "CRF", "OSZ", "MEK", "LAI", "VIN", "CMN", "YRL", "QOJ", "FRD", "SYM", "JNG", "TOX", "CAU", "BYW", "ODR", "JSY", "GWY", "ZKM", "BXD", "SUV", "YSH", "HGR", "CKW", "PEL", "CDY", "OWB", "PNR", "MOU", "YWA", "TGU", "SFC", "FRF", "LPA", "GGS", "ADC", "SPJ", "YXE", "SZV", "CYB", "IRI", "TFM", "PZA", "SFG", "ALQ", "MWZ", "MQT", "CIW", "VLG", "YGB", "PBB", "HTU", "ICT", "CUO", "NEG", "YUB", "ASO", "YBC", "ORX", "NZE", 
"DJA", "EIH", "GOJ", "SUH", "EGH", "GOQ", "OHE", "HFA", "ISB", "YPH", "BNS", "PCP", "MQL", "LBD", "YAA", "ZBY", "SUJ", "PUX", "YKQ", "YCD", "KEQ", "LKY", "SSG", "BVH", "CBW", "PSO", "GAY", "KOP", "LSZ", "SUF", "WLA", "HTZ", "BGW", "TCU", nil, "PZO", "MDI", "BRU", "XAB", "AIK", "FGU", "YKU", "KIJ", "YOL", "DXB", "KBR", "RTS", "MRU", "WGT", "YSL", "LSM", "AYK", "CIQ", "CGH", "LKH", "SZX", "LMO", "FSI", "PER", "ISP", "BMK", "MQK", "OSY", "PFN", "MVP", "EDM", "ASC", "SIA", "KWE", "GAW", "FDH", "KEE", "RCO", "TYP", "XML", "ILF", "SAQ", "OIM", "ABX", "IWO", "LSI", "BMY", "ESU", "FVL", "YKS", "HLH", "STM", "YCR", "PAK", "PMF", "BHW", "PPR", "YMJ", "SAL", "ARD", "AOR", "BSF", "TJQ", "TIQ", "MCI", "CHM", "TIN", "PIA", "LDI", "ZBF", "LAS", "FMM", "GHB", "QPG", "UDJ", "NAE", "LOD", "ACJ", "PXO", "XKH", "YYL", "EBS", "PLD", "KRT", "EUC", "IKP", "LUW", "BCT", "LGL", "YHI", "YZY", "MCX", "BEG", "LBO", "MDZ", "YMT", "BOB", "YXJ", "URC", "RRS", "SDH", "BUI", "NML", "MXD", "BBS", "XMY", "EBB", "ORE", "DGF", "DPS", "KSE", "UEL", "GVN", "GTI", "INO", "DAN", "GRQ", "WIK", "ONR", "ADG", "CMA", "AQI", "HNL", "BTH", "WDH", "TEC", "MST", "TNI", "CSY", "SBG", "SKH", "KUD", "EFL", "TAN", "JCB", "WHK", "SHB", "GPI", "PGZ", "YLK", "CLJ", "ELH", "TAW", "URA", "OSK", "WLC", "TWB", "WTP", "KGD", "KSM", "YMM", "UBJ", "VLI", "WKR", "SLJ", "FEK", "MEA", "SRZ", "OAG", "IXL", "HLS", "ELO", "BGB", "BLK", "LZR", "AYO", "POM", "MAL", "KAA", "GVI", "RDS", "TKQ", "EWI", "OZZ", "PMS", "ATZ", "BIG", "HOK", "CEN", "NLS", "BFP", "BWI", "DAD", "PIZ", "OKI", "MAZ", "TUQ", "CBF", "XVN", "DQH", "YSB", "OXP", "DGW", "INJ", "TCZ", "MBC", "IDR", "SKV", "YHK", "MSO", "BXO", "CFO", "CCM", "YYJ", "AAU", "FKQ", "DRS", "IXQ", "MVD", "HTG", "SVO", "YNC", "ASY", "XAC", "KKW", "ADF", "KGE", "BMP", "TIP", "OBN", "UBP", "ERF", "OSW", "ING", "PDC", "FLI", "OEC", "ONU", "MFC", "SID", "MMP", "NEL", "AUT", "VNO", "UUK", "HYA", "TUC", "UMR", "YRB", "AEP", "PSL", "UUD", "TEZ", "GRI", "BZI", "VCT", "MZR", "CZE", "COK", "SPX", "PTJ", "NGE", "NTX", "YHU", "LYS", "XSE", "OKQ", "DLU", "DAX", "ARC", "EOH", "OLC", "LRU", "MBH", "KOE", "JAM", "DDU", "DMK", "CIY", "EDL", "TKG", "AFF", "SQM", "YXK", "SGV", "SNF", "MHO", "ACB", "TDT", "KST", "AAN", "SOM", "CBH", "VRB", "VUS", "ISK", "SLS", "JAF", "TTJ", "MNQ", "LPY", "OCJ", "ABA", "FMU", "SXI", "KAD", "FAI", "YOT", "VEV", "KAY", "GUW", "YQU", "GVR", "KMK", "LOA", "SOJ", "BCN", "COV", "LAP", "HDR", "FLN", "BTW", "YVT", "ORP", "TRR", "MZH", "RUY", "SDT", "MMY", "ORF", "CAL", "RKA", "VDA", "YAI", "KSA", "ATS", "GIC", "YTL", "YGJ", "RHL", "QUG", "GDE", "MYG", "WTA", "YFS", "TME", "GLX", "CVN", "RIM", "LAJ", "SMG", "ANC", "EVV", "APL", "TID", "USN", "KRG", "EIN", "OTR", "SDE", "ARZ", "MKL", "SVD", "GEA", "YWM", "LOH", "SGS", "TRG", "BZN", "ELM", "NAL", "CIM", "KUN", "LSQ", "CDF", "CGV", "ZSA", "MXJ", "RCB", "LFB", "QDB", "AJY", "SBI", "DTL", "WHU", "RUH", "AVP", "GPA", "CIA", "GAQ", "QIG", "SWA", "IOA", "JFR", "INV", "AZO", "EDI", "BLP", "EEN", "BIH", "CUV", "RRR", "MXX", "VVK", "RSU", "IXV", "CML", "MMD", "HDH", "LAF", "RNS", "MOJ", "XGG", "LVR", "ICK", "PIP", "TYM", "SYU", "OTM", "TER", "LBY", "APT", "TCH", "LLA", "ANN", "TVA", "ZBE", "MFF", "TUU", "CNN", "SGX", "APB", "ERA", "ZNE", "SHH", "FSP", "VAD", "RSA", "ESD", "LGS", "ZAJ", "KBZ", "PES", "OFU", "EPR", "GJA", "SXS", "MIU", "CPO", "BOJ", "KUV", "WAC", "YHD", "MSA", "YAW", "ULU", "TAZ", "HUF", "LCV", "KKZ", "ZRJ", "VKX", "WBO", "OME", "CAS", "IEV", "CBP", "UPV", "SIO", "KFA", "UKA", "AAM", "FRW", "DUE", "YST", "OKT", "MDX", "BRQ", "VOZ", "AUA", "IVC", "BVZ", "YPA", 
"DTR", "GNS", "QVE", "OSR", "MXN", "BLV", "LCG", "CKZ", "TCB", "ZST", "AYQ", "MGQ", "KCZ", "JEE", "GAG", "TVU", "BWE", "DZN", "XDE", "LEQ", "GYN", "VVC", "DSS", "AAZ", "IGM", "KGL", "JIW", "BYF", "MOT", "WSR", "AIF", "ELF", "YUS", "MDT", "LBJ", "FNE", "YAZ", "NTQ", "LUQ", "LBE", "GOT", "DBB", "AIT", "PSW", "CHB", "FXO", "LUD", "TBP", "CRV", "WLS", "JHM", "WXN", "MQE", "DAM", "BCB", "NCL", "YQT", "APH", "BZY", "BJV", "REA", "GTF", "ACE", "VVO", "FEL", "VNE", "KRK", "NWA", "MKZ", "HTY", "CWE", "QIQ", "TNH", "KSW", "CZX", "UBR", "KTR", "STZ", "XTR", "AUX", "MGM", "MGT", "ULX", "SFO", "WTD", "DOK", "DNB", "KJK", "ZEM", "GRJ", "CJA", "IAH", "BIE", "VHZ", "KAC", "RAH", "ATP", "GFN", "ARG", "RGO", "OKA", "URO", "CEB", "KQT", "LLW", "YFE", "YAS", "TRS", "PSA", "FTI", "MFN", "GVA", "CTU", "YWK", "QJB", "KIX", "WIO", "BKM", "PNH", "SZB", "NOJ", "AOC", "ADU", "ABF", "RBV", "PRA", "NNX", "NAV", "POX", "EKO", "NTE", "TUN", "UEE", "SZG", "RDG", "JMK", "CKB", "CEX", "FOT", "WAT", "VIC", "MTQ", "BOO", "HBU", "MOI", "NDJ", "GUL", "CGR", "UIR", "JAL", "GUX", "HNH", "LTK", "UNK", "ZUH", "LUG", "WSG", "OCC", "VGO", "YPN", "IVW", "TQL", "NUU", "EBM", "HYS", "BUQ", "YDN", "CRK", "GUJ", "YMS", "YMG", "ZCN", "YUM", "GUB", "URM", "MRO", "BEO", "TEY", "YPR", "AMC", "SIL", "IRO", "QLS", "QZO", "MQD", "NPL", "DEM", "OHT", "OSO", "SKD", "HWN", "NCE", "ZND", "GAN", "CGD", "ZPQ", "LED" ]) @spec name() :: String.t() localize(:name) end
lib/faker/airports.ex
0.598077
0.513303
airports.ex
starcoder
defmodule Geotz do
  @moduledoc """
  Provides functions for fast timezone lookup for a given
  latitude and longitude
  """

  alias Geotz.Data

  @tzdata Data.tz_data()
  @tzlist Data.tz_list()
  @tzlength length(Data.tz_list())

  @doc """
  Get a timezone from a given latitude and longitude.

  ## Examples

      iex> Geotz.lookup(19.432551, -99.191673)
      "America/Mexico_City"

      iex> Geotz.lookup("19.432551", "-99.191673")
      "America/Mexico_City"

      iex> Geotz.lookup("19.432551", -99.191673)
      "America/Mexico_City"

      iex> Geotz.lookup(Decimal.new(19.432551), Decimal.new(-99.191673))
      "America/Mexico_City"
  """
  @spec lookup(float | String.t | Decimal.t, float | String.t | Decimal.t) :: String.t
  if Code.ensure_compiled?(Decimal) do
    def lookup(%Decimal{} = lat, %Decimal{} = lng) do
      lat
      |> Decimal.to_float()
      |> lookup(Decimal.to_float(lng))
    end
  end

  def lookup(<<lat::binary>>, lng), do: lat |> String.to_float() |> lookup(lng)
  def lookup(lat, <<lng::binary>>), do: lat |> lookup(String.to_float(lng))

  def lookup(lat, lng) do
    n = -1
    # Project the coordinates onto the root grid; the tiny epsilons in the
    # divisors keep boundary inputs (lng = 180.0, lat = -90.0) inside the grid.
    x = (180.0 + lng) * 48 / 360.00000000000006
    y = (90.0 - lat) * 24 / 180.00000000000003
    u = x |> Float.floor |> round
    v = y |> Float.floor |> round
    # Each cell is stored as a 2-character record in @tzdata.
    i = v * 96 + u * 2
    ia = String.at(@tzdata, i)
    ib = String.at(@tzdata, i + 1)
    <<a, _>> = <<ia::binary, 0>>
    <<b, _>> = <<ib::binary, 0>>
    # Decode the two characters into an index: either an internal node or,
    # once large enough, a timezone entry.
    i = a * 56 + b - 1995
    find(i, n, x, y, u, v)
  end

  # While the decoded index points at an internal node, descend one level:
  # halve the current cell along each axis and re-index into @tzdata.
  defp find(i, n, x, y, u, v) when i + @tzlength < 3136 do
    n = n + i + 1
    x = :math.fmod((x - u) * 2, 2)
    y = :math.fmod((y - v) * 2, 2)
    u = x |> Float.floor |> round
    v = y |> Float.floor |> round
    i = n * 8 + v * 4 + u * 2 + 2304
    ia = String.at(@tzdata, i)
    ib = String.at(@tzdata, i + 1)
    <<a, _>> = <<ia::binary, 0>>
    <<b, _>> = <<ib::binary, 0>>
    i = a * 56 + b - 1995
    find(i, n, x, y, u, v)
  end

  # Leaf reached: the remaining offset indexes into the timezone name list.
  defp find(i, _n, _x, _y, _u, _v), do: Enum.at(@tzlist, i + @tzlength - 3136)
end
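# Worked example (a sketch, not library documentation): the first-level cell
# computation in lookup/2 for the doctest coordinates (19.432551, -99.191673).
# Reading the constants as a 48x24 root grid with 2-character cell records is
# an inference from the code above.
x = (180.0 + -99.191673) * 48 / 360.00000000000006 #=> ~10.7744, so column u = 10
y = (90.0 - 19.432551) * 24 / 180.00000000000003   #=> ~9.4090,  so row    v = 9
i = 9 * 96 + 10 * 2                                #=> 884, offset of the cell record in @tzdata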
lib/geotz.ex
0.799599
0.584805
geotz.ex
starcoder
defmodule Definition.Schema.Validation do
  @moduledoc """
  Defines custom functions for validating requirements of
  commonly encountered data types; each function evaluates
  to a boolean.
  """

  @doc """
  Evaluates whether or not the supplied value is a valid
  ISO8601-formatted timestamp.

  # Examples

      iex> Definition.Schema.Validation.ts?("2020-18-10, 16:00 EST")
      false

      iex> DateTime.utc_now |> to_string |> Definition.Schema.Validation.ts?
      true
  """
  @spec ts?(input :: String.t()) :: boolean
  def ts?(input) when is_binary(input) do
    case DateTime.from_iso8601(input) do
      {:ok, _, _} -> true
      _ -> false
    end
  end

  def ts?(_), do: false

  @doc """
  Evaluates whether or not a supplied 2-element list of binary
  values represents a range of timestamps in ISO8601 format.

  # Examples

      iex> Definition.Schema.Validation.temporal_range?(["2010-12-10", "present"])
      false

      iex> Definition.Schema.Validation.temporal_range?(["2010-12-10T00:00:00Z", "2019-10-05 12:15:00Z"])
      true
  """
  @spec temporal_range?([String.t()]) :: boolean
  def temporal_range?([start, stop]) when is_binary(start) and is_binary(stop) do
    with {:ok, start_ts, _} <- DateTime.from_iso8601(start),
         {:ok, stop_ts, _} <- DateTime.from_iso8601(stop) do
      case DateTime.compare(start_ts, stop_ts) do
        :lt -> true
        :eq -> true
        :gt -> false
      end
    else
      _ -> false
    end
  end

  def temporal_range?(_), do: false

  @doc """
  Evaluates whether or not a supplied list of floating point
  numeric values represents a geospatial bounding box of
  coordinates.

  # Examples

      iex> Definition.Schema.Validation.bbox?("123.2, 156.0, 47.9, 84.23")
      false

      iex> Definition.Schema.Validation.bbox?([13.21, 24.21, 67.1, 93.256])
      true
  """
  @spec bbox?(bbox :: [float]) :: boolean
  def bbox?([x1, y1, x2, y2] = bbox) when x1 <= x2 and y1 <= y2 do
    Enum.all?(bbox, &is_float/1)
  end

  def bbox?(_), do: false

  @doc """
  Evaluates whether or not a supplied binary value represents
  a valid email address.

  # Example

      iex> Definition.Schema.Validation.email?("<EMAIL>")
      true
  """
  @spec email?(input :: String.t()) :: boolean
  def email?(input) when is_binary(input) do
    Regex.match?(~r/^[A-Za-z0-9._%+-+']+@[A-Za-z0-9.-]+\.[A-Za-z]+$/, input)
  end

  def email?(_), do: false

  @doc """
  Evaluates whether or not a supplied value is considered
  "empty" according to that type's term representation.

  # Examples

      iex> Definition.Schema.Validation.empty?(" ")
      true

      iex> Definition.Schema.Validation.empty?(%{})
      true

      iex> Definition.Schema.Validation.empty?([0])
      false
  """
  @spec empty?(input :: String.t() | list | map) :: boolean
  def empty?(""), do: true
  def empty?([]), do: true
  def empty?(input) when input == %{}, do: true

  def empty?(input) when is_binary(input) do
    case String.trim(input) do
      "" -> true
      _ -> false
    end
  end

  def empty?(_), do: false

  @doc """
  Evaluates the inverse of `empty?/1`; quickly checks that a value
  is not empty without negative predicates (not, !).
  """
  @spec not_empty?(input :: String.t() | list | map) :: boolean
  def not_empty?(input), do: not empty?(input)

  @doc """
  Evaluates whether or not a supplied value is anything other
  than `nil`.
  """
  @spec not_nil?(term) :: boolean
  def not_nil?(nil), do: false
  def not_nil?(_), do: true

  @doc """
  Evaluates whether input matches an acceptable table name
  pattern, i.e. exactly two non-empty segments separated by
  a double underscore.
  """
  @spec table_name?(term) :: boolean
  def table_name?(input) when is_binary(input) do
    case String.split(input, "__", trim: true) do
      [_, _] -> true
      _ -> false
    end
  end

  def table_name?(_), do: false

  @doc """
  Evaluates whether input is a positive integer.
  """
  @spec pos_integer?(term) :: boolean
  def pos_integer?(input) when is_integer(input) and input > 0, do: true
  def pos_integer?(_), do: false

  @doc """
  Evaluates whether input is a valid port number.
  """
  @spec is_port?(term) :: boolean
  def is_port?(input) when is_integer(input) and 0 <= input and input <= 65_535, do: true
  def is_port?(_), do: false
end
apps/definition/lib/definition/schema/validation.ex
0.932898
0.541469
validation.ex
starcoder