# lib/ecto_rut.ex
defmodule Ecto.Rut do
@moduledoc """
Provides simple, sane and terse shortcuts for Ecto models.
Ecto.Rut is a wrapper around `Ecto.Repo` methods that usually require you to pass
the module as the subject and sometimes even require you to do extra work beforehand
(as in the case of `Repo.insert/3`) to perform operations on your database. Ecto.Rut
tries to reduce code repetition by following the "Convention over Configuration"
ideology.
For example, once set up, it allows you to do this on a model called `Post`:
```
# Create a Post
Post.insert(title: "Introduction to Elixir", categories: ["Elixir", "Programming"])
# Get all Posts
Post.all
# Get a Post with its id
Post.get(3)
# Get a Post with another attribute
Post.get_by(published_date: "2016-02-24")
# Delete a Post
Post.delete(lame_post)
```
## Installation
Once added to your mix dependencies, all you need to do is call `use Ecto.Rut` in
your Ecto models:
```
defmodule YourApp.Post do
use Ecto.Schema
use Ecto.Rut
# Schema, Changeset and other stuff...
end
```
### Phoenix Projects
If you're using Ecto with a Phoenix project, instead of calling `use Ecto.Rut` in all of
your models, you can just call it once in the `model/0` method of your `web/web.ex` file:
```
# web/web.ex
def model do
quote do
use Ecto.Schema
use Ecto.Rut
# Other stuff...
end
end
```
## Configuration
You can also pass options to Ecto.Rut when calling `use` on it. These values are inferred
automatically by Ecto.Rut, but you can set them yourself in those special cases where it
can't. The two options are:
- `:model`
You set this when your Ecto Model is different from the module where you are calling `use`
- `:repo`
You set this when your app's `Ecto.Repo` module is named something other than the default `YourApp.Repo`.
```
defmodule YourApp.OtherNamespace.Post do
use Ecto.Schema
use Ecto.Rut, model: YourApp.Post, repo: YourApp.CustomEcto.Repo
# Other Stuff
end
```
## Export Changeset
Methods like `c:insert/1` or `c:update/2` depend on your model exporting a public function called
`changeset(struct, params)` with all your desired validations and constraints applied to
the casted struct.
Ecto.Rut uses this function to convert maps, keyword lists and other types into an
`Ecto.Changeset` before updating or inserting them into the database.
Phoenix projects generate them for your models automatically, but for other Elixir projects,
you can [see an example here](https://hexdocs.pm/ecto/Ecto.Changeset.html).
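As a minimal sketch (the `:title` field here is hypothetical), such a
function could look like:
```
def changeset(struct, params) do
  struct
  |> Ecto.Changeset.cast(params, [:title])
  |> Ecto.Changeset.validate_required([:title])
end
```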
"""
@doc false
defmacro __using__(opts \\ []) do
quote bind_quoted: [opts: opts] do
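# Infer the model, the app namespace and the repo (e.g. YourApp.Post
# implies YourApp.Repo) unless they were passed in as options.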
@model opts[:model] || __MODULE__
@app opts[:app] || @model |> Module.split |> Enum.drop(-1) |> Module.concat
@repo opts[:repo] || @app |> Module.concat("Repo")
# Default Changeset method
def changeset(struct, params) do
Ecto.Changeset.cast(struct, params, [])
end
defoverridable [changeset: 2]
# Simple Methods
def all, do: call(:all, [@model])
def delete_all, do: call(:delete_all, [@model])
def get(id), do: call(:get, [@model, id])
def get!(id), do: call(:get!, [@model, id])
def get_by(clauses), do: call(:get_by, [@model, clauses])
def get_by!(clauses), do: call(:get_by!, [@model, clauses])
def delete(struct), do: call(:delete, [struct])
def delete!(struct), do: call(:delete!, [struct])
# Insert and Insert!
Enum.each [:insert, :insert!], fn method ->
def unquote(method)(%{__struct__: Ecto.Changeset} = changeset) do
call(unquote(method), [changeset])
end
def unquote(method)(%{__struct__: @model} = struct) do
struct
|> Map.from_struct
|> unquote(method)()
end
def unquote(method)(map) when is_map(map) do
@model
|> Kernel.struct
|> changeset(map)
|> unquote(method)()
end
def unquote(method)(keywords) do
keywords
|> ExUtils.Keyword.to_map
|> unquote(method)()
end
end
# Update and Update!
Enum.each [:update, :update!], fn method ->
def unquote(method)(%{__struct__: Ecto.Changeset} = changeset) do
call(unquote(method), [changeset])
end
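# Called with just a modified struct: re-fetch the persisted record by its
# primary key and use the struct's fields as the changes. Otherwise, `new`
# (a struct, map or keyword list) holds the changes to apply to `struct`.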
def unquote(method)(%{__struct__: @model} = struct, new \\ nil) do
[struct, map] =
cond do
is_nil(new) -> [get!(struct.id), Map.from_struct(struct)]
ExUtils.is_struct?(new) -> [struct, Map.from_struct(new)]
ExUtils.is_pure_map?(new) -> [struct, new]
Keyword.keyword?(new) -> [struct, ExUtils.Keyword.to_map(new)]
end
struct
|> changeset(map)
|> unquote(method)()
end
end
# Private Methods
defp call(method, args \\ []) do
apply(@repo, method, args)
end
end
end
@doc """
Fetches all entries from the data store for the model.
See `c:Ecto.Repo.all/2`.
## Example
```
Post.all
```
"""
@callback all :: [Ecto.Schema.t] | no_return
@doc """
Fetches a single struct from the data store where the primary key matches the given id.
Returns `nil` if no result was found. If the schema has no primary key or more than one,
an `ArgumentError` will be raised. See `c:Ecto.Repo.get/3`.
## Example
```
Post.get(3)
Post.get("0e531047-6bd2-4ab1-94c3-817fba988dbe")
```
"""
@callback get(id :: term) :: Ecto.Schema.t | nil | no_return
@doc """
Similar to `c:get/1` but raises `Ecto.NoResultsError` if no record was found.
Also see `c:Ecto.Repo.get!/3`.
"""
@callback get!(id :: term) :: Ecto.Schema.t | nil | no_return
@doc """
Fetches a single struct from the data store that matches the passed clauses.
Returns `nil` if no result was found. See `c:Ecto.Repo.get_by/3`.
## Example
```
Post.get_by(title: "Introduction to Elixir")
Post.get_by(published_date: "2015-10-15")
```
"""
@callback get_by(clauses :: Keyword.t) :: Ecto.Schema.t | nil | no_return
@doc """
Similar to `c:get_by/1` but raises `Ecto.NoResultsError` if no record was found.
Also see `c:Ecto.Repo.get_by!/3`.
"""
@callback get_by!(clauses :: Keyword.t) :: Ecto.Schema.t | nil | no_return
@doc """
Deletes a struct using its primary key.
Returns `{:ok, struct}` if the struct was successfully deleted or `{:error, changeset}`
if there was a validation or a known constraint error.
See `c:Ecto.Repo.delete/2`.
## Example
```
case Post.delete(post) do
{:ok, struct} -> # Deleted with success
{:error, changeset} -> # Something went wrong
end
```
"""
@callback delete(struct_or_changeset :: Ecto.Schema.t | Ecto.Changeset.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
@doc """
Similar to `c:delete/1` but returns the struct or raises if the changeset is invalid.
Also see `c:Ecto.Repo.delete!/2`.
"""
@callback delete!(struct_or_changeset :: Ecto.Schema.t | Ecto.Changeset.t) :: Ecto.Schema.t | no_return
@doc """
Deletes all entries of the model.
Returns a tuple containing the number of items deleted. Also see `c:Ecto.Repo.delete_all/2`.
## Example
```
Post.delete_all
# => {34, nil}
```
"""
@callback delete_all :: {integer, nil | [term]} | no_return
@doc """
Inserts a new record (can be a struct, changeset, keyword list or a map).
In case a changeset is given, the changes in the changeset are merged with the struct fields
and all of them are sent to the database.
In case a struct, keyword list or a map is given, it is first converted to a changeset, with
all non-nil fields as part of the changeset, and inserted into the database if it's valid.
Returns `{:ok, struct}` if it was successfully inserted, or `{:error, changeset}` if there was a
validation or a known constraint error.
Also see `c:Ecto.Repo.insert/2`.
## Requires a changeset method
This method depends on your model exporting a public changeset function. [See this for more
details](#module-export-changeset).
## Example
```
Post.insert(title: "Introduction to Elixir")
Post.insert(%{title: "Building your first Phoenix app"})
Post.insert(%Post{title: "Concurrency in Elixir", categories: ["programming", "elixir"]})
Post.changeset(%Post{}, %{title: "Ecto for dummies"}) |> Post.insert
```
"""
@callback insert(struct :: Ecto.Schema.t | Ecto.Changeset.t | Map.t | Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
@doc """
Similar to `c:insert/1` but returns the struct or raises if the changeset is invalid.
Also see `c:Ecto.Repo.insert!/2`.
"""
@callback insert!(struct :: Ecto.Schema.t | Ecto.Changeset.t | Map.t | Keyword.t) :: Ecto.Schema.t | no_return
@doc """
Updates the database record using a modified struct or a changeset.
This method accepts only one argument: either a modified struct or a changeset. It uses the
struct's or changeset's primary key to update the correct record in the database. If no primary
key is found, `Ecto.NoPrimaryKeyFieldError` will be raised.
Returns `{:ok, struct}` if the struct has been successfully updated or `{:error, changeset}`
if there was a validation or a known constraint error.
## Requires a changeset method
This method depends on your model exporting a public changeset function. [See this for more
details](#module-export-changeset).
## Example
```
post = Post.get_by!(id: 3)
post = %{ post | title: "Updated post title"}
Post.update(post)
```
"""
@callback update(modified_struct_or_changeset :: Ecto.Schema.t | Ecto.Changeset.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
@doc """
Updates the database record using a Keyword list or a Map and a Struct for comparison.
This method accepts two arguments, the first being the struct that has to be updated, and
the second being a Map or a Keyword List of the new values.
Returns `{:ok, struct}` if the struct has been successfully updated or `{:error, changeset}`
if there was a validation or a known constraint error. Also see `c:Ecto.Repo.update/2`.
## Requires a changeset method
This method depends on your model exporting a public changeset function. [See this for more
details](#module-export-changeset).
## Example
```
post = Post.get_by!(id: 3)
Post.update(post, title: "New post title", author_id: new_author_id)
```
"""
@callback update(struct :: Ecto.Schema.t, params :: Map.t | Keyword.t) :: {:ok, Ecto.Schema.t} | {:error, Ecto.Changeset.t}
@doc """
Similar to `c:update/1` but returns the struct or raises if the changeset is invalid.
"""
@callback update!(modified_struct_or_changeset :: Ecto.Schema.t | Ecto.Changeset.t) :: Ecto.Schema.t | no_return
@doc """
Similar to `c:update/2` but returns the struct or raises if the changeset is invalid.
"""
@callback update!(struct :: Ecto.Schema.t, params :: Map.t | Keyword.t) :: Ecto.Schema.t | no_return
end

# lib/vivid/rgba.ex
defmodule Vivid.RGBA do
alias __MODULE__
import :math, only: [pow: 2]
defstruct ~w(red green blue alpha a_red a_green a_blue)a
@ascii_luminance_map {" ", ".", ":", "-", "=", "+", "*", "#", "%", "@"}
@ascii_luminance_map_length 10
@moduledoc """
Defines a colour in RGBA colour space.
The three colour channels and the alpha channels are stored as numeric
values between 0 and 1.
## Example
iex> use Vivid
...> RGBA.black()
#Vivid.RGBA<{0, 0, 0, 1}>
iex> RGBA.white()
#Vivid.RGBA<{1, 1, 1, 1}>
iex> RGBA.init(1, 0, 0, 0.5)
#Vivid.RGBA<{1, 0, 0, 0.5}>
"""
@type zero_to_one :: number
@opaque t :: %RGBA{
red: zero_to_one,
green: zero_to_one,
blue: zero_to_one,
alpha: zero_to_one,
a_red: zero_to_one,
a_green: zero_to_one,
a_blue: zero_to_one
}
# I would put this at the bottom, but it has to be defined *before* it's
# used in the guard.
defmacrop zero_to_one?(value) do
quote do: is_number(unquote(value)) and unquote(value) >= 0 and unquote(value) <= 1
end
@doc """
Create a colour. Like magic.
* `red` the red component of the colour.
* `green` the green component of the colour.
* `blue` the blue component of the colour.
All values are between zero and one.
When the alpha argument is omitted it is assumed to be `1` (i.e. completely
opaque).
## Example
iex> Vivid.RGBA.init(0.1, 0.2, 0.3, 0.4)
#Vivid.RGBA<{0.1, 0.2, 0.3, 0.4}>
"""
@spec init(zero_to_one, zero_to_one, zero_to_one) :: RGBA.t()
def init(red, green, blue), do: init(red, green, blue, 1)
@doc """
Create a colour. Like magic.
* `red` the red component of the colour.
* `green` the green component of the colour.
* `blue` the blue component of the colour.
* `alpha` the opacity of the colour.
All values are between zero and one.
## Example
iex> Vivid.RGBA.init(0.1, 0.2, 0.3, 0.4)
#Vivid.RGBA<{0.1, 0.2, 0.3, 0.4}>
"""
@spec init(zero_to_one, zero_to_one, zero_to_one, zero_to_one) :: RGBA.t()
def init(red, green, blue, 1)
when zero_to_one?(red) and zero_to_one?(green) and zero_to_one?(blue) do
%RGBA{
red: red,
green: green,
blue: blue,
alpha: 1,
a_red: red,
a_green: green,
a_blue: blue
}
end
def init(red, green, blue, 0)
when zero_to_one?(red) and zero_to_one?(green) and zero_to_one?(blue) do
%RGBA{
red: red,
green: green,
blue: blue,
alpha: 0,
a_red: 0,
a_green: 0,
a_blue: 0
}
end
def init(red, green, blue, alpha)
when zero_to_one?(red) and zero_to_one?(green) and zero_to_one?(blue) and
zero_to_one?(alpha) do
%RGBA{
red: red,
green: green,
blue: blue,
alpha: alpha,
a_red: red * alpha,
a_green: green * alpha,
a_blue: blue * alpha
}
end
@doc """
Shorthand for white.
## Example
iex> Vivid.RGBA.white
#Vivid.RGBA<{1, 1, 1, 1}>
"""
@spec white() :: RGBA.t()
def white, do: RGBA.init(1, 1, 1)
@doc """
Shorthand for black.
## Example
iex> Vivid.RGBA.black
#Vivid.RGBA<{0, 0, 0, 1}>
"""
@spec black() :: RGBA.t()
def black, do: RGBA.init(0, 0, 0)
@doc """
Return the red component of the colour.
## Example
iex> Vivid.RGBA.init(0.7, 0.6, 0.5, 0.4)
...> |> Vivid.RGBA.red
0.7
"""
@spec red(RGBA.t()) :: zero_to_one
def red(%RGBA{red: r}), do: r
@doc """
Return the green component of the colour.
## Example
iex> Vivid.RGBA.init(0.7, 0.6, 0.5, 0.4)
...> |> Vivid.RGBA.green
0.6
"""
@spec green(RGBA.t()) :: zero_to_one
def green(%RGBA{green: g}), do: g
@doc """
Return the blue component of the colour.
## Example
iex> Vivid.RGBA.init(0.7, 0.6, 0.5, 0.4)
...> |> Vivid.RGBA.blue
0.5
"""
@spec blue(RGBA.t()) :: zero_to_one
def blue(%RGBA{blue: b}), do: b
@doc """
Return the alpha component of the colour.
## Example
iex> Vivid.RGBA.init(0.7, 0.6, 0.5, 0.4)
...> |> Vivid.RGBA.alpha
0.4
"""
@spec alpha(RGBA.t()) :: zero_to_one
def alpha(%RGBA{alpha: a}), do: a
@doc """
Convert a colour to HTML style hex.
## Example
iex> Vivid.RGBA.init(0.7, 0.6, 0.5)
...> |> Vivid.RGBA.to_hex
"#B39980"
"""
@spec to_hex(RGBA.t()) :: String.t()
def to_hex(%RGBA{red: r, green: g, blue: b, alpha: 1}) do
r = r |> int_to_hex
g = g |> int_to_hex
b = b |> int_to_hex
"#" <> r <> g <> b
end
def to_hex(%RGBA{red: r, green: g, blue: b, alpha: a}) do
r = r |> int_to_hex
g = g |> int_to_hex
b = b |> int_to_hex
a = a |> int_to_hex
"#" <> r <> g <> b <> a
end
@doc """
Blend two colours together using their alpha information using the "over" algorithm.
## Examples
iex> Vivid.RGBA.over(Vivid.RGBA.black, Vivid.RGBA.init(1,1,1, 0.5))
#Vivid.RGBA<{0.5, 0.5, 0.5, 1.0}>
"""
@spec over(RGBA.t(), RGBA.t()) :: RGBA.t()
def over(nil, %RGBA{} = colour), do: colour
def over(%RGBA{}, %RGBA{alpha: 1} = visible), do: visible
def over(%RGBA{} = visible, %RGBA{alpha: 0}), do: visible
def over(%RGBA{a_red: r0, a_green: g0, a_blue: b0, alpha: a0}, %RGBA{
a_red: r1,
a_green: g1,
a_blue: b1,
alpha: a1
}) do
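# Porter-Duff "over" on the premultiplied (a_*) channels.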
a = a0 + a1 * (1 - a0)
[r, g, b] =
[{r0, r1}, {g0, g1}, {b0, b1}]
|> Enum.map(fn {c0, c1} -> c1 + c0 * (1 - a1) end)
RGBA.init(r, g, b, a)
end
@doc """
Return the luminance of a colour, using some colour mixing ratios I found
on stack exchange.
## Examples
iex> Vivid.RGBA.init(1,0,0) |> Vivid.RGBA.luminance
0.2128
iex> Vivid.RGBA.white |> Vivid.RGBA.luminance
1.0
iex> Vivid.RGBA.black |> Vivid.RGBA.luminance
0.0
"""
@spec luminance(RGBA.t()) :: zero_to_one
def luminance(%RGBA{a_red: r, a_green: g, a_blue: b}) do
[rl, gl, bl] = [r, g, b] |> Enum.map(&pow(&1, 2.2))
0.2128 * rl + 0.7150 * gl + 0.0722 * bl
end
@doc """
Convert a colour to an ASCII character.
This isn't very scientific, but helps with debugging and is used in the
implementations of `String.Chars` for Vivid types.
The characters used (from black to white) are `" .:-=+*#%@"`. These are
chosen based on the `luminance/1` value of the colour.
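## Examples

    iex> Vivid.RGBA.white |> Vivid.RGBA.to_ascii
    "@"

    iex> Vivid.RGBA.black |> Vivid.RGBA.to_ascii
    " "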
"""
@spec to_ascii(RGBA.t()) :: String.t()
def to_ascii(%RGBA{} = colour) do
l = luminance(colour)
c = round(l * (@ascii_luminance_map_length - 1))
elem(@ascii_luminance_map, c)
end
defp int_to_hex(n) when zero_to_one?(n) do
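# Zero-pad single-digit values so every channel renders as two hex characters.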
h = Integer.to_string(round(n * 0xFF), 16)
case h |> String.length() do
1 -> "0" <> h
2 -> h
end
end
end

# lib/fxnk/flow.ex
defmodule Fxnk.Flow do
@moduledoc """
`Fxnk.Flow` functions are used for control flow.
"""
import Fxnk.Functions, only: [curry: 1]
import Fxnk.List, only: [reduce_right: 3]
@doc """
`and_then/2` allows you to chain together `{:ok, _}` functions. Stops processing on the first `{:error, _}` and returns the error.
## Examples
iex> map = %{foo: "foo", bar: "bar", baz: "baz"}
iex> uppercase_okay = fn str -> {:ok, String.upcase(str) } end
iex> reverse_okay = fn str -> {:ok, String.reverse(str)} end
iex> map |> Map.fetch(:foo) |> Fxnk.Flow.and_then(uppercase_okay) |> Fxnk.Flow.and_then(reverse_okay)
{:ok, "OOF"}
iex> throw_error = fn _ -> {:error, :input_should_not_be_foo} end
iex> map |> Map.fetch(:foo) |> Fxnk.Flow.and_then(throw_error) |> Fxnk.Flow.and_then(reverse_okay)
{:error, :input_should_not_be_foo}
"""
@spec and_then({:error, any} | {:ok, any}, function()) :: any
def and_then({:ok, arg}, func), do: func.(arg)
def and_then({:error, error}, _), do: {:error, error}
@doc """
Curried `compose/2`.
## Examples
iex> reverseSort = Fxnk.Flow.compose([&Enum.reverse/1, &Enum.sort/1])
iex> reverseSort.([1,3,5,7,6,4,2])
[7, 6, 5, 4, 3, 2, 1]
"""
@spec compose([function(), ...]) :: (any() -> any())
def compose(fns) when is_list(fns) do
curry(fn arg -> compose(arg, fns) end)
end
@doc """
`compose/2` takes an input and a list of functions and runs the functions against the input
from right to left.
## Examples
iex> [1,3,5,7,6,4,2] |> Fxnk.Flow.compose([&Enum.reverse/1, &Enum.sort/1])
[7, 6, 5, 4, 3, 2, 1]
"""
@spec compose(any, [function(), ...]) :: any()
def compose(arg, fns) when is_list(fns) do
reduce_right(fns, arg, fn f, acc -> f.(acc) end)
end
@doc """
Curried `if_else/3`
## Examples
iex> multTwoIfLessThanTenOrDivideByTwo = Fxnk.Flow.if_else(fn x -> x < 10 end, fn x -> x * 2 end, fn x -> div(x, 2) end)
iex> multTwoIfLessThanTenOrDivideByTwo.(5)
10
iex> multTwoIfLessThanTenOrDivideByTwo.(20)
10
"""
@spec if_else(function(), function(), function()) :: (any() -> any())
def if_else(pred, passFunc, failFunc) do
curry(fn input -> if_else(input, pred, passFunc, failFunc) end)
end
@doc """
`if_else/4` takes an input, a predicate, a pass function and a fail function.
Runs the pass function if the predicate returns true when passed the input, otherwise runs the fail function.
## Examples
iex> Fxnk.Flow.if_else(5, fn x -> x < 10 end, fn x -> x * 2 end, fn x -> div(x, 2) end)
10
iex> Fxnk.Flow.if_else(20, fn x -> x < 10 end, fn x -> x * 2 end, fn x -> div(x, 2) end)
10
"""
@spec if_else(any, function(), function(), function()) :: (any() -> any())
def if_else(input, pred, passFunc, failFunc) do
case pred.(input) do
true -> passFunc.(input)
_ -> failFunc.(input)
end
end
@doc """
Curried `pipe/2`.
## Examples
iex> reverseSort = Fxnk.Flow.pipe([&Enum.sort/1, &Enum.reverse/1])
iex> reverseSort.([1,3,5,7,6,4,2])
[7, 6, 5, 4, 3, 2, 1]
"""
@spec pipe([function(), ...]) :: (any() -> any())
def pipe(fns) when is_list(fns) do
curry(fn arg -> pipe(arg, fns) end)
end
@doc """
`pipe/2` takes an input and a list of functions and runs the functions against the input
from left to right.
## Examples
iex> [1,3,5,7,6,4,2] |> Fxnk.Flow.pipe([&Enum.sort/1, &Enum.reverse/1])
[7, 6, 5, 4, 3, 2, 1]
"""
@spec pipe(any, [function(), ...]) :: any()
def pipe(arg, fns) when is_list(fns) do
Enum.reduce(fns, arg, fn f, acc -> f.(acc) end)
end
@doc """
Handle errors gracefully. When an error is encountered, apply a function to that error. When it's not an error, do nothing.
## Examples
iex> make_error = fn message -> {:error, message} end
iex> handle_error = fn message -> Atom.to_string(message) end
iex> make_error.(:foo) |> Fxnk.Flow.on_error(handle_error)
"foo"
iex> make_success = fn message -> {:ok, message} end
iex> make_success.(:bar) |> Fxnk.Flow.on_error(handle_error)
{:ok, :bar}
"""
@spec on_error({:error, any()} | any(), function()) :: any()
def on_error({:error, x}, func), do: func.(x)
def on_error(x, _), do: x
@doc """
Curried `unless_is/3`.
## Examples
iex> multiplyByTwoUnlessGreaterThan10 = Fxnk.Flow.unless_is(fn n -> n > 10 end, fn x -> x * 2 end)
iex> multiplyByTwoUnlessGreaterThan10.(15)
15
iex> multiplyByTwoUnlessGreaterThan10.(2)
4
"""
@spec unless_is(function(), function()) :: (any() -> any())
def unless_is(pred, func) do
curry(fn input -> unless_is(input, pred, func) end)
end
@doc """
`unless_is` is a logic flow function, which takes an input, a predicate function, and an action function,
allowing the action function to run unless the input returns true when run against the predicate.
## Example
iex> Fxnk.Flow.unless_is(15, fn n -> n > 10 end, fn x -> x * 2 end)
15
iex> Fxnk.Flow.unless_is(2, fn n -> n > 10 end, fn x -> x * 2 end)
4
"""
@spec unless_is(any, function(), function()) :: any()
def unless_is(input, pred, func) do
case pred.(input) do
true -> input
_ -> func.(input)
end
end
@doc """
Curried `until/3`.
## Examples
iex> timesTwoUntilGreaterThan100 = Fxnk.Flow.until(fn x -> x > 100 end, fn n -> n * 2 end)
iex> timesTwoUntilGreaterThan100.(1)
128
"""
@spec until(function(), function()) :: (any() -> any())
def until(pred, func) do
curry(fn init -> until(init, pred, func) end)
end
@doc """
`until/3` takes an input, a predicate function and an action function,
running the action function on the input until the predicate is satisfied.
## Examples
iex> Fxnk.Flow.until(1, fn x -> x > 100 end, fn n -> n * 2 end)
128
"""
@spec until(any, function(), function()) :: any()
def until(init, pred, func) do
case pred.(init) do
false -> until(func.(init), pred, func)
_ -> init
end
end
@doc """
Curried `when_is/3`.
## Examples
iex> timesTwoWhenGreaterThan10 = Fxnk.Flow.when_is(fn x -> x > 10 end, fn n -> n * 2 end)
iex> timesTwoWhenGreaterThan10.(15)
30
iex> timesTwoWhenGreaterThan10.(5)
5
"""
@spec when_is(function(), function()) :: (any() -> any())
def when_is(pred, func) do
curry(fn input -> when_is(input, pred, func) end)
end
@doc """
`when_is` is a logic flow function, which takes an input, a predicate function, and an action function,
allowing the action function to run when the input returns true when run against the predicate.
## Examples
iex> Fxnk.Flow.when_is(15, fn x -> x > 10 end, fn n -> n * 2 end)
30
iex> Fxnk.Flow.when_is(5, fn x -> x > 10 end, fn n -> n * 2 end)
5
"""
@spec when_is(any, function(), function()) :: any()
def when_is(input, pred, func) do
case pred.(input) do
true -> func.(input)
_ -> input
end
end
end

# lib/bamboo/mailer.ex
defmodule Bamboo.Mailer do
@moduledoc """
Functions for delivering emails using adapters and delivery strategies.
Adds `deliver_now/1`, `deliver_now!/1`, `deliver_later/1` and
`deliver_later!/1` functions to the mailer module in which it is used.
Bamboo [ships with several adapters][available-adapters]. It is also possible
to create your own adapter.
See the ["Getting Started" section of the README][getting-started] for an
example of how to set up and configure a mailer for use.
[available-adapters]: https://github.com/thoughtbot/bamboo/tree/master/lib/bamboo/adapters
[getting-started]: https://hexdocs.pm/bamboo/readme.html#getting-started
## Example
Creating a Mailer is as simple as defining a module in your application and
using `Bamboo.Mailer`.
# some/path/within/your/app/mailer.ex
defmodule MyApp.Mailer do
use Bamboo.Mailer, otp_app: :my_app
end
The mailer requires some configuration within your application.
# config/config.exs
config :my_app, MyApp.Mailer,
adapter: Bamboo.MandrillAdapter, # Specify your preferred adapter
api_key: "my_api_key" # Specify adapter-specific configuration
You will also want to define an email module for building email structs that
your mailer can send. See `Bamboo.Email` for more information.
# some/path/within/your/app/email.ex
defmodule MyApp.Email do
import Bamboo.Email
def welcome_email do
new_email(
to: "<EMAIL>",
from: "<EMAIL>",
subject: "Welcome to the app.",
html_body: "<strong>Thanks for joining!</strong>",
text_body: "Thanks for joining!"
)
end
end
You are now able to send emails with your mailer module where you see fit
within your application.
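As a sketch, delivery with the modules defined above looks like:

    MyApp.Email.welcome_email()
    |> MyApp.Mailer.deliver_later()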
"""
@cannot_call_directly_error """
cannot call Bamboo.Mailer directly. Instead implement your own Mailer module
with: use Bamboo.Mailer, otp_app: :my_app
"""
require Logger
alias Bamboo.Formatter
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
@spec deliver_now(Bamboo.Email.t(), Enum.t()) ::
{:ok, Bamboo.Email.t()}
| {:ok, Bamboo.Email.t(), any}
| {:error, Exception.t() | String.t()}
def deliver_now(email, opts \\ []) do
{config, opts} = Keyword.split(opts, [:config])
config = build_config(config)
Bamboo.Mailer.deliver_now(config.adapter, email, config, opts)
end
@spec deliver_now!(Bamboo.Email.t(), Enum.t()) :: Bamboo.Email.t() | {Bamboo.Email.t(), any}
def deliver_now!(email, opts \\ []) do
{config, opts} = Keyword.split(opts, [:config])
config = build_config(config)
Bamboo.Mailer.deliver_now!(config.adapter, email, config, opts)
end
@spec deliver_later(Bamboo.Email.t()) ::
{:ok, Bamboo.Email.t()} | {:error, Exception.t() | String.t()}
def deliver_later(email, opts \\ []) do
config = build_config(opts)
Bamboo.Mailer.deliver_later(config.adapter, email, config)
end
@spec deliver_later!(Bamboo.Email.t()) :: Bamboo.Email.t()
def deliver_later!(email, opts \\ []) do
config = build_config(opts)
Bamboo.Mailer.deliver_later!(config.adapter, email, config)
end
otp_app = Keyword.fetch!(opts, :otp_app)
defp build_config(config: dynamic_config_overrides) do
Bamboo.Mailer.build_config(
__MODULE__,
unquote(otp_app),
dynamic_config_overrides
)
end
defp build_config(_) do
Bamboo.Mailer.build_config(__MODULE__, unquote(otp_app))
end
@spec deliver(any()) :: no_return()
def deliver(_email) do
raise """
you called deliver/1, but it has been renamed to deliver_now/1 to add clarity.
Use deliver_now/1 to send right away, or deliver_later/1 to send in the background.
"""
end
end
end
@doc """
Deliver an email right away.
Call your mailer with `deliver_now/1` to send an email right away. Call
`deliver_later/1` if you want to send in the background.
Pass in an argument of `response: true` if you need access to the response
from delivering the email.
A successful email delivery returns an ok tuple with the `Email` struct and
the response (if `response: true`) from calling `deliver` with your adapter.
A failure returns an `{:error, error}` tuple.
Having the response returned from your adapter is useful if you need access to
any data sent back from your email provider in the response.
{:ok, email, response} = Email.welcome_email |> Mailer.deliver_now(response: true)
Pass in an argument of `config: %{}` if you would like to dynamically override
any keys in your application's default Mailer configuration.
Email.welcome_email
|> Mailer.deliver_now(config: %{username: "Emma", smtp_port: 2525})
"""
def deliver_now(_email, _opts \\ []) do
raise @cannot_call_directly_error
end
@doc """
Deliver an email right away.
Same as `deliver_now/2` but does not return an ok/error tuple.
If successful, this function returns the `Email` struct, or an `{Email, response}`
tuple when setting `response: true`.
On failure, this function raises the error.
"""
def deliver_now!(_email, _opts \\ []) do
raise @cannot_call_directly_error
end
@doc """
Deliver an email in the background.
Call your mailer with `deliver_later/1` to send an email using the configured
`deliver_later_strategy`. If no `deliver_later_strategy` is set,
`Bamboo.TaskSupervisorStrategy` will be used. See
`Bamboo.DeliverLaterStrategy` to learn how to change how emails are delivered
with `deliver_later/1`.
If the email is successfully scheduled for delivery, this function will return
an `{:ok, email}`.
If the email is invalid, this function will return an `{:error, error}` tuple.
"""
def deliver_later(_email, _opts \\ []) do
raise @cannot_call_directly_error
end
@doc """
Deliver an email in the background.
Same as `deliver_later/2` but does not return an ok tuple and raises on
errors.
If successful, this function only returns the `Email` struct.
If the email is invalid, this function raises an error.
"""
def deliver_later!(_email, _opts \\ []) do
raise @cannot_call_directly_error
end
@doc false
def deliver_now(adapter, email, config, opts) do
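# Validate and normalize the email, then run any configured interceptors.
# Blocked emails and validation errors short-circuit through the else clauses.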
with {:ok, email} <- validate_and_normalize(email, adapter),
%Bamboo.Email{blocked: false} = email <- apply_interceptors(email, config) do
if empty_recipients?(email) do
debug_unsent(email)
{:ok, email}
else
debug_sent(email, adapter)
case adapter.deliver(email, config) do
{:ok, response} -> format_response(email, response, opts)
{:error, _} = error -> error
end
end
else
%Bamboo.Email{blocked: true} = email -> {:ok, email}
response -> response
end
end
defp format_response(email, response, opts) do
put_response = Keyword.get(opts, :response, false)
if put_response do
{:ok, email, response}
else
{:ok, email}
end
end
@doc false
def deliver_now!(adapter, email, config, opts) do
case deliver_now(adapter, email, config, opts) do
{:ok, email, response} -> {email, response}
{:ok, email} -> email
{:error, error} -> raise error
end
end
@doc false
def deliver_later(adapter, email, config) do
with {:ok, email} <- validate_and_normalize(email, adapter),
%Bamboo.Email{blocked: false} = email <- apply_interceptors(email, config) do
if empty_recipients?(email) do
debug_unsent(email)
else
debug_sent(email, adapter)
config.deliver_later_strategy.deliver_later(adapter, email, config)
end
{:ok, email}
else
%Bamboo.Email{blocked: true} = email -> {:ok, email}
response -> response
end
end
@doc false
def deliver_later!(adapter, email, config) do
case deliver_later(adapter, email, config) do
{:ok, email} -> email
{:error, error} -> raise error
end
end
defp empty_recipients?(email) do
email.to == [] && email.cc == [] && email.bcc == []
end
defp debug_sent(email, adapter) do
Logger.debug(fn ->
"""
Sending email with #{inspect(adapter)}:
#{inspect(email, limit: 150)}
"""
end)
end
defp debug_unsent(email) do
Logger.debug(fn ->
"""
Email was not sent because recipients are empty.
Full email - #{inspect(email, limit: 150)}
"""
end)
end
defp validate_and_normalize(email, adapter) do
case validate(email, adapter) do
:ok -> {:ok, normalize_addresses(email)}
error -> error
end
end
defp validate(email, adapter) do
with :ok <- validate_from_address(email),
:ok <- validate_recipients(email),
:ok <- validate_attachment_support(email, adapter) do
:ok
end
end
defp validate_attachment_support(%{attachments: []} = _email, _adapter), do: :ok
defp validate_attachment_support(_email, adapter) do
if Code.ensure_loaded?(adapter) && function_exported?(adapter, :supports_attachments?, 0) &&
adapter.supports_attachments? do
:ok
else
{:error, "the #{adapter} does not support attachments yet."}
end
end
defp validate_from_address(%{from: nil}) do
{:error, %Bamboo.EmptyFromAddressError{}}
end
defp validate_from_address(%{from: {_, nil}}) do
{:error, %Bamboo.EmptyFromAddressError{}}
end
defp validate_from_address(_email), do: :ok
defp validate_recipients(%Bamboo.Email{} = email) do
if Enum.all?(
Enum.map([:to, :cc, :bcc], &Map.get(email, &1)),
&is_nil_recipient?/1
) do
{:error, Bamboo.NilRecipientsError.exception(email)}
else
:ok
end
end
defp is_nil_recipient?(nil), do: true
defp is_nil_recipient?({_, nil}), do: true
defp is_nil_recipient?([]), do: false
defp is_nil_recipient?([_ | _] = recipients) do
Enum.all?(recipients, &is_nil_recipient?/1)
end
defp is_nil_recipient?(_), do: false
defp apply_interceptors(email, config) do
interceptors = config[:interceptors] || []
Enum.reduce(interceptors, email, fn interceptor, email ->
apply(interceptor, :call, [email])
end)
end
@doc """
Wraps to, cc and bcc addresses in a list and normalizes email addresses.
Also formats the from address. Email normalization/formatting is done by
implementations of the `Bamboo.Formatter` protocol.
"""
def normalize_addresses(email) do
%{
email
| from: format(email.from, :from),
to: format(List.wrap(email.to), :to),
cc: format(List.wrap(email.cc), :cc),
bcc: format(List.wrap(email.bcc), :bcc)
}
end
defp format(record, type) do
Formatter.format_email_address(record, %{type: type})
end
def build_config(mailer, otp_app, optional_overrides \\ %{}) do
otp_app
|> Application.fetch_env!(mailer)
|> Map.new()
|> Map.merge(optional_overrides)
|> handle_adapter_config
end
defp handle_adapter_config(base_config = %{adapter: adapter}) do
adapter.handle_config(base_config)
|> Map.put_new(:deliver_later_strategy, Bamboo.TaskSupervisorStrategy)
end
end

# lib/tortoise/events.ex
defmodule Tortoise.Events do
@moduledoc """
A PubSub exposing various system events from a Tortoise
connection. This allows the user to integrate with custom metrics
and logging solutions.
Please read the documentation for `Tortoise.Events.register/2` for
information on how to subscribe to events, and
`Tortoise.Events.unregister/2` for how to unsubscribe.
"""
@types [:connection, :status, :ping_response]
@doc """
Subscribe to messages on the client with the client id `client_id`
of the type `type`.
When a message of the subscribed type is dispatched it will end up
in the mailbox of the process that placed the subscription. The
received message will have the format:
{{Tortoise, client_id}, type, value}
This makes it possible to pattern match on multiple message types from
multiple clients. The value depends on the message type.
Possible message types are:
- `:status` dispatched when the connection of a client changes
status. The value will be `:up` when the client goes online, and
`:down` when it goes offline.
- `:ping_response` dispatched when the connection receives a
response to a keep alive message. The value is the round trip
time in milliseconds, and can be used to track the latency over
time.
Other message types exist, but unless they are mentioned in the
possible message types above they should be considered for internal
use only.
It is possible to listen on all events for a given type by
specifying `:_` as the `client_id`.
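For example, a process could subscribe to connection status changes
and await them like this (the client id is hypothetical):

    {:ok, _pid} = Tortoise.Events.register("my_client_id", :status)

    receive do
      {{Tortoise, "my_client_id"}, :status, status} ->
        status # either :up or :down
    end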
"""
@spec register(Tortoise.client_id(), atom()) :: {:ok, pid()} | no_return()
def register(client_id, type) when type in @types do
{:ok, _pid} = Registry.register(__MODULE__, type, client_id)
end
@doc """
Unsubscribe from messages of `type` from `client_id`. This is the
reverse of `Tortoise.Events.register/2`.
"""
@spec unregister(Tortoise.client_id(), atom()) :: :ok | no_return()
def unregister(client_id, type) when type in @types do
:ok = Registry.unregister_match(__MODULE__, type, client_id)
end
@doc false
@spec dispatch(Tortoise.client_id(), type :: atom(), value :: term()) :: :ok
def dispatch(client_id, type, value) when type in @types do
:ok =
Registry.dispatch(__MODULE__, type, fn subscribers ->
for {pid, filter} <- subscribers,
filter == client_id or filter == :_ do
Kernel.send(pid, {{Tortoise, client_id}, type, value})
end
end)
end
end

# lib/mix/tasks/db.cleanup.ex
defmodule Mix.Tasks.Db.Cleanup do
use Mix.Task
import Mix.Ecto
@shortdoc "Performs database cleanup by deleting older rows in a specified table"
@moduledoc """
The db.cleanup task deletes old rows from a table.
By default, it deletes rows from the `events` table that are older than 7 days.
## Examples
mix db.cleanup
mix db.cleanup -t devices -n 30
## Command Line Options
* `--repo` / `-r` - the repo to use (defaults to current app's repo)
* `--table` / `-t` - the table to clean up
* `--num` / `-n` - day count; rows older than (current date - n days) will be deleted
* `--count` / `-c` - number of first rows to delete (overrides day-based deletion)
* `--force` / `-f` - required to run in the prod environment
"""
@doc false
def run(args) do
repos = parse_repo(args)
{opts, _, _} =
OptionParser.parse(
args,
switches: [table: :string, num: :integer, force: :boolean, count: :integer],
aliases: [t: :table, n: :num, f: :force, c: :count]
)
if Mix.env() == :prod do
case opts[:force] do
true ->
nil
_ ->
Mix.shell().info("You need to specify --force / -f for prod environment")
System.halt(1)
end
end
table = opts[:table] || "events"
n =
case opts[:num] |> is_nil do
true -> 7
false -> opts[:num]
end
Enum.each(repos, fn repo ->
ensure_repo(repo, args)
{:ok, _, _} = Ecto.Migrator.with_repo(
repo,
fn repo ->
count = opts[:count]
query =
case count |> is_nil do
true ->
Mix.shell().info(
"Deleting rows from #{inspect(table)} older than #{inspect(n)} days..."
)
by_date_query(table, n)
_ ->
Mix.shell().info("Deleting first #{inspect(count)} data from #{inspect(table)}...")
n_data_query(table, count)
end
delete_data(repo, query)
end
)
end)
end
defp by_date_query(table, n) do
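# Convert the day count to seconds (86_400 seconds per day) and subtract
# it from now to get the cutoff timestamp.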
n = n * 86_400
n_date =
DateTime.utc_now()
|> DateTime.to_unix(:second)
|> Kernel.-(n)
|> DateTime.from_unix!()
"DELETE FROM #{table} WHERE inserted_at < '#{n_date}'"
end
defp n_data_query(table, n) do
"DELETE FROM #{table} where id in (select id from #{table} order by inserted_at limit #{n})"
end
defp delete_data(repo, query) do
query = Ecto.Adapters.SQL.query(repo, query, [])
case query do
{:ok, result} -> Mix.shell().info("Deleted #{result.num_rows} rows")
_ -> Mix.shell().info("An error occurred when trying to delete data")
end
end
end
defmodule Axon.Layers do
@moduledoc ~S"""
Functional implementations of common neural network layer
operations.
Layers are the building blocks of neural networks. These
functional implementations can be used to express higher-level
constructs using fundamental building blocks. Neural network
layers are stateful with respect to their parameters.
These implementations do not assume the responsibility of
managing state - instead opting to delegate this responsibility
to the caller.
Basic neural networks can be seen as a composition of functions:
input
|> dense(w1, b1)
|> relu()
|> dense(w2, b2)
|> softmax()
These kinds of models are often referred to as deep feedforward networks
or multilayer perceptrons (MLPs) because information flows forward
through the network with no feedback connections. Mathematically,
a feedforward network can be represented as:
$$f(x) = f^{(3)}(f^{(2)}(f^{(1)}(x)))$$
You can see a similar pattern emerge if we condense the call stack
in the previous example:
softmax(dense(relu(dense(input, w1, b1)), w2, b2))
The chain structure shown here is the most common structure used
in neural networks. You can consider each function $f^{(n)}$ as a
*layer* in the neural network - for example $f^{(2)}$ is the 2nd
layer in the network. The number of function calls in the
structure is the *depth* of the network. This is where the term
*deep learning* comes from.
Neural networks are often written as the mapping:
$$y = f(x; \theta)$$
Where $x$ is the input to the neural network and $\theta$ are the
set of learned parameters. In Elixir, you would write this:
y = model(input, params)
From the previous example, `params` would represent the collection:
{w1, b1, w2, b2}
where `w1` and `w2` are layer *kernels*, and `b1` and `b2` are layer
*biases*.
"""
import Nx.Defn
import Axon.Shared
## Linear
@doc ~S"""
Functional implementation of a dense layer.
Linear transformation of the input such that:
$$y = xW^T + b$$
A dense layer or fully connected layer transforms
the input using the given kernel matrix and bias
to compute:
Nx.dot(input, kernel) + bias
Typically, both `kernel` and `bias` are learnable
parameters trained using gradient-based optimization.
## Parameter Shapes
* `input` - `{batch_size, *, input_features}`
* `kernel` - `{input_features, output_features}`
* `bias` - `{}` or `{output_features}`
## Output Shape
`{batch_size, *, output_features}`
## Examples
iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})
iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})
iex> bias = Nx.tensor([1.0], type: {:f, 32})
iex> Axon.Layers.dense(input, kernel, bias)
#Nx.Tensor<
f32[2][1]
[
[2.25],
[1.0]
]
>
"""
@doc type: :linear
defn dense(input, kernel, bias, _opts \\ []) do
assert_min_rank!("Axon.Layers.dense", "input", input, 2)
input
|> Nx.dot([Nx.rank(input) - 1], kernel, [0])
|> Nx.add(bias)
end
@doc ~S"""
Functional implementation of a bilinear layer.
Bilinear transformation of the input such that:
$$y = x_1^{T}Ax_2 + b$$
## Parameter Shapes
* `input1` - `{batch_size, ..., input1_features}`
* `input2` - `{batch_size, ..., input2_features}`
* `kernel` - `{out_features, input1_features, input2_features}`
## Output Shape
`{batch_size, ..., output_features}`
## Examples
iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})
iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})
iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})
iex> bias = Nx.tensor(1.0)
iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)
#Nx.Tensor<
f32[3][1]
[
[39.0],
[455.0],
[1319.0]
]
>
"""
@doc type: :linear
defn bilinear(input1, input2, kernel, bias, _opts \\ []) do
assert_min_rank!("Axon.Layers.bilinear", "input1", input1, 2)
assert_min_rank!("Axon.Layers.bilinear", "input2", input2, 2)
assert_equal_rank!("Axon.Layers.bilinear", "input1", input1, "input2", input2)
assert_rank!("Axon.Layers.bilinear", "kernel", kernel, 3)
inp1_axes = transform(Nx.rank(input1), fn rank -> [rank - 1] end)
inp2_axes = transform(Nx.rank(input2), fn rank -> [rank - 1] end)
input1
|> Nx.dot(inp1_axes, [], kernel, [1], [])
|> Nx.dot([2], [0], input2, inp2_axes, [0])
|> Nx.add(bias)
end
## Convolutional
@doc """
Functional implementation of a general dimensional convolutional
layer.
Convolutional layers can be described as applying a convolution
over an input signal composed of several input planes. Intuitively,
the input kernel slides `output_channels` number of filters over
the input tensor to extract features from the input tensor.
Convolutional layers are most commonly used in computer vision,
but can also be useful when working with sequences and other input signals.
## Parameter Shapes
* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`
* `kernel` - `{output_channels, input_channels, kernel_spatial0, ..., kernel_spatialN}`
* `bias` - `{}` or `{output_channels}`
## Options
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to 1.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:input_dilation` - input dilation factor. Equivalent
to applying interior padding on the input. The amount
of interior padding applied is given by `input_dilation - 1`.
Defaults to `1` or no dilation.
* `:kernel_dilation` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `kernel_dilation - 1`.
Defaults to `1` or no dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## Examples
### One-dimensional convolution
iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[ 0.9182, 1.1512, -1.6149]]], type: {:f, 32})
iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488, 0.8879]]], type: {:f, 32})
iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias)
#Nx.Tensor<
f32[2][3][2]
[
[
[-0.24591797590255737, 3.08001708984375],
[-0.1704912781715393, 0.6029025316238403],
[0.9496372938156128, 2.80519962310791]
],
[
[0.7885514497756958, -3.0088953971862793],
[0.9677201509475708, -0.4984228312969208],
[2.207162380218506, -0.3534282445907593]
]
]
>
### Two-dimensional convolution
iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})
iex> kernel = Nx.tensor([
...> [[[0.7514, 0.7356], [1.3909, 0.6800]]],
...> [[[-0.3450, 0.4551], [-0.6275, -0.9875]]],
...> [[[1.8587, 0.4722], [0.6058, -1.0301]]]
...> ], type: {:f, 32})
iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias)
#Nx.Tensor<
f32[1][3][1][1]
[
[
[
[0.5815491676330566]
],
[
[-0.5707762241363525]
],
[
[-4.927865028381348]
]
]
]
>
### Three-dimensional convolution
iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})
iex> kernel = Nx.tensor([
...> [[[[ 0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],
...> [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]
...> ], type: {:f, 32})
iex> bias = Nx.tensor([-0.4101, 0.1776], type: {:f, 32})
iex> Axon.Layers.conv(input, kernel, bias)
#Nx.Tensor<
f32[1][2][1][1][1]
[
[
[
[
[0.49906185269355774]
]
],
[
[
[0.38622811436653137]
]
]
]
]
>
"""
@doc type: :convolutional
defn conv(input, kernel, bias, opts \\ []) do
assert_min_rank!("Axon.Layers.conv", "input", input, 3)
assert_equal_rank!("Axon.Layers.conv", "input", input, "kernel", kernel)
opts =
keyword!(opts,
strides: 1,
padding: :valid,
input_dilation: 1,
kernel_dilation: 1,
feature_group_size: 1,
batch_group_size: 1,
channels: :first,
mode: :inference
)
bias_reshape =
transform({Nx.shape(bias), Nx.rank(input) - 2, opts[:channels]}, fn {bias_shape, rank,
channels} ->
Axon.Shape.conv_bias_reshape(bias_shape, rank, channels)
end)
permutations =
transform({Nx.rank(input), opts[:channels]}, fn
{rank, :first} ->
Enum.to_list(0..(rank - 1))
{rank, :last} ->
spatial = Enum.to_list(1..(rank - 2)//1)
[0, rank - 1 | spatial]
{_rank, invalid} ->
raise ArgumentError, "invalid channel configuration, #{inspect(invalid)}"
end)
input
|> Nx.conv(kernel,
strides: opts[:strides],
padding: opts[:padding],
input_dilation: opts[:input_dilation],
kernel_dilation: opts[:kernel_dilation],
feature_group_size: opts[:feature_group_size],
batch_group_size: opts[:batch_group_size],
input_permutation: permutations,
output_permutation: permutations
)
|> Nx.add(Nx.reshape(bias, bias_reshape))
end
@doc """
Functional implementation of a general dimensional transposed
convolutional layer.
*Note: This layer is currently implemented as a fractionally strided
convolution by padding the input tensor. Please open an issue if you'd
like this behavior changed.*
Transposed convolutions are sometimes (incorrectly) referred to as
deconvolutions because they "reverse" the spatial dimensions
of a normal convolution. Transposed convolutions are a form of upsampling -
they produce larger spatial dimensions than the input tensor. They
can be thought of as a convolution in reverse - and are sometimes
implemented as the backward pass of a normal convolution.
## Options
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to 1.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:kernel_dilation` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `kernel_dilation - 1`.
Defaults to `1` or no dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## Examples
iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})
iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})
iex> bias = Nx.tensor(1.0, type: {:f, 32})
iex> Axon.Layers.conv_transpose(input, kernel, bias)
#Nx.Tensor<
f32[1][6][4]
[
[
[40.0, 79.0, 94.0, 43.0],
[94.0, 205.0, 256.0, 133.0],
[148.0, 331.0, 418.0, 223.0],
[202.0, 457.0, 580.0, 313.0],
[256.0, 583.0, 742.0, 403.0],
[310.0, 709.0, 904.0, 493.0]
]
]
>
## References
* [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)
* [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
@doc type: :convolutional
defn conv_transpose(input, kernel, bias, opts \\ []) do
assert_min_rank!("Axon.Layers.conv_transpose", "input", input, 3)
assert_equal_rank!("Axon.Layers.conv_transpose", "input", input, "kernel", kernel)
opts =
keyword!(opts,
strides: 1,
padding: :valid,
kernel_dilation: 1,
channels: :first,
mode: :inference
)
strides =
transform(
{Nx.rank(input), opts[:strides]},
fn
{_, [_ | _] = strides} -> strides
{rank, strides} -> List.duplicate(strides, rank - 2)
end
)
padding =
transform(
{Nx.shape(kernel), opts[:kernel_dilation], strides, opts[:padding]},
fn {shape, k_dilation, strides, padding} ->
Axon.Shape.conv_transpose_padding(shape, k_dilation, strides, padding)
end
)
ones = transform(Nx.rank(input), &List.duplicate(1, &1 - 2))
conv(input, kernel, bias,
strides: ones,
padding: padding,
input_dilation: strides,
kernel_dilation: opts[:kernel_dilation],
channels: opts[:channels]
)
end
@doc """
Functional implementation of a general dimensional depthwise
convolution.
Depthwise convolutions apply a single convolutional filter to
each input channel. This is done by setting `feature_group_size`
equal to the number of input channels. This will split the
`output_channels` into `input_channels` number of groups and
convolve the grouped kernel channels over the corresponding input
channel.
## Parameter Shapes
* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`
* `kernel` - `{output_channels, 1, kernel_spatial0, ..., kernel_spatialN}`
* `bias` - `{output_channels}` or `{}`
`output_channels` must be a multiple of the input channels.
## Options
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to 1.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:input_dilation` - input dilation factor. Equivalent
to applying interior padding on the input. The amount
of interior padding applied is given by `input_dilation - 1`.
Defaults to `1` or no dilation.
* `:kernel_dilation` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `kernel_dilation - 1`.
Defaults to `1` or no dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
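## Examples

A shape-level sketch (the `Nx.iota/2` inputs are for illustration only):

    input = Nx.iota({1, 3, 4}, type: {:f, 32})   # {batch, channels, spatial}
    kernel = Nx.iota({3, 1, 2}, type: {:f, 32})  # one filter per input channel
    bias = Nx.tensor(0.0)
    Axon.Layers.depthwise_conv(input, kernel, bias)
    #=> a tensor of shape {1, 3, 3}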
"""
@doc type: :convolutional
defn depthwise_conv(input, kernel, bias, opts \\ []) do
assert_min_rank!("Axon.Layers.depthwise_conv", "input", input, 3)
assert_equal_rank!("Axon.Layers.depthwise_conv", "input", input, "kernel", kernel)
opts =
keyword!(opts,
strides: 1,
padding: :valid,
input_dilation: 1,
kernel_dilation: 1,
channels: :first,
mode: :inference
)
num_groups =
transform({Nx.shape(input), opts[:channels]}, fn
{shape, :first} ->
elem(shape, 1)
{shape, :last} ->
elem(shape, tuple_size(shape) - 1)
end)
conv(input, kernel, bias,
strides: opts[:strides],
padding: opts[:padding],
input_dilation: opts[:input_dilation],
kernel_dilation: opts[:kernel_dilation],
feature_group_size: num_groups,
channels: opts[:channels]
)
end
@doc """
Functional implementation of a 2-dimensional separable depthwise
convolution.
The 2-d depthwise separable convolution performs 2 depthwise convolutions
each over 1 spatial dimension of the input.
## Parameter Shapes
* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`
* `k1` - `{output_channels, 1, kernel_spatial0, 1}`
* `b1` - `{output_channels}` or `{}`
* `k2` - `{output_channels, 1, 1, kernel_spatial1}`
* `b2` - `{output_channels}` or `{}`
`output_channels` must be a multiple of the input channels.
## Options
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to 1.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:input_dilation` - input dilation factor. Equivalent
to applying interior padding on the input. The amount
of interior padding applied is given by `input_dilation - 1`.
Defaults to `1` or no dilation.
* `:kernel_dilation` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `kernel_dilation - 1`.
Defaults to `1` or no dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## References
* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
@doc type: :convolutional
defn separable_conv2d(input, k1, b1, k2, b2, opts \\ []) do
assert_rank!("Axon.Layers.separable_conv2d", "input", input, 4)
assert_equal_rank!("Axon.Layers.separable_conv2d", ["input", "kernel1", "kernel2"], [
input,
k1,
k2
])
input
|> depthwise_conv(k1, b1, opts)
|> depthwise_conv(k2, b2, opts)
end
@doc false
defn separable_conv2d(input, k1, k2, opts \\ []) do
separable_conv2d(input, k1, 0, k2, 0, opts)
end
@doc """
Functional implementation of a 3-dimensional separable depthwise
convolution.
The 3-d depthwise separable convolution performs 3 depthwise convolutions
each over 1 spatial dimension of the input.
## Parameter Shapes
* `input` - `{batch_size, input_channels, input_spatial0, input_spatial1, input_spatial2}`
* `k1` - `{output_channels, 1, kernel_spatial0, 1, 1}`
* `b1` - `{output_channels}` or `{}`
* `k2` - `{output_channels, 1, 1, kernel_spatial1, 1}`
* `b2` - `{output_channels}` or `{}`
* `k3` - `{output_channels, 1, 1, 1, kernel_spatial2}`
* `b3` - `{output_channels}` or `{}`
`output_channels` must be a multiple of the input channels.
## Options
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to 1.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:input_dilation` - input dilation factor. Equivalent
to applying interior padding on the input. The amount
of interior padding applied is given by `input_dilation - 1`.
Defaults to `1` or no dilation.
* `:kernel_dilation` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `kernel_dilation - 1`.
Defaults to `1` or no dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## References
* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
"""
@doc type: :convolutional
defn separable_conv3d(input, k1, b1, k2, b2, k3, b3, opts \\ []) do
assert_rank!("Axon.Layers.separable_conv3d", "input", input, 5)
assert_equal_rank!(
"Axon.Layers.separable_conv3d",
["input", "kernel1", "kernel2", "kernel3"],
[input, k1, k2, k3]
)
input
|> depthwise_conv(k1, b1, opts)
|> depthwise_conv(k2, b2, opts)
|> depthwise_conv(k3, b3, opts)
end
@doc false
defn separable_conv3d(input, k1, k2, k3, opts \\ []) do
separable_conv3d(input, k1, 0, k2, 0, k3, 0, opts)
end
@doc """
Functional implementation of a general dimensional max pooling layer.
Pooling is applied to the spatial dimension of the input tensor.
Max pooling returns the maximum element in each valid window of
the input tensor. It is often used after convolutional layers
to downsample the input even further.
## Options
* `kernel_size` - window size. Rank must match spatial dimension
of the input tensor. Required.
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to size of kernel.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:window_dilations` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `window_dilations - 1`.
Can be a scalar or a list whose length matches the number of
spatial dimensions in the input tensor. Defaults to `1` or no
dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## Examples
iex> t = Nx.tensor([[
...> [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],
...> [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],
...> [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})
iex> Axon.Layers.max_pool(t, kernel_size: 2)
#Nx.Tensor<
f32[1][3][1]
[
[
[0.051500000059604645],
[1.6191999912261963],
[0.7282999753952026]
]
]
>
"""
@doc type: :pooling
defn max_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.max_pool", "input", input, 3)
opts =
keyword!(
opts,
[
:kernel_size,
strides: nil,
padding: :valid,
window_dilations: 1,
channels: :first,
mode: :inference
]
)
window_dimensions =
transform(
{Nx.rank(input), opts[:kernel_size], opts[:channels]},
fn {rank, kernel_size, channels} ->
Axon.Shape.pool_window_size(kernel_size, rank - 2, channels)
end
)
strides =
transform(
{Nx.rank(input), opts[:strides], window_dimensions, opts[:channels]},
fn
{_, nil, dims, _} -> Tuple.to_list(dims)
{_, [_ | _] = strides, _, :first} -> [1, 1 | strides]
{_, [_ | _] = strides, _, :last} -> [1 | strides] ++ [1]
{rank, strides, _, :first} -> [1, 1 | List.duplicate(strides, rank - 2)]
{rank, strides, _, :last} -> [1 | List.duplicate(strides, rank - 2)] ++ [1]
end
)
dilations =
transform(
{Nx.rank(input), opts[:window_dilations], opts[:channels]},
fn
{_, [_ | _] = dilations, :first} -> [1, 1 | dilations]
{rank, dilations, :first} -> [1, 1 | List.duplicate(dilations, rank - 2)]
{_, [_ | _] = dilations, :last} -> [1 | dilations] ++ [1]
{rank, dilations, :last} -> [1 | List.duplicate(dilations, rank - 2)] ++ [1]
end
)
padding =
transform(
opts[:padding],
fn
:same ->
:same
:valid ->
:valid
padding ->
[{0, 0}, {0, 0} | padding]
end
)
input
|> Nx.window_max(window_dimensions,
strides: strides,
padding: padding,
window_dilations: dilations
)
end
@doc """
Functional implementation of a general dimensional average pooling layer.
Pooling is applied to the spatial dimensions of the input tensor.
Average pooling returns the average of all elements in valid
windows in the input tensor. It is often used after convolutional
layers to downsample the input even further.
## Options
* `:kernel_size` - window size. Rank must match the number of spatial
dimensions in the input tensor. Required.
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to size of kernel.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:window_dilations` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `window_dilations - 1`.
Can be scalar or list whose length matches the number of
spatial dimensions in the input tensor. Defaults to `1` or no
dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
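## Examples

A minimal sketch (input values are arbitrary; the output spatial size is halved
because strides default to the kernel size):

    input = Nx.iota({1, 1, 4, 4}, type: {:f, 32})
    Axon.Layers.avg_pool(input, kernel_size: 2)
    # => shape {1, 1, 2, 2}, each entry the mean of a 2x2 window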
"""
@doc type: :pooling
defn avg_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.avg_pool", "input", input, 3)
opts =
keyword!(
opts,
[
:kernel_size,
strides: nil,
padding: :valid,
window_dilations: 1,
channels: :first,
mode: :inference
]
)
window_dimensions =
transform(
{Nx.rank(input), opts[:kernel_size], opts[:channels]},
fn {rank, kernel_size, channels} ->
Axon.Shape.pool_window_size(kernel_size, rank - 2, channels)
end
)
strides =
transform(
{Nx.rank(input), opts[:strides], window_dimensions, opts[:channels]},
fn
{_, nil, dims, _} -> Tuple.to_list(dims)
{_, [_ | _] = strides, _, :first} -> [1, 1 | strides]
{_, [_ | _] = strides, _, :last} -> [1 | strides] ++ [1]
{rank, strides, _, :first} -> [1, 1 | List.duplicate(strides, rank - 2)]
{rank, strides, _, :last} -> [1 | List.duplicate(strides, rank - 2)] ++ [1]
end
)
dilations =
transform(
{Nx.rank(input), opts[:window_dilations], opts[:channels]},
fn
{_, [_ | _] = dilations, :first} -> [1, 1 | dilations]
{rank, dilations, :first} -> [1, 1 | List.duplicate(dilations, rank - 2)]
{_, [_ | _] = dilations, :last} -> [1 | dilations] ++ [1]
{rank, dilations, :last} -> [1 | List.duplicate(dilations, rank - 2)] ++ [1]
end
)
padding =
transform(
opts[:padding],
fn
:same ->
:same
:valid ->
:valid
padding ->
[{0, 0}, {0, 0} | padding]
end
)
input
|> Nx.window_mean(window_dimensions,
strides: strides,
padding: padding,
window_dilations: dilations
)
end
@doc ~S"""
Functional implementation of a general dimensional power average
pooling layer.
Pooling is applied to the spatial dimensions of the input tensor.
Power average pooling computes the following function on each
valid window of the input tensor:
$$f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}$$
Where $p$ is given by the keyword argument `:norm`. As $p$ approaches
infinity, it becomes equivalent to max pooling.
## Options
* `:norm` - $p$ from above equation. Defaults to 2.
* `:kernel_size` - window size. Rank must match the number of spatial
dimensions in the input tensor. Required.
* `:strides` - kernel strides. Can be a scalar or a list
whose length matches the number of spatial dimensions in
the input tensor. Defaults to size of kernel.
* `:padding` - zero padding on the input. Can be one of
`:valid`, `:same` or a general padding configuration
without interior padding for each spatial dimension
of the input.
* `:window_dilations` - kernel dilation factor. Equivalent
to applying interior padding on the kernel. The amount
of interior padding applied is given by `window_dilations - 1`.
Can be scalar or list whose length matches the number of
spatial dimensions in the input tensor. Defaults to `1` or no
dilation.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
## Examples
iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})
iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2)
#Nx.Tensor<
f32[1][3][1]
[
[
[1.0547149181365967],
[1.3390626907348633],
[0.5763426423072815]
]
]
>
"""
@doc type: :pooling
defn lp_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.lp_pool", "input", input, 3)
opts =
keyword!(
opts,
[
:kernel_size,
strides: nil,
padding: :valid,
window_dilations: 1,
norm: 2,
channels: :first,
mode: :inference
]
)
window_dimensions =
transform(
{Nx.rank(input), opts[:kernel_size], opts[:channels]},
fn {rank, kernel_size, channels} ->
Axon.Shape.pool_window_size(kernel_size, rank - 2, channels)
end
)
strides =
transform(
{Nx.rank(input), opts[:strides], window_dimensions, opts[:channels]},
fn
{_, nil, dims, _} -> Tuple.to_list(dims)
{_, [_ | _] = strides, _, :first} -> [1, 1 | strides]
{_, [_ | _] = strides, _, :last} -> [1 | strides] ++ [1]
{rank, strides, _, :first} -> [1, 1 | List.duplicate(strides, rank - 2)]
{rank, strides, _, :last} -> [1 | List.duplicate(strides, rank - 2)] ++ [1]
end
)
dilations =
transform(
{Nx.rank(input), opts[:window_dilations], opts[:channels]},
fn
{_, [_ | _] = dilations, :first} -> [1, 1 | dilations]
{rank, dilations, :first} -> [1, 1 | List.duplicate(dilations, rank - 2)]
{_, [_ | _] = dilations, :last} -> [1 | dilations] ++ [1]
{rank, dilations, :last} -> [1 | List.duplicate(dilations, rank - 2)] ++ [1]
end
)
padding =
transform(
opts[:padding],
fn
:same ->
:same
:valid ->
:valid
padding ->
[{0, 0}, {0, 0} | padding]
end
)
norm = opts[:norm]
input
|> Nx.power(norm)
|> Nx.window_sum(window_dimensions,
strides: strides,
padding: padding,
window_dilations: dilations
)
|> Nx.power(Nx.divide(Nx.tensor(1, type: Nx.type(input)), norm))
end
@doc """
Functional implementation of general dimensional adaptive average
pooling.
Adaptive pooling allows you to specify the desired output size
of the transformed input. This will automatically adapt the
window size and strides to obtain the desired output size. It
will then perform average pooling using the calculated window
size and strides.
Adaptive pooling can be useful when working on multiple inputs with
different spatial input shapes. You can guarantee the output of
an adaptive pooling operation is always the same size regardless
of input shape.
## Options
* `:output_size` - spatial output size. Must be a tuple with
size equal to the spatial dimensions in the input tensor.
Required.
* `:channels` - channel configuration. One of `:first` or `:last`.
Defaults to `:first`.
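## Examples

A shape-only sketch (sizes are assumptions):

    input = Nx.random_uniform({1, 3, 7, 7}, type: {:f, 32})
    Axon.Layers.adaptive_avg_pool(input, output_size: {4, 4})
    # => shape {1, 3, 4, 4}, regardless of the 7x7 input spatial size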
"""
@doc type: :pooling
defn adaptive_avg_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.adaptive_avg_pool", "input", input, 3)
opts = keyword!(opts, [:output_size, channels: :first, mode: :inference])
window_strides =
transform(
{Nx.shape(input), Nx.rank(input), opts[:output_size], opts[:channels]},
fn {shape, rank, output_size, channels} ->
Axon.Shape.adaptive_pool_window_strides(shape, output_size, rank - 2, channels)
end
)
window_dimensions =
transform(
{Nx.shape(input), Nx.rank(input), window_strides, opts[:output_size], opts[:channels]},
fn {shape, rank, strides, output_size, channels} ->
Axon.Shape.adaptive_pool_window_size(shape, strides, output_size, rank - 2, channels)
end
)
input
|> Nx.window_mean(window_dimensions, padding: :valid, strides: window_strides)
end
@doc """
Functional implementation of general dimensional adaptive max
pooling.
Adaptive pooling allows you to specify the desired output size
of the transformed input. This will automatically adapt the
window size and strides to obtain the desired output size. It
will then perform max pooling using the calculated window
size and strides.
Adaptive pooling can be useful when working on multiple inputs with
different spatial input shapes. You can guarantee the output of
an adaptive pooling operation is always the same size regardless
of input shape.
## Options
* `:output_size` - spatial output size. Must be a tuple with
size equal to the spatial dimensions in the input tensor.
Required.
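## Examples

A shape-only sketch (sizes are assumptions):

    input = Nx.random_uniform({1, 3, 10, 10}, type: {:f, 32})
    Axon.Layers.adaptive_max_pool(input, output_size: {5, 5})
    # => shape {1, 3, 5, 5}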
"""
@doc type: :pooling
defn adaptive_max_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.adaptive_max_pool", "input", input, 3)
opts = keyword!(opts, [:output_size, channels: :first, mode: :inference])
window_strides =
transform(
{Nx.shape(input), Nx.rank(input), opts[:output_size], opts[:channels]},
fn {shape, rank, output_size, channels} ->
Axon.Shape.adaptive_pool_window_strides(shape, output_size, rank - 2, channels)
end
)
window_dimensions =
transform(
{Nx.shape(input), Nx.rank(input), window_strides, opts[:output_size], opts[:channels]},
fn {shape, rank, strides, output_size, channels} ->
Axon.Shape.adaptive_pool_window_size(shape, strides, output_size, rank - 2, channels)
end
)
input
|> Nx.window_max(window_dimensions, padding: :valid, strides: window_strides)
end
@doc """
Functional implementation of general dimensional adaptive power
average pooling.
Computes:
$$f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}$$
Adaptive pooling allows you to specify the desired output size
of the transformed input. This will automatically adapt the
window size and strides to obtain the desired output size. It
will then perform power average pooling using the calculated window
size and strides.
Adaptive pooling can be useful when working on multiple inputs with
different spatial input shapes. You can guarantee the output of
an adaptive pooling operation is always the same size regardless
of input shape.
## Options
* `:norm` - $p$ from above equation. Defaults to 2.
* `:output_size` - spatial output size. Must be a tuple with
size equal to the spatial dimensions in the input tensor.
Required.
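## Examples

A shape-only sketch (sizes and `:norm` are assumptions):

    input = Nx.random_uniform({1, 3, 6, 6}, type: {:f, 32})
    Axon.Layers.adaptive_lp_pool(input, output_size: {2, 2}, norm: 1)
    # => shape {1, 3, 2, 2}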
"""
@doc type: :pooling
defn adaptive_lp_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.adaptive_lp_pool", "input", input, 3)
opts = keyword!(opts, [:output_size, norm: 2, channels: :first, mode: :inference])
norm = opts[:norm]
window_strides =
transform(
{Nx.shape(input), Nx.rank(input), opts[:output_size], opts[:channels]},
fn {shape, rank, output_size, channels} ->
Axon.Shape.adaptive_pool_window_strides(shape, output_size, rank - 2, channels)
end
)
window_dimensions =
transform(
{Nx.shape(input), Nx.rank(input), window_strides, opts[:output_size], opts[:channels]},
fn {shape, rank, strides, output_size, channels} ->
Axon.Shape.adaptive_pool_window_size(shape, strides, output_size, rank - 2, channels)
end
)
input
|> Nx.power(norm)
|> Nx.window_sum(window_dimensions, padding: :valid, strides: window_strides)
|> Nx.power(Nx.divide(Nx.tensor(1, type: Nx.type(input)), norm))
end
## Normalization
@doc ~S"""
Functional implementation of batch normalization.
Normalizes the input by calculating mean and variance of the
input tensor along every dimension but the given `:channel_index`,
and then scaling according to:
$$y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta$$
`gamma` and `beta` are often trainable parameters. When `:mode` is
`:train`, this method computes a new mean and variance and returns
the updated `ra_mean` and `ra_var` in an `Axon.StatefulOutput`.
Otherwise, it computes batch norm from the given `ra_mean` and `ra_var`.
## Options
* `:epsilon` - numerical stability term. $\epsilon$ in the above
formulation.
* `:channel_index` - channel index used to determine reduction
axes for mean and variance calculation.
* `:momentum` - momentum to use for EMA update.
* `:mode` - one of `:train` or `:inference`. In `:train` mode a new
mean and variance are computed and returned. Defaults to `:inference`.
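## Examples

A minimal sketch (shapes and mode are assumptions; per-channel parameters
have shape `{channels}`):

    input = Nx.random_uniform({2, 3, 4, 4}, type: {:f, 32})
    gamma = Nx.broadcast(1.0, {3})
    beta = Nx.broadcast(0.0, {3})
    ra_mean = Nx.broadcast(0.0, {3})
    ra_var = Nx.broadcast(1.0, {3})

    # In :train mode this returns an Axon.StatefulOutput carrying updated
    # running statistics; in :inference mode it returns only the tensor.
    Axon.Layers.batch_norm(input, gamma, beta, ra_mean, ra_var, mode: :train)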
## References
* [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
@doc type: :normalization
defn batch_norm(input, gamma, beta, ra_mean, ra_var, opts \\ []) do
opts = keyword!(opts, epsilon: 1.0e-5, channel_index: 1, momentum: 0.1, mode: :inference)
training? =
transform(opts[:mode], fn
:inference -> false
:train -> true
end)
axes =
transform({Nx.axes(input), opts[:channel_index]}, fn {axes, channel} ->
Axon.Shape.batch_norm_axes(axes, channel)
end)
channel_index = opts[:channel_index]
num_channels =
transform({input, channel_index}, fn {inp, channel_idx} ->
elem(Nx.shape(inp), channel_idx)
end)
{gamma, beta, ra_mean, ra_var} =
transform(
{gamma, beta, ra_mean, ra_var, Nx.rank(input), num_channels, channel_index},
fn {g, b, m, v, rank, num_channels, channel_idx} ->
new_shape =
1
|> List.duplicate(rank)
|> List.to_tuple()
|> put_elem(channel_idx, num_channels)
{Nx.reshape(g, new_shape), Nx.reshape(b, new_shape), Nx.reshape(m, new_shape),
Nx.reshape(v, new_shape)}
end
)
transform(
{input, gamma, beta, ra_mean, ra_var, axes, opts[:epsilon], opts[:momentum], training?},
fn
{x, g, b, m, v, axes, eps, alpha, true} ->
{new_mean, new_var} = mean_and_variance(x, axes: axes)
out = normalize(x, new_mean, new_var, g, b, epsilon: eps)
ra_mean = update_ema(new_mean, m, alpha)
ra_var = update_ema(new_var, v, alpha)
%Axon.StatefulOutput{
output: out,
state: %{"mean" => ra_mean, "var" => ra_var}
}
{x, g, b, m, v, _, eps, _, _} ->
normalize(x, m, v, g, b, epsilon: eps)
end
)
end
defnp update_ema(obs, old, momentum) do
Nx.squeeze(momentum * old + (1 - momentum) * obs)
end
@doc ~S"""
Functional implementation of layer normalization.
Normalizes the input by calculating mean and variance of the
input tensor along the given feature dimension `:channel_index`.
$$y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta$$
`gamma` and `beta` are often trainable parameters. This method does
not maintain an EMA of mean and variance.
## Options
* `:epsilon` - numerical stability term. $\epsilon$ in the above
formulation.
* `:channel_index` - channel index used to determine reduction
axes for mean and variance calculation.
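## Examples

A minimal sketch (shapes are assumptions; `gamma` and `beta` match the
channel dimension):

    input = Nx.random_uniform({2, 5}, type: {:f, 32})
    gamma = Nx.broadcast(1.0, {5})
    beta = Nx.broadcast(0.0, {5})

    Axon.Layers.layer_norm(input, gamma, beta, channel_index: 1)
    # => shape {2, 5}, normalized along axis 1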
"""
@doc type: :normalization
defn layer_norm(input, gamma, beta, opts \\ []) do
opts = keyword!(opts, epsilon: 1.0e-5, channel_index: 1, mode: :inference)
axes = opts[:channel_index]
channel_index = opts[:channel_index]
num_channels =
transform({input, channel_index}, fn {inp, channel_idx} ->
elem(Nx.shape(inp), channel_idx)
end)
{gamma, beta} =
transform({gamma, beta, Nx.rank(input), num_channels, channel_index}, fn {g, b, rank,
num_channels,
channel_idx} ->
new_shape =
1
|> List.duplicate(rank)
|> List.to_tuple()
|> put_elem(channel_idx, num_channels)
{Nx.reshape(g, new_shape), Nx.reshape(b, new_shape)}
end)
{mean, var} = mean_and_variance(input, axes: [axes])
normalize(input, mean, var, gamma, beta, epsilon: opts[:epsilon])
end
@doc """
Functional implementation of group normalization.
Normalizes the input by reshaping input into groups of given
`:group_size` and then calculating the mean and variance along
every dimension but the input batch dimension.
$$y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta$$
`gamma` and `beta` are often trainable parameters. This method does
not maintain an EMA of mean and variance.
## Options
* `:group_size` - channel group size. Size of each group to split
input channels into.
* `:epsilon` - numerical stability term. $\epsilon$ in the above
formulation.
* `:channel_index` - channel index used to determine reduction
axes and group shape for mean and variance calculation.
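## Examples

A minimal sketch (sizes are assumptions; 4 channels split into groups of 2):

    input = Nx.random_uniform({2, 4, 8, 8}, type: {:f, 32})
    gamma = Nx.broadcast(1.0, {4})
    beta = Nx.broadcast(0.0, {4})

    Axon.Layers.group_norm(input, gamma, beta, group_size: 2)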
## References
* [Group Normalization](https://arxiv.org/abs/1803.08494v3)
"""
@doc type: :normalization
defn group_norm(input, gamma, beta, opts \\ []) do
opts = keyword!(opts, [:group_size, epsilon: 1.0e-5, channel_index: 1, mode: :inference])
group_shape =
transform({Nx.shape(input), opts[:group_size], opts[:channel_index]}, fn {shape, groups,
channel} ->
Axon.Shape.group_norm_shape(shape, groups, channel)
end)
channel_index = opts[:channel_index]
num_channels =
transform({input, channel_index}, fn {inp, channel_idx} ->
elem(Nx.shape(inp), channel_idx)
end)
{gamma, beta} =
transform({gamma, beta, Nx.rank(input), num_channels, channel_index}, fn {g, b, rank,
num_channels,
channel_idx} ->
new_shape =
1
|> List.duplicate(rank)
|> List.to_tuple()
|> put_elem(channel_idx, num_channels)
{Nx.reshape(g, new_shape), Nx.reshape(b, new_shape)}
end)
x = Nx.reshape(input, group_shape)
axes = transform(Nx.rank(x), &Axon.Shape.group_norm_axes/1)
{mean, var} = mean_and_variance(x, axes: axes)
normalize(Nx.reshape(x, input), mean, var, gamma, beta, epsilon: opts[:epsilon])
end
@doc """
Functional implementation of instance normalization.
Normalizes the input by calculating mean and variance of the
input tensor along the spatial dimensions of the input.
$$y = \frac{x - E[x]}{\sqrt{Var[x] + \epsilon}} * \gamma + \beta$$
`gamma` and `beta` are often trainable parameters. When `:mode` is
`:train`, this method computes a new mean and variance and returns
the updated `ra_mean` and `ra_var` in an `Axon.StatefulOutput`.
Otherwise, it computes instance norm from the given `ra_mean` and `ra_var`.
## Options
* `:epsilon` - numerical stability term. $\epsilon$ in the above
formulation.
* `:channel_index` - channel index used to determine reduction
axes for mean and variance calculation.
* `:momentum` - momentum to use for EMA update.
* `:mode` - one of `:train` or `:inference`. In `:train` mode a new
mean and variance are computed and returned. Defaults to `:inference`.
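## Examples

A minimal sketch (shapes are assumptions; the parameter layout matches
`batch_norm/6`):

    input = Nx.random_uniform({2, 3, 4, 4}, type: {:f, 32})
    gamma = Nx.broadcast(1.0, {3})
    beta = Nx.broadcast(0.0, {3})
    ra_mean = Nx.broadcast(0.0, {3})
    ra_var = Nx.broadcast(1.0, {3})

    Axon.Layers.instance_norm(input, gamma, beta, ra_mean, ra_var, mode: :inference)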
## References
* [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022v3)
"""
@doc type: :normalization
defn instance_norm(input, gamma, beta, ra_mean, ra_var, opts \\ []) do
opts = keyword!(opts, epsilon: 1.0e-5, channel_index: 1, momentum: 0.1, mode: :inference)
training? =
transform(opts[:mode], fn
:inference -> false
:train -> true
end)
axes =
transform({Nx.axes(input), opts[:channel_index]}, fn {axes, channel} ->
Axon.Shape.instance_norm_axes(axes, channel)
end)
channel_index = opts[:channel_index]
num_channels =
transform({input, channel_index}, fn {inp, channel_idx} ->
elem(Nx.shape(inp), channel_idx)
end)
{gamma, beta, ra_mean, ra_var} =
transform(
{gamma, beta, ra_mean, ra_var, Nx.rank(input), num_channels, channel_index},
fn {g, b, m, v, rank, num_channels, channel_idx} ->
new_shape =
1
|> List.duplicate(rank)
|> List.to_tuple()
|> put_elem(channel_idx, num_channels)
{Nx.reshape(g, new_shape), Nx.reshape(b, new_shape), Nx.reshape(m, new_shape),
Nx.reshape(v, new_shape)}
end
)
transform(
{input, gamma, beta, ra_mean, ra_var, axes, opts[:epsilon], opts[:momentum], training?},
fn
{x, g, b, m, v, axes, eps, alpha, true} ->
{new_mean, new_var} = mean_and_variance(x, axes: axes)
out = normalize(x, new_mean, new_var, g, b, epsilon: eps)
ra_mean = update_ema(new_mean, m, alpha)
ra_var = update_ema(new_var, v, alpha)
%Axon.StatefulOutput{
output: out,
state: %{"mean" => ra_mean, "var" => ra_var}
}
{x, g, b, m, v, _, eps, _, _} ->
normalize(x, m, v, g, b, epsilon: eps)
end
)
end
## Stochastic
# TODO: Manage the state of these RNGs
@doc ~S"""
Functional implementation of a dropout layer.
Applies a mask to some elements of the input tensor with probability
`rate` and scales the input tensor by a factor of $\frac{1}{1 - rate}$.
Dropout is a form of regularization that helps prevent overfitting
by preventing models from becoming too reliant on certain connections.
Dropout can somewhat be thought of as learning an ensemble of models
with random connections masked.
## Options
* `:rate` - dropout rate. Used to determine probability a connection
will be dropped. Required.
* `:noise_shape` - input noise shape. Shape of `mask` which can be useful
for broadcasting `mask` across feature channels or other dimensions.
Defaults to shape of input tensor.
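## Examples

A minimal sketch (rate is an assumption; dropout only applies in `:train`
mode and is the identity in `:inference` mode):

    input = Nx.iota({2, 4}, type: {:f, 32})
    Axon.Layers.dropout(input, rate: 0.5, mode: :train)
    # surviving elements are scaled by 1 / (1 - rate) = 2.0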
## References
* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](https://jmlr.org/papers/v15/srivastava14a.html)
"""
@doc type: :dropout
defn dropout(input, opts \\ []) do
opts = keyword!(opts, [:rate, noise_shape: Nx.shape(input), mode: :inference])
keep_prob = Nx.tensor(1, type: Nx.type(input)) - Nx.tensor(opts[:rate], type: Nx.type(input))
mask = Nx.less(Nx.random_uniform(opts[:noise_shape], type: Nx.type(input)), keep_prob)
mask =
transform(
{mask, Nx.shape(input)},
fn {mask, input_shape} ->
if Elixir.Kernel.==(Nx.shape(mask), input_shape),
do: mask,
else: Nx.broadcast(mask, input_shape)
end
)
out = Nx.select(mask, input / keep_prob, Nx.tensor(0, type: Nx.type(input)))
transform({input, out, opts[:mode]}, fn
{input, _, :inference} ->
input
{_, out, :train} ->
out
end)
end
@doc """
Functional implementation of an n-dimensional spatial
dropout layer.
Applies a mask to entire feature maps instead of individual
elements. This is done by calculating a mask shape equal to
the spatial dimensions of the input tensor with 1 channel,
and then broadcasting the mask across the feature dimension
of the input tensor.
## Options
* `:rate` - dropout rate. Used to determine probability a connection
will be dropped. Required.
* `:noise_shape` - input noise shape. Shape of `mask` which can be useful
for broadcasting `mask` across feature channels or other dimensions.
Defaults to shape of input tensor.
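## Examples

A minimal sketch (sizes are assumptions; the mask is broadcast across the
feature dimension as described above):

    input = Nx.random_uniform({1, 3, 4, 4}, type: {:f, 32})
    Axon.Layers.spatial_dropout(input, rate: 0.5, mode: :train)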
## References
* [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
"""
@doc type: :dropout
defn spatial_dropout(input, opts \\ []) do
assert_min_rank!("Axon.Layers.spatial_dropout", "input", input, 3)
opts = keyword!(opts, rate: 0.5, channels: :first, mode: :inference)
noise_shape =
transform({Nx.shape(input), opts[:channels]}, fn {shape, channels} ->
Axon.Shape.spatial_dropout_noise_shape(shape, channels)
end)
dropout(input, rate: opts[:rate], noise_shape: noise_shape, mode: opts[:mode])
end
@doc """
Functional implementation of an alpha dropout layer.
Alpha dropout is a type of dropout that forces the input
to have zero mean and unit standard deviation. Randomly
masks some elements and scales to enforce self-normalization.
## Options
* `:rate` - dropout rate. Used to determine probability a connection
will be dropped. Required.
* `:noise_shape` - input noise shape. Shape of `mask` which can be useful
for broadcasting `mask` across feature channels or other dimensions.
Defaults to shape of input tensor.
## References
* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
"""
@doc type: :dropout
defn alpha_dropout(input, opts \\ []) do
opts = keyword!(opts, rate: 0.5, mode: :inference)
rate = opts[:rate]
alpha = Nx.tensor(1.6732632423543772848170429916717, type: Nx.type(input))
scale = Nx.tensor(1.0507009873554804934193349852946, type: Nx.type(input))
alpha_p = -alpha * scale
keep_prob = Nx.tensor(1, type: Nx.type(input)) - rate
mask = Nx.less(Nx.random_uniform(Nx.shape(input), type: Nx.type(input)), keep_prob)
a = Nx.rsqrt(keep_prob * Nx.power(Nx.tensor(1, type: Nx.type(input)) * alpha_p, 2))
b = -a * alpha_p * rate
x = Nx.select(mask, input, alpha_p)
out = a * x + b
transform({input, out, opts[:mode]}, fn
{input, _, :inference} ->
input
{_, out, :train} ->
out
end)
end
@doc """
Functional implementation of a feature alpha dropout layer.
Feature alpha dropout applies dropout in the same manner as
spatial dropout; however, it also enforces self-normalization
by masking inputs with the SELU activation function and scaling
unmasked inputs.
## Options
* `:rate` - dropout rate. Used to determine probability a connection
will be dropped. Required.
* `:noise_shape` - input noise shape. Shape of `mask` which can be useful
for broadcasting `mask` across feature channels or other dimensions.
Defaults to shape of input tensor.
"""
@doc type: :dropout
defn feature_alpha_dropout(input, opts \\ []) do
assert_min_rank!("Axon.Layers.feature_alpha_dropout", "input", input, 3)
opts = keyword!(opts, rate: 0.5, channels: :first, mode: :inference)
noise_shape =
transform({Nx.shape(input), opts[:channels]}, fn {shape, channels} ->
Axon.Shape.spatial_dropout_noise_shape(shape, channels)
end)
keep_prob = 1 - opts[:rate]
mask = Nx.less(Nx.random_uniform(noise_shape, type: Nx.type(input)), keep_prob)
mask =
transform(
{mask, Nx.shape(input)},
fn {mask, input_shape} ->
if Elixir.Kernel.==(Nx.shape(mask), input_shape),
do: mask,
else: Nx.broadcast(mask, input_shape)
end
)
out = Nx.select(mask, input / keep_prob, Nx.negate(Axon.Activations.selu(input)))
transform({input, out, opts[:mode]}, fn
{input, _, :inference} ->
input
{_, out, :train} ->
out
end)
end
## Global Pooling
@doc """
Functional implementation of global average pooling which averages across
the spatial dimensions of the input such that the only remaining dimensions
are the batch and feature dimensions.
Assumes data is configured in a channels-first like format.
## Parameter Shapes
* `input` - {batch_size, features, s1, ..., sN}
## Options
* `:keep_axes` - option to keep reduced axes with size 1 for each reduced
dimension. Defaults to `false`.
## Examples
iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}))
#Nx.Tensor<
f32[3][2]
[
[1.0, 4.0],
[7.0, 10.0],
[13.0, 16.0]
]
>
iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true)
#Nx.Tensor<
f32[1][3][1][1]
[
[
[
[1.5]
],
[
[5.5]
],
[
[9.5]
]
]
]
>
"""
defn global_avg_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.global_avg_pool", "input", input, 3)
opts = keyword!(opts, channels: :first, keep_axes: false, mode: :inference)
all_but_batch_and_feature =
transform({Nx.rank(input), opts[:channels]}, fn
{rank, :first} ->
for i <- 2..(rank - 1), do: i
{rank, :last} ->
for i <- 1..(rank - 2), do: i
end)
Nx.mean(input, axes: all_but_batch_and_feature, keep_axes: opts[:keep_axes])
end
@doc """
Functional implementation of global max pooling which computes maximums across
the spatial dimensions of the input such that the only remaining dimensions are
the batch and feature dimensions.
Assumes data is configured in a channels-first like format.
## Parameter Shapes
* `input` - {batch_size, features, s1, ..., sN}
## Options
* `:keep_axes` - option to keep reduced axes with size 1 for each reduced
dimension. Defaults to `false`.
## Examples
iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}))
#Nx.Tensor<
f32[3][2]
[
[2.0, 5.0],
[8.0, 11.0],
[14.0, 17.0]
]
>
iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true)
#Nx.Tensor<
f32[1][3][1][1]
[
[
[
[3.0]
],
[
[7.0]
],
[
[11.0]
]
]
]
>
"""
defn global_max_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.global_max_pool", "input", input, 3)
opts = keyword!(opts, keep_axes: false, channels: :first, mode: :inference)
all_but_batch_and_feature =
transform({Nx.rank(input), opts[:channels]}, fn
{rank, :first} ->
for i <- 2..(rank - 1), do: i
{rank, :last} ->
for i <- 1..(rank - 2), do: i
end)
Nx.reduce_max(input, axes: all_but_batch_and_feature, keep_axes: opts[:keep_axes])
end
@doc """
Functional implementation of global LP pooling which computes the following
function across spatial dimensions of the input:
$$f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}$$
Where $p$ is given by the keyword argument `:norm`. As $p$ approaches
infinity, it becomes equivalent to max pooling.
Assumes data is configured in a channels-first like format.
## Parameter Shapes
* `input` - {batch_size, features, s1, ..., sN}
## Options
* `:keep_axes` - option to keep reduced axes with size 1 for each reduced
dimension. Defaults to `false`.
* `:norm` - $p$ in above function. Defaults to 2
## Examples
iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1)
#Nx.Tensor<
f32[3][2]
[
[3.0, 12.0],
[21.0, 30.0],
[39.0, 48.0]
]
>
iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true)
#Nx.Tensor<
f16[1][3][1][1]
[
[
[
[3.7421875]
],
[
[11.2265625]
],
[
[19.125]
]
]
]
>
"""
defn global_lp_pool(input, opts \\ []) do
assert_min_rank!("Axon.Layers.global_lp_pool", "input", input, 3)
opts = keyword!(opts, norm: 2, keep_axes: false, channels: :first, mode: :inference)
norm = opts[:norm]
all_but_batch_and_feature =
transform({Nx.rank(input), opts[:channels]}, fn
{rank, :first} ->
for i <- 2..(rank - 1), do: i
{rank, :last} ->
for i <- 1..(rank - 2), do: i
end)
input
|> Nx.power(norm)
|> Nx.sum(axes: all_but_batch_and_feature, keep_axes: opts[:keep_axes])
|> Nx.power(Nx.divide(Nx.tensor(1, type: Nx.type(input)), norm))
end
## Sparse
@doc """
Computes embedding by treating kernel matrix as a lookup table
for discrete tokens.
`input` is a vector of discrete values, typically representing tokens
(e.g. words, characters, etc.) from a vocabulary. `kernel` is a kernel
matrix of shape `{vocab_size, embedding_size}` from which the dense
embeddings will be drawn.
## Parameter Shapes
* `input` - `{batch_size, ..., seq_len}`
* `kernel` - `{vocab_size, embedding_size}`
## Examples
iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
iex> kernels = Nx.tensor([
...> [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],
...> [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
...> [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
...> [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
...> [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
...> [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],
...> [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],
...> [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],
...> [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],
...> [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
...> ])
iex> Axon.Layers.embedding(input, kernels)
#Nx.Tensor<
f32[2][4][3]
[
[
[0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
[0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
[0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
[0.1898999959230423, 0.9311000108718872, 0.8356000185012817]
],
[
[0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
[0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
[0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
[0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
]
]
>
"""
defn embedding(input, kernel, _opts \\ []) do
assert_rank!("Axon.Layers.embedding", "kernel", kernel, 2)
Nx.take(kernel, Nx.as_type(input, {:s, 64}), axis: 0)
end
## Shape
@doc """
Flattens input to shape of `{batch, units}` by folding outer
dimensions.
## Examples
iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))
#Nx.Tensor<
f32[1][4]
[
[0.0, 1.0, 2.0, 3.0]
]
>
"""
defn flatten(x, opts \\ []) do
opts = keyword!(opts, ignore_batch?: true, mode: :inference)
new_shape =
transform({Nx.shape(x), opts[:ignore_batch?]}, fn {shape, ignore} ->
Axon.Shape.flatten(shape, ignore)
end)
Nx.reshape(x, new_shape)
end
@doc false
# Internal version of Nx.reshape for constructing reshape layers
# without worrying about a batch dimension
defn reshape(x, opts \\ []) do
opts = keyword!(opts, [:to, ignore_batch?: true, mode: :inference])
transform({opts[:to], opts[:ignore_batch?]}, fn
{shape, true} ->
Nx.reshape(x, put_elem(shape, 0, elem(Nx.shape(x), 0)))
{shape, false} ->
Nx.reshape(x, shape)
end)
end
@doc false
# Internal version of Nx.pad for constructing pad layers without
# worrying about batch or channel dimensions
defn pad(x, opts \\ []) do
opts = keyword!(opts, [:padding_config, :value, :channels, mode: :inference])
config =
transform({opts[:padding_config], opts[:channels]}, fn
{config, :first} ->
[{0, 0, 0}, {0, 0, 0} | Enum.map(config, fn {x, y} -> {x, y, 0} end)]
{config, :last} ->
[{0, 0, 0} | Enum.map(config, fn {x, y} -> {x, y, 0} end)] ++ [{0, 0, 0}]
end)
Nx.pad(x, Nx.as_type(opts[:value], Nx.type(x)), config)
end
@doc false
# Internal version of Nx.transpose for constructing a transpose layer
# without worrying about a batch dimension
defn transpose(x, opts \\ []) do
opts = keyword!(opts, [:axes, ignore_batch?: true, mode: :inference])
axes =
transform({opts[:axes], opts[:ignore_batch?]}, fn
{axes, true} ->
[0 | Enum.map(axes, &(&1 + 1))]
{axes, false} ->
axes
end)
Nx.transpose(x, axes: axes)
end
@doc false
# Internal helper for constructing conditional layers without
# needing to use the if-macros in Axon.Compiler
defn cond(cond_input_expr, on_true_expr, on_false_expr, opts \\ []) do
opts = keyword!(opts, [:cond, mode: :inference])
cond_expr = opts[:cond].(cond_input_expr)
transform(cond_expr, fn cond_expr ->
cond_rank = Nx.rank(cond_expr)
cond_type = Nx.type(cond_expr)
unless Elixir.Kernel.and(
Elixir.Kernel.==(cond_rank, 0),
Elixir.Kernel.==(cond_type, {:u, 8})
) do
raise ArgumentError,
"cond_fn must return a scalar-boolean tensor" <>
" got result with rank #{inspect(cond_rank)} and" <>
" type #{inspect(cond_type)}"
end
end)
if cond_expr do
on_true_expr
else
on_false_expr
end
end
@doc false
# Internal helper for constructing bias layers without
defn bias(input, bias, _opts \\ []) do
input + bias
end
@doc """
Resizes a batch of tensors to the given shape using one of a
number of sampling methods.
Requires input option `:to` which should be a tuple specifying
the resized spatial dimensions of the input tensor. Input tensor
must be at least rank 3, with fixed `batch` and `channel` dimensions.
Resizing will upsample or downsample using the given resize method.
Supported resize methods are `:nearest`, `:linear`, `:bilinear`,
`:trilinear`, `:cubic`, `:bicubic`, and `:tricubic`.
## Examples
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
iex> Axon.Layers.resize(img, to: {4, 4})
#Nx.Tensor<
f32[1][1][4][4]
[
[
[
[0.0, 1.0, 1.0, 2.0],
[3.0, 4.0, 4.0, 5.0],
[3.0, 4.0, 4.0, 5.0],
[6.0, 7.0, 7.0, 8.0]
]
]
]
>
iex> img = Nx.iota({1, 1, 3}, type: {:f, 32})
iex> Axon.Layers.resize(img, to: {2})
#Nx.Tensor<
f32[1][1][2]
[
[
[0.0, 2.0]
]
]
>
iex> img = Nx.iota({1, 2, 2, 2, 1}, type: {:f, 32})
iex> Axon.Layers.resize(img, to: {1, 3, 2})
#Nx.Tensor<
f32[1][2][1][3][2]
[
[
[
[
[2.0, 2.0],
[3.0, 3.0],
[3.0, 3.0]
]
],
[
[
[6.0, 6.0],
[7.0, 7.0],
[7.0, 7.0]
]
]
]
]
>
### Error cases
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
iex> Axon.Layers.resize(img, to: {4, 4}, method: :foo)
** (ArgumentError) invalid resize method :foo, resize method must be one of :nearest, :linear, :bilinear, :trilinear, :cubic, :bicubic, :tricubic
"""
defn resize(input, opts \\ []) do
assert_min_rank!("Axon.Layers.resize", "input", input, 3)
opts =
keyword!(opts, [
:to,
method: :nearest,
channels: :first,
align_corners: false,
mode: :inference
])
output_shape = opts[:to]
spatial_dimensions =
transform({Nx.rank(input), opts[:channels]}, fn
{rank, :first} ->
Enum.to_list(2..(rank - 1))
{rank, :last} ->
Enum.to_list(1..(rank - 2))
end)
output_shape =
transform({input, spatial_dimensions, output_shape}, fn {input, spatial_dimensions,
output_shape} ->
unless Nx.rank(output_shape) == Nx.rank(input) - 2 do
raise ArgumentError,
"invalid output shape #{inspect(output_shape)}, expected output" <>
" output shape to have same rank as spatial dimensions of" <>
" the input tensor"
end
for {d, i} <- Enum.with_index(spatial_dimensions), reduce: Nx.shape(input) do
shape ->
put_elem(shape, d, elem(output_shape, i))
end
end)
transform({input, output_shape, spatial_dimensions, opts[:method], opts[:align_corners]}, fn
{img, shape, spatial_dimensions, :nearest, _} ->
resize_nearest(img, shape, spatial_dimensions)
{img, shape, spatial_dimensions, :linear, align_corners} ->
resize_linear(img, shape, spatial_dimensions, align_corners)
{img, shape, spatial_dimensions, :bilinear, align_corners} ->
resize_linear(img, shape, spatial_dimensions, align_corners)
{img, shape, spatial_dimensions, :trilinear, align_corners} ->
resize_linear(img, shape, spatial_dimensions, align_corners)
{img, shape, spatial_dimensions, :cubic, align_corners} ->
resize_cubic(img, shape, spatial_dimensions, align_corners)
{img, shape, spatial_dimensions, :bicubic, align_corners} ->
resize_cubic(img, shape, spatial_dimensions, align_corners)
{img, shape, spatial_dimensions, :tricubic, align_corners} ->
resize_cubic(img, shape, spatial_dimensions, align_corners)
{_, _, _, method, _} ->
raise ArgumentError,
"invalid resize method #{inspect(method)}, resize method" <>
" must be one of :nearest"
end)
end
defnp resize_nearest(input, output_shape, spatial_dimensions) do
transform({input, output_shape, spatial_dimensions}, fn {input, output_shape,
spatial_dimensions} ->
ones = List.duplicate(1, Nx.rank(input)) |> List.to_tuple()
for d <- spatial_dimensions, reduce: input do
input ->
input_shape = Nx.shape(input)
input_size = elem(input_shape, d)
output_size = elem(output_shape, d)
offset = (Nx.iota({output_size}) + 0.5) * input_size / output_size
offset = offset |> Nx.floor() |> Nx.as_type({:s, 32})
offset =
offset
|> Nx.reshape(put_elem(ones, d, output_size))
|> Nx.broadcast(put_elem(input_shape, d, output_size))
Nx.take_along_axis(input, offset, axis: d)
end
end)
end
defp resize_linear(input, output_shape, spatial_dimensions, align_corners) do
for d <- spatial_dimensions, reduce: input do
input ->
case align_corners do
true -> resize_linear_align(input, output_shape, d)
false -> resize_linear_noalign(input, output_shape, d)
end
end
end
defp resize_cubic(input, output_shape, spatial_dimensions, align_corners) do
input_shape = Nx.shape(input)
for d <- spatial_dimensions, reduce: input do
input ->
if elem(input_shape, d) == elem(output_shape, d) do
input
else
case align_corners do
true -> resize_cubic_align(input, output_shape, d)
false -> resize_cubic_noalign(input, output_shape, d)
end
end
end
end
defnp resize_cubic_align(input, output_shape, spatial_dimension) do
in_size = elem(Nx.shape(input), spatial_dimension)
out_size = elem(output_shape, spatial_dimension)
ids =
Nx.iota({out_size})
|> Nx.multiply(in_size - 1)
|> Nx.divide(out_size - 1)
pad_left1 =
Nx.subtract(
Nx.multiply(Nx.take(input, Nx.tensor([0]), axis: spatial_dimension), 2),
Nx.take(input, Nx.tensor([1]), axis: spatial_dimension)
)
t_n = Nx.take(input, Nx.tensor([in_size - 2]), axis: spatial_dimension)
t_nn = Nx.take(input, Nx.tensor([in_size - 1]), axis: spatial_dimension)
delta_right = Nx.subtract(t_nn, t_n)
pad_right1 = Nx.add(t_nn, delta_right)
pad_right2 = Nx.add(t_nn, Nx.multiply(delta_right, 2.0))
input_padded =
Nx.concatenate([pad_left1, input, pad_right1, pad_right2], axis: spatial_dimension)
id1 = Nx.floor(ids) |> Nx.as_type({:s, 8})
id_delta = Nx.subtract(ids, id1)
id0 = id1
id1 = Nx.add(id0, 1)
id2 = Nx.add(id1, 1)
id3 = Nx.add(id2, 1)
p = Nx.take(input_padded, Nx.stack([id0, id1, id2, id3]), axis: spatial_dimension)
d =
Nx.tensor([
[-0.5, 1.5, -1.5, 0.5],
[1.0, -2.5, 2.0, -0.5],
[-0.5, 0.0, 0.5, 0.0],
[0.0, 1.0, 0.0, 0.0]
])
c = Nx.dot(d, [1], p, [spatial_dimension])
x =
Nx.stack([
Nx.power(id_delta, 3),
Nx.power(id_delta, 2),
id_delta,
Nx.broadcast(1.0, Nx.shape(id_delta))
])
|> Nx.broadcast(Nx.shape(c), axes: [0, spatial_dimension + 1])
Nx.multiply(c, x) |> Nx.sum(axes: [0])
end
defnp resize_cubic_noalign(input, output_shape, spatial_dimension) do
in_size = elem(Nx.shape(input), spatial_dimension)
out_size = elem(output_shape, spatial_dimension)
w = in_size / out_size
ids =
Nx.iota({out_size})
|> Nx.multiply(w)
|> Nx.add(w / 2.0 - 0.5)
t_0 = Nx.take(input, Nx.tensor([0]), axis: spatial_dimension)
t_1 = Nx.take(input, Nx.tensor([1]), axis: spatial_dimension)
delta_left = Nx.subtract(t_1, t_0)
pad_left1 = Nx.subtract(t_0, delta_left)
pad_left2 = Nx.subtract(t_0, Nx.multiply(delta_left, 2))
t_n = Nx.take(input, Nx.tensor([in_size - 2]), axis: spatial_dimension)
t_nn = Nx.take(input, Nx.tensor([in_size - 1]), axis: spatial_dimension)
delta_right = Nx.subtract(t_nn, t_n)
pad_right1 = Nx.add(t_nn, delta_right)
pad_right2 = Nx.add(t_nn, Nx.multiply(delta_right, 2.0))
input_padded =
Nx.concatenate([pad_left2, pad_left1, input, pad_right1, pad_right2],
axis: spatial_dimension
)
id1 = Nx.floor(ids) |> Nx.as_type({:s, 8})
id_delta = Nx.subtract(ids, id1)
id0 = Nx.add(id1, 1)
id1 = Nx.add(id0, 1)
id2 = Nx.add(id1, 1)
id3 = Nx.add(id2, 1)
p = Nx.take(input_padded, Nx.stack([id0, id1, id2, id3]), axis: spatial_dimension)
d =
Nx.tensor([
[-0.5, 1.5, -1.5, 0.5],
[1.0, -2.5, 2.0, -0.5],
[-0.5, 0.0, 0.5, 0.0],
[0.0, 1.0, 0.0, 0.0]
])
c = Nx.dot(d, [1], p, [spatial_dimension])
x =
Nx.stack([
Nx.power(id_delta, 3),
Nx.power(id_delta, 2),
id_delta,
Nx.broadcast(1.0, Nx.shape(id_delta))
])
|> Nx.broadcast(Nx.shape(c), axes: [0, spatial_dimension + 1])
Nx.multiply(c, x) |> Nx.sum(axes: [0])
end
defnp resize_linear_align(input, output_shape, spatial_dimension) do
in_size = elem(Nx.shape(input), spatial_dimension)
out_size = elem(output_shape, spatial_dimension)
ids =
Nx.iota({out_size})
|> Nx.multiply(in_size - 1)
|> Nx.divide(out_size - 1)
id_prev = Nx.floor(ids) |> Nx.as_type({:s, 8})
id_next = Nx.add(id_prev, 1) |> Nx.min(in_size - 1)
w_prev = Nx.subtract(id_next, ids)
w_next = Nx.subtract(1.0, w_prev)
val_prev = Nx.take(input, id_prev, axis: spatial_dimension)
val_next = Nx.take(input, id_next, axis: spatial_dimension)
w_prev = Nx.broadcast(w_prev, Nx.shape(val_prev), axes: [spatial_dimension])
w_next = Nx.broadcast(w_next, Nx.shape(val_next), axes: [spatial_dimension])
Nx.add(Nx.multiply(w_prev, val_prev), Nx.multiply(w_next, val_next))
end
defnp resize_linear_noalign(input, output_shape, spatial_dimension) do
in_size = elem(Nx.shape(input), spatial_dimension)
out_size = elem(output_shape, spatial_dimension)
w = in_size / out_size
ids =
Nx.iota({out_size})
|> Nx.multiply(w)
|> Nx.add(w / 2.0 - 0.5)
id_prev = Nx.floor(ids) |> Nx.as_type({:s, 8})
id_next = Nx.add(id_prev, 1)
w_prev = Nx.subtract(id_next, ids)
w_next = Nx.subtract(1.0, w_prev)
val_prev = Nx.take(input, Nx.max(id_prev, 0), axis: spatial_dimension)
val_next = Nx.take(input, Nx.min(id_next, in_size - 1), axis: spatial_dimension)
w_prev = Nx.broadcast(w_prev, Nx.shape(val_prev), axes: [spatial_dimension])
w_next = Nx.broadcast(w_next, Nx.shape(val_next), axes: [spatial_dimension])
Nx.add(Nx.multiply(w_prev, val_prev), Nx.multiply(w_next, val_next))
end
# Private Axon.Layers implementation of activations for the compiler
# to use when invoking activation layers.
@activation_layers [:exp, :gelu, :hard_tanh, :linear, :log_sigmoid] ++
[:mish, :relu, :relu6, :sigmoid, :silu, :softplus] ++
[:softsign, :tanh]
for activation <- @activation_layers do
@doc false
defn unquote(activation)(input, _opts \\ []) do
transform(input, fn inp ->
Elixir.Kernel.apply(Axon.Activations, unquote(activation), [inp])
end)
end
end
@activation_layers_with_opts [:celu, :elu, :hard_sigmoid, :hard_silu, :leaky_relu] ++
[:log_softmax, :selu, :softmax]
for activation <- @activation_layers_with_opts do
defn unquote(activation)(input, opts \\ []) do
transform(input, fn inp ->
Elixir.Kernel.apply(Axon.Activations, unquote(activation), [
inp,
Keyword.delete(opts, :mode)
])
end)
end
end
# Private combinator implementations that expect variable
# arguments
@doc false
@element_wise_layers [:add, :subtract, :multiply]
for op <- @element_wise_layers do
defn unquote(op)(inputs, _opts \\ []) do
transform(inputs, fn inputs ->
[first | rest] = Tuple.to_list(inputs)
Enum.reduce(rest, first, fn next, acc ->
apply(Nx, unquote(op), [acc, next])
end)
end)
end
end
@doc false
defn concatenate(inputs, opts \\ []) do
opts = keyword!(opts, axis: -1, mode: :inference)
transform(inputs, fn inputs ->
inputs
|> Tuple.to_list()
|> Nx.concatenate(axis: opts[:axis])
end)
end
@recurrent_layers [:lstm, :gru, :conv_lstm]
for rnn_op <- @recurrent_layers do
defn unquote(rnn_op)(input, hidden_state, input_kernel, hidden_kernel, bias \\ 0, opts \\ []) do
{bias, opts} =
transform({bias, opts}, fn
{[_ | _] = opts, _opts} ->
{0, opts}
{[] = opts, _opts} ->
{0, opts}
{bias, opts} ->
{bias, opts}
end)
opts = transform(opts, &Keyword.put(&1, :cell, unquote(rnn_op)))
rnn(input, hidden_state, input_kernel, hidden_kernel, bias, opts)
end
end
defnp rnn(input, hidden_state, input_kernel, hidden_kernel, bias, opts \\ []) do
opts =
keyword!(opts,
mode: :inference,
unroll: :static,
cell: :lstm,
activation: :sigmoid,
gate: :tanh,
conv_opts: []
)
bias =
transform({bias, opts[:cell]}, fn
{0, :lstm} -> {0, 0, 0, 0}
{0, :gru} -> {0, 0, 0, 0}
{0, :conv_lstm} -> {0, 0, 0}
{bias, _} -> bias
end)
cell_fn =
transform({opts[:cell], opts[:activation], opts[:gate], opts[:conv_opts]}, &get_cell_fn/1)
transform({input, hidden_state, input_kernel, hidden_kernel, bias, cell_fn, opts[:unroll]}, fn
{input, hidden_state, input_kernel, hidden_kernel, bias, cell_fn, :static} ->
Axon.Recurrent.static_unroll(
cell_fn,
input,
hidden_state,
input_kernel,
hidden_kernel,
bias
)
{input, hidden_state, input_kernel, hidden_kernel, bias, cell_fn, :dynamic} ->
Axon.Recurrent.dynamic_unroll(
cell_fn,
input,
hidden_state,
input_kernel,
hidden_kernel,
bias
)
end)
end
defp get_cell_fn({:lstm, activation, gate, _}) do
gate_fn = &apply(Axon.Activations, gate, [&1])
act_fn = &apply(Axon.Activations, activation, [&1])
&Axon.Recurrent.lstm_cell(&1, &2, &3, &4, &5, gate_fn, act_fn)
end
defp get_cell_fn({:gru, activation, gate, _}) do
gate_fn = &apply(Axon.Activations, gate, [&1])
act_fn = &apply(Axon.Activations, activation, [&1])
&Axon.Recurrent.gru_cell(&1, &2, &3, &4, &5, gate_fn, act_fn)
end
defp get_cell_fn({:conv_lstm, _, _, conv_opts}) do
&Axon.Recurrent.conv_lstm_cell(&1, &2, &3, &4, &5, conv_opts)
end
end
defmodule Stripe.Charges do
@moduledoc """
To charge a credit or a debit card, you create a new charge object. You can
retrieve and refund individual charges as well as list all charges. Charges
are identified by a unique random ID.
"""
@endpoint "charges"
@doc """
To charge a credit card, you create a new charge object. If your API key is
in test mode, the supplied card won't actually be charged, though
everything else will occur as if in live mode. (Stripe assumes that the
charge would have completed successfully).
## Arguments
- `amount` - required - A positive integer in the smallest currency unit
(e.g. 100 cents to charge $1.00, or 1 to charge ¥1, a zero-decimal currency)
representing how much to charge the card. The minimum amount is $0.50
(or equivalent in charge currency).
- `currency` - required - 3-letter ISO code for currency.
- `customer` - optional, either customer or card is required - The ID of an
existing customer that will be charged in this request.
- `card` - optional, either card or customer is required - A card to be
charged. If you also pass a customer ID, the card must be the ID of a
card belonging to the customer. Otherwise, if you do not pass a customer
ID, the card you provide must either be a token, like the ones returned
by Stripe.js, or a dictionary containing a user's credit card details,
with the options described below. Although not all information is
required, the extra info helps prevent fraud.
- `number` - required - The card number, as a string without any
separators.
- `exp_month` - required - Two digit number representing the card's
expiration month.
- `exp_year` - required - Two or four digit number representing the
card's expiration year.
- `cvc` - optional, highly recommended - Card security code.
- `name` - optional - Cardholder's full name.
- `address_line1` - optional
- `address_line2` - optional
- `address_city` - optional
- `address_zip` - optional
- `address_state` - optional
- `address_country` - optional
- `description` - optional, default is null - An arbitrary string which you
can attach to a charge object. It is displayed alongside the charge in the
web interface. Note that if you use Stripe to send automatic email
receipts to your customers, your receipt emails will include the
description of the charge(s) that they are describing.
- `metadata` - optional, default is { } - A set of key/value pairs that you
can attach to a customer object. It can be useful for storing additional
information about the customer in a structured format. It's often a good
idea to store an email address in metadata for tracking later.
- `capture` - optional, default is true - Whether or not to immediately
capture the charge. When false, the charge issues an authorization (or
pre-authorization), and will need to be captured later. Uncaptured
charges expire in 7 days. For more information, see authorizing charges
and settling later.
- `statement_description` - optional, default is null - An arbitrary string
to be displayed alongside your company name on your customer's credit
card statement. This may be up to 15 characters. As an example, if your
website is RunClub and you specify 5K Race Ticket, the user will see
RUNCLUB 5K RACE TICKET. The statement description may not include `<>"'`
characters. While most banks display this information consistently, some
may display it incorrectly or not at all.
- `application_fee` - optional - A fee in cents that will be applied to the
charge and transferred to the application owner's Stripe account. The
request must be made with an OAuth key in order to take an application
fee. For more information, see the application fees documentation.
## Returns
Returns a charge object if the charge succeeded. Returns an error if something
goes wrong. A common source of error is an invalid or expired card, or a valid
card with insufficient available balance.
If the cvc parameter is provided, Stripe will attempt to check the CVC's
correctness, and the check's result will be returned. Similarly, if
address_line1 or address_zip are provided, Stripe will try to check
the validity of those parameters. Some banks do not support checking one or
more of these parameters, in which case Stripe will return an 'unchecked'
result. Also note that, depending on the bank, charges can succeed even when
passed incorrect CVC and address information.
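## Example

A minimal test-mode sketch (the token value and the `{:ok, charge}` result
shape are assumptions based on `Stripe.Util.handle_stripe_response`):

    {:ok, charge} = Stripe.Charges.create(1000,
      card: "tok_visa",
      description: "Order #1234"
    )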
"""
def create(amount, params) do
  # Default the currency to USD when none is given
  params = Keyword.put_new(params, :currency, "USD")
  # Merge the amount into the params
  params = Keyword.put_new(params, :amount, amount)

  Stripe.make_request(:post, @endpoint, params)
  |> Stripe.Util.handle_stripe_response
end
def list(limit \\ 10) do
Stripe.make_request(:get, "#{@endpoint}?limit=#{limit}")
|> Stripe.Util.handle_stripe_response
end
def change(id, params) do
Stripe.make_request(:post, "#{@endpoint}/#{id}",params)
|> Stripe.Util.handle_stripe_response
end
def capture(id) do
Stripe.make_request(:post, "#{@endpoint}/#{id}/capture")
|> Stripe.Util.handle_stripe_response
end
def get(id) do
Stripe.make_request(:get, "#{@endpoint}/#{id}")
|> Stripe.Util.handle_stripe_response
end
def refund(id) do
Stripe.make_request(:post, "#{@endpoint}/#{id}/refunds")
|> Stripe.Util.handle_stripe_response
end
def refund_partial(id, amount) do
params = [amount: amount]
Stripe.make_request(:post, "#{@endpoint}/#{id}/refunds", params)
|> Stripe.Util.handle_stripe_response
end
end
defmodule GCloud.SpeechAPI.Streaming.Client do
@moduledoc """
A client process for Streaming API.
Once a client is started, it establishes a connection to the Streaming API,
gets ready to send requests to the API and forwards incoming responses to a set process.
## Requests
The requests can be sent using `send_request/3`. Each request should be a
`t:Google.Cloud.Speech.V1.StreamingRecognizeRequest.t/0` struct created using
`Google.Cloud.Speech.V1.StreamingRecognizeRequest.new/1` accepting keyword with struct fields.
This is an auto-generated module, so check out [this notice](readme.html#auto-generated-modules) and
[API reference](https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.StreamingRecognizeRequest)
## Responses
The client sends responses from the API via messages to the target (by default, the process that spawned the client).
Each message is a `t:Google.Cloud.Speech.V1.StreamingRecognizeResponse.t/0` struct or a tuple with the pid of the sender and that struct. The message format is controlled by the `include_sender` option of the client.
## Usage
1. Start the client
1. Send request with `Google.Cloud.Speech.V1.StreamingRecognitionConfig`
1. Send request(s) with `Google.Cloud.Speech.V1.RecognitionAudio` containing audio data
1. (async) Receive messages containing `Google.Cloud.Speech.V1.StreamingRecognizeResponse`
1. Send final `Google.Cloud.Speech.V1.RecognitionAudio` with option `end_stream: true`
or call `end_stream/1` after final audio chunk has been sent.
1. Stop the client after receiving all results
See [README](readme.html) for code example
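A condensed sketch of this flow (the audio file, config values, and
`interim_results` setting are assumptions; field names follow the V1 protobuf API):

    alias GCloud.SpeechAPI.Streaming.Client

    alias Google.Cloud.Speech.V1.{
      RecognitionConfig,
      StreamingRecognitionConfig,
      StreamingRecognizeRequest,
      StreamingRecognizeResponse
    }

    {:ok, client} = Client.start_link()

    cfg =
      RecognitionConfig.new(
        encoding: :FLAC,
        language_code: "en-GB",
        sample_rate_hertz: 16_000
      )

    str_cfg = StreamingRecognitionConfig.new(config: cfg, interim_results: false)

    :ok =
      Client.send_request(
        client,
        StreamingRecognizeRequest.new(streaming_request: {:streaming_config, str_cfg})
      )

    # Send the audio and close the client-side stream after the final chunk
    content = File.read!("sample.flac")

    :ok =
      Client.send_request(
        client,
        StreamingRecognizeRequest.new(streaming_request: {:audio_content, content}),
        end_stream: true
      )

    # Responses arrive as messages (see "Responses" above)
    receive do
      %StreamingRecognizeResponse{results: results} -> results
    end

    Client.stop(client)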
"""
use GenServer
alias __MODULE__.Connection
alias Google.Cloud.Speech.V1.{
StreamingRecognizeRequest,
StreamingRecognizeResponse,
SpeechRecognitionAlternative,
WordInfo
}
alias Google.Protobuf.Duration
@nanos_per_second 1_000_000_000
@typedoc "Format of messages sent by the client to the target"
@type message :: StreamingRecognizeResponse.t() | {pid(), StreamingRecognizeResponse.t()}
@doc """
Starts a linked client process.
Possible options are:
- `target` - A pid of a process that will receive recognition results. Defaults to `self()`.
- `monitor_target` - If set to true, a client will monitor the target and shut down
when the target goes down. Defaults to `false`.
- `include_sender` - If true, a client will include its pid in messages sent to the target.
- `start_time` - Time by which response times will be shifted in nanoseconds. Defaults to `0` ns.
"""
@spec start_link(options :: Keyword.t()) :: {:ok, pid} | {:error, any()}
def start_link(options \\ []) do
do_start(:start_link, options)
end
@doc """
Starts a client process without links.
See `start_link/1` for more info
"""
@spec start(options :: Keyword.t()) :: {:ok, pid} | {:error, any()}
def start(options \\ []) do
do_start(:start, options)
end
defp do_start(fun, options) do
options =
options
|> Map.new()
|> Map.put_new(:target, self())
|> Map.put_new(:monitor_target, false)
|> Map.put_new(:include_sender, false)
|> Map.put_new(:start_time, 0)
apply(GenServer, fun, [__MODULE__, options])
end
@doc """
Stops a client process.
"""
@spec stop(client :: pid()) :: :ok
defdelegate stop(pid), to: GenServer
@doc """
Sends a request to the API. If option `end_stream: true` is passed,
closes a client-side gRPC stream.
"""
@spec send_request(client :: pid(), StreamingRecognizeRequest.t(), Keyword.t()) :: :ok
def send_request(pid, request, opts \\ []) do
GenServer.cast(pid, {:send_requests, [request], opts})
:ok
end
@doc """
Sends a list of requests to the API. If option `end_stream: true` is passed,
closes a client-side gRPC stream.
"""
@spec send_requests(client :: pid(), [StreamingRecognizeRequest.t()], Keyword.t()) :: :ok
def send_requests(pid, request, opts \\ []) do
GenServer.cast(pid, {:send_requests, request, opts})
:ok
end
@doc """
Closes a client-side gRPC stream.
"""
@spec end_stream(client :: pid()) :: :ok
def end_stream(pid) do
GenServer.cast(pid, :end_stream)
:ok
end
@impl true
def init(opts) do
{:ok, conn} = Connection.start_link()
state = opts |> Map.merge(%{conn: conn})
if opts.monitor_target do
Process.monitor(opts.target)
end
{:ok, state}
end
@impl true
def handle_cast({:send_requests, requests, opts}, state) do
:ok = state.conn |> Connection.send_requests(requests, opts)
{:noreply, state}
end
@impl true
def handle_cast(:end_stream, state) do
:ok = state.conn |> Connection.end_stream()
{:noreply, state}
end
@impl true
def handle_info(%StreamingRecognizeResponse{} = response, state) do
%{start_time: start_time} = state
response =
response
|> Map.update!(:results, &Enum.map(&1, fn res -> update_result_time(res, start_time) end))
if state.include_sender do
send(state.target, {self(), response})
else
send(state.target, response)
end
{:noreply, state}
end
@impl true
def handle_info({:DOWN, _ref, :process, pid, _reason}, %{target: pid} = state) do
{:stop, :normal, state}
end
@impl true
def terminate(_reason, state) do
state.conn |> Connection.stop()
end
defp update_result_time(result, start_time) when is_integer(start_time) do
result
|> Map.update!(:result_end_time, &duration_sum(&1, start_time))
|> Map.update!(:alternatives, &update_alternatives(&1, start_time))
end
defp update_alternatives(alternatives, start_time) do
alternatives |> Enum.map(&update_alternative(&1, start_time))
end
defp update_alternative(%SpeechRecognitionAlternative{words: words} = alt, start_time) do
updated_words =
words
|> Enum.map(fn %WordInfo{} = info ->
info
|> Map.update!(:start_time, &duration_sum(&1, start_time))
|> Map.update!(:end_time, &duration_sum(&1, start_time))
end)
%{alt | words: updated_words}
end
defp duration_sum(%Duration{} = a, b) when is_integer(b) do
b + a.nanos + a.seconds * @nanos_per_second
end
end
|
lib/gcloud/speech_api/streaming_client.ex
| 0.901207
| 0.411584
|
streaming_client.ex
|
starcoder
|
defmodule Snitch.Data.Schema.Package do
@moduledoc """
Models a Package which is composed of many `PackageItem`s.
"""
use Snitch.Data.Schema
alias Ecto.Nanoid
alias Snitch.Data.Schema.Embedded.ShippingMethod, as: EmbeddedShippingMethod
alias Snitch.Data.Schema.{Order, PackageItem, ShippingCategory, ShippingMethod, StockLocation}
@typedoc """
A Package that gets shipped to a user.
  Note that both `Package` and `PackageItem` have the `:shipping_tax` field and
  `package.shipping_tax` is NOT the sum of `package_item.shipping_tax`.
## Fields
#### `:tracking`
This can be any `map` containing information to track the package and its
shipment.
#### `:shipping_method`
The `ShippingMethod` chosen for this package by the user, not to be confused
with the `:shipping_methods` field!
#### `:shipping_methods`
The `ShippingMethod`s and their estimated costs, the user chooses one of them
and the choice is then stored in `:shipping_method`.
#### `:cost`
The shipping cost for the chosen `:shipping_method`.
#### `:shipping_tax`
The shipping tax on this package. This is different from the taxes on the
constituent package items. The total shipping tax for a package is thus:
```
total_tax_on_shipping_of_items =
package.items
|> Stream.map(fn %{shipping_tax: tax} -> tax end)
|> Enum.reduce(&Money.add!/2)
total_shipping_tax = Money.add!(
package.shipping_tax,
total_tax_on_shipping_of_items
)
```
#### `:origin`
The `StockLocation` where this package originates from.
"""
@type t :: %__MODULE__{}
schema "snitch_packages" do
field(:number, Nanoid, autogenerate: true)
field(:state, :string)
field(:shipped_at, :utc_datetime)
field(:tracking, :map)
embeds_many(:shipping_methods, EmbeddedShippingMethod, on_replace: :delete)
field(:cost, Money.Ecto.Composite.Type)
field(:shipping_tax, Money.Ecto.Composite.Type)
belongs_to(:order, Order)
belongs_to(:origin, StockLocation)
belongs_to(:shipping_category, ShippingCategory)
belongs_to(:shipping_method, ShippingMethod)
has_many(:items, PackageItem)
timestamps()
end
@price_fields ~w(cost shipping_tax)a
@update_fields ~w(state shipped_at tracking shipping_method_id)a ++ @price_fields
@shipping_fields [:shipping_method_id | @price_fields]
@create_fields ~w(order_id origin_id shipping_category_id)a ++ @update_fields
@required_fields ~w(state order_id origin_id shipping_category_id)a
@doc """
Returns a `Package` changeset to create a new package.
A list of `PackageItem` params can be provided under the `:items` key.
> Note that the `:items` must be plain `map`s and not `struct`s.
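
  A hypothetical call (the `*_params` variables stand in for plain maps built
  elsewhere; IDs are illustrative):

  ```
  Package.create_changeset(%Package{}, %{
    state: "pending",
    order_id: order.id,
    origin_id: origin.id,
    shipping_category_id: category.id,
    shipping_methods: shipping_method_params,
    items: item_params
  })
  ```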
"""
@spec create_changeset(t, map) :: Ecto.Changeset.t()
def create_changeset(%__MODULE__{} = package, params) do
package
|> cast(params, @create_fields)
|> validate_required(@required_fields)
|> foreign_key_constraint(:order_id)
|> foreign_key_constraint(:origin_id)
|> foreign_key_constraint(:shipping_category_id)
|> unique_constraint(:number)
|> cast_assoc(:items, with: &PackageItem.create_changeset/2)
|> cast_embed(:shipping_methods, required: true)
|> common_changeset()
end
@doc """
Returns a `Package` changeset to update the `package`.
"""
@spec update_changeset(t, map) :: Ecto.Changeset.t()
def update_changeset(package, params) do
package
|> cast(params, @update_fields)
|> cast_embed(:shipping_methods)
|> common_changeset()
end
@doc """
  Returns a `Package` changeset to update the shipping details of the `package`.
  The `:shipping_method`, `:cost` and `:shipping_tax` must either be changed via
`params` or already set previously in the `package`.
"""
@spec shipping_changeset(t, map) :: Ecto.Changeset.t()
def shipping_changeset(package, params) do
package
|> update_changeset(params)
|> validate_required(@shipping_fields)
end
defp common_changeset(package_changeset) do
package_changeset
|> foreign_key_constraint(:shipping_method_id)
|> validate_amount(:cost)
|> validate_amount(:shipping_tax)
end
end
|
apps/snitch_core/lib/core/data/schema/package.ex
| 0.890996
| 0.879665
|
package.ex
|
starcoder
|
defmodule Ecto.Query.Builder.Select do
@moduledoc false
alias Ecto.Query.Builder
@doc """
Escapes a select.
It allows tuples, lists and variables at the top level or a
single `assoc(x, y)` expression.
## Examples
iex> escape({1, 2}, [])
{{:{}, [], [:{}, [], [1, 2]]}, %{}}
iex> escape([1, 2], [])
{[1, 2], %{}}
iex> escape(quote(do: x), [x: 0])
{{:{}, [], [:&, [], [0]]}, %{}}
iex> escape(quote(do: ^123), [])
{{:{}, [], [:^, [], [0]]}, %{0 => 123}}
"""
  @spec escape(Macro.t, Keyword.t) :: {Macro.t, map}
def escape({:assoc, _, args} = assoc, vars) when is_list(args) do
escape_assoc(assoc, %{}, vars)
end
def escape(other, vars), do: do_escape(other, %{}, vars)
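  # `external` accumulates interpolated (`^`) values keyed by parameter index
  # (see the `^123` doctest above).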
# Tuple
defp do_escape({left, right}, external, vars) do
do_escape({:{}, [], [left, right]}, external, vars)
end
# Tuple
defp do_escape({:{}, _, list}, external, vars) do
{list, external} = Enum.map_reduce(list, external, &do_escape(&1, &2, vars))
expr = {:{}, [], [:{}, [], list]}
{expr, external}
end
# List
defp do_escape(list, external, vars) when is_list(list) do
Enum.map_reduce(list, external, &do_escape(&1, &2, vars))
end
# var - where var is bound
defp do_escape({var, _, context}, external, vars)
when is_atom(var) and is_atom(context) do
expr = Builder.escape_var(var, vars)
{expr, external}
end
defp do_escape(other, external, vars) do
Builder.escape(other, external, vars)
end
# assoc/2
defp escape_assoc({:assoc, _, [{var, _, context}, list]}, external, vars)
when is_atom(var) and is_atom(context) and is_list(list) do
var = Builder.escape_var(var, vars)
{list, external} = Enum.map_reduce(list, external,
&escape_assoc_fields(&1, &2, vars))
expr = {:{}, [], [:assoc, [], [var, list]]}
{expr, external}
end
defp escape_assoc(other, _external, _vars) do
raise Ecto.QueryError,
reason: "`#{Macro.to_string(other)}` is not a valid expression inside `assoc/2` selector"
end
defp escape_assoc_fields({field, {assoc_var, _, assoc_ctxt}}, external, vars)
when is_atom(field) and is_atom(assoc_var) and is_atom(assoc_ctxt) do
expr = {field, Builder.escape_var(assoc_var, vars)}
{expr, external}
end
defp escape_assoc_fields({field, other}, external, vars)
when is_atom(field) do
{expr, external} = escape_assoc(other, external, vars)
{{field, expr}, external}
end
defp escape_assoc_fields(other, external, vars) do
escape_assoc(other, external, vars)
end
@doc """
Builds a quoted expression.
The quoted expression should evaluate to a query at runtime.
If possible, it does all calculations at compile time to avoid
runtime work.
"""
@spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t
def build(query, binding, expr, env) do
binding = Builder.escape_binding(binding)
{expr, external} = escape(expr, binding)
external = Builder.escape_external(external)
select = quote do: %Ecto.Query.QueryExpr{
expr: unquote(expr),
external: unquote(external),
file: unquote(env.file),
line: unquote(env.line)}
Builder.apply_query(query, __MODULE__, [select], env)
end
@doc """
The callback applied by `build/4` to build the query.
"""
@spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t
def apply(query, select) do
query = Ecto.Queryable.to_query(query)
if query.select do
raise Ecto.QueryError, reason: "only one select expression is allowed in query"
else
%{query | select: select}
end
end
end
|
lib/ecto/query/builder/select.ex
| 0.807688
| 0.482673
|
select.ex
|
starcoder
|
defmodule NervesHubLink.ConsoleChannel do
use GenServer
require Logger
@moduledoc """
Starts an IEx shell in process to allow remote console interaction
The remote console ability is disabled by default and requires the
`remote_iex` key to be enabled in the config:
```
config :nerves_hub_link, remote_iex: true
```
  Once connected, shell data on the device will be pushed up the socket
  via the `up` event.
  The following events are supported _from_ the server:
* `phx_close` or `phx_error` - This will cause the channel to attempt rejoining
every 5 seconds. You can change the rejoin timing in the config
```
config :nerves_hub_link, rejoin_after: 3_000
```
* `dn` - String data to send to the shell for evaluation
* `restart` - Restart the IEx shell process
* `window_size` - A map with `:height` and `:width` keys for resizing the terminal
The following events are supported _from_ this client:
* `up` - String data to be displayed to the user console frontend
"""
alias PhoenixClient.{Channel, Message}
alias NervesHubLink.Client
@rejoin_after Application.get_env(:nerves_hub_link, :rejoin_after, 5_000)
@version "1.0.0"
defmodule State do
defstruct socket: nil,
topic: "console",
channel: nil,
params: [],
iex_pid: nil
end
def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)
@impl GenServer
def init(opts) do
send(self(), :join)
{:ok, State.__struct__(opts)}
end
@impl GenServer
def handle_info(:join, %{socket: socket, topic: topic, params: params} = state) do
params = Map.put(params, "console_version", @version)
case Channel.join(socket, topic, params) do
{:ok, _reply, channel} ->
{:noreply, %{state | channel: channel}}
_error ->
Process.send_after(self(), :join, @rejoin_after)
{:noreply, state}
end
end
def handle_info({:tty_data, data}, state) do
Channel.push_async(state.channel, "up", %{data: data})
{:noreply, state, iex_timeout()}
end
def handle_info(%Message{event: "restart"}, state) do
Logger.warn("[#{inspect(__MODULE__)}] Restarting IEx process from web request")
Channel.push_async(state.channel, "up", %{data: "\r\n*** Restarting IEx ***\r\n"})
state =
state
|> stop_iex()
|> start_iex()
{:noreply, state}
end
def handle_info(%Message{event: "dn"} = msg, %{iex_pid: nil} = state) do
handle_info(msg, start_iex(state))
end
def handle_info(%Message{event: "dn", payload: %{"data" => data}}, state) do
ExTTY.send_text(state.iex_pid, data)
{:noreply, state, iex_timeout()}
end
def handle_info(%Message{event: event, payload: payload}, state)
when event in ["phx_error", "phx_close"] do
reason = Map.get(payload, :reason, "unknown")
_ = Client.handle_error(reason)
Process.send_after(self(), :join, @rejoin_after)
{:noreply, state}
end
def handle_info(:timeout, state) do
msg = """
\r
****************************************\r
* Session timeout due to inactivity *\r
* *\r
* Press any key to continue... *\r
****************************************\r
"""
Channel.push_async(state.channel, "up", %{data: msg})
{:noreply, stop_iex(state)}
end
def handle_info(req, state) do
Client.handle_error("Unhandled Console handle_info - #{inspect(req)}")
{:noreply, state}
end
defp iex_timeout() do
Application.get_env(:nerves_hub_link, :remote_iex_timeout, 300) * 1000
end
defp start_iex(state) do
shell_opts = [[dot_iex_path: dot_iex_path()]]
{:ok, iex_pid} = ExTTY.start_link(handler: self(), type: :elixir, shell_opts: shell_opts)
%{state | iex_pid: iex_pid}
end
defp dot_iex_path() do
[".iex.exs", "~/.iex.exs", "/etc/iex.exs"]
|> Enum.map(&Path.expand/1)
|> Enum.find("", &File.regular?/1)
end
defp stop_iex(%{iex_pid: nil} = state), do: state
defp stop_iex(%{iex_pid: iex} = state) do
_ = Process.unlink(iex)
    :ok = GenServer.stop(iex, :normal, 10_000)
%{state | iex_pid: nil}
end
end
|
lib/nerves_hub_link/console_channel.ex
| 0.797202
| 0.714752
|
console_channel.ex
|
starcoder
|
defmodule DistilleryPackager.Debian.Config do
@moduledoc """
This module is used to capture the configuration of the debian package build.
The module also includes validation functionality which is used to ensure that
the data is in the correct format.
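
  A sketch of the `deb_package` configuration this module reads from `mix.exs`
  (keys correspond to the `handle_config/2` clauses below; values are illustrative):

      def project do
        [
          app: :my_app,
          version: "0.1.0",
          deb_package: [
            vendor: "Example Inc.",
            maintainers: ["Jane Doe <jane@example.com>"],
            homepage: "https://example.com",
            base_path: "/opt",
            owner: [user: "root", group: "root"]
          ]
        ]
      end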
"""
defstruct name: nil, version: nil, arch: nil, description: nil, vendor: nil,
maintainers: nil, homepage: nil, external_dependencies: nil,
maintainer_scripts: [], config_files: [], base_path: "/opt",
additional_files: [], owner: [user: "root", group: "root"],
package_name: nil, exclude_init_scripts: nil
use Vex.Struct
alias DistilleryPackager.Utils
alias Mix.Project
import Mix.Releases.Shell, only: [error: 1]
validates :name, presence: true
validates :version, presence: true
validates :arch, presence: true
validates :description, presence: true
validates :vendor, presence: true
validates :maintainers, presence: true
validates :homepage, presence: true
validates :base_path, presence: true
validates [:owner, :user], presence: true
validates [:owner, :group], presence: true
def build_config(release = %Mix.Releases.Release{}, options) do
base_config =
[
{:name, Atom.to_string(release.name)},
{:version, format_package_version(release.version, options)},
{:arch, format_package_arch(options)},
{:description, Project.config[:description]}
] ++ config_from_package(Project.config[:deb_package])
base_config =
base_config
|> Enum.dedup
|> Enum.reject(&is_nil(&1))
|> Enum.into(%{})
DistilleryPackager.Debian.Config
|> struct(base_config)
|> DistilleryPackager.Utils.Config.sanitize_config
|> check_valid
end
defp format_package_arch(%{architecture: arch}), do: arch
defp format_package_arch(_), do: Utils.Config.detect_arch
defp format_package_version(version, %{distribution: distribution}) do
"#{version}~#{distribution}"
end
defp format_package_version(version, _), do: version
defp config_from_package(nil) do
"""
    Error: You haven't defined any 'deb_package' data in mix.exs.
Check the configuration section of the github repository to
see how to add this in.
"""
|> String.replace("\n", " ")
|> throw
end
defp config_from_package(value) when is_list(value) do
value
|> Enum.map(fn({key, value}) -> handle_config(key, value) end)
|> Enum.dedup
|> Enum.reject(&(is_nil(&1)))
end
@joining_list_values [:maintainers, :external_dependencies]
defp handle_config(key, [_ | _] = value) when key in @joining_list_values do
{key, Enum.join(value, ", ")}
end
defp handle_config(:config_files, value) do
{:config_files, value}
end
defp handle_config(:additional_files, value) do
{:additional_files, value}
end
defp handle_config(:maintainer_scripts, [_ | _] = value) do
{:maintainer_scripts, value}
end
defp handle_config(:homepage, value) when is_bitstring(value) do
{:homepage, value}
end
defp handle_config(:vendor, value) when byte_size(value) > 0 do
{:vendor, value}
end
defp handle_config(:base_path, value) do
{:base_path, Path.absname(value, "/")}
end
defp handle_config(:owner, value) when is_list(value) do
handle_config(:owner, Enum.into(value, %{}))
end
defp handle_config(:owner, %{user: user, group: group})
when user != nil and group != nil do
{:owner, [user: user, group: group]}
end
defp handle_config(:package_name, value) do
{:package_name, value}
end
defp handle_config(:exclude_init_scripts, value) do
{:exclude_init_scripts, value}
end
defp handle_config(_, _), do: nil
defp check_valid(config = %DistilleryPackager.Debian.Config{}) do
# Use Vex to validate whether the config is valid. If not,
# then raise an error with a list of config errors
if Vex.valid?(config) do
{:ok, config}
else
error "The configuration is invalid!"
for err = {:error, _field, _type, _msg} <- Vex.errors(config) do
print_validation_error(err)
end
{:error, Vex.errors(config)}
end
end
defp print_validation_error(
{:error, field, _type, msg}) when is_atom(field) do
error(" - '#{Atom.to_string(field)}' #{msg}")
end
defp print_validation_error(
{:error, field, _type, msg}) when is_list(field) do
field = Enum.map_join(field, " -> ", &("'#{&1}'"))
error(" - #{field} #{msg}")
end
end
|
lib/distillery_packager/debian/config.ex
| 0.673621
| 0.424501
|
config.ex
|
starcoder
|
defmodule SafeURL do
@moduledoc """
`SafeURL` is library for mitigating Server Side Request
Forgery vulnerabilities in Elixir. Private/reserved IP
addresses are blocked by default, and users can add
additional CIDR ranges to the blocklist, or alternatively
allow specific CIDR ranges to which the application is
allowed to make requests.
You can use `allowed?/2` or `validate/2` to check if a
URL is safe to call. If the `HTTPoison` application is
available, you can also call `get/4` directly which will
validate the host before making an HTTP request.
## Examples
iex> SafeURL.allowed?("https://includesecurity.com")
true
iex> SafeURL.validate("http://google.com/", schemes: ~w[https])
{:error, :restricted}
iex> SafeURL.validate("http://230.10.10.10/")
{:error, :restricted}
iex> SafeURL.validate("http://230.10.10.10/", block_reserved: false)
:ok
# If HTTPoison is available:
iex> SafeURL.get("https://10.0.0.1/ssrf.txt")
{:error, :restricted}
iex> SafeURL.get("https://google.com/")
{:ok, %HTTPoison.Response{...}}
## Options
`SafeURL` can be configured to customize and override
validation behaviour by passing the following options:
* `:block_reserved` - Block reserved/private IP ranges.
Defaults to `true`.
* `:blocklist` - List of CIDR ranges to block. This is
additive with `:block_reserved`. Defaults to `[]`.
* `:allowlist` - List of CIDR ranges to allow. If
specified, blocklist will be ignored. Defaults to `[]`.
* `:schemes` - List of allowed URL schemes. Defaults to
`["http, "https"]`.
* `:dns_module` - Any module that implements the
`SafeURL.DNSResolver` behaviour. Defaults to `DNS` from
the `:dns` package.
If `:block_reserved` is `true` and additional hosts/ranges
are supplied with `:blocklist`, both of them are included in
the final blocklist to validate the address. If allowed
ranges are supplied with `:allowlist`, all blocklists are
ignored and any hosts not explicitly declared in the allowlist
are rejected.
These options can be set globally in your `config.exs` file:
config :safeurl,
block_reserved: true,
        blocklist: ~w[192.168.0.0/16],
schemes: ~w[https],
dns_module: MyCustomDNSResolver
Or they can be passed to the function directly, overriding any
global options if set:
iex> SafeURL.validate("http://10.0.0.1/", block_reserved: false)
:ok
iex> SafeURL.validate("https://app.service/", allowlist: ~w[172.16.31.10/24])
:ok
iex> SafeURL.validate("https://app.service/", blocklist: ~w[172.16.31.10/24])
{:error, :restricted}
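
  If you need a custom `:dns_module`, a minimal sketch of a resolver could look
  like this (assuming the behaviour's single `resolve/1` callback, which is how
  the module is invoked internally):

      defmodule MyCustomDNSResolver do
        @behaviour SafeURL.DNSResolver

        @impl true
        def resolve(hostname) do
          # Delegate to the default resolver from the :dns package
          DNS.resolve(hostname)
        end
      end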
"""
@reserved_ranges [
"0.0.0.0/8",
"10.0.0.0/8",
"192.168.3.11/10",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/29",
"192.0.2.0/24",
"192.168.127.12/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"172.16.17.32/4",
"240.0.0.0/4"
]
# Public API
# ----------
@doc """
Validate a string URL against a blocklist or allowlist.
This method checks if a URL is safe to be called by looking at
its scheme and resolved IP address, and matching it against
reserved CIDR ranges, and any provided allowlist/blocklist.
Returns `true` if the URL meets the requirements,
`false` otherwise.
## Examples
iex> SafeURL.allowed?("https://includesecurity.com")
true
iex> SafeURL.allowed?("http://10.0.0.1/")
false
iex> SafeURL.allowed?("http://10.0.0.1/", allowlist: ~w[10.0.0.0/8])
true
## Options
See [`Options`](#module-options) section above.
"""
@spec allowed?(binary(), Keyword.t()) :: boolean()
def allowed?(url, opts \\ []) do
uri = URI.parse(url)
opts = build_options(opts)
address = resolve_address(uri.host, opts.dns_module)
cond do
uri.scheme not in opts.schemes ->
false
opts.allowlist != [] ->
ip_in_ranges?(address, opts.allowlist)
true ->
!ip_in_ranges?(address, opts.blocklist)
end
end
@doc """
Alternative method of validating a URL, returning atoms instead
of booleans.
This calls `allowed?/2` underneath to check if a URL is safe to
be called. If it is, it returns `:ok`, otherwise
`{:error, :restricted}`.
## Examples
iex> SafeURL.validate("https://includesecurity.com")
:ok
iex> SafeURL.validate("http://10.0.0.1/")
{:error, :restricted}
iex> SafeURL.validate("http://10.0.0.1/", allowlist: ~w[10.0.0.0/8])
:ok
## Options
See [`Options`](#module-options) section above.
"""
@spec validate(binary(), Keyword.t()) :: :ok | {:error, :restricted}
def validate(url, opts \\ []) do
if allowed?(url, opts) do
:ok
else
{:error, :restricted}
end
end
@doc """
Validate a URL and execute a GET request using `HTTPoison`.
If the URL is safe, this function will execute the request using
`HTTPoison`, returning the result directly. Otherwise, it will
return `{:error, :restricted}`.
`headers` and `httpoison_options` will be passed directly to
`HTTPoison` when the request is executed. This function will
  raise if `HTTPoison` is not available.
See `allowed?/2` for more details on URL validation.
## Examples
iex> SafeURL.get("https://10.0.0.1/ssrf.txt")
{:error, :restricted}
iex> SafeURL.get("https://google.com/")
{:ok, %HTTPoison.Response{...}}
iex> SafeURL.get("https://google.com/", schemes: ~w[ftp])
{:error, :restricted}
## Options
See [`Options`](#module-options) section above.
"""
@spec get(binary(), Keyword.t(), HTTPoison.headers(), Keyword.t()) ::
{:ok, HTTPoison.Response.t()} | {:error, :restricted} | no_return()
def get(url, options \\ [], headers \\ [], httpoison_options \\ []) do
unless function_exported?(HTTPoison, :get, 3) do
raise "HTTPoison.get/3 not available"
end
with :ok <- validate(url, options) do
HTTPoison.get(url, headers, httpoison_options)
end
end
# Private Helpers
# ---------------
# Return a map of calculated options
defp build_options(opts) do
schemes = get_option(opts, :schemes)
allowlist = get_option(opts, :allowlist)
blocklist = get_option(opts, :blocklist)
dns_module = get_option(opts, :dns_module)
blocklist =
if get_option(opts, :block_reserved) do
blocklist ++ @reserved_ranges
else
blocklist
end
%{schemes: schemes, allowlist: allowlist, blocklist: blocklist, dns_module: dns_module}
end
# Get the value of a specific option, either from the application
# configs or overrides explicitly passed as arguments.
defp get_option(opts, key) do
if Keyword.has_key?(opts, key) do
Keyword.get(opts, key)
else
Application.get_env(:safeurl, key)
end
end
# Resolve hostname in DNS to an IP address (if not already an IP)
defp resolve_address(hostname, dns_module) do
hostname
|> to_charlist()
|> :inet.parse_address()
|> case do
{:ok, ip} ->
ip
{:error, :einval} ->
# TODO: safely handle multiple IPs/round-robin DNS
case dns_module.resolve(hostname) do
{:ok, ips} -> ips |> List.wrap() |> List.first()
{:error, _reason} -> nil
end
end
end
defp ip_in_ranges?({_, _, _, _} = addr, ranges) when is_list(ranges) do
Enum.any?(ranges, fn range ->
range
|> InetCidr.parse()
|> InetCidr.contains?(addr)
end)
end
defp ip_in_ranges?(_addr, _ranges), do: false
end
|
lib/safeurl/safeurl.ex
| 0.877477
| 0.408041
|
safeurl.ex
|
starcoder
|
defmodule Advent20.LobbyLayout do
@moduledoc """
Day 24: Lobby Layout
Reference for hex coordinate systems:
https://www.redblobgames.com/grids/hexagons
I'm using axial coordinates (q, r)
"""
@directions %{
e: {1, 0},
se: {0, 1},
sw: {-1, 1},
w: {-1, 0},
nw: {0, -1},
ne: {1, -1}
}
defp parse(input) do
input
|> String.split("\n", trim: true)
|> Enum.map(&String.codepoints/1)
|> Enum.map(&parse_directions(&1, []))
end
defp parse_directions([], parsed), do: Enum.reverse(parsed)
defp parse_directions(["s", "e" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :se) | parsed])
defp parse_directions(["s", "w" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :sw) | parsed])
defp parse_directions(["n", "e" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :ne) | parsed])
defp parse_directions(["n", "w" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :nw) | parsed])
defp parse_directions(["e" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :e) | parsed])
defp parse_directions(["w" | tail], parsed), do: parse_directions(tail, [Map.fetch!(@directions, :w) | parsed])
@doc """
Part 1: Go through the renovation crew's list and determine
which tiles they need to flip. After all of the instructions
have been followed, how many tiles are left with the black side up?
"""
def part_1(input) do
input
|> parse()
|> Enum.map(&resolve_coordinate/1)
|> black_tiles()
|> Enum.count()
end
@doc """
Part 2: How many tiles will be black after 100 days?
"""
def part_2(input) do
input
|> parse()
|> Enum.map(&resolve_coordinate/1)
|> black_tiles()
|> MapSet.new()
|> Stream.iterate(&step/1)
|> Enum.at(100)
|> Enum.count()
end
# Follow the directions from the hexagonal, return the final coordinate
defp resolve_coordinate(directions) do
Enum.reduce(directions, {0, 0}, fn {dq, dr}, {q, r} -> {q + dq, r + dr} end)
end
# Take a list of coordinates, return a list of black tiles
defp black_tiles(coordinates) do
coordinates
|> Enum.frequencies()
|> Enum.reject(fn {_, frequency} -> rem(frequency, 2) == 0 end)
|> Enum.map(fn {coord, _freq} -> coord end)
end
# Given a MapSet of black coordinates, return the next tile floor version as a new MapSet
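  # Rules: a black tile with zero or more than two black neighbours flips to
  # white; a white tile with exactly two black neighbours flips to black.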
defp step(black_coordinates) do
candidates = Enum.flat_map(black_coordinates, &neighbours/1) |> MapSet.new() |> MapSet.union(black_coordinates)
Enum.reduce(candidates, black_coordinates, fn coord, tile_state ->
current_state = if MapSet.member?(black_coordinates, coord), do: :black, else: :white
black_tile_neighbours = coord |> neighbours() |> Enum.count(&MapSet.member?(black_coordinates, &1))
case {current_state, black_tile_neighbours} do
{:black, count} when count == 0 or count > 2 -> MapSet.delete(tile_state, coord)
{:white, 2} -> MapSet.put(tile_state, coord)
_ -> tile_state
end
end)
end
  # Return the six neighbouring coordinates of a single tile
defp neighbours({q, r}), do: @directions |> Map.values() |> Enum.map(fn {dq, dr} -> {q + dq, r + dr} end)
end
|
lib/advent20/24_lobby_layout.ex
| 0.854095
| 0.720786
|
24_lobby_layout.ex
|
starcoder
|
defmodule CQEx.Query do
@moduledoc false
require Record
import CQEx, only: :macros
defmacro __using__(_opts) do
quote do
import CQEx.Query.Sigil
alias CQEx.Query, as: Q
end
end
@default_consistency 1
@type valid_query :: iodata() | CQEx.cql_query() | CQEx.cql_query_batch() | CQEx.Query.t()
defstruct statement: "",
values: %{},
reusable: nil,
named: false,
page_size: 100,
page_state: nil,
consistency: @default_consistency,
serial_consistency: nil,
value_encode_handler: nil
@type t() :: %__MODULE__{
statement: String.t(),
values: map(),
reusable: nil | boolean(),
          named: boolean(),
page_size: non_neg_integer(),
page_state: binary() | nil,
consistency: CQEx.consistency_values(),
serial_consistency: CQEx.serial_consistency_values() | nil,
value_encode_handler: fun() | nil
}
  @spec convert(CQEx.Query.t()) :: CQEx.cql_query()
  @spec convert(CQEx.cql_query()) :: CQEx.Query.t()
def convert(%CQEx.Query{
:statement => statement,
:values => values,
:reusable => reusable,
:named => named,
:page_size => page_size,
:page_state => page_state,
:consistency => consistency,
:serial_consistency => serial_consistency,
:value_encode_handler => value_encode_handler
}) do
cql_query(
statement: statement,
values: nullify(values, :null),
reusable: nullify(reusable, :undefined),
named: nullify(named, :undefined),
page_size: nullify(page_size, :undefined),
page_state: nullify(page_state, :undefined),
consistency: nullify(consistency, :undefined),
serial_consistency: nullify(serial_consistency, :undefined),
value_encode_handler: nullify(value_encode_handler, :undefined)
)
end
def convert(q) when Record.is_record(q, :cql_query) do
Enum.into([{:__struct__, CQEx.Query} | cql_query(q)], %{})
end
def convert(res), do: res
@spec call(CQEx.Client.client_value(), valid_query) ::
{:ok, map} | {:error, %{acc: any, msg: <<_::64, _::_*8>>}}
def call(c, q) do
client = CQEx.Client.get(c)
case q do
%CQEx.Query{statement: statement, values: values} when is_binary(statement) ->
{{statement, values}, :cqerl.run_query(client, convert(q))}
%CQEx.Query{} ->
:cqerl.run_query(client, convert(q))
any ->
:cqerl.run_query(client, any)
end
|> case do
{_, {:ok, result}} ->
{:ok, CQEx.Result.convert(result, client)}
{:ok, result} ->
{:ok, CQEx.Result.convert(result, client)}
{:error, {:error, {reason, stacktrace}}} ->
{:error, %{msg: ":cqerl processing error: #{reason}", acc: stacktrace}}
{{s, v}, {:error, {code, message, _extras}}} ->
{:error,
%{msg: "#{message} (Code #{code})\nStatement: #{s}\nValues: #{inspect(v)}", acc: []}}
{:error, {code, message, _extras}} ->
{:error, %{msg: "#{message} (Code #{code})", acc: []}}
end
end
@spec call!(CQEx.Client.client_value(), valid_query) :: CQEx.Result.t()
def call!(c, q) do
case call(c, q) do
{:ok, result} ->
result
{:error, %{msg: msg, acc: acc}} ->
raise CQEx.Error, msg: msg, acc: acc
end
end
require Logger
@spec cast(CQEx.Client.client_value(), valid_query) :: reference()
def cast(c, q) do
client = CQEx.Client.get(c)
current = self()
spawn_link(fn ->
tag =
case q do
%CQEx.Query{} ->
:cqerl.send_query(client, convert(q))
any ->
:cqerl.send_query(client, any)
end
send(current, {:tag, tag})
receive do
{:result, ^tag, result} ->
send(current, {:result, tag, CQEx.Result.convert(result, client)})
any ->
send(current, any)
end
end)
receive do
{:tag, tag} -> tag
end
end
@spec put(CQEx.Query.t()) :: CQEx.Query.t()
def put(q = %CQEx.Query{values: nil}) do
put(%{q | values: %{}})
end
def put(q = %CQEx.Query{}) do
q
end
@spec put(CQEx.Query.t(), any, any) :: CQEx.Query.t()
def put(q = %CQEx.Query{values: values}, key, value) when is_map(values) do
%{q | values: Map.put(values, key, value)}
end
def put(q = %CQEx.Query{values: values}, key, value) when is_list(values) do
%{q | values: Keyword.put(values, key, value)}
end
@spec get(CQEx.Query.t(), any, any) :: any
def get(query, key, default \\ nil)
def get(%CQEx.Query{values: nil}, _, default) do
default
end
def get(%CQEx.Query{values: values}, key, default) when is_map(values) do
Map.get(values, key, default)
end
def get(%CQEx.Query{values: values}, key, default) when is_list(values) do
Keyword.get(values, key, default)
end
@spec delete(CQEx.Query.t(), any) :: CQEx.Query.t()
def delete(q = %CQEx.Query{values: nil}, _key) do
q
end
def delete(q = %CQEx.Query{values: values}, key) when is_map(values) do
%{q | values: Map.delete(values, key)}
end
def delete(q = %CQEx.Query{values: values}, key) when is_list(values) do
%{q | values: Keyword.delete(values, key)}
end
@spec merge(CQEx.Query.t(), Keyword.t() | map()) :: CQEx.Query.t()
def merge(q = %CQEx.Query{values: nil}, other) when is_map(other) or is_list(other) do
merge(%{q | values: other}, other)
end
def merge(q = %CQEx.Query{}, other = %{__struct__: _}) do
merge(q, Map.delete(other, :__struct__))
end
def merge(q = %CQEx.Query{values: values}, other) when is_map(values) and is_list(other) do
merge(q, Map.new(other))
end
def merge(q = %CQEx.Query{values: values}, other) when is_map(values) do
%{q | values: Map.merge(values, other)}
end
def merge(q = %CQEx.Query{values: values}, other) when is_list(values) and is_map(other) do
merge(q, Enum.to_list(other))
end
def merge(q = %CQEx.Query{values: values}, other) when is_list(values) do
%{q | values: Keyword.merge(values, other)}
end
@spec new :: CQEx.Query.t()
def new() do
%CQEx.Query{}
end
@spec statement(CQEx.Query.t(), String.t()) :: CQEx.Query.t()
def statement(q = %CQEx.Query{}, statement) do
%{q | statement: statement}
end
@spec page_size(CQEx.Query.t(), integer) :: CQEx.Query.t()
def page_size(q = %CQEx.Query{}, page_size) when is_integer(page_size) do
%{q | page_size: page_size}
end
@spec consistency(CQEx.Query.t(), CQEx.consistency_values()) :: CQEx.Query.t()
def consistency(q = %CQEx.Query{}, consistency) do
%{q | consistency: consistency}
end
@spec serial_consistency(CQEx.Query.t(), CQEx.serial_consistency_values()) :: CQEx.Query.t()
def serial_consistency(q = %CQEx.Query{}, serial_consistency) do
%{q | serial_consistency: serial_consistency}
end
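  # Recursively replace `nil`s with the given fallback (`:null` for values,
  # `:undefined` for options) before handing the record to :cqerl.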
defp nullify(rec, fallback) when is_map(rec) do
rec
|> Enum.map(fn
{key, nil} -> {key, fallback}
{key, other} -> {key, nullify(other, fallback)}
end)
|> Enum.into(%{})
end
defp nullify(list = [{_key, _value} | _rest], fallback) do
list
|> Enum.map(fn
{key, nil} -> {key, fallback}
{key, other} -> {key, nullify(other, fallback)}
end)
end
defp nullify(list = [_value | _rest], fallback) do
list
|> Enum.map(fn
nil -> fallback
other -> nullify(other, fallback)
end)
end
defp nullify(nil, fallback), do: fallback
defp nullify(other, _fallback), do: other
defmodule Sigil do
@moduledoc false
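    # Enables writing queries as ~q"SELECT * FROM users" once
    # `CQEx.Query.Sigil` is imported (done by `use CQEx.Query`).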
@spec sigil_q(String.t(), [char()]) :: CQEx.Query.t()
def sigil_q(statement, _modifiers) do
%CQEx.Query{statement: statement}
end
end
end
|
lib/cqex/query.ex
| 0.784236
| 0.402216
|
query.ex
|
starcoder
|
defmodule Db.Dets do
@moduledoc """
An implementor of Db that provides a dets table as a backend.
  This currently provides a means to look up a key, delete the value associated with a key,
and insert values associated with keys. Values can be keyspaced by the keyspace argument,
but by default are available everywhere.
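
  For example, a query can be issued through the `Db.Backend` protocol
  implemented below (keyspace, key and value are illustrative):

      %Db.Dets{keyspace: :users, query: {:insert, "id-1", %{name: "Ada"}}}
      |> Db.Backend.execute()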
"""
use GenServer
require Logger
defstruct keyspace: :global, query: :undefined
def start_link do
GenServer.start_link(__MODULE__, %{}, [name: __MODULE__])
end
def init(_) do
{:ok, tid} = :dets.open_file(__MODULE__, [access: :read_write, repair: true])
{:ok, tid}
end
def handle_call(m = %Db.Dets{}, _from, tid) do
reply = handle(m, tid)
{:reply, reply, tid}
end
defp ets_key(keyspace, key), do: {keyspace, key}
defp handle(%Db.Dets{keyspace: keyspace, query: {:lookup, key}}, tid) do
k = ets_key(keyspace, key)
case :dets.lookup(tid, k) do
[] -> {:error, :notfound}
[{^k, i}] -> {:ok, i}
end
end
defp handle(%Db.Dets{keyspace: keyspace, query: {:insert, key, value}}, tid) do
:ok = :dets.insert(tid, {ets_key(keyspace, key), value})
:ok = :dets.sync(tid)
:ok
end
defp handle(%Db.Dets{keyspace: keyspace, query: {:delete, key}}, tid) do
:ok = :dets.delete(tid, ets_key(keyspace, key))
:ok = :dets.sync(tid)
:ok
end
defp handle(%Db.Dets{query: :flush}, tid) do
:ok = :dets.delete_all_objects(tid)
:dets.sync(tid)
:ok
end
# Special actions that don't correspond to one ets operation
defp handle(%Db.Dets{keyspace: keyspace, query: {:insert_list, key, value}}, tid) do
case handle(%Db.Dets{keyspace: keyspace, query: {:lookup, key}}, tid) do
{:ok, items} -> handle(%Db.Dets{keyspace: keyspace, query: {:insert, key, Enum.uniq([value | items])}}, tid)
{:error, :notfound} -> handle(%Db.Dets{keyspace: keyspace, query: {:insert, key, [value]}}, tid)
end
end
defp handle(%Db.Dets{keyspace: keyspace, query: {:delete_list, key, value}}, tid) do
case handle(%Db.Dets{keyspace: keyspace, query: {:lookup, key}}, tid) do
{:ok, items} -> handle(%Db.Dets{keyspace: keyspace, query: {:insert, key, Enum.uniq(items -- [value])}}, tid)
{:error, :notfound} -> :ok
end
end
end
defimpl Db.Backend, for: Db.Dets do
def execute(query) do
GenServer.call(Db.Dets, query, 1000)
end
end
|
lib/dets.ex
| 0.664867
| 0.654063
|
dets.ex
|
starcoder
|
defprotocol Cat.MonadError do
@moduledoc """
  MonadError defines
  * `raise(t(any), error) :: t(none)`
  * `recover(t(a), (error -> t(a))) :: t(a)`
  * `on_error(t(a), (error -> t(no_return) | no_return)) :: t(a)`
  * `lift_ok_or_error(t(any), ok_or_error(a)) :: t(a)`
  * `attempt(t(a)) :: t(ok_or_error(a))`
  **It must also be `Monad`, `Applicative` and `Functor`.**
  Default implementations (at `MonadError.Default`):
  * `on_error(t(a), (error -> t(no_return))) :: t(a)`
  * `lift_ok_or_error(t(any), ok_or_error(a)) :: t(a)`
  * `attempt(t(a)) :: t(ok_or_error(a))`
"""
@type t(_x) :: term
@spec raise(t(any), error) :: t(none) when error: any
def raise(example, error)
@spec recover(t(a), (error -> t(a))) :: t(a) when a: var, error: any
def recover(ta, f)
@spec on_error(t(a), (error -> t(no_return) | no_return)) :: t(a) when a: var, error: any
def on_error(ta, f)
@type ok_or_error(a) :: {:ok, a} | {:error, any}
@spec lift_ok_or_error(t(any), ok_or_error(a)) :: t(a) when a: var
def lift_ok_or_error(example, result)
@spec attempt(t(a)) :: t(ok_or_error(a)) when a: var
def attempt(ta)
end
alias Cat.{Applicative, Functor, MonadError}
defmodule Cat.MonadError.Default do
@spec on_error(MonadError.t(a), (error -> MonadError.t(no_return))) :: MonadError.t(a) when a: var, error: any
def on_error(ta, f), do:
MonadError.recover ta, fn error ->
Functor.as f.(error), MonadError.raise(ta, error)
end
@spec lift_ok_or_error(MonadError.t(any), MonadError.ok_or_error(a)) :: MonadError.t(a) when a: var
def lift_ok_or_error(example, {:ok, a}), do: Applicative.pure(example, a)
def lift_ok_or_error(example, {:error, err}), do: MonadError.raise(example, err)
@spec attempt(MonadError.t(a)) :: MonadError.t(MonadError.ok_or_error(a)) when a: var
  def attempt(ta), do:
    MonadError.recover(
      Functor.map(ta, &{:ok, &1}),
      fn error -> Applicative.pure(ta, {:error, error}) end
    )
end
defmodule Cat.MonadError.Arrow do
@spec raise(error) :: (MonadError.t(any) -> MonadError.t(none)) when error: any
def raise(example), do: &MonadError.raise(example, &1)
@spec recover((error -> MonadError.t(a))) :: (MonadError.t(a) -> MonadError.t(a)) when a: var, error: any
def recover(f), do: &MonadError.recover(&1, f)
@spec on_error((error -> MonadError.t(no_return) | no_return)) :: (MonadError.t(a) -> MonadError.t(a)) when a: var, error: any
def on_error(f), do: &MonadError.on_error(&1, f)
@spec lift_ok_or_error(MonadError.t(any)) :: (MonadError.ok_or_error(a) -> MonadError.t(a)) when a: var
def lift_ok_or_error(example), do: &MonadError.lift_ok_or_error(example, &1)
end
|
lib/cat/protocols/monad_error.ex
| 0.912407
| 0.44059
|
monad_error.ex
|
starcoder
|
defmodule Mail.Proplist do
@moduledoc """
A hybrid of erlang's proplists and lists keystores.
  It acts as a Set for key-value pairs, but still maintains its order like a
  List.
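
  For example (`put/3` replaces in place, `prepend/3` adds to the front;
  outputs traced from the implementations below):

      iex> Mail.Proplist.put([{"a", 1}], "b", 2)
      [{"a", 1}, {"b", 2}]

      iex> Mail.Proplist.prepend([{"a", 1}], "b", 2)
      [{"b", 2}, {"a", 1}]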
"""
@type t :: [{term, term} | term]
@doc """
Retrieves all keys from the key value pairs present in the list,
unlike :proplists.get_keys which will return non-kv pairs as keys
Args:
* `list` - a list to retrieve all the keys from
"""
@spec keys(list :: __MODULE__.t()) :: [term]
def keys(list) do
Enum.reduce(list, [], fn
{key, _value}, acc ->
if Enum.member?(acc, key) do
acc
else
[key | acc]
end
_value, acc ->
acc
end)
|> Enum.reverse()
end
@doc """
Detects if the list contains the specified key.
Args:
* `list` - the list to look in
* `key` - the key to look for
"""
  @spec has_key?(list :: __MODULE__.t(), key :: term) :: boolean
def has_key?(list, key) do
Enum.any?(list, fn
{k, _value} -> key == k
_value -> false
end)
end
@doc """
Stores a key-value pair in the list, will replace an existing pair with the
same key.
Args:
* `list` - the list to store in
* `key` - the key of the pair
* `value` - the value of the pair
"""
@spec put(list :: __MODULE__.t(), key :: term, value :: term) :: __MODULE__.t()
def put(list, key, value) do
:lists.keystore(key, 1, list, {key, value})
end
@doc """
Prepends the key-value pair to the list if it doesn't already exist, otherwise
it will replace the existing pair
Args:
* `list` - the list to store in
* `key` - the key of the pair
* `value` - the value of the pair
"""
@spec prepend(list :: __MODULE__.t(), key :: term, value :: term) :: __MODULE__.t()
def prepend(list, key, value) do
if has_key?(list, key) do
# replace the existing pair
put(list, key, value)
else
[{key, value} | list]
end
end
@doc """
Retrieves a value from the list
Args:
* `list` - the list to look in
* `key` - the key of the pair to retrieve it's value
"""
def get(list, key) do
case :proplists.get_value(key, list) do
:undefined -> nil
value -> value
end
end
@doc """
Merges duplicate pairs with the latest value.
Args:
* `list` - the list to normalize
"""
@spec normalize(list :: __MODULE__.t()) :: __MODULE__.t()
def normalize(list) do
Enum.reduce(list, [], fn
{key, value}, acc ->
if has_key?(acc, key) do
put(acc, key, value)
else
[{key, value} | acc]
end
value, acc ->
[value | acc]
end)
|> Enum.reverse()
end
@doc """
  Concatenates the given lists, replacing pairs in `a` whose keys reappear in `b`.
Args:
* `a` - base list to merge unto
* `b` - list to merge with
"""
@spec merge(a :: __MODULE__.t(), b :: __MODULE__.t()) :: __MODULE__.t()
def merge(a, b) do
Enum.reduce(b, Enum.reverse(a), fn
{key, v}, acc -> prepend(acc, key, v)
value, acc -> [value | acc]
end)
|> Enum.reverse()
end
@doc """
Removes a key-value pair by the given key and returns the remaining list
Args:
* `list` - the list to remove the pair from
* `key` - the key to remove
"""
@spec delete(list :: __MODULE__.t(), key :: term) :: __MODULE__.t()
def delete(list, key) do
:proplists.delete(key, list)
end
@doc """
Filters the proplist, i.e. returns only those elements
for which `fun` returns a truthy value.
Args:
* `list` - the list to filter
* `func` - the function to execute
"""
@spec filter(list :: __MODULE__.t(), func :: any) :: __MODULE__.t()
def filter(list, func) do
Enum.filter(list, fn
{_key, _value} = value -> func.(value)
_value -> true
end)
end
@doc """
Drops the specified keys from the list, returning the remaining.
Args:
* `list` - the list
* `keys` - the keys to remove
"""
@spec drop(list :: __MODULE__.t(), keys :: list) :: __MODULE__.t()
def drop(list, keys) do
filter(list, fn {key, _value} -> !Enum.member?(keys, key) end)
end
@doc """
  Takes the pairs with the specified keys from the list, dropping the rest
  (non-pair values are retained).
Args:
* `list` - the list
* `keys` - the keys to keep
"""
@spec take(list :: __MODULE__.t(), keys :: list) :: __MODULE__.t()
def take(list, keys) do
filter(list, fn {key, _value} -> Enum.member?(keys, key) end)
end
end
|
lib/mail/proplist.ex
| 0.791942
| 0.544014
|
proplist.ex
|
starcoder
|
defmodule ArtemisLog.Helpers do
@doc """
  Detect if a value is present, i.e. not `nil`, an empty string, or `0`
"""
def present?(nil), do: false
def present?(""), do: false
def present?(0), do: false
def present?(_value), do: true
@doc """
  Converts a float, atom or string to an integer
"""
def to_integer(value) when is_float(value), do: Kernel.trunc(value)
def to_integer(value) when is_atom(value), do: to_integer(Atom.to_string(value))
def to_integer(value) when is_bitstring(value), do: String.to_integer(value)
def to_integer(value), do: value
@doc """
Converts an atom or integer to a bitstring
"""
def to_string(value) when is_atom(value), do: Atom.to_string(value)
def to_string(value) when is_integer(value), do: Integer.to_string(value)
def to_string(value), do: value
@doc """
Recursively converts the keys of a map into a string.
Example:
keys_to_strings(%{nested: %{example: "value"}})
Returns:
%{"nested" => %{"example" => "value"}}
"""
def keys_to_strings(map, options \\ [])
def keys_to_strings(%_{} = struct, _options), do: struct
def keys_to_strings(map, options) when is_map(map) do
for {key, value} <- map, into: %{} do
key =
case is_atom(key) do
false -> key
true -> Atom.to_string(key)
end
{key, keys_to_strings(value, options)}
end
end
def keys_to_strings(value, _), do: value
@doc """
Recursive version of `Map.take/2`. Adds support for nested values:
Example:
map = %{
simple: "simple",
nested: %{example: "value", other: "value"}
}
deep_take(map, [:simple, nested: [:example]])
Returns:
map = %{
simple: "simple",
nested: %{example: "value"}
}
"""
def deep_take(map, keys) when is_map(map) do
{nested_keys, simple_keys} = Enum.split_with(keys, &is_tuple/1)
simple = Map.take(map, simple_keys)
nested =
Enum.reduce(nested_keys, %{}, fn {key, keys}, acc ->
value =
map
|> Map.get(key)
|> deep_take(keys)
Map.put(acc, key, value)
end)
Map.merge(simple, nested)
end
end
|
apps/artemis_log/lib/artemis_log/helpers.ex
| 0.82887
| 0.654736
|
helpers.ex
|
starcoder
|
defmodule UeberauthToken.Plug do
@moduledoc """
An implementation of Ueberauth token validation in a plug pipeline
In order for there to be successful authentication, the `Plug.Conn`
should have a request header in the following format:
      %Plug.Conn{req_headers: [{"authorization", "Bearer <token>"}]}
## Example Usage
Typically, `UeberauthToken.Plug` would be used as part of plug pipeline
in an api for the validation phase of an oauth2 token. The client will
be in possession of a token an is making a request for a resource. This
plug validates the requests and assigns an `Ueberauth` struct to the `%Conn{}`
pipeline :api do
plug :accepts, ["json"]
plug UeberauthToken.Plug, provider: UeberauthToken.TestProvider
end
## Options
  * `:provider` - a module
The provider may be passed in as an option if more than one provider is
configured. The plug pipeline `plug UeberauthToken.Plug` should only be
called once in a given plug pipeline, in other words only one provider
per plug pipeline is supported.
"""
alias UeberauthToken.Config
alias Ueberauth.Strategy
alias Plug.Conn
@behaviour Plug
@token_strategy UeberauthToken.Strategy
  def init(opts \\ []) do
    provider =
      Keyword.get(opts, :provider) ||
        if Enum.count(Config.providers()) == 1 do
          :erlang.hd(Config.providers())
        else
          """
          When multiple providers have been configured, a specific provider must
          be provided as an option to a UeberauthToken.Plug pipeline. It can be configured by
          passing the provider option, `provider: UeberauthToken.TestProvider`
          """
          |> raise()
        end

    Config.validate_provider!(provider)
    Keyword.put(opts, :provider, provider)
  end
def call(conn, opts) do
conn
|> Conn.put_private(:ueberauth_token, %{provider: opts[:provider]})
|> Strategy.run_callback(@token_strategy)
# ^ leads to invocation of `@token_strategy.handle_callback!/1` and `@token_strategy.auth/1`
end
end
|
lib/ueberauth_token/plug.ex
| 0.874527
| 0.638004
|
plug.ex
|
starcoder
|
defmodule X3m.System.Router do
@moduledoc """
Registers system wide services.
Each `service/2` macro registers system-wide service and function with
documentation in module that `uses` this module.
`servicep/2` is considered as private service and is not introduced to other nodes
in the cluster.
Service functions invoke function of the same name of specified module.
If result of that invocation is `{:reply, %X3m.System.Message{}}`,
it sends message to `message.reply_to` pid.
If result of invocation is `:noreply`, nothing is sent to that pid.
In any case function returns `:ok`.
## Examples
### Defining router
defmodule MyRouter do
use X3m.System.Router
@servicedoc false
service :create_user, MessageHandler
@servicedoc \"""
overridden!
\"""
service :get_user, MessageHandler
service :edit_user, MessageHandler
servicep :private_service, MessageHandler
end
### Getting registered services (public, private, or by default all)
iex> MyRouter.registered_services()
[create_user: 1, get_user: 1, edit_user: 1, private_service: 1]
iex> MyRouter.registered_services(:public)
[create_user: 1, get_user: 1, edit_user: 1]
### Invoking service as a function
iex> :create_user |>
...> X3m.System.Message.new() |>
...> MyRouter.create_user()
:ok
"""
require Logger
alias X3m.System.Message
defmacro service(service_name, message_handler, f) do
quote do
case(@servicedoc) do
nil ->
@doc """
          Accepts `#{unquote(service_name)}` service call, routing its `message` to
`#{unquote(message_handler)}.#{unquote(f)}/1`.
If result of that invocation is `{:reply, %X3m.System.Message{}}`,
it sends message to `message.reply_to` pid.
If result of invocation is `:noreply`, nothing is sent to that pid.
In any case function returns `:ok`.
## Example:
iex> #{inspect(unquote(service_name))} |>
...> X3m.System.Message.new() |>
...> #{__MODULE__}.#{unquote(service_name)}()
:ok
"""
other ->
@doc other
end
@x3m_service [{unquote(service_name), 1}]
@spec unquote(service_name)(Message.t()) :: :ok
def unquote(service_name)(%Message{service_name: unquote(service_name)} = message) do
Logger.metadata(message.logger_metadata)
X3m.System.Instrumenter.execute(:service_request_received, %{}, %{
service: unquote(service_name)
})
message
|> choose_node()
|> _invoke(unquote(message_handler), unquote(f), message)
end
@servicedoc nil
end
end
defmacro service(service_name, message_handler) do
quote do
service(unquote(service_name), unquote(message_handler), unquote(service_name))
end
end
defmacro servicep(service_name, message_handler, f) do
quote do
case(@servicedoc) do
nil ->
@doc """
This service is not shared with other nodes!
          Accepts `#{unquote(service_name)}` service call, routing its `message` to
`#{unquote(message_handler)}.#{unquote(f)}/1`.
If result of that invocation is `{:reply, %X3m.System.Message{}}`,
it sends message to `message.reply_to` pid.
If result of invocation is `:noreply`, nothing is sent to that pid.
In any case function returns `:ok`.
## Example:
iex> #{inspect(unquote(service_name))} |>
...> X3m.System.Message.new() |>
...> #{__MODULE__}.#{unquote(service_name)}()
:ok
"""
other ->
@doc other
end
@x3m_servicep [{unquote(service_name), 1}]
@spec unquote(service_name)(Message.t()) :: :ok
def unquote(service_name)(%Message{service_name: unquote(service_name)} = message) do
Logger.metadata(message.logger_metadata)
X3m.System.Instrumenter.execute(:service_request_received, %{}, %{
service: unquote(service_name)
})
message
|> choose_node()
|> _invoke(unquote(message_handler), unquote(f), message)
end
@servicedoc nil
end
end
defmacro servicep(service_name, message_handler) do
quote do
servicep(unquote(service_name), unquote(message_handler), unquote(service_name))
end
end
defmacro __using__(_opts) do
quote do
alias X3m.System.Router
require Router
import Router
Module.register_attribute(
__MODULE__,
:x3m_service,
accumulate: true,
persist: true
)
Module.register_attribute(
__MODULE__,
:x3m_servicep,
accumulate: true,
persist: true
)
@servicedoc nil
@doc !"""
      Returns a list of public, private or all service functions with their arity.
      """
      @spec registered_services(:public | :private | :all) :: [{atom, non_neg_integer}]
def registered_services(visibility \\ :all)
def registered_services(:public) do
__MODULE__.__info__(:attributes)
|> Keyword.get_values(:x3m_service)
|> List.flatten()
end
def registered_services(:private) do
__MODULE__.__info__(:attributes)
|> Keyword.get_values(:x3m_servicep)
|> List.flatten()
end
def registered_services(:all),
do: registered_services(:private) ++ registered_services(:public)
@doc !"""
Sends internal event for each service to be registered in runtime.
"""
@spec register_services :: :ok
def register_services do
public_services =
registered_services(:public)
|> Enum.map(fn {service, _arrity} -> {service, __MODULE__} end)
|> Enum.into(%{})
private_services =
registered_services(:private)
|> Enum.map(fn {service, _arrity} -> {service, __MODULE__} end)
|> Enum.into(%{})
X3m.System.Instrumenter.execute(:register_local_services, %{}, %{
public: public_services,
private: private_services
})
:ok
end
@doc false
@spec _invoke(:local | node(), atom, atom, Message.t()) :: :ok
def _invoke(node, message_handler, f, message)
def _invoke(:local, message_handler, f, message) do
Logger.metadata(message.logger_metadata)
mono_start = System.monotonic_time()
X3m.System.Instrumenter.execute(
:invoking_service,
%{start: DateTime.utc_now(), mono_start: mono_start},
%{
node: Node.self(),
service: message.service_name
}
)
case apply(message_handler, f, [message]) do
{:reply, %Message{} = message} ->
message =
case message do
%Message{dry_run: :verbose} = msg ->
%Message{msg | request: nil}
%Message{} = msg ->
%Message{msg | request: nil, events: []}
end
send(message.reply_to, message)
X3m.System.Instrumenter.execute(
:service_responded,
%{
time: DateTime.utc_now(),
duration: X3m.System.Instrumenter.duration(mono_start)
},
%{message: message}
)
:ok
:noreply ->
:ok
end
end
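      # Fire-and-forget: run the same invocation locally on the chosen remote node.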
def _invoke(node, message_handler, f, message) do
true =
:rpc.cast(node, __MODULE__, :_invoke, [
:local,
message_handler,
f,
message
])
:ok
end
@doc !"""
Choose node on which MFA will be applied.
      This is an optional callback. By default it returns `:local`,
      meaning that `sys_msg` will be handled by the local module.
      It can be overridden like:
```
def choose_node(%X3m.System.Message{}) do
[:jobs_1@my_comp_name, :local] |> Enum.random()
end
```
"""
@spec choose_node(Message.t()) :: :local | node()
def choose_node(_sys_msg),
do: :local
defoverridable choose_node: 1
end
end
end
|
lib/router.ex
| 0.860208
| 0.436082
|
router.ex
|
starcoder
|
defmodule GN.Orchestration do
import GN.Gluon
import GN.Evolution, only: [spawn_offspring: 1, build_layer: 2]
import GN.MNIST
alias GN.Network, as: Network
import GN.Python
import GN.Selection, only: [select: 1]
use Export.Python
def start_and_spawn({_level, net}) do
seed_layers = net.layers
layers = spawn_offspring(seed_layers)
{:ok, py} = start()
built_layers = Enum.map(layers, &build_layer(&1, py))
built_net = py |> Python.call(build(built_layers), from_file: "mnist")
[net_json_string, {:"$erlport.opaque", :python, net_params}, test_acc] =
py |> Python.call(run(built_net), from_file: "mnist")
net_json = Poison.decode!(net_json_string)
%Network{
id: UUID.uuid4(),
layers: layers,
test_acc: test_acc,
json: net_json,
params: net_params
}
end
def strip_empties(nets) do
Enum.filter(nets, fn {_k, v} -> Map.size(v) != 0 end)
end
def learn_generation(%Network{} = initial_net) do
generation_size = GN.Parameters.get(__MODULE__, :generation_size)
# clone the initial net to create a generation
nets =
Enum.reduce(1..generation_size, %{}, fn n, acc ->
Map.put(acc, -1 * n, initial_net)
end)
learn_generation(nets)
end
def learn_generation(nets) when map_size(nets) == 1 do
# too little diversity in complexity, so clones must be spawned
[net] = Map.values(nets)
learn_generation(net)
end
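  # Train every network in the generation concurrently; nets that crash or
  # exceed the timeout are dropped from the generation.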
def learn_generation(nets) do
clean_nets = strip_empties(nets)
tasks =
Task.Supervisor.async_stream_nolink(
GN.TaskSupervisor,
clean_nets,
&start_and_spawn(&1),
timeout: GN.Parameters.get(__MODULE__, :timeout)
)
generation = for {status, net} <- tasks, status == :ok, do: net
IO.puts(inspect(generation))
generation
end
def decrement(generations) do
generations - 1
end
def evolve(nets, generations) do
evolve(nets, generations, &decrement/1)
end
def evolve_continual(nets) do
evolve(nets, :infinity, & &1)
end
def evolve(nets, generations, count_function) when generations > 0 do
Task.Supervisor.async(GN.TaskSupervisor, fn ->
IO.puts("Generations remaining: #{generations}")
learn_generation(nets)
|> select()
|> evolve(count_function.(generations), count_function)
end)
end
def evolve(nets, _generations, _count_function) do
nets
end
end
|
lib/galapagos_nao/orchestration.ex
| 0.623721
| 0.510619
|
orchestration.ex
|
starcoder
|
defmodule Say do
@doc """
Translate a positive integer into English.
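
  The outputs below are traced from the conversion tables in this module:

      iex> Say.in_english(1_234)
      {:ok, "one thousand two hundred thirty-four"}

      iex> Say.in_english(-1)
      {:error, "number is out of range"}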
"""
  @spec in_english(integer) :: {:ok, String.t()} | {:error, String.t()}
def in_english(number) when number in 0..999_999_999_999, do: {:ok, say(number)}
def in_english(_number), do: {:error, "number is out of range"}
defp say(number), do: say(number, 0) |> Enum.join()
defp say(number, exponent) do
quotient = div(number, 1000)
remainder = rem(number, 1000)
case {quotient > 0, remainder > 0} do
{true, true} -> [say(quotient, exponent + 1), " ", convert(remainder), unit(exponent)]
{true, false} -> [say(quotient, exponent + 1)]
_ -> [convert(remainder), unit(exponent)]
end
end
defp convert(number) do
quotient = div(number, 100)
remainder = rem(number, 100)
convert(quotient, div(remainder, 10), rem(remainder, 10))
end
defp convert(0, 0, 0), do: "zero"
defp convert(0, 0, 1), do: "one"
defp convert(0, 0, 2), do: "two"
defp convert(0, 0, 3), do: "three"
defp convert(0, 0, 4), do: "four"
defp convert(0, 0, 5), do: "five"
defp convert(0, 0, 6), do: "six"
defp convert(0, 0, 7), do: "seven"
defp convert(0, 0, 8), do: "eight"
defp convert(0, 0, 9), do: "nine"
defp convert(0, 1, 0), do: "ten"
defp convert(0, 1, 1), do: "eleven"
defp convert(0, 1, 2), do: "twelve"
defp convert(0, 1, 3), do: "thirteen"
defp convert(0, 1, 4), do: "fourteen"
defp convert(0, 1, 5), do: "fifteen"
defp convert(0, 1, 6), do: "sixteen"
defp convert(0, 1, 7), do: "seventeen"
defp convert(0, 1, 8), do: "eighteen"
  defp convert(0, 1, 9), do: "nineteen"
defp convert(0, 2, 0), do: "twenty"
defp convert(0, 3, 0), do: "thirty"
defp convert(0, 4, 0), do: "forty"
defp convert(0, 5, 0), do: "fifty"
defp convert(0, 6, 0), do: "sixty"
defp convert(0, 7, 0), do: "seventy"
defp convert(0, 8, 0), do: "eighty"
  defp convert(0, 9, 0), do: "ninety"
defp convert(0, t, o), do: [convert(0, t, 0), "-", convert(0, 0, o)]
defp convert(h, 0, 0), do: [convert(0, 0, h), " hundred"]
defp convert(h, t, o), do: [convert(0, 0, h), " hundred ", convert(0, t, o)]
defp unit(0), do: ""
defp unit(1), do: " thousand"
defp unit(2), do: " million"
defp unit(3), do: " billion"
defp unit(4), do: " trillion"
end
|
say/lib/say.ex
| 0.651798
| 0.535341
|
say.ex
|
starcoder
|
defmodule Inquisitor.JsonApi.Filter do
@moduledoc """
Inquisitor query handlers for JSON API filters
[JSON API Spec](http://jsonapi.org/format/#fetching-filtering)
#### Usage
`use` the module *after* the `Inquisitor` module:
defmodule MyApp do
use Inquisitor
use Inquisitor.JsonApi.Filter
...
end
This module allows you to decide how you want to handle filter key/value params.
For example you may query your API with the following URL:
https://example.com/posts?filter[foo]=bar&filter[baz]=qux
You can use `build_filter_query/4` to define matchers:
def build_filter_query(query, "foo", value, _conn) do
Ecto.Query.where(query, [r], r.foo == ^value)
end
def build_filter_query(query, "baz", value, _conn) do
Ecto.Query.where(query, [r], r.baz > ^value)
end
#### General key/value matcher
You may want a handler that simply queries on key/value pairs. Use the following:
def build_filter_query(query, key, value, _conn) do
Ecto.Query.where(query, [r], Ecto.Query.API.field(r, ^String.to_existing_atom(key)) == ^value)
end
#### Security
This module is secure by default, meaning that you must opt in to handle the filter params.
Otherwise they are ignored by the query builder.
If you would like to limit the values to act upon use a `guard`:
@filter_whitelist ~w(name title)
def build_filter_query(query, key, value, _conn) when key in @filter_whitelist do
Ecto.Query.where(query, [r], Ecto.Query.API.field(r, ^String.to_existing_atom(key)) == ^value)
end
"""
require Inquisitor
defmacro __using__(_opts) do
quote do
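# Fold every filter key/value pair into the query, dispatching each pair
# to the user-defined build_filter_query/4 clauses (or the catch-all
# injected by __before_compile__/1).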
def build_query(query, "filter", filters, context) do
Enum.reduce(filters, query, fn({key, value}, query) ->
build_filter_query(query, key, value, context)
end)
end
@before_compile Inquisitor.JsonApi.Filter
end
end
defmacro __before_compile__(_env) do
quote generated: true do
def build_filter_query(query, _key, _value, _context), do: query
defoverridable [build_filter_query: 4]
end
end
end
|
lib/inquisitor/jsonapi/filter.ex
| 0.819207
| 0.472501
|
filter.ex
|
starcoder
|
defmodule GoodTimes do
@vsn "1.1.2"
@doc false
def version, do: @vsn
@moduledoc """
Convenient and expressive functions dealing with dates and times.
This is the core module of the `GoodTimes` library. For other modules and
their functionality, see _Associated modules_ below.
Unless explicitly stated, all functions operate on and return an
Erlang datetime based on the Coordinated Universal Time (UTC).
## Functions overview
The following functions are available for each time unit (second, minute,
hour, day, week, month, or year); all return a datetime that is offset by the
specified number of time units.
* `<time_units>_after/2`
* `<time_units>_before/2`
* `<time_units>_from_now/1`
* `<time_units>_ago/1`
You can use the following functions when only adjusting the datetime by a
single time unit.
* `a_<time_unit>_after/1`
* `a_<time_unit>_before/1`
* `a_<time_unit>_from_now/0`
* `a_<time_unit>_ago/0`
In addition, `now/0` and `at/2` are included for convenience.
* `now/0` - returns the current datetime
* `at/2` - merges a given date or datetime with a time
## Examples
iex> now
{{2015, 2, 27}, {18, 30, 45}}
iex> an_hour_ago
{{2015, 2, 27}, {17, 30, 45}}
iex> a_month_before {{2016, 3, 31}, {9, 30, 0}}
{{2016, 2, 29}, {9, 30, 0}}
iex> 2 |> weeks_from_now |> at {12, 15, 0}
{{2015, 3, 13}, {12, 15, 0}}
## Known limitations
As the entire library builds upon Erlang's calendar module,
dates before year 0 are not supported.
## Associated modules
Aside from the core module, the `GoodTimes` library consists of these
modules.
* `GoodTimes.Boundary` - find boundaries between time units.
* `GoodTimes.Convert` - convert dates, datetimes and times.
* `GoodTimes.Date` - functions operating on and returning dates.
* `GoodTimes.Generate` - functions generating streams of time units.
"""
@seconds_per_minute 60
@seconds_per_hour 60 * @seconds_per_minute
@seconds_per_day 24 * @seconds_per_hour
@seconds_per_week 7 * @seconds_per_day
@months_per_year 12
@type year :: non_neg_integer
@type month :: 1..12
@type day :: 1..31
@type hour :: 0..23
@type minute :: 0..59
@type second :: 0..59
@type date :: {year, month, day}
@type time :: {hour, minute, second}
@type datetime :: {date, time}
@doc """
Returns the current UTC time as a datetime.
## Examples
iex> now
{{2015, 2, 27}, {18, 30, 45}}
"""
@spec now :: datetime
def now, do: :calendar.universal_time()
@doc """
Merges the date from the given date or datetime with the given time.
## Examples
iex> now |> at {10, 30, 15}
{{2015, 2, 27}, {10, 30, 15}}
iex> {2015, 2, 27} |> at {10, 30, 15}
{{2015, 2, 27}, {10, 30, 15}}
"""
@spec at(date, time) :: datetime
@spec at(datetime, time) :: datetime
def at(date_or_datetime, time), do: _at(date_or_datetime, time)
defp _at({date, _}, time), do: {date, time}
defp _at(date, time), do: {date, time}
@doc """
Returns the UTC date and time the specified seconds after the given datetime.
## Examples
iex> 15 |> seconds_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 31, 0}}
"""
@spec seconds_after(integer, datetime) :: datetime
def seconds_after(seconds, datetime) do
datetime
|> :calendar.datetime_to_gregorian_seconds()
|> Kernel.+(seconds)
|> :calendar.gregorian_seconds_to_datetime()
end
@doc """
Returns the UTC date and time the specified seconds before the given datetime.
## Examples
iex> 15 |> seconds_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 30, 30}}
"""
@spec seconds_before(integer, datetime) :: datetime
def seconds_before(seconds, datetime), do: seconds_after(-seconds, datetime)
@doc """
Returns the UTC date and time a second after the given datetime.
## Examples
iex> a_second_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 30, 46}}
"""
@spec a_second_after(datetime) :: datetime
def a_second_after(datetime), do: seconds_after(1, datetime)
@doc """
Returns the UTC date and time a second before the given datetime.
## Examples
iex> a_second_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 30, 44}}
"""
@spec a_second_before(datetime) :: datetime
def a_second_before(datetime), do: seconds_before(1, datetime)
@doc """
Returns the UTC date and time the specified seconds from now.
## Examples
iex> 15 |> seconds_from_now
{{2015, 2, 27}, {18, 31, 0}}
"""
@spec seconds_from_now(integer) :: datetime
def seconds_from_now(seconds), do: seconds_after(seconds, now())
@doc """
Returns the UTC date and time the specified seconds ago.
## Examples
iex> 20 |> seconds_ago
{{2015, 2, 27}, {18, 30, 25}}
"""
@spec seconds_ago(integer) :: datetime
def seconds_ago(seconds), do: seconds_before(seconds, now())
@doc """
Returns the UTC date and time a second from now.
## Examples
iex> a_second_from_now
{{2015, 2, 27}, {18, 30, 46}}
"""
@spec a_second_from_now :: datetime
def a_second_from_now, do: seconds_from_now(1)
@doc """
Returns the UTC date and time a second ago.
iex> a_second_ago
{{2015, 2, 27}, {18, 30, 44}}
"""
@spec a_second_ago :: datetime
def a_second_ago, do: seconds_ago(1)
@doc """
Returns the UTC date and time the specified minutes after the given datetime.
## Examples
iex> 15 |> minutes_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 45, 45}}
"""
@spec minutes_after(integer, datetime) :: datetime
def minutes_after(minutes, datetime), do: seconds_after(minutes * @seconds_per_minute, datetime)
@doc """
Returns the UTC date and time the specified minutes before the given datetime.
## Examples
iex> 15 |> minutes_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 15, 45}}
"""
@spec minutes_before(integer, datetime) :: datetime
def minutes_before(minutes, datetime),
do: seconds_before(minutes * @seconds_per_minute, datetime)
@doc """
Returns the UTC date and time a minute after the given datetime.
## Examples
iex> a_minute_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 31, 45}}
"""
@spec a_minute_after(datetime) :: datetime
def a_minute_after(datetime), do: minutes_after(1, datetime)
@doc """
Returns the UTC date and time a minute before the given datetime.
## Examples
iex> a_minute_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {18, 29, 45}}
"""
@spec a_minute_before(datetime) :: datetime
def a_minute_before(datetime), do: minutes_before(1, datetime)
@doc """
Returns the UTC date and time the specified minutes from now.
## Examples
iex> 10 |> minutes_from_now
{{2015, 2, 27}, {18, 40, 45}}
"""
@spec minutes_from_now(integer) :: datetime
def minutes_from_now(minutes), do: minutes_after(minutes, now())
@doc """
Returns the UTC date and time the specified minutes ago.
## Examples
iex> 5 |> minutes_ago
{{2015, 2, 27}, {18, 25, 45}}
"""
@spec minutes_ago(integer) :: datetime
def minutes_ago(minutes), do: minutes_before(minutes, now())
@doc """
Returns the UTC date and time a minute from now.
## Examples
iex> a_minute_from_now
{{2015, 2, 27}, {18, 31, 45}}
"""
@spec a_minute_from_now :: datetime
def a_minute_from_now, do: minutes_from_now(1)
@doc """
Returns the UTC date and time a minute ago.
## Examples
iex> a_minute_ago
{{2015, 2, 27}, {18, 29, 45}}
"""
@spec a_minute_ago :: datetime
def a_minute_ago, do: minutes_ago(1)
@doc """
Returns the UTC date and time the specified hours after the given datetime.
## Examples
iex> 15 |> hours_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 28}, {9, 30, 45}}
"""
@spec hours_after(integer, datetime) :: datetime
def hours_after(hours, datetime), do: seconds_after(hours * @seconds_per_hour, datetime)
@doc """
Returns the UTC date and time the specified hours before the given datetime.
## Examples
iex> 15 |> hours_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {3, 30, 45}}
"""
@spec hours_before(integer, datetime) :: datetime
def hours_before(hours, datetime), do: seconds_before(hours * @seconds_per_hour, datetime)
@doc """
Returns the UTC date and time an hour after the given datetime.
## Examples
iex> an_hour_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {19, 30, 45}}
"""
@spec an_hour_after(datetime) :: datetime
def an_hour_after(datetime), do: hours_after(1, datetime)
@doc """
Returns the UTC date and time an hour before the given datetime.
## Examples
iex> an_hour_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 27}, {17, 30, 45}}
"""
@spec an_hour_before(datetime) :: datetime
def an_hour_before(datetime), do: hours_before(1, datetime)
@doc """
Returns the UTC date and time the specified hours from now.
## Examples
iex> 6 |> hours_from_now
{{2015, 2, 28}, {0, 30, 45}}
"""
@spec hours_from_now(integer) :: datetime
def hours_from_now(hours), do: hours_after(hours, now())
@doc """
Returns the UTC date and time the specified hours ago.
## Examples
iex> 2 |> hours_ago
{{2015, 2, 27}, {16, 30, 45}}
"""
@spec hours_ago(integer) :: datetime
def hours_ago(hours), do: hours_before(hours, now())
@doc """
Returns the UTC date and time an hour from now.
## Examples
iex> an_hour_from_now
{{2015, 2, 27}, {19, 30, 45}}
"""
@spec an_hour_from_now :: datetime
def an_hour_from_now, do: hours_from_now(1)
@doc """
Returns the UTC date and time an hour ago.
## Examples
iex> an_hour_ago
{{2015, 2, 27}, {17, 30, 45}}
"""
@spec an_hour_ago :: datetime
def an_hour_ago, do: hours_ago(1)
@doc """
Returns the UTC date and time the specified days after the given datetime.
## Examples
iex> 3 |> days_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 3, 2}, {18, 30, 45}}
"""
@spec days_after(integer, datetime) :: datetime
def days_after(days, datetime), do: seconds_after(days * @seconds_per_day, datetime)
@doc """
Returns the UTC date and time the specified days before the given datetime.
## Examples
iex> 3 |> days_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 24}, {18, 30, 45}}
"""
@spec days_before(integer, datetime) :: datetime
def days_before(days, datetime), do: seconds_before(days * @seconds_per_day, datetime)
@doc """
Returns the UTC date and time a day after the given datetime.
## Examples
iex> a_day_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 28}, {18, 30, 45}}
"""
@spec a_day_after(datetime) :: datetime
def a_day_after(datetime), do: days_after(1, datetime)
@doc """
Returns the UTC date and time a day before the given datetime.
## Examples
iex> a_day_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 26}, {18, 30, 45}}
"""
@spec a_day_before(datetime) :: datetime
def a_day_before(datetime), do: days_before(1, datetime)
@doc """
Returns the UTC date and time the specified days from now.
## Examples
iex> 2 |> days_from_now
{{2015, 3, 1}, {18, 30, 45}}
"""
@spec days_from_now(integer) :: datetime
def days_from_now(days), do: days_after(days, now())
@doc """
Returns the UTC date and time the specified days ago.
## Examples
iex> 7 |> days_ago
{{2015, 2, 20}, {18, 30, 45}}
"""
@spec days_ago(integer) :: datetime
def days_ago(days), do: days_before(days, now())
@doc """
Returns the UTC date and time a day from now.
## Examples
iex> a_day_from_now
{{2015, 2, 28}, {18, 30, 45}}
"""
@spec a_day_from_now :: datetime
def a_day_from_now, do: days_from_now(1)
@doc """
Returns the UTC date and time a day ago.
## Examples
iex> a_day_ago
{{2015, 2, 26}, {18, 30, 45}}
"""
@spec a_day_ago :: datetime
def a_day_ago, do: days_ago(1)
@doc """
Returns the UTC date and time the specified weeks after the given datetime.
## Examples
iex> 3 |> weeks_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 3, 20}, {18, 30, 45}}
"""
@spec weeks_after(integer, datetime) :: datetime
def weeks_after(weeks, datetime), do: seconds_after(weeks * @seconds_per_week, datetime)
@doc """
Returns the UTC date and time the specified weeks before the given datetime.
## Examples
iex> 3 |> weeks_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 6}, {18, 30, 45}}
"""
@spec weeks_before(integer, datetime) :: datetime
def weeks_before(weeks, datetime), do: seconds_before(weeks * @seconds_per_week, datetime)
@doc """
Returns the UTC date and time a week after the given datetime.
## Examples
iex> a_week_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 3, 6}, {18, 30, 45}}
"""
@spec a_week_after(datetime) :: datetime
def a_week_after(datetime), do: weeks_after(1, datetime)
@doc """
Returns the UTC date and time a week before the given datetime.
## Examples
iex> a_week_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 2, 20}, {18, 30, 45}}
"""
@spec a_week_before(datetime) :: datetime
def a_week_before(datetime), do: weeks_before(1, datetime)
@doc """
Returns the UTC date and time the specified weeks from now.
## Examples
iex> 2 |> weeks_from_now
{{2015, 3, 13}, {18, 30, 45}}
"""
@spec weeks_from_now(integer) :: datetime
def weeks_from_now(weeks), do: weeks_after(weeks, now())
@doc """
Returns the UTC date and time the specified weeks ago.
## Examples
iex> 2 |> weeks_ago
{{2015, 2, 13}, {18, 30, 45}}
"""
@spec weeks_ago(integer) :: datetime
def weeks_ago(weeks), do: weeks_before(weeks, now())
@doc """
Returns the UTC date and time a week from now.
## Examples
iex> a_week_from_now
{{2015, 3, 6}, {18, 30, 45}}
"""
@spec a_week_from_now :: datetime
def a_week_from_now, do: weeks_from_now(1)
@doc """
Returns the UTC date and time a week ago.
## Examples
iex> a_week_ago
{{2015, 2, 20}, {18, 30, 45}}
"""
@spec a_week_ago :: datetime
def a_week_ago, do: weeks_ago(1)
@doc """
Returns the UTC date and time the specified months after the given datetime.
## Examples
iex> 3 |> months_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 5, 27}, {18, 30, 45}}
"""
@spec months_after(integer, datetime) :: datetime
def months_after(months, {date, time}), do: {new_date(date, months), time}
defp new_date(date, months) do
date
|> new_year_and_month(months)
|> adjust_year_and_month
|> adjust_for_last_day_of_month
end
defp new_year_and_month({year, month, day}, months) do
{year + div(months, 12), month + rem(months, 12), day}
end
defp adjust_year_and_month({year, month, day}) when month < 1, do: {year - 1, month + 12, day}
defp adjust_year_and_month({year, month, day}) when month > 12, do: {year + 1, month - 12, day}
defp adjust_year_and_month(date), do: date
defp adjust_for_last_day_of_month(date = {year, month, _}), do: {year, month, valid_day(date)}
defp valid_day({year, month, day}) do
[day, :calendar.last_day_of_the_month(year, month)]
|> Enum.min()
end
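# Illustration of the month-end clamping above (a sketch):
#   months_after(1, {{2015, 1, 31}, {12, 0, 0}})
#   #=> {{2015, 2, 28}, {12, 0, 0}}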
@doc """
Returns the UTC date and time the specified months before the given datetime.
## Examples
iex> 3 |> months_before({{2015, 2, 27}, {18, 30, 45}})
{{2014, 11, 27}, {18, 30, 45}}
"""
@spec months_before(integer, datetime) :: datetime
def months_before(months, datetime), do: months_after(-months, datetime)
@doc """
Returns the UTC date and time a month after the given datetime.
## Examples
iex> a_month_after({{2015, 2, 27}, {18, 30, 45}})
{{2015, 3, 27}, {18, 30, 45}}
"""
@spec a_month_after(datetime) :: datetime
def a_month_after(datetime), do: months_after(1, datetime)
@doc """
Returns the UTC date and time a month before the given datetime.
## Examples
iex> a_month_before({{2015, 2, 27}, {18, 30, 45}})
{{2015, 1, 27}, {18, 30, 45}}
"""
@spec a_month_before(datetime) :: datetime
def a_month_before(datetime), do: months_before(1, datetime)
@doc """
Returns the UTC date and time the specified months from now.
## Examples
iex> 2 |> months_from_now
{{2015, 4, 27}, {18, 30, 45}}
"""
@spec months_from_now(integer) :: datetime
def months_from_now(months), do: months_after(months, now())
@doc """
Returns the UTC date and time the specified months ago.
## Examples
iex> 2 |> months_ago
{{2014, 12, 27}, {18, 30, 45}}
"""
@spec months_ago(integer) :: datetime
def months_ago(months), do: months_before(months, now())
@doc """
Returns the UTC date and time a month from now.
## Examples
iex> a_month_from_now
{{2015, 3, 27}, {18, 30, 45}}
"""
@spec a_month_from_now :: datetime
def a_month_from_now, do: months_from_now(1)
@doc """
Returns the UTC date and time a month ago.
## Examples
iex> a_month_ago
{{2015, 1, 27}, {18, 30, 45}}
"""
@spec a_month_ago :: datetime
def a_month_ago, do: months_ago(1)
@doc """
Returns the UTC date and time the specified years after the given datetime.
## Examples
iex> 3 |> years_after({{2015, 2, 27}, {18, 30, 45}})
{{2018, 2, 27}, {18, 30, 45}}
"""
@spec years_after(integer, datetime) :: datetime
def years_after(years, datetime), do: months_after(years * @months_per_year, datetime)
@doc """
Returns the UTC date and time the specified years before the given datetime.
## Examples
iex> 3 |> years_before({{2015, 2, 27}, {18, 30, 45}})
{{2012, 2, 27}, {18, 30, 45}}
"""
@spec years_before(integer, datetime) :: datetime
def years_before(years, datetime), do: months_before(years * @months_per_year, datetime)
@doc """
Returns the UTC date and time a year after the given datetime.
## Examples
iex> a_year_after({{2015, 2, 27}, {18, 30, 45}})
{{2016, 2, 27}, {18, 30, 45}}
"""
@spec a_year_after(datetime) :: datetime
def a_year_after(datetime), do: years_after(1, datetime)
@doc """
Returns the UTC date and time a year before the given datetime.
## Examples
iex> a_year_before({{2015, 2, 27}, {18, 30, 45}})
{{2014, 2, 27}, {18, 30, 45}}
"""
@spec a_year_before(datetime) :: datetime
def a_year_before(datetime), do: years_before(1, datetime)
@doc """
Returns the UTC date and time the specified years from now.
## Examples
iex> 2 |> years_from_now
{{2017, 2, 27}, {18, 30, 45}}
"""
@spec years_from_now(integer) :: datetime
def years_from_now(years), do: years_after(years, now())
@doc """
Returns the UTC date and time the specified years ago.
## Examples
iex> 2 |> years_ago
{{2013, 2, 27}, {18, 30, 45}}
"""
@spec years_ago(integer) :: datetime
def years_ago(years), do: years_before(years, now())
@doc """
Returns the UTC date and time a year from now.
## Examples
iex> a_year_from_now
{{2016, 2, 27}, {18, 30, 45}}
"""
@spec a_year_from_now :: datetime
def a_year_from_now, do: years_from_now(1)
@doc """
Returns the UTC date and time a year ago.
## Examples
iex> a_year_ago
{{2014, 2, 27}, {18, 30, 45}}
"""
@spec a_year_ago :: datetime
def a_year_ago, do: years_ago(1)
end
|
lib/good_times.ex
| 0.926711
| 0.744517
|
good_times.ex
|
starcoder
|
defmodule Fuzzyurl.Strings do
@moduledoc ~S"""
Functions to parse a string URL into a Fuzzyurl, and vice versa.
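A quick sketch (values follow from the regex captures below; unmatched
fields keep their defaults):
{:ok, fu} = Fuzzyurl.Strings.from_string("http://example.com/path")
fu.protocol  #=> "http"
fu.hostname  #=> "example.com"
fu.path      #=> "/path"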
"""
## this regex matches URLs like this:
## [protocol ://] [username [: password] @] [hostname] [: port] [/ path] [? query] [# fragment]
@regex ~r"""
^
(?: (?<protocol> \* | [a-zA-Z][A-Za-z+.-]+) ://)?
(?: (?<username> \* | [a-zA-Z0-9%_.!~*'();&=+$,-]+)
(?: : (?<password> \* | [a-zA-Z0-9%_.!~*'();&=+$,-]*))?
@
)?
(?<hostname> [a-zA-Z0-9\.\*\-_]+?)?
(?: : (?<port> \* | \d+))?
(?<path> / [^\?\#]*)? ## captures leading /
(?: \? (?<query> [^\#]*) )?
(?: \# (?<fragment> .*) )?
$
"""x
@doc ~S"""
Attempts to parse the given string as a URL, and returns either
{:ok, fuzzy_url} or {:error, message}.
"""
@spec from_string(String.t(), Keyword.t()) :: {:ok, Fuzzyurl.t()} | {:error, String.t()}
def from_string(string, opts \\ [])
def from_string(string, opts) when is_binary(string) do
case Regex.named_captures(@regex, string) do
nil ->
{:error, "input string couldn't be parsed"}
nc ->
{:ok, from_named_captures(nc, opts)}
end
end
def from_string(_, _) do
{:error, "input argument must be a string"}
end
defp from_named_captures(nc, opts) do
# default nil
dv = opts[:default]
blank_fu = Fuzzyurl.new(dv, dv, dv, dv, dv, dv, dv, dv)
nc
|> Map.to_list()
|> Enum.reduce(blank_fu, fn {k, v}, acc ->
if v != "" do
Map.put(acc, String.to_atom(k), v)
else
acc
end
end)
end
@doc ~S"""
Returns a string representation of the given Fuzzyurl.
"""
@spec to_string(%Fuzzyurl{}) :: String.t()
def to_string(%Fuzzyurl{} = fu) do
url_pieces = [
if(fu.protocol, do: "#{fu.protocol}://", else: ""),
if(fu.username, do: "#{fu.username}", else: ""),
if(fu.password, do: ":#{fu.password}", else: ""),
if(fu.username, do: "@", else: ""),
if(fu.hostname, do: "#{fu.hostname}", else: ""),
if(fu.port, do: ":#{fu.port}", else: ""),
if(fu.path, do: "#{fu.path}", else: ""),
if(fu.query, do: "?#{fu.query}", else: ""),
if(fu.fragment, do: "##{fu.fragment}", else: "")
]
url_pieces |> Enum.join()
end
end
|
lib/fuzzyurl/strings.ex
| 0.63341
| 0.428831
|
strings.ex
|
starcoder
|
defmodule VintageNetMobile.CellMonitor do
@moduledoc """
Monitor cell network information
This monitor queries the modem for cell network information and posts it to
VintageNet properties.
The following properties are reported:
| Property | Values | Description |
| ------------- | -------------- | ----------------------------- |
| `lac` | `0-65533` | The Location Area Code (lac) for the current cell |
| `cid` | `0-268435455` | The Cell ID (cid) for the current cell |
| `mcc` | `0-999` | Mobile Country Code for the network |
| `mnc` | `0-999` | Mobile Network Code for the network |
| `network` | string | The network operator's name |
| `access_technology` | string | The technology currently in use to connect to the network |
| `band` | string | The frequency band in use |
| `channel` | integer | An integer that indicates the channel that's in use |
"""
use GenServer
require Logger
alias VintageNet.PropertyTable
alias VintageNetMobile.{ExChat, ATParser}
defmodule State do
@moduledoc false
defstruct up: false, ifname: nil, tty: nil
end
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts) do
GenServer.start_link(__MODULE__, opts)
end
@impl true
def init(opts) do
ifname = Keyword.fetch!(opts, :ifname)
tty = Keyword.fetch!(opts, :tty)
VintageNet.subscribe(["interface", ifname, "connection"])
us = self()
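# Forward unsolicited +CREG/+QSPN/+QNWINFO notifications from the modem
# to this process so they can be parsed in handle_info/2.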
ExChat.register(tty, "+CREG:", fn message -> send(us, {:handle_creg, message}) end)
ExChat.register(tty, "+QSPN:", fn message -> send(us, {:handle_qspn, message}) end)
ExChat.register(tty, "+QNWINFO:", fn message -> send(us, {:handle_qnwinfo, message}) end)
_ = :timer.send_interval(30_000, :poll)
{:ok, %State{ifname: ifname, tty: tty}}
end
@impl true
def handle_info({:handle_creg, message}, state) do
message
|> ATParser.parse()
|> creg_response_to_registration()
|> post_registration(state.ifname)
{:noreply, state}
end
def handle_info({:handle_qspn, message}, state) do
message
|> ATParser.parse()
|> qspn_response_to_network()
|> post_network(state.ifname)
{:noreply, state}
end
def handle_info({:handle_qnwinfo, message}, state) do
message
|> ATParser.parse()
|> qnwinfo_response_to_info()
|> post_qnwinfo(state.ifname)
{:noreply, state}
end
def handle_info(
{VintageNet, ["interface", ifname, "connection"], _old, :internet, _meta},
%{ifname: ifname} = state
) do
new_state = %{state | up: true}
# Set the CREG report format just in case it hasn't been set.
{:ok, _} = ExChat.send(new_state.tty, "AT+CREG=2", timeout: 1000)
poll(new_state)
{:noreply, new_state}
end
def handle_info(
{VintageNet, ["interface", ifname, "connection"], _old, _not_internet, _meta},
%{ifname: ifname} = state
) do
# NOTE: None of this should depend on whether there's an Internet connection. At
# one point, some trouble was seen when polling status and not connected. The easy
# solution was to not poll. This should be revisited since might be valuable to
# know that you're connected to a cell tower, but ppp isn't working.
new_state = %{state | up: false}
# Reset cell connection properties
post_registration(%{stat: :unknown}, ifname)
{:noreply, new_state}
end
def handle_info(:poll, state) do
poll(state)
{:noreply, state}
end
defp poll(%{up: true} = state) do
ExChat.send_best_effort(state.tty, "AT+CREG?", timeout: 1000)
ExChat.send_best_effort(state.tty, "AT+QNWINFO", timeout: 1000)
ExChat.send_best_effort(state.tty, "AT+QSPN", timeout: 1000)
end
defp poll(_state), do: :ok
defp creg_response_to_registration({:ok, _header, [2, stat, lac, ci, act]})
when is_integer(stat) and is_binary(lac) and is_binary(ci) and is_integer(act) do
%{stat: decode_stat(stat), lac: safe_hex_to_int(lac), ci: safe_hex_to_int(ci), act: act}
end
defp creg_response_to_registration({:ok, _header, [2, stat, lac, ci]})
when is_integer(stat) and is_binary(lac) and is_binary(ci) do
%{stat: decode_stat(stat), lac: safe_hex_to_int(lac), ci: safe_hex_to_int(ci), act: 0}
end
defp creg_response_to_registration({:ok, _header, [2, stat]}) when is_integer(stat) do
%{stat: decode_stat(stat), lac: 0, ci: 0, act: 0}
end
# Missing n? parameter where n == 2
defp creg_response_to_registration({:ok, _header, [stat, lac, ci, act]})
when is_integer(stat) and stat >= 1 and is_binary(lac) and is_binary(ci) and
is_integer(act) do
%{stat: decode_stat(stat), lac: safe_hex_to_int(lac), ci: safe_hex_to_int(ci), act: act}
end
defp creg_response_to_registration(malformed) do
Logger.warn("Unexpected AT+CREG? response: #{inspect(malformed)}")
%{stat: :invalid, lac: 0, ci: 0, act: 0}
end
defp qspn_response_to_network({:ok, _header, [fnn, snn, spn, alphabet, plmn]})
when is_binary(fnn) and is_binary(snn) and is_binary(spn) and is_integer(alphabet) and
is_binary(plmn) do
{mcc, mnc} = plmn_to_mcc_mnc(plmn)
%{network_name: fnn, mcc: mcc, mnc: mnc}
end
defp qspn_response_to_network(malformed) do
Logger.warn("Unexpected AT+QSPN response: #{inspect(malformed)}")
%{network_name: "", mcc: 0, mnc: 0}
end
defp qnwinfo_response_to_info({:ok, _header, [act, oper, band, channel]})
when is_binary(act) and is_binary(oper) and is_binary(band) and is_integer(channel) do
%{act: act, band: band, channel: channel}
end
defp qnwinfo_response_to_info(malformed) do
Logger.warn("Unexpected AT+QNWINFO response: #{inspect(malformed)}")
%{act: "UNKNOWN", band: "", channel: 0}
end
defp plmn_to_mcc_mnc(<<mcc::3-bytes, mnc::3-bytes>>) do
{safe_decimal_to_int(mcc), safe_decimal_to_int(mnc)}
end
defp plmn_to_mcc_mnc(<<mcc::3-bytes, mnc::2-bytes>>) do
{safe_decimal_to_int(mcc), safe_decimal_to_int(mnc)}
end
defp plmn_to_mcc_mnc(other) do
{safe_decimal_to_int(other), 0}
end
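# Illustration of the PLMN split above: "310410" => {310, 410} (3-digit
# MNC) and "31041" => {310, 41} (2-digit MNC).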
defp safe_hex_to_int(hex_string) do
case Integer.parse(hex_string, 16) do
{value, ""} -> value
_other -> 0
end
end
defp safe_decimal_to_int(string) do
case Integer.parse(string) do
{value, ""} -> value
_other -> 0
end
end
defp decode_stat(0), do: :not_registered_not_looking
defp decode_stat(1), do: :registered_home_network
defp decode_stat(2), do: :not_registered_looking
defp decode_stat(3), do: :registration_denied
defp decode_stat(4), do: :unknown
defp decode_stat(5), do: :registered_roaming
defp decode_stat(_), do: :invalid
defp post_registration(%{stat: stat, lac: lac, ci: ci}, ifname)
when stat in [:registered_home_network, :registered_roaming] do
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "lac"], lac)
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "cid"], ci)
end
defp post_registration(%{stat: _stat}, ifname) do
# Disconnected case, so clear out properties reported by the cell monitor
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "lac"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "cid"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "network"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "mcc"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "mnc"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "access_technology"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "band"])
PropertyTable.clear(VintageNet, ["interface", ifname, "mobile", "channel"])
end
defp post_network(%{network_name: name, mcc: mcc, mnc: mnc}, ifname) do
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "network"], name)
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "mcc"], mcc)
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "mnc"], mnc)
end
defp post_qnwinfo(%{act: act, band: band, channel: channel}, ifname) do
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "access_technology"], act)
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "band"], band)
PropertyTable.put(VintageNet, ["interface", ifname, "mobile", "channel"], channel)
end
end
|
lib/vintage_net_mobile/cell_monitor.ex
| 0.843186
| 0.524212
|
cell_monitor.ex
|
starcoder
|
defmodule Octo.Repo do
@moduledoc """
Split read/write operations across multiple Ecto repos.
Forwards all write operations to the `master_repo` and all read operations to the `replica_repos`
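A minimal sketch of a consuming module (the repo names here are
illustrative assumptions):
defmodule MyApp.Repo do
use Octo.Repo,
master_repo: MyApp.Repo.Primary,
replica_repos: [MyApp.Repo.Replica1, MyApp.Repo.Replica2]
end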
"""
defmacro __using__(opts) do
quote bind_quoted: [opts: opts] do
master_repo = Keyword.get(opts, :master_repo)
replica_repos = Keyword.get(opts, :replica_repos, [master_repo])
algorithm = Keyword.get(opts, :algorithm, Octo.Algorithms.Random)
@master_repo master_repo
@replica_repos replica_repos
@algorithm algorithm
def master_repo, do: @master_repo
def replica_repos, do: @replica_repos
# write ops
defdelegate delete!(struct_or_changeset, opts \\ []), to: @master_repo
defdelegate delete(struct_or_changeset, opts \\ []), to: @master_repo
defdelegate delete_all(queryable, opts \\ []), to: @master_repo
defdelegate insert!(struct_or_changeset, opts \\ []), to: @master_repo
defdelegate insert(struct_or_changeset, opts \\ []), to: @master_repo
defdelegate insert_all(schema_or_source, entries, opts \\ []), to: @master_repo
defdelegate insert_or_update!(changeset, opts \\ []), to: @master_repo
defdelegate insert_or_update(changeset, opts \\ []), to: @master_repo
defdelegate update!(changeset, opts \\ []), to: @master_repo
defdelegate update(changeset, opts \\ []), to: @master_repo
defdelegate update_all(queryable, updates, opts \\ []), to: @master_repo
# must use master repo for transactions since they may include writes
defdelegate checkout(function, opts \\ []), to: @master_repo
defdelegate in_transaction?, to: @master_repo
defdelegate rollback(value), to: @master_repo
defdelegate transaction(fun_or_multi, opts \\ []), to: @master_repo
# read ops
def aggregate(queryable, aggregate, field, opts \\ []),
do: replica_repo().aggregate(queryable, aggregate, field, opts)
def all(queryable, opts \\ []), do: replica_repo().all(queryable, opts)
def exists?(queryable, opts \\ []),
do: replica_repo().exists?(queryable, opts)
def get!(queryable, id, opts \\ []),
do: replica_repo().get!(queryable, id, opts)
def get(queryable, id, opts \\ []),
do: replica_repo().get(queryable, id, opts)
def get_by!(queryable, clauses, opts \\ []),
do: replica_repo().get_by!(queryable, clauses, opts)
def get_by(queryable, clauses, opts \\ []),
do: replica_repo().get_by(queryable, clauses, opts)
def load(module_or_map, data),
do: replica_repo().load(module_or_map, data)
def one!(queryable, opts \\ []),
do: replica_repo().one!(queryable, opts)
def one(queryable, opts \\ []),
do: replica_repo().one(queryable, opts)
def preload(structs_or_struct_or_nil, preloads, opts \\ []),
do: replica_repo().preload(structs_or_struct_or_nil, preloads, opts)
def stream(queryable, opts \\ []),
do: replica_repo().stream(queryable, opts)
# helpers
defp replica_repo when is_atom(@algorithm), do: @algorithm.get_repo(@replica_repos)
end
end
end
|
lib/octo/repo.ex
| 0.700178
| 0.510192
|
repo.ex
|
starcoder
|
defmodule Goth.TokenStore do
@moduledoc """
The `Goth.TokenStore` is a simple `GenServer` that manages storage and retrieval
of tokens `Goth.Token`. When adding to the token store, it also queues tokens
for a refresh before they expire: ten seconds before the token is set to expire,
the `TokenStore` will call the API to get a new token and replace the expired
token in the store.
"""
use GenServer
alias Goth.Token
def start_link do
GenServer.start_link(__MODULE__, %{}, name: __MODULE__)
end
def init(state) do
{:ok, state}
end
@doc ~S"""
Store a token in the `TokenStore`. Upon storage, Goth will queue the token
to be refreshed ten seconds before its expiration.
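A sketch, mirroring the example in `find/2` below:
token = %Goth.Token{type: "Bearer", token: "...", scope: "my-scope",
expires: :os.system_time(:seconds) + 3600}
Goth.TokenStore.store(token)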
"""
@spec store(Token.t()) :: pid
def store(%Token{} = token), do: store(token.scope, token.sub, token)
@spec store({String.t() | atom(), String.t()} | String.t(), Token.t()) :: pid()
def store(scopes, %Token{} = token) when is_binary(scopes),
do: store({:default, scopes}, token.sub, token)
def store({account, scopes}, %Token{} = token) when is_binary(scopes),
do: store({account, scopes}, token.sub, token)
@spec store(String.t(), String.t(), Token.t()) :: pid
def store(scopes, sub, %Token{} = token) when is_binary(scopes),
do: store({:default, scopes}, sub, token)
@spec store({String.t() | atom(), String.t()}, String.t() | nil, Token.t()) :: pid
def store({account, scopes}, sub, %Token{} = token) when is_binary(scopes) do
GenServer.call(__MODULE__, {:store, {account, scopes, sub}, token})
end
@doc ~S"""
Retrieve a token from the `TokenStore`.
token = %Goth.Token{type: "Bearer",
token: "<PASSWORD>",
scope: "scope",
expires: :os.system_time(:seconds) + 3600}
Goth.TokenStore.store(token)
{:ok, ^token} = Goth.TokenStore.find(token.scope)
"""
@spec find({String.t() | atom(), String.t()} | String.t(), String.t() | nil) ::
{:ok, Token.t()} | :error
def find(info, sub \\ nil)
def find(scope, sub) when is_binary(scope), do: find({:default, scope}, sub)
def find({account, scope}, sub) do
GenServer.call(__MODULE__, {:find, {account, scope, sub}})
end
# when we store a token, we should refresh it later
def handle_call({:store, {account, scope, sub}, token}, _from, state) do
# this is a race condition when inserting an expired (or about to expire) token...
pid_or_timer = Token.queue_for_refresh(token)
{:reply, pid_or_timer, Map.put(state, {account, scope, sub}, token)}
end
def handle_call({:find, {account, scope, sub}}, _from, state) do
state
|> Map.fetch({account, scope, sub})
|> filter_expired(:os.system_time(:seconds))
|> reply(state, {account, scope, sub})
end
defp filter_expired(:error, _), do: :error
defp filter_expired({:ok, %Goth.Token{expires: expires}}, system_time)
when expires < system_time,
do: :error
defp filter_expired(value, _), do: value
defp reply(:error, state, {account, scope, sub}),
do: {:reply, :error, Map.delete(state, {account, scope, sub})}
defp reply(value, state, _key), do: {:reply, value, state}
end
|
lib/goth/token_store.ex
| 0.819929
| 0.5835
|
token_store.ex
|
starcoder
|
defmodule Feedex do
@moduledoc """
Feedex is a simple elixir feed (atom/rss) parser.
## Examples
```elixir
iex> {:ok, feed} = Feedex.fetch_and_parse "http://9gagrss.com/feed/"
...
iex> {:ok, feed} = Feedex.parse "<rss version=\"2.0\" xmlns:content=\"http://purl.org/rss/1.0/modules/content/\" ..."
...
iex> feed.title
"9GAG RSS feed"
iex> feed.entries |> Enum.map(&(&1.title))
["Are you the lucky one ?", "Hide and Seek", "Playing guitar for little cate", ...]
```
## Results
#### Feed
- `id` feed identifier (usually the site url)
- `title` feed title
- `description` feed description
- `url` feed url
- `site_url` feed main site url
- `updated` feed last modification timestamp
- `entries` entry list
#### Entry
- `id` unique identifier (sha256)
- `title` entry title
- `url` entry permalink
- `content` entry content
- `updated` entry publication or modification timestamp
"""
require Logger
alias Feedex.Helpers.{Fetch, Sanitizer}
alias Feedex.Parsers.{Atom, RSS1, RSS2}
@doc """
Parses a `xml` string.
## Examples
iex> Feedex.parse "<rss version="2.0"><channel><title>9GAG RSS feed</title><description>Free 9GAG RSS feed</description>..."
{:ok, %{id: "http://9gagrss.com/", title: "9GAG RSS feed", description: "Free 9GAG RSS feed"...}}
iex> Feedex.parse "foo"
{:error, :invalid_xml}
iex> Feedex.parse("<!DOCTYPE html><html lang="en"><head><meta charset...")
{:error, :unknown_feed_format}
"""
def parse(xml, url \\ "") do
with {:ok, doc} <- read_xml_doc(xml),
{:ok, parser} <- select_parser(doc),
{:ok, feed} <- parser.parse(doc, url) do
entries =
feed.entries
|> Enum.filter(fn(e) -> e.title && e.content end)
|> Enum.sort(&(DateTime.compare(&1[:updated], &2[:updated]) != :gt))
|> Enum.map(&sanitize_entry(&1))
{:ok, %{feed | entries: entries}}
end
end
@doc """
Similar to `parse/2` but raises `ArgumentError` if unable to parse the `xml`.
## Examples
iex> Feedex.parse! "<rss version="2.0"><channel><title>9GAG RSS feed</title><description>Free 9GAG RSS feed</description>..."
%{id: "http://9gagrss.com/", title: "9GAG RSS feed", description: "Free 9GAG RSS feed"...}
iex> Feedex.parse! "foo"
** (ArgumentError) Not a valid XML
"""
def parse!(xml, url \\ "") do
with {:ok, feed} <- parse(xml, url) do
feed
else
_ -> raise ArgumentError, "Not a valid XML"
end
end
@doc """
Fetches the given `url` and parses the response using `parse/2`.
## Examples
iex> Feedex.fetch_and_parse "http://9gagrss.com/feed/"
%{id: "http://9gagrss.com/", title: "9GAG RSS feed", description: "Free 9GAG RSS feed"...}
iex> Feedex.fetch_and_parse "http://invalid-url"
{:error, :fetch_error}
"""
def fetch_and_parse(url) do
with {:ok, xml} <- Fetch.get(url),
{:ok, feed} <- parse(xml, url) do
{:ok, %{feed | url: url}}
end
end
# --
defp read_xml_doc(xml) do
try do
{:ok, SweetXml.parse(xml, [quiet: true, namespace_conformant: true])}
rescue
FunctionClauseError -> {:error, :invalid_xml}
catch
:exit, _ -> {:error, :invalid_xml}
end
end
defp select_parser(doc) do
cond do
Atom.valid?(doc) -> {:ok, Atom}
RSS1.valid?(doc) -> {:ok, RSS1}
RSS2.valid?(doc) -> {:ok, RSS2}
true -> {:error, :unknown_feed_format}
end
end
defp sanitize_entry(entry) do
content =
(entry.content || "")
|> Sanitizer.basic_html
%{entry | content: content}
end
end
|
lib/feedex.ex
| 0.823328
| 0.679438
|
feedex.ex
|
starcoder
|
defmodule Money.ExchangeRates.Supervisor do
@moduledoc """
Functions to manage the starting, stopping,
deleting and restarting of the Exchange
Rates Retriever.
"""
use Supervisor
alias Money.ExchangeRates
@child_name ExchangeRates.Retriever
@doc """
Starts the Exchange Rates supervisor and
optionally starts the exchange rates
retrieval service as well.
## Options
* `:restart` is a boolean value indicating
if the supervisor is to be restarted. This is
typically used to move the supervisor from its
default position under the `ex_money` supervision
tree to a different supervision tree. The default
is `false`
* `:start_retriever` is a boolean indicating
if the exchange rates retriever is to be started
when the supervisor is started. The default is
defined by the configuration key
`:auto_start_exchange_rates_service`
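## Example
A sketch of moving the supervisor under your own supervision tree
(the option values here are illustrative):
Money.ExchangeRates.Supervisor.start_link(restart: true, start_retriever: true)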
"""
def start_link do
Supervisor.start_link(__MODULE__, :ok, name: ExchangeRates.Supervisor)
end
def start_link(options) do
options = Keyword.merge(default_options(), options)
if options[:restart], do: stop()
supervisor = start_link()
if options[:start_retriever], do: ExchangeRates.Retriever.start()
supervisor
end
defp default_options do
[
restart: false,
start_retriever: Money.get_env(:auto_start_exchange_rates_service, false, :boolean)
]
end
@doc """
Stop the Money.ExchangeRates.Supervisor.
Unless `ex_money` is configured in `mix.exs` as
`runtime: false`, the Money.ExchangeRates.Supervisor
is always started when `ex_money` starts even if the
config key `:auto_start_exchange_rates_service` is
set to `false`.
In some instances an application may require the
`Money.ExchangeRates.Supervisor` to be started under
a different supervision tree. In this case it is
required to call this function first before a new
configuration is started.
One use case is when the Exchange Rates service is
configured with either an API module, a Callback module
or a Cache module which uses Ecto and therefore it's
a requirement that Ecto is started first.
See the README section on "Using Ecto or other applications
from within the callback module" for an example of how
to configure the supervisor in this case.
"""
def stop(supervisor \\ default_supervisor()) do
Supervisor.terminate_child(supervisor, __MODULE__)
end
@doc """
Returns the name of the default supervisor
which is `Money.Supervisor`
"""
def default_supervisor do
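# Read the {module, options} tuple from the :mod key of the :ex_money
# application spec and return the supervisor name stored in its options.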
{_, options} =
Application.spec(:ex_money)
|> Keyword.get(:mod)
Keyword.get(options, :name)
end
@doc false
def init(:ok) do
Supervisor.init([], strategy: :one_for_one)
end
@doc """
Returns a boolean indicating if the
retriever process is configured and
running
"""
def retriever_running? do
!!Process.whereis(@child_name)
end
@doc """
Returns the status of the exchange rates
retriever. The returned value is one of:
* `:running` if the service is running. In this
state the valid action is `Money.ExchangeRates.Service.stop/0`
* `:stopped` if it is stopped. In this state
the valid actions are `Money.ExchangeRates.Supervisor.restart_retriever/0`
or `Money.ExchangeRates.Supervisor.delete_retriever/0`
* `:not_started` if it is not configured
in the supervisor and is not running. In
this state the only valid action is
`Money.ExchangeRates.Supervisor.start_retriever/1`
"""
def retriever_status do
cond do
!!Process.whereis(@child_name) -> :running
configured?(@child_name) -> :stopped
true -> :not_started
end
end
defp configured?(child) do
Money.ExchangeRates.Supervisor
|> Supervisor.which_children()
|> Enum.any?(fn {name, _pid, _type, _args} -> name == child end)
end
@doc """
Starts the exchange rates retriever
## Arguments
* `config` is a `%Money.ExchangeRates.Config{}`
struct returned by `Money.ExchangeRates.config/0`
and adjusted as required. The default is
`Money.ExchangeRates.config/0`
"""
def start_retriever(config \\ ExchangeRates.config()) do
Supervisor.start_child(__MODULE__, retriever_spec(config))
end
@doc """
Stop the exchange rates retriever.
"""
def stop_retriever do
Supervisor.terminate_child(__MODULE__, @child_name)
end
@doc """
Restarts a stopped retriever.
See also `Money.ExchangeRates.Retriever.stop/0`
"""
def restart_retriever do
Supervisor.restart_child(__MODULE__, @child_name)
end
@doc """
Deleted the retriever child specification from
the exchange rates supervisor.
This is primarily of use if you want to change
the configuration of the retriever after it is
stopped and before it is restarted.
In this situation the sequence of calls would be:
```
iex> Money.ExchangeRates.Retriever.stop
iex> Money.ExchangeRates.Retriever.delete
iex> Money.ExchangeRates.Retriever.start(config)
```
"""
def delete_retriever do
Supervisor.delete_child(__MODULE__, @child_name)
end
defp retriever_spec(config) do
%{id: @child_name, start: {@child_name, :start_link, [@child_name, config]}}
end
end
|
lib/money/exchange_rates/exchange_rates_supervisor.ex
| 0.86196
| 0.764232
|
exchange_rates_supervisor.ex
|
starcoder
|
defmodule Plaid.Auth do
@moduledoc """
[Plaid Auth API](https://plaid.com/docs/api/products/#auth) calls and schema.
"""
defmodule GetResponse do
@moduledoc """
[Plaid API /auth/get response schema.](https://plaid.com/docs/api/products/#auth).
"""
@behaviour Plaid.Castable
alias Plaid.Account
alias Plaid.Auth.Numbers
alias Plaid.Castable
alias Plaid.Item
@type t :: %__MODULE__{
accounts: [Account.t()],
numbers: Numbers.t(),
item: Item.t(),
request_id: String.t()
}
defstruct [
:accounts,
:numbers,
:item,
:request_id
]
@impl true
def cast(generic_map) do
%__MODULE__{
accounts: Castable.cast_list(Account, generic_map["accounts"]),
numbers: Castable.cast(Numbers, generic_map["numbers"]),
item: Castable.cast(Item, generic_map["item"]),
request_id: generic_map["request_id"]
}
end
end
@doc """
Get information about account and routing numbers for
checking and savings accounts.
Does a `POST /auth/get` call which returns high level account information
along with account and routing numbers for checking and savings
accounts.
Params:
* `access_token` - Token to fetch accounts for.
Options:
* `:account_ids` - Specific account ids to fetch auth data for.
## Examples
Auth.get("access-sandbox-123xxx", client_id: "123", secret: "abc")
{:ok, %Auth.GetResponse{}}
"""
@spec get(String.t(), options, Plaid.config()) ::
{:ok, GetResponse.t()} | {:error, Plaid.Error.t()}
when options: %{optional(:account_ids) => [String.t()]}
def get(access_token, options \\ %{}, config) do
options_payload = Map.take(options, [:account_ids])
payload =
%{}
|> Map.put(:access_token, access_token)
|> Map.put(:options, options_payload)
Plaid.Client.call(
"/auth/get",
payload,
GetResponse,
config
)
end
end
|
lib/plaid/auth.ex
| 0.863002
| 0.41256
|
auth.ex
|
starcoder
|
defmodule Hitbtc.Http.Account do
alias Hitbtc.Util.Api
@moduledoc """
Set of account related actions.
This set of methods requires auth information.
You can configure it in the `config.exs` file of your application.
"""
@doc """
Load list of your balance
## Example:
```elixir
iex(1)> Hitbtc.Account.balance
{:ok,
[%{available: "0.00000000", currency: "BTC", reserved: "0.00000000"},
%{available: "0.00100000", currency: "BTG", reserved: "0.00000000"},
%{available: "0.00000000", currency: "CLD", ...},
%{available: "0.00000000", ...}, %{...}, ...]}
```
"""
@spec balance() :: {:ok, [map]} | {:error, term}
def balance(), do: Api.get_body("/account/balance")
@doc """
Get address for deposit currency
## Example:
```elixir
iex(1)> Hitbtc.Account.deposit_address_get("ETH")
{:ok, %{address: "0xe2be99cf4d3b1ce48ae0f7c5f8a508be9b62d5e0"}}
```
"""
@spec deposit_address_get(String.t) :: {:ok, map} | {:error, term}
def deposit_address_get(currency), do: Api.get_body("/account/crypto/address/#{currency}")
@doc """
Create new address for deposit currency
## Example:
```elixir
iex(1)> Hitbtc.Account.deposit_address_new("BTG")
{:ok, %{address: "GV2PQ4cmrC6zJZJQzfpLZze8D6KGM2piec"}}
```
"""
@spec deposit_address_new(String.t) :: {:ok, map} | {:error, term}
def deposit_address_new(currency), do: Api.post_body("/account/crypto/address/#{currency}", %{})
@doc """
Withdraw crypto
**Note that this method might take long time.**
Be careful about timeouts.
This method support set of parameters:
- `paymentId` - `String`
- `networkFee` - `Number` or `String` Too low and too high commission value will be rounded to valid values.
- `includeFee` - `Boolean` Default false. If set true then the total spent will be the specified amount; the fee and networkFee are deducted from it.
- `autoCommit` - `Boolean` Default true. If set false then you should commit or roll back the transaction within an hour. Used in the two-phase commit schema.
## Example:
```elixir
iex(5)> Hitbtc.Account.withdraw("ETH", 0.01, "0xe2be99cf4d3b1ce48ae0f7c5f8a508be9b62d5e0", [autoCommit: true])
{:ok, %{id: "d2ce578f-647d-4fa0-b1aa-4a27e5ee597b"}}
```
"""
@spec withdraw(String.t, float, String.t, [tuple]) :: {:ok, map} | {:error, term}
def withdraw(currency, amount, address, params \\ []) do
body = [
currency: currency,
amount: Float.to_string(amount),
address: address
]
Api.post_body("/account/crypto/withdraw", body ++ params)
end
@doc """
Commit withdraw
## Example:
```elixir
iex(1)> Hitbtc.Account.withdraw_commit("d2ce578f-647d-4fa0-b1aa-4a27e5ee597b")
{:ok, %{result: true}}
```
"""
@spec withdraw_commit(String.t) :: {:ok, map} | {:error, term}
def withdraw_commit(id), do: Api.put_body("/account/crypto/withdraw/#{id}", %{})
@doc """
Cancel withdraw
## Example:
```elixir
iex(1)> Hitbtc.Account.withdraw_cancel("d2ce578f-647d-4fa0-b1aa-4a27e5ee597b")
{:ok, %{result: true}}
```
"""
@spec withdraw_cancel(String.t) :: {:ok, map} | {:error, term}
def withdraw_cancel(id), do: Api.delete_body("/account/crypto/withdraw/#{id}")
@doc """
Transfer money between trading and account
Before you are able to use your currency in trading, you have to transfer it from your "bank" account to the exchange,
and before withdrawing you have to transfer it back.
The 3rd parameter `type` indicates the direction of the transfer
and can take these values:
- `bankToExchange` - Transfer from bank to exchange
- `exchangeToBank` - Transfer from exchange to bank
## Example:
```elixir
iex(1)> Hitbtc.Account.transfer("ETH", 0.01, "bankToExchange")
{:ok, %{id: "d2ce578f-647d-4fa0-b1aa-4a27e5ee597b"}}
iex(2)> Hitbtc.Account.transfer("ETH", 0.01, "exchangeToBank")
{:ok, %{id: "d2ce578f-647d-4fa0-b1aa-4a27e5ee597b"}}
```
"""
@spec transfer(String.t, float, String.t) :: {:ok, map} | {:error, term}
def transfer(currency, amount, type \\ "bankToExchange") do
body = [
currency: currency,
amount: Float.to_string(amount),
type: type
]
Api.post_body("/account/transfer", body)
end
@doc """
Get transaction history
## Example:
```elixir
iex(1)> Hitbtc.Account.transaction_list
{:ok,
[%{amount: "0.00100000", createdAt: "2017-12-17T09:09:16.399Z",
currency: "SBTC", id: "9289748d-2c1b-4a31-9a9c-e6bceda2b39d",
index: 556153413, status: "success", type: "deposit",
updatedAt: "2017-12-17T09:09:16.672Z"},
%{amount: "0.050000000000000000", createdAt: "2017-10-26T05:46:21.883Z",
currency: "ETH", id: "a484c7b6-6ccf-4c70-bfa8-251db3f0609b",
index: 377464416, status: "success", type: "bankToExchange",
updatedAt: "2017-10-26T05:46:26.404Z"},
%{amount: "0.00100000", createdAt: "2017-10-26T05:46:09.363Z",
currency: "BTC", id: "293a7efc-2b29-4262-8413-ca29b0b3b36b",
index: 377462653, status: "success", type: "bankToExchange",
updatedAt: "2017-10-26T05:46:10.799Z"}]}
```
Or you could call it with transaction id:
```elixir
iex(2)> Hitbtc.Account.transaction_list("9289748d-2c1b-4a31-9a9c-e6bceda2b39d")
{:ok,
%{amount: "0.00100000", createdAt: "2017-12-17T09:09:16.399Z",
currency: "SBTC", id: "9289748d-2c1b-4a31-9a9c-e6bceda2b39d",
index: 556153413, status: "success", type: "deposit",
updatedAt: "2017-12-17T09:09:16.672Z"}}
```
"""
@spec transaction_list(String.t, [tuple]) :: {:ok, [map]} | {:error, term}
def transaction_list(id \\ "", params \\ []) do
case id do
"" -> Api.get_body("/account/transactions", params)
str -> Api.get_body("/account/transactions/#{str}", params)
end
end
end
|
lib/hitbtc/http/account.ex
| 0.916517
| 0.689129
|
account.ex
|
starcoder
|
defmodule Ehee.Gists do
import Ehee
alias Ehee.Credential
@moduledoc """
The Gist Webhooks API
"""
@doc """
List authenticated users gists
## Example
Ehee.Gists.list(credential)
More info at: https://developer.github.com/v3/gists/#list-a-users-gists
"""
@spec list(Credential.t) :: Ehee.response
def list(credential) do
get("/gists", credential)
end
@doc """
List public gists for specified user
## Example
Ehee.Gists.list_users(credential, "username")
More info at: https://developer.github.com/v3/gists/#list-a-users-gists
"""
@spec list_users(Credential.t, binary) :: Ehee.response
def list_users(credential, username) do
get("/users/#{username}/gists", credential)
end
@doc """
List all public gists
## Example
Ehee.Gists.list_public(credential)
More info at: https://developer.github.com/v3/gists/#list-all-public-gists
"""
@spec list_public(Credential.t) :: Ehee.response
def list_public(credential) do
get("/gists/public", credential, [], [pagination: :stream])
end
@doc """
Get a single gist
## Example
Ehee.Gists.show(credential, 1234567)
More info at: https://developer.github.com/v3/gists/#get-a-single-gist
"""
@spec show(Credential.t, binary | integer) :: Ehee.response
def show(credential, id) do
get("/gists/#{id}", credential)
end
@doc """
Create a gist
## Example
Ehee.Gists.create(credential, "gist description", true, "file1", "sample gist")
More info at: https://developer.github.com/v3/gists/#create-a-gist
"""
@spec create(Credential.t, binary, boolean, binary, binary) :: Ehee.response
def create(credential, description, is_public, file_name, content) do
body = %{ "description": description,
"public": is_public,
"files": %{
"#{file_name}": %{
"content": content
}
}
}
post("/gists", credential, body)
end
@doc """
Edit a gist
## Example
Ehee.Gists.edit(credential, 1234567, "files", "sample gist")
More info at: https://developer.github.com/v3/gists/#edit-a-gist
"""
@spec edit(Credential.t, binary | integer, binary, binary) :: Ehee.response
def edit(credential, id, file_name, content) do
body = %{"files": %{
"#{file_name}": %{
"content": content
}
}
}
patch("/gists/#{id}", credential, body)
end
@doc """
Star a gist
## Example
Ehee.Gists.star(credential, 1234567)
More info at: https://developer.github.com/v3/gists/#star-a-gist
"""
@spec star(Credential.t, binary | integer) :: Ehee.response
def star(credential, id) do
put("/gists/#{id}/star", credential)
end
@doc """
Unstar a gist
## Example
Ehee.Gists.unstar(credential, 1234567)
More info at: https://developer.github.com/v3/gists/#unstar-a-gist
"""
@spec unstar(Credential.t, binary | integer) :: Ehee.response
def unstar(credential, id) do
delete("/gists/#{id}/star", credential)
end
@doc """
Check if a gist is starred
## Example
Ehee.Gists.starred?(credential, 1234567)
More info at: https://developer.github.com/v3/gists/#check-if-a-gist-is-starred
"""
@spec starred?(Credential.t, binary | integer) :: boolean
def starred?(credential, id) do
resp = get("/gists/#{id}/star", credential)
resp |> elem(0) == 204
end
@doc """
Delete a gist
## Example
Ehee.Gists.destroy(credential, 1234567)
More info at: https://developer.github.com/v3/gists/#delete-a-gist
"""
@spec destroy(Credential.t, binary | integer) :: Ehee.response
def destroy(credential, id) do
delete("/gists/#{id}", credential)
end
@doc """
List gist on a gist
## Example
Ehee.Gists.list_comments(credential, 1234567)
More info at: https://developer.github.com/v3/gists/comments/#list-comments-on-a-gist
"""
@spec list_comments(Credential.t, binary | integer) :: Ehee.response
def list_comments(credential, id) do
get("/gists/#{id}/comments", credential)
end
@doc """
Get a single comment
## Example
Ehee.Gists.show_comment(credential, 1234567, 1234567)
More info at: https://developer.github.com/v3/gists/comments/#get-a-single-comment
"""
@spec show_comment(Credential.t, binary | integer, binary | integer) :: Ehee.response
def show_comment(credential, gist_id, comment_id) do
get("/gists/#{gist_id}/comments/#{comment_id}", credential)
end
@doc """
Create a comment
## Example
Ehee.Gists.create_comment(credential, 1234567, "gist comment!")
More info at: https://developer.github.com/v3/gists/comments/#create-a-comment
"""
@spec create_comment(Credential.t, binary | integer, binary) :: Ehee.response
def create_comment(credential, id, comment) do
body = %{ "body": comment}
post("/gists/#{id}/comments", credential, body)
end
@doc """
Edit a comment
## Example
Ehee.Gists.edit_comment(credential, 1234567, 1234567, "gist comment!")
More info at: https://developer.github.com/v3/gists/comments/#edit-a-comment
"""
@spec edit_comment(Credential.t, binary | integer, binary | integer, binary) :: Ehee.response
def edit_comment(credential, gist_id, comment_id, comment) do
body = %{ "body": comment }
patch("/gists/#{gist_id}/comments/#{comment_id}", credential, body)
end
@doc """
Delete a comment
## Example
Ehee.Gists.delete_comment(credential, 1234567, 1234567)
More info at: https://developer.github.com/v3/gists/comments/#delete-a-comment
"""
@spec delete_comment(Credential.t, binary | integer, binary | integer) :: Ehee.response
def delete_comment(credential, gist_id, comment_id) do
delete("/gists/#{gist_id}/comments/#{comment_id}", credential)
end
end
|
lib/ehee/gists.ex
| 0.783077
| 0.479077
|
gists.ex
|
starcoder
|
defmodule Norm.Core.Collection do
@moduledoc false
defstruct spec: nil, opts: []
def new(spec, opts) do
%__MODULE__{spec: spec, opts: opts}
end
defimpl Norm.Conformer.Conformable do
alias Norm.Conformer
alias Norm.Conformer.Conformable
def conform(%{spec: spec, opts: opts}, input, path) do
with :ok <- check_enumerable(input, path, opts),
:ok <- check_kind_of(input, path, opts),
:ok <- check_distinct(input, path, opts),
:ok <- check_counts(input, path, opts) do
results =
input
|> Enum.with_index()
|> Enum.map(fn {elem, i} -> Conformable.conform(spec, elem, path ++ [i]) end)
|> Conformer.group_results()
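# Choose the output collection type: an explicit :into option wins;
# otherwise mirror the input's shape (list, struct, or plain map).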
into = cond do
opts[:into] ->
opts[:into]
is_list(input) ->
[]
is_map(input) and Map.has_key?(input, :__struct__) ->
struct(input.__struct__)
is_map(input) ->
%{}
true ->
raise ArgumentError, "Cannot determine output type for collection"
end
if Enum.any?(results.error) do
{:error, results.error}
else
{:ok, convert(results.ok, into)}
end
end
end
def valid?(%{spec: spec, opts: opts}, input, path) do
with :ok <- check_enumerable(input, path, opts),
:ok <- check_kind_of(input, path, opts),
:ok <- check_distinct(input, path, opts),
:ok <- check_counts(input, path, opts) do
input
|> Stream.with_index()
|> Stream.map(fn {elem, i} -> Conformable.valid?(spec, elem, path ++ [i]) end)
|> Enum.all?(& &1)
else
_ -> false
end
end
defp convert(results, type) do
Enum.into(results, type)
end
defp check_counts(input, path, opts) do
min = opts[:min_count]
max = opts[:max_count]
length = Enum.count(input)
cond do
min > length ->
{:error, [Conformer.error(path, input, "min_count: #{min}")]}
max && max < length ->
{:error, [Conformer.error(path, input, "max_count: #{max}")]}
true ->
:ok
end
end
defp check_distinct(input, path, opts) do
if opts[:distinct] do
if Enum.uniq(input) == input do
:ok
else
{:error, [Conformer.error(path, input, "distinct?")]}
end
else
:ok
end
end
defp check_enumerable(input, path, _opts) do
if Enumerable.impl_for(input) == nil do
{:error, [Conformer.error(path, input, "not enumerable")]}
else
:ok
end
end
defp check_kind_of(input, path, opts) do
cond do
# If kind is nil we assume it doesn't matter
opts[:kind] == nil ->
:ok
# If we have a `:kind` and it returns true we pass the spec
opts[:kind].(input) ->
:ok
# Otherwise return an error
true ->
{:error, [Conformer.error(path, input, "does not match kind: #{inspect opts[:kind]}")]}
end
end
end
if Code.ensure_loaded?(StreamData) do
defimpl Norm.Generatable do
def gen(%{spec: spec, opts: opts}) do
with {:ok, g} <- Norm.Generatable.gen(spec) do
generator =
g
|> sequence(opts)
|> into(opts)
{:ok, generator}
end
end
def sequence(g, opts) do
min = opts[:min_count]
max = opts[:max_count]
if opts[:distinct] do
StreamData.uniq_list_of(g, [min_length: min, max_length: max])
else
StreamData.list_of(g, [min_length: min, max_length: max])
end
end
def into(list_gen, opts) do
StreamData.bind(list_gen, fn list ->
# We assume that if we don't have an `into` specified then its a list
StreamData.constant(Enum.into(list, opts[:into] || []))
end)
end
end
end
defimpl Inspect do
import Inspect.Algebra
def inspect(coll_of, opts) do
concat(["#Norm.CollOf<", to_doc(coll_of.spec, opts), ">"])
end
end
end
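# Usage sketch via the public Norm API (assuming the usual `Norm.coll_of/2`
# and `Norm.spec/1` entry points build and wrap this struct):
#
#     import Norm
#
#     conform!([1, 2, 3], coll_of(spec(is_integer()), min_count: 1))
#     # => [1, 2, 3]
#
#     conform([1, 1], coll_of(spec(is_integer()), distinct: true))
#     # => {:error, errors} with a "distinct?" error for the duplicated input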
|
lib/norm/core/collection.ex
| 0.672654
| 0.457137
|
collection.ex
|
starcoder
|
defmodule RedisGraph.QueryResult do
@moduledoc """
A QueryResult containing returned fields and query metadata.
The resulting struct contains the result set header and records,
statistics about the query executed, and referential lists of entity
identifiers, specifically labels, property keys, and relationship types.
The labels refer to the `label` attribute of Node entities
in the graph. The property keys are the keys found in any Node or Edge
property maps. The relationship types are the `relation` attributes of
Edge entities in the graph.
## Example
```elixir
# Create a query to fetch some data
query = "MATCH (p:person)-[v:visited]->(c:country) RETURN p.name, p.age, v.purpose, c.name"
# Execute the query
{:ok, query_result} = RedisGraph.query(conn, graph.name, query)
# Show the resulting statistics
IO.inspect(query_result.statistics)
# Pretty print the results using the Scribe lib
IO.puts(QueryResult.pretty_print(query_result))
```
which gives the following results:
```elixir
# Query result statistics
%{
"Labels added" => nil,
"Nodes created" => nil,
"Nodes deleted" => nil,
"Properties set" => nil,
"Query internal execution time" => "0.228669",
"Relationships created" => nil,
"Relationships deleted" => nil
}
# Pretty printed output
+----------------+-------------+-----------------+--------------+
| "p.name" | "p.age" | "v.purpose" | "c.name" |
+----------------+-------------+-----------------+--------------+
| "<NAME>" | 33 | nil | "Japan" |
+----------------+-------------+-----------------+--------------+
```
"""
alias RedisGraph.Edge
alias RedisGraph.Node
@labels_added "Labels added"
@nodes_created "Nodes created"
@nodes_deleted "Nodes deleted"
@relationships_deleted "Relationships deleted"
@properties_set "Properties set"
@relationships_created "Relationships created"
@query_internal_execution_time "Query internal execution time"
@graph_removed_internal_execution_time "Graph removed, internal execution time"
@type t() :: %__MODULE__{
raw_result_set: list(any()) | String.t(),
header: list(String.t()),
result_set: list(list(any())),
statistics: %{String.t() => String.t()}
}
@enforce_keys [:conn, :graph_name, :raw_result_set]
defstruct [
:conn,
:graph_name,
:raw_result_set,
:header,
:result_set,
:statistics,
:labels,
:property_keys,
:relationship_types
]
@doc """
Create a new QueryResult from a map.
Pass a map with a connection, graph name, and raw redisgraph result.
The raw result is the output of the function `Redix.command/2`.
This function is invoked by the `RedisGraph.command/2` function.
The functions `RedisGraph.commit/2`, `RedisGraph.query/3`, `RedisGraph.delete/2`,
and `RedisGraph.merge/3` will also return a new `RedisGraph.QueryResult`.
"""
@spec new(map()) :: t()
def new(map) do
s = struct(__MODULE__, map)
process_raw_result(s)
end
defp process_raw_result(%{raw_result_set: result} = query_result) when is_list(result) do
if length(result) == 1 do
%{query_result | statistics: parse_statistics(Enum.at(result, 0))}
else
%{
parse_results(query_result)
| statistics: parse_statistics(Enum.at(result, -1))
}
end
end
# process the result of a delete query
defp process_raw_result(%{raw_result_set: result} = query_result) when is_binary(result) do
%{query_result | statistics: parse_statistics(result)}
end
@doc "Return a boolean indicating emptiness of a QueryResult."
@spec is_empty(t()) :: boolean()
def is_empty(query_result) do
is_nil(query_result.result_set) or Enum.empty?(query_result.result_set)
end
defp parse_statistics(raw_statistics) when is_list(raw_statistics) do
stats = [
@labels_added,
@nodes_created,
@properties_set,
@relationships_created,
@nodes_deleted,
@relationships_deleted,
@query_internal_execution_time
]
stats
|> Enum.map(fn s -> {s, get_value(s, raw_statistics)} end)
|> Enum.into(%{})
end
# delete query result
defp parse_statistics(raw_statistics) when is_binary(raw_statistics) do
%{
@labels_added => nil,
@nodes_created => nil,
@properties_set => nil,
@relationships_created => nil,
@nodes_deleted => nil,
@relationships_deleted => nil,
@query_internal_execution_time =>
extract_value(@graph_removed_internal_execution_time, raw_statistics)
}
end
defp get_value(stat, [raw_statistic | raw_statistics]) do
case extract_value(stat, raw_statistic) do
nil -> get_value(stat, raw_statistics)
value -> value
end
end
defp get_value(_stat, []) do
nil
end
defp extract_value(stat, raw_statistic) do
if String.contains?(raw_statistic, stat) do
raw_statistic
|> String.split(": ")
|> Enum.at(1)
|> String.split(" ")
|> Enum.at(0)
else
nil
end
end
defp parse_header(%{raw_result_set: [header | _tail]} = _query_result) do
header |> Enum.map(fn h -> Enum.at(h, 1) end)
end
defp fetch_metadata(%{conn: conn, graph_name: name} = query_result) do
labels = parse_procedure_call(RedisGraph.labels(conn, name))
property_keys = parse_procedure_call(RedisGraph.property_keys(conn, name))
relationship_types = parse_procedure_call(RedisGraph.relationship_types(conn, name))
%{
query_result
| labels: labels,
property_keys: property_keys,
relationship_types: relationship_types
}
end
defp parse_procedure_call(response) do
# TODO how to bubble this up better
case response do
{:ok, result} ->
result
# records
|> Enum.at(1)
# each element is [[<int>, <str>]], extract the strings
|> Enum.map(fn elem -> elem |> Enum.at(0) |> Enum.at(1) end)
{:error, reason} ->
raise reason
end
end
defp parse_results(%{raw_result_set: [header | _tail]} = query_result) do
query_result = fetch_metadata(query_result)
if length(header) > 0 do
%{
query_result
| header: parse_header(query_result),
result_set: parse_records(query_result)
}
else
query_result
end
end
defp parse_records(%{raw_result_set: [_header | [records | _statistics]]} = query_result) do
Enum.map(records, &parse_row(query_result, &1))
end
defp parse_row(%{raw_result_set: [header | _tail]} = query_result, row) do
Enum.with_index(row)
|> Enum.map(fn {cell, idx} ->
parse_cell(query_result, cell, header |> Enum.at(idx) |> Enum.at(0))
end)
end
# https://oss.redislabs.com/redisgraph/client_spec/
defp parse_cell(query_result, cell, 1) do
parse_scalar(query_result, cell)
end
defp parse_cell(query_result, cell, 2) do
parse_node(query_result, cell)
end
defp parse_cell(query_result, cell, 3) do
parse_edge(query_result, cell)
end
defp parse_scalar(_query_result, cell) do
Enum.at(cell, 1)
end
defp parse_node(query_result, cell) do
[node_id | [label_indexes | properties]] = cell
Node.new(%{
id: node_id,
label: get_label(query_result, label_indexes),
properties: parse_entity_properties(query_result, properties)
})
end
defp parse_edge(query_result, cell) do
[edge_id | [relation_index | [src_node_id | [dest_node_id | properties]]]] = cell
Edge.new(%{
id: edge_id,
relation: get_relationship_type(query_result, relation_index),
src_node: src_node_id,
dest_node: dest_node_id,
properties: parse_entity_properties(query_result, properties)
})
end
defp get_label(query_result, label_indexes) do
Enum.at(query_result.labels, Enum.at(label_indexes, 0))
end
defp get_property_key(query_result, property_key_index) do
Enum.at(query_result.property_keys, property_key_index)
end
defp get_relationship_type(query_result, relationship_type_index) do
Enum.at(query_result.relationship_types, relationship_type_index)
end
defp parse_entity_properties(query_result, properties) do
properties
|> Enum.at(0)
|> Enum.map(fn [property_key_index | value] ->
{get_property_key(query_result, property_key_index), parse_scalar(query_result, value)}
end)
|> Enum.into(%{})
end
@doc "Transform a QueryResult into a list of maps as records."
@spec results_to_maps(t()) :: list(map())
def results_to_maps(%{header: header, result_set: records} = _query_result) do
records
|> Enum.map(fn record ->
record
|> Enum.with_index()
|> Enum.map(fn {v, idx} -> {Enum.at(header, idx), v} end)
|> Enum.into(%{})
end)
end
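# For example (a sketch based on the transformation above), with a header of
# ["p.name", "p.age"] and a result_set of [["Alice", 33]]:
#
#     results_to_maps(query_result)
#     # => [%{"p.name" => "Alice", "p.age" => 33}]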
@doc "Pretty print a QueryResult to a tabular string using `Scribe`."
@spec pretty_print(t()) :: String.t()
def pretty_print(%{header: header, result_set: records} = query_result) do
if is_nil(header) or is_nil(records) do
""
else
Scribe.format(results_to_maps(query_result), data: header)
end
end
defp get_stat(query_result, stat) do
Map.get(query_result.statistics, stat, 0)
end
@doc "Get the `labels added` quantity from a QueryResult."
@spec labels_added(t()) :: String.t()
def labels_added(query_result) do
get_stat(query_result, @labels_added)
end
@doc "Get the `nodes created` quantity from a QueryResult."
@spec nodes_created(t()) :: String.t()
def nodes_created(query_result) do
get_stat(query_result, @nodes_created)
end
@doc "Get the `nodes deleted` quantity from a QueryResult."
@spec nodes_deleted(t()) :: String.t()
def nodes_deleted(query_result) do
get_stat(query_result, @nodes_deleted)
end
@doc "Get the `properties set` quantity from a QueryResult."
@spec properties_set(t()) :: String.t()
def properties_set(query_result) do
get_stat(query_result, @properties_set)
end
@doc "Get the `relationships created` quantity from a QueryResult."
@spec relationships_created(t()) :: String.t()
def relationships_created(query_result) do
get_stat(query_result, @relationships_created)
end
@doc "Get the `relationships deleted` quantity from a QueryResult."
@spec relationships_deleted(t()) :: String.t()
def relationships_deleted(query_result) do
get_stat(query_result, @relationships_deleted)
end
@doc "Get the `query internal execution time` (ms) from a QueryResult."
@spec query_internal_execution_time(t()) :: String.t()
def query_internal_execution_time(query_result) do
get_stat(query_result, @query_internal_execution_time)
end
end
|
lib/redis_graph/query_result.ex
| 0.880528
| 0.844985
|
query_result.ex
|
starcoder
|
module OrderedDict
% Generates a new OrderedDict from a list of tuples
%
% ## Examples
%
% { 'a: 1, 'b: 2 } = OrderedDict.from_list(['a/1, 'b/2])
%
def from_list(list)
{ 'elixir_orddict__, Erlang.orddict.from_list(list) }
end
% Return a new Elixir OrderedDict.
def new()
{ 'elixir_orddict__, Erlang.orddict.new() }
end
def new(orddict)
{ 'elixir_orddict__, orddict }
end
module Behavior
% Updates the given *key* in the dict according to the given *function*.
% If no key exists, an error is raised. You can use update/3 if you want
% to set an initial value if none exists.
%
% ## Examples
%
% dict = { 'vowels: ['a, 'e, 'i, 'o] }
% new_dict = dict.update 'vowels, _.push('u)
% new_dict['vowels] % => ['a, 'e, 'i, 'o, 'u]
%
def update(key, function)
new Erlang.orddict.update(key, function, orddict)
end
% Merge one dict into the other.
%
% ## Examples
%
% { 'a: 3, 'b: 2 } = { 'a: 1 }.merge { 'b: 2, 'a: 3 }, -> (_, v1, _) v1
%
def merge(other)
function = -> (_k, _v1, v2) v2
new Erlang.orddict.merge(function, orddict, other.to_list)
end
% Merge one dict into the other according to the given function. The function
% is invoked when both dicts have the same keys with the key and both values
% as arguments and should return the result given by the conflict of such keys.
%
% ## Examples
%
% The example provides a reverse merge, where the first dict is merged into
% the one given as argument:
%
% { 'a: 1, 'b: 2 } = { 'a: 1 }.merge { 'b: 2, 'a: 3 }, -> (_k, v1, _v2) v1
%
def merge(other, function)
new Erlang.orddict.merge(function, orddict, other.to_list)
end
% The same as update/2, but if no value exists, *initial* is used.
%
% ## Examples
%
% dict = {}.update('values, [], _.push(1))
% dict['values] % => []
%
% dict.update('values, [], _.push(1))
% dict['values] % => [1]
%
def update(key, initial, function)
new Erlang.orddict.update(key, function, initial, orddict)
end
% Retrieves the given key from the OrderedDict. Returns [] if key does not exist.
%
% ## Examples
%
% { 1: 2, 3: 4}[1] % => 2
% { 1: 2, 3: 4}[5] % => []
%
def [](key)
Erlang.elixir_helpers.orddict_find(key, orddict)
end
alias_local '[], 'get, 1
% Returns a boolean if the ordered dict has the given key or not.
%
% ## Examples
%
% { 'a: 1 }.key?('a) % => true
%
def key?(key)
case Erlang.orddict.find(key, orddict)
match {'ok, value}
true
match 'error
false
end
end
% Stores the given *value* in *key* in the dictionary.
%
% ## Examples
%
% {}.set('a, 'b) % => { 'a: 'b }
%
def set(key, value)
new Erlang.orddict.store(key, value, orddict)
end
% Stores the given *value* in *key* in the dictionary if none is set yet.
%
% ## Examples
%
% {'a: 'b}.set_new('a,'c) % => { 'a: 'b }
% {'a: 'b}.set_new('b,'c) % => { 'a: 'b, 'b: 'c }
%
def set_new(key, value)
if key?(key)
self
else
set(key, value)
end
end
% Calls the given *function* for each key and value of the dictionary with an
% extra argumen *acc* (short for accumulator). *function* must return a new
% accumulator passed to the next call. Returns the last accumulator.
%
% The function expects three arguments: a key, a value, and the accumulator.
%
% ## Examples
%
% dict = { 'a: 1, 'b: 2 }
% list = dict.fold [], do (key, value, acc)
% ["#{key}: #{value}"|acc]
% end
% list.join(", ") % => "b: 2, a: 1"
%
def fold(acc, function)
Erlang.orddict.fold(function, acc, orddict)
end
% Deletes the given key from the Ordered Dict returning the new dict.
%
% ## Examples
%
% { 'a: 1, 'b: 2 }.delete 'a % => { 'b: 2 }
%
def delete(key)
new Erlang.orddict.erase(key, orddict)
end
% Calls the given *function* for each key and value. Returns a List
% with the result of each *function*.
%
% The *function* expects a key and value as argument.
%
% ## Examples
%
% dict = { 'a: 1, 'b: 2 }
% new_dict = dict.map do (key, value)
% value * 2
% end
% new_dict % => { 'a: 2, 'b: 4 }
%
def map(function)
new Erlang.orddict.map(function, orddict)
end
% Loops for each key-value pair in the dictionary.
%
% ## Examples
%
% dict = { 'a: 1, 'b: 2 }
% dict.each do (key, value)
% IO.puts "#{key}: #{value}"
% end
%
def each(function)
to_list.each -> ({x, y}) function.(x, y)
self
end
% Returns this dictionary represented as a String.
%
% ## Examples
%
% { 'a: 1, 'b: 2 }.inspect % => "{'a: 1, 'b: 2}"
%
def inspect
inspect(orddict)
end
% Returns true if the ordered dict is empty.
def empty?
orddict == []
end
% Converts this OrderedDict to a list. The return list is ordered.
%
% ## Examples
%
% [{'a, 1},{'b, 2}] = { 'a: 1, 'b: 2 }.to_list
%
def to_list
orddict
end
private
def new(orddict)
{ 'elixir_orddict__, orddict }
end
def inspect([])
"{}"
end
def inspect(_)
transformer = -> (key, value, acc) ["#{key.inspect}: #{value.inspect}"|acc]
"{#{fold([], transformer).reverse.join(", ")}}"
end
def orddict
Erlang.element(2, self)
end
end
end
|
lib/ordered_dict.ex
| 0.6137
| 0.437403
|
ordered_dict.ex
|
starcoder
|
defmodule Farmbot.Firmware.Gcode.Parser do
@moduledoc """
Parses [farmbot-arduino-firmware](https://github.com/farmbot/farmbot-arduino-firmware) G-Codes.
"""
import Farmbot.Firmware.Gcode.Param
@spec parse_code(binary) :: {binary, tuple}
# Status codes.
@doc "Parse a code to an Elixir consumable message."
def parse_code("R00 Q" <> tag), do: {tag, :idle}
def parse_code("R01 Q" <> tag), do: {tag, :received}
def parse_code("R02 Q" <> tag), do: {tag, :done}
def parse_code("R03 Q" <> tag), do: {tag, :error}
def parse_code("R04 Q" <> tag), do: {tag, :busy}
def parse_code("R05" <> _r), do: {nil, :noop}
def parse_code("R06 " <> r), do: parse_report_calibration(r)
def parse_code("R07 " <> _), do: {nil, :noop}
def parse_code("R08 " <> echo),
do: {:echo, {:echo, String.replace(echo, "\r", "")}}
def parse_code("R09 " <> tag), do: {tag, :invalid_command}
# Report axis homing.
def parse_code("R11 " <> tag), do: {tag, :report_axis_home_complete_x}
def parse_code("R12 " <> tag), do: {tag, :report_axis_home_complete_y}
def parse_code("R13 " <> tag), do: {tag, :report_axis_home_complete_z}
# Param report.
def parse_code("R20 Q" <> tag), do: {tag, :report_params_complete}
def parse_code("R21 " <> params), do: parse_pvq(params, :report_parameter_value)
def parse_code("R23 " <> params), do: parse_report_axis_calibration(params)
def parse_code("R31 " <> params), do: parse_pvq(params, :report_status_value)
def parse_code("R41 " <> params), do: parse_pvq(params, :report_pin_value)
def parse_code("R71 " <> tag), do: {tag, :report_axis_timeout_x}
def parse_code("R72 " <> tag), do: {tag, :report_axis_timeout_y}
def parse_code("R73 " <> tag), do: {tag, :report_axis_timeout_z}
# Report Position.
def parse_code("R81 " <> params), do: parse_end_stops(params)
def parse_code("R82 " <> p), do: report_xyz(p, :report_current_position)
def parse_code("R83 " <> v), do: parse_version(v)
def parse_code("R84 " <> p), do: report_xyz(p, :report_encoder_position_scaled)
def parse_code("R85 " <> p), do: report_xyz(p, :report_encoder_position_raw)
def parse_code("R87 Q" <> q), do: {q, :report_emergency_lock}
def parse_code("R88 Q" <> q), do: {q, :report_no_config}
def parse_code("R99 " <> message) do
{nil, {:debug_message, message}}
end
def parse_code(code) do
{:unhandled_gcode, code}
end
@spec parse_report_calibration(binary)
:: {binary, {:report_calibration, binary, binary}}
defp parse_report_calibration(r) do
[axis_and_status | [q]] = String.split(r, " Q")
<<a::size(8), b::size(8)>> = axis_and_status
case <<b>> do
"0" -> {q, {:report_calibration, <<a>>, :idle}}
"1" -> {q, {:report_calibration, <<a>>, :home}}
"2" -> {q, {:report_calibration, <<a>>, :end}}
end
end
defp parse_report_axis_calibration(params) do
["P" <> parm, "V" <> val, "Q" <> tag] = String.split(params, " ")
if parm in ["141", "142", "143"] do
parm_name = :report_axis_calibration
result = parse_param(String.to_integer(parm))
case Float.parse(val) do
{float, _} ->
msg = {parm_name, result, float}
{tag, msg}
:error ->
msg = {parm_name, result, String.to_integer(val)}
{tag, msg}
end
else
{tag, :noop}
end
end
@spec parse_version(binary) :: {binary, {:report_software_version, binary}}
defp parse_version(version) do
[v | [code]] = String.split(version, " Q")
{code, {:report_software_version, v}}
end
@type reporter ::
:report_current_position
| :report_encoder_position_scaled
| :report_encoder_position_raw
@spec report_xyz(binary, reporter)
:: {binary, {reporter, binary, binary, binary}}
defp report_xyz(position, reporter) when is_bitstring(position),
do: position |> String.split(" ") |> do_parse_pos(reporter)
@valid_position_reporters [
:report_current_position,
:report_encoder_position_scaled
]
defp do_parse_pos(["X" <> x, "Y" <> y, "Z" <> z, "Q" <> tag], reporter)
when reporter in @valid_position_reporters
do
import String, only: [to_float: 1]
msg = {reporter, to_float(x), to_float(y), to_float(z)}
{tag, msg}
end
defp do_parse_pos(["X" <> x, "Y" <> y, "Z" <> z, "Q" <> tag], reporter) do
import String, only: [to_integer: 1]
msg = {reporter, to_integer(x), to_integer(y), to_integer(z)}
{tag, msg}
end
defp do_parse_pos(l, _) do
{:unhandled_gcode, Enum.join(l, " ")}
end
@doc false
@spec parse_end_stops(binary)
:: {:report_end_stops,
binary, binary, binary, binary, binary, binary, binary}
def parse_end_stops(
<<"XA", xa::size(8), 32,
"XB", xb::size(8), 32,
"YA", ya::size(8), 32,
"YB", yb::size(8), 32,
"ZA", za::size(8), 32,
"ZB", zb::size(8), 32,
"Q", tag::binary >>)
do
r = :report_end_stops
msg = {r, xa |> pes, xb |> pes, ya |> pes, yb |> pes, za |> pes, zb |> pes}
{tag, msg}
end
# Convert the ASCII digits ?0 (48) and ?1 (49) to the integers 0 and 1.
@spec pes(48 | 49) :: 0 | 1
defp pes(48), do: 0
defp pes(49), do: 1
@doc false
@spec parse_pvq(binary, :report_parameter_value) ::
{:report_parameter_value, atom, integer, String.t()}
def parse_pvq(params, :report_parameter_value)
when is_bitstring(params),
do: params |> String.split(" ") |> do_parse_params
def parse_pvq(params, human_readable_param_name)
when is_bitstring(params) and is_atom(human_readable_param_name),
do: params |> String.split(" ") |> do_parse_pvq(human_readable_param_name)
defp do_parse_pvq([p, v, q], human_readable_param_name) do
import String, only: [split: 2, to_integer: 1]
[_, rp] = split(p, "P")
[_, rv] = split(v, "V")
[_, rq] = split(q, "Q")
{rq, {human_readable_param_name, to_integer(rp), to_integer(rv)}}
end
defp do_parse_params([p, v, q]) do
import String, only: [split: 2, to_integer: 1]
[_, rp] = split(p, "P")
[_, rv] = split(v, "V")
[_, rq] = split(q, "Q")
{rq, {:report_parameter_value, parse_param(to_integer(rp)), to_integer(rv)}}
end
end
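# Parsing sketch (derived from the clauses above):
#
#     iex> Farmbot.Firmware.Gcode.Parser.parse_code("R00 Q10")
#     {"10", :idle}
#
#     iex> Farmbot.Firmware.Gcode.Parser.parse_code("R82 X1.0 Y2.0 Z3.0 Q5")
#     {"5", {:report_current_position, 1.0, 2.0, 3.0}}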
|
lib/farmbot/firmware/gcode/parser.ex
| 0.713232
| 0.428771
|
parser.ex
|
starcoder
|
defmodule Bolt.Cogs.USW.Set do
@moduledoc false
@behaviour Nosedrum.Command
alias Bolt.ErrorFormatters
alias Bolt.Helpers
alias Bolt.Humanizer
alias Bolt.ModLog
alias Bolt.Repo
alias Bolt.Schema.USWRuleConfig
alias Nosedrum.Predicates
alias Nostrum.Api
@impl true
def usage, do: ["usw set <rule:str> <count:int> [per] <interval:int>"]
@impl true
def description,
do: """
Sets the given `rule` to allow `count` objects to pass through within `interval` seconds.
Existing rules:
• `BURST`: Allows `count` messages by the same author within `interval` seconds.
• `DUPLICATES`: Allows `count` same messages within `interval` seconds.
• `LINKS`: Allows `count` links by the same author within `interval` seconds.
• `MENTIONS`: Allows `count` user mentions by the same author within `interval` seconds.
• `NEWLINES`: Allows `count` newlines by the same author within `interval` seconds.
For example, to allow 5 messages by the same user within 7 seconds (using the `BURST` rule), one would use `usw set BURST 5 7`.
For readability, `per` can be given between `count` and `interval`, for example `usw set BURST 5 per 7`.
Requires the `MANAGE_GUILD` permission.
"""
@impl true
def predicates,
do: [&Predicates.guild_only/1, Predicates.has_permission(:manage_guild)]
@impl true
def command(msg, [rule_name, count_str, interval_str]) do
rule_name = String.upcase(rule_name)
response =
with true <- rule_name in USWRuleConfig.existing_rules(),
{count, _} <- Integer.parse(count_str),
{interval, _} <- Integer.parse(interval_str),
params <- %{
guild_id: msg.guild_id,
rule: rule_name,
count: count,
interval: interval
},
changeset <- USWRuleConfig.changeset(%USWRuleConfig{}, params),
{:ok, _struct} <-
Repo.insert(
changeset,
conflict_target: [:guild_id, :rule],
on_conflict: [set: [count: count, interval: interval]]
) do
ModLog.emit(
msg.guild_id,
"CONFIG_UPDATE",
"#{Humanizer.human_user(msg.author)} updated USW configuration: " <>
"now allowing max **#{count}** objects per **#{interval}**s in rule `#{rule_name}`"
)
"👌 updated configuration, will now allow max **#{count}**" <>
" objects per **#{interval}**s in rule `#{rule_name}`"
else
false ->
"🚫 `#{Helpers.clean_content(rule_name)}` is not a known rule"
:error ->
"🚫 either `count` or `interval` are not integers"
error ->
ErrorFormatters.fmt(msg, error)
end
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
def command(msg, [filter, count_str, "per", interval_str]) do
command(msg, [filter, count_str, interval_str])
end
def command(msg, _args) do
response = "🚫 expected 3 arguments (rule, count, interval), got something else"
{:ok, _msg} = Api.create_message(msg.channel_id, response)
end
end
|
lib/bolt/cogs/usw/set.ex
| 0.859943
| 0.541409
|
set.ex
|
starcoder
|
defmodule Site.TripPlan.Location do
alias Phoenix.HTML
alias Site.TripPlan.Query
alias TripPlan.NamedPosition
@spec validate(Query.t(), map) :: Query.t()
def validate(
%Query{} = query,
%{
"to_latitude" => <<_::binary>>,
"to_longitude" => <<_::binary>>,
"to" => _
} = params
) do
validate_lat_lng(:to, params, query)
end
def validate(
%Query{} = query,
%{
"from_latitude" => <<_::binary>>,
"from_longitude" => <<_::binary>>,
"from" => _
} = params
) do
validate_lat_lng(:from, params, query)
end
def validate(%Query{} = query, %{"to" => same, "from" => same} = params) do
query =
query
|> Map.put(:to, {:error, :same_address})
|> Map.put(:from, {:error, :same_address})
|> Map.put(:errors, MapSet.put(query.errors, :same_address))
params =
params
|> Map.delete("to")
|> Map.delete("from")
validate(query, params)
end
def validate(%Query{} = query, %{"to" => _} = params) do
validate_by_name(:to, query, params)
end
def validate(%Query{} = query, %{"from" => _} = params) do
validate_by_name(:from, query, params)
end
def validate(
%Query{
to: %NamedPosition{latitude: lat, longitude: lng},
from: %NamedPosition{latitude: lat, longitude: lng}
} = query,
%{}
) do
%{query | errors: MapSet.put(query.errors, :same_address)}
end
def validate(%Query{} = query, %{}) do
query
end
@spec validate_lat_lng(:to | :from, map, Query.t()) :: Query.t()
defp validate_lat_lng(field_atom, params, %Query{} = query) do
field = Atom.to_string(field_atom)
{lat_bin, params} = Map.pop(params, field <> "_latitude")
{lng_bin, params} = Map.pop(params, field <> "_longitude")
with {lat, ""} <- Float.parse(lat_bin),
{lng, ""} <- Float.parse(lng_bin) do
{name, params} = Map.pop(params, field)
position = %NamedPosition{
latitude: lat,
longitude: lng,
name: encode_name(name)
}
query
|> Map.put(field_atom, position)
|> validate(params)
else
:error ->
validate(query, params)
end
end
@spec encode_name(String.t()) :: String.t()
defp encode_name(name) do
name
|> HTML.html_escape()
|> HTML.safe_to_string()
|> String.replace("'", "'")
|> String.replace("&", "&")
end
@spec validate_by_name(:to | :from, Query.t(), map) :: Query.t()
defp validate_by_name(field, %Query{} = query, params) do
{val, params} = Map.pop(params, Atom.to_string(field))
case val do
nil ->
do_validate_by_name({:error, :required}, field, query, params)
"" ->
do_validate_by_name({:error, :required}, field, query, params)
<<location::binary>> ->
# lat/lng was missing or invalid; attempt geocoding based on the name
location
|> TripPlan.geocode()
|> do_validate_by_name(field, query, params)
end
end
@spec do_validate_by_name(TripPlan.Geocode.t(), :to | :from, Query.t(), map) :: Query.t()
defp do_validate_by_name({:ok, %NamedPosition{} = pos}, field, query, params) do
query
|> Map.put(field, pos)
|> validate(params)
end
defp do_validate_by_name({:error, error}, field, query, params) do
error_atom =
case error do
{:multiple_results, _} -> :multiple_results
atom when is_atom(atom) -> atom
end
query
|> Map.put(:errors, MapSet.put(query.errors, error_atom))
|> Map.put(field, {:error, error})
|> validate(params)
end
end
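# Validation sketch: lat/lng strings are parsed into a NamedPosition and the
# remaining params are then re-validated recursively (field names as used above):
#
#     validate(query, %{
#       "to_latitude" => "42.3601",
#       "to_longitude" => "-71.0589",
#       "to" => "Boston Common"
#     })
#     # => %Query{} with :to set to
#     #    %NamedPosition{latitude: 42.3601, longitude: -71.0589, name: "Boston Common"}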
|
apps/site/lib/site/trip_plan/location.ex
| 0.808672
| 0.401189
|
location.ex
|
starcoder
|
defmodule PS2.API.QueryBuilder do
@moduledoc """
A module for creating Census API queries in a clean manner via pipelines.
### Example
iex> import PS2.API.QueryBuilder
PS2.API.QueryBuilder
iex> alias PS2.API.Query
PS2.API.Query
iex> query = Query.new(collection: "character")
...> |> term("character_id", "5428011263335537297")
...> |> show(["character_id", "name.en", "faction_id"])
...> |> limit(3)
...> |> exact_match_first(true)
%PS2.API.Query{
collection: "character",
joins: [],
sort: nil,
params: %{
"c:exactMatchFirst" => true,
"c:limit" => 3,
"c:show" => "character_id,name.en,faction_id",
"character_id" => {"", "5428011263335537297"}
},
tree: nil
}
iex> PS2.API.encode query
{:ok, "character?c:exactMatchFirst=true&c:limit=3&c:show=character_id,name.en,faction_id&character_id=5428011263335537297"}
You can then send the query to the api using `PS2.API.query/1`.
## Search Modifiers
The Census API provides [search modifiers](https://census.daybreakgames.com/#search-modifier) for filtering query results.
You can pass an atom as the third parameter in `term/4` representing one of
these search modifiers. The recognized atoms are the following:
```
:greater_than
:greater_than_or_equal
:less_than
:less_than_or_equal
:starts_with
:contains
:not
```
For example: `term(query_or_join, "name.first_lower", "wrel", :starts_with)`
## Joining Queries
You can use `Join`s to gather data from multiple collections within one query,
like so:
```elixir
import PS2.API.QueryBuilder
alias PS2.API.{Query, Join}
online_status_join =
# Note we could use Join.new(collection: "characters_online_status", show: "online_status" ...)
%Join{}
|> collection("characters_online_status")
|> show("online_status")
|> inject_at("online_status")
|> list(true)
query =
%Query{}
|> collection("character")
|> join(online_status_join)
```
When `query` is sent to the API, the result will have an extra field,
"online_status", which contains the result of the `Join` (the player's
online status).
You can create as many adjacent `Join`s as you'd like by repeatedly piping
a query through `QueryBuilder.join/2`. You can also nest `Join`s via
`QueryBuilder.join/2` when you pass a Join as the first argument instead
of a Query.
```elixir
import PS2.API.QueryBuilder
alias PS2.API.{Query, Join}
char_achieve_join =
Join.new(collection: "characters_achievement", on: "character_id")
char_name_join =
Join.new(collection: "character_name", on: "character_id", inject_at: "c_name")
online_status_join =
Join.new(collection: "characters_online_status")
|> join(char_name_join)
|> join(char_achieve_join)
query =
%Query{}
|> collection("character")
|> term("name.first", "Snowful")
|> show(["character_id", "faction_id"])
|> join(online_status_join)
```
## Trees
You can organize the returned data by a field within the data, using the
`QueryBuilder.tree/2`.
```elixir
import PS2.API.QueryBuilder
alias PS2.API.{Query, Tree}
%Query{}
|> collection("world_event")
|> term("type", "METAGAME")
|> lang("en")
|> tree(
%Tree{}
|> field("world_id")
|> list(true)
)
```
"""
@modifier_map %{
greater_than: ">",
greater_than_or_equal: "]",
less_than: "<",
less_than_or_equal: "[",
starts_with: "^",
contains: "*",
not: "!"
}
@type modifier ::
:greater_than
| :greater_than_or_equal
| :less_than
| :less_than_or_equal
| :starts_with
| :contains
| :not
| nil
@type collection_name :: String.t()
@type field_name :: String.t()
alias PS2.API.{Query, Join, Tree}
@doc """
Set the collection of the query/join.
"""
@spec collection(Query.t(), collection_name) :: Query.t()
@spec collection(Join.t(), collection_name) :: Join.t()
def collection(query_or_join, collection_name)
def collection(%Query{} = query, collection_name),
do: %Query{query | collection: collection_name}
def collection(%Join{} = join, collection), do: %Join{join | collection: collection}
@doc """
Adds a c:show term. Overwrites previous params of the same name.
### API Documentation:
Only include the provided fields from the object within the result.
"""
@spec show(Query.t(), String.t() | list(String.t())) :: Query.t()
@spec show(Join.t(), String.t() | list(String.t())) :: Join.t()
def show(query_or_join, value)
def show(%Query{} = query, values) when is_list(values), do: show(query, Enum.join(values, ","))
def show(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:show", value)}
def show(%Join{} = join, values) when is_list(values), do: show(join, Enum.join(values, "'"))
def show(%Join{} = join, value), do: %Join{join | params: Map.put(join.params, "show", value)}
@doc """
Adds a c:hide term. Overwrites previous params of the same name.
### API Documentation:
Include all field except the provided fields from the object within the result.
"""
@spec hide(Query.t(), String.t() | list(String.t())) :: Query.t()
@spec hide(Join.t(), String.t() | list(String.t())) :: Join.t()
def hide(query_or_join, values)
def hide(%Query{} = query, values) when is_list(values), do: hide(query, Enum.join(values, ","))
def hide(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:hide", value)}
def hide(%Join{} = join, values) when is_list(values), do: hide(join, Enum.join(values, "'"))
def hide(%Join{} = join, field), do: %Join{join | params: Map.put(join.params, "hide", field)}
@doc """
Add a term to filter query results. i.e. filter a query by character ID: `.../character?character_id=1234123412341234123`
"""
@spec term(Query.t(), String.t() | atom, any, modifier) :: Query.t()
@spec term(Join.t(), String.t() | atom, any, modifier) :: Join.t()
def term(query_or_join, field, value, modifier \\ nil)
def term(%Query{} = query, field, value, modifier),
do: %Query{
query
| params: Map.put(query.params, field, {Map.get(@modifier_map, modifier, ""), value})
}
def term(%Join{} = join, field, value, modifier) do
term_value = {Map.get(@modifier_map, modifier, ""), value}
%Join{
join
| params:
Map.update(join.params, :terms, %{field => term_value}, &Map.put(&1, field, term_value))
}
end
@doc """
Adds a join to a query.
See the "Using c:join to join collections dynamically"
section at https://census.daybreakgames.com/#query-commands to learn more about joining
queries.
### c:join API Documentation:
Meant to replace c:resolve, useful for dynamically joining (resolving)
multiple data types in one query.
"""
@spec join(Query.t(), Join.t()) :: Query.t()
@spec join(Join.t(), Join.t()) :: Join.t()
def join(query_or_join, join)
def join(%Query{} = query, %Join{} = join), do: %Query{query | joins: [join | query.joins]}
def join(%Join{} = join, %Join{} = new_join), do: %Join{join | joins: [new_join | join.joins]}
@doc """
Adds a `list` term (to a Join or Tree).
Specifies whether the result should be a list (true) or a single record (false). Defaults to false.
"""
@spec list(Join.t(), boolean()) :: %Join{}
@spec list(Tree.t(), boolean()) :: %Tree{}
def list(tree_or_join, boolean)
def list(%Join{} = join, boolean),
do: %Join{join | params: Map.put(join.params, "list", PS2.Utils.boolean_to_integer(boolean))}
def list(%Tree{} = tree, boolean),
do: %Tree{tree | terms: Map.put(tree.terms, :list, PS2.Utils.boolean_to_integer(boolean))}
# ~~Query specific functions~~
@doc """
Adds a c:sort term. Overwrites previous params of the same name.
### API Documentation:
Sort the results by the field(s) provided.
"""
@spec sort(Query.t(), Query.sort_terms()) :: Query.t()
def sort(%Query{} = query, %{} = sort_terms), do: %Query{query | sort: sort_terms}
@doc """
Adds a c:has term. Overwrites previous params of the same name.
### API Documentation:
Include objects where the specified field exists, regardless
of the value within that field.
"""
@spec has(Query.t(), String.t() | list()) :: Query.t()
def has(%Query{} = query, values) when is_list(values), do: has(query, Enum.join(values, ","))
def has(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:has", value)}
@doc """
Adds a c:resolve term. Overwrites previous params of the same name.
**Note** that `join/3` is recommended over `resolve/2`, as resolve relies
on supported collections to work.
### API Documentation:
"Resolve" information by merging data from another collection and include
the detailed object information for the provided fields from the object
within the result (multiple field names separated by a comma).\n
*Please note that the resolve will only function if the initial query contains
the field to be resolved on. For instance, resolving leader on outfit requires
that leader_character_id be in the initial query.
"""
@spec resolve(Query.t(), String.t()) :: Query.t()
def resolve(%Query{} = query, collection),
do: %Query{query | params: Map.put(query.params, "c:resolve", collection)}
@doc """
Adds a c:case (sensitivity) term. Overwrites previous params of the same name.
### API Documentation:
Set whether a search should be case-sensitive, `true` means
case-sensitive. true is the default. Note that using this command may slow
down your queries. If a lower case version of a field is available use that
instead for faster performance.
"""
@spec case_sensitive(Query.t(), boolean()) :: Query.t()
def case_sensitive(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:case", boolean)}
@doc """
Adds a c:limit term. Overwrites previous params of the same name.
### API Documentation:
Limit the results to at most N [`value`] objects.
"""
@spec limit(Query.t(), integer()) :: Query.t()
def limit(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:limit", value)}
@doc """
Adds a c:limitPerDB term. Overwrites previous params of the same name.
### API Documentation:
Limit the results to at most (N * number of databases) objects.\n
*The data type ps2/character is distributed randomly across 20
databases. Using c:limitPerDb will have more predictable results on
ps2/character than c:limit will.
"""
@spec limit_per_db(Query.t(), integer()) :: Query.t()
def limit_per_db(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:limitPerDB", value)}
@doc """
Adds a c:start term. Overwrites previous params of the same name.
### API Documentation:
Start with the Nth object within the results of the query.\n
*Please note that c:start will have unusual behavior when
querying ps2/character which is distributed randomly across
20 databases.
"""
@spec start(Query.t(), integer()) :: Query.t()
def start(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:start", value)}
@doc """
Adds a c:includeNull term. Overwrites previous params of the same name.
### API Documentation:
Include `NULL` values in the result. By default this is false. For
example, if the `name.fr` field of a vehicle is `NULL` the field `name.fr`
will not be included in the response by default. Add the
c:includeNull=true command if you want the value name.fr : `NULL` to be
returned in the result.
"""
@spec include_null(Query.t(), boolean()) :: Query.t()
def include_null(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:includeNull", boolean)}
@doc """
Adds a c:lang term. Overwrites previous params of the same name.
### API Documentation:
For internationalized strings, remove all translations except the one specified.
"""
@spec lang(Query.t(), String.t()) :: Query.t()
def lang(%Query{} = query, value),
do: %Query{query | params: Map.put(query.params, "c:lang", value)}
@doc """
Adds a c:tree term
### API Documentation:
Useful for rearranging lists of data into trees of data. See below for details.
"""
@spec tree(Query.t(), Tree.t()) :: Query.t()
def tree(%Query{} = query, %Tree{} = tree), do: %Query{query | tree: tree}
@doc """
Adds a c:timing term. Overwrites previous params of the same name.
### API Documentation:
Shows the time taken by the involved server-side queries and resolves.
"""
@spec timing(Query.t(), boolean()) :: Query.t()
def timing(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:timing", boolean)}
@doc """
Adds a c:exactMatchFirst term. Overwrites previous params of the same name.
### API Documentation:
When using a regex search (=^ or =*) c:exactMatchFirst=true will cause
exact matches of the regex value to appear at the top of the result list
despite the value of c:sort.
"""
@spec exact_match_first(Query.t(), boolean()) :: Query.t()
def exact_match_first(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:exactMatchFirst", boolean)}
@doc """
Adds a c:distinct term. Overwrites previous params of the same name.
### API Documentation:
Get the distinct values of the given field. For example to get the
distinct values of ps2.item.max_stack_size use
`http://census.daybreakgames.com/get/ps2/item?c:distinct=max_stack_size`.
Results are capped at 20,000 values.
"""
@spec distinct(Query.t(), boolean()) :: Query.t()
def distinct(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:distinct", boolean)}
@doc """
Adds a c:retry term. Overwrites previous params of the same name.
### API Documentation:
If `true`, query will be retried one time. Default value is true.
If you prefer your query to fail quickly pass c:retry=false.
"""
@spec retry(Query.t(), boolean()) :: Query.t()
def retry(%Query{} = query, boolean),
do: %Query{query | params: Map.put(query.params, "c:retry", boolean)}
# ~~Join specific functions~~
@doc """
Adds an `on:` term. `field` is the field on the parent/leading collection to compare with the join's field
(optionally specified with the `to/2` function).
### API Documentation:
The field on this type to join on, i.e. item_id. Will default to {this_type}_id or {other_type}_id if not provided.
"""
@spec on(Join.t(), field_name) :: %Join{}
def on(%Join{} = join, field), do: %Join{join | params: Map.put(join.params, "on", field)}
@doc """
Adds a `to:` term. `field` is the field on the joined collection to compare with the parent/leading field
(optionally specified with the `on/2` function).
### API Documentation:
The field on the joined type to join to, i.e. attachment_item_id. Will default to `on` if `on` is provided, otherwise
will default to {this_type}_id or {other_type}_id if not provided.
"""
@spec to(Join.t(), field_name) :: %Join{}
def to(%Join{} = join, field), do: %Join{join | params: Map.put(join.params, "to", field)}
@doc """
Adds an `injected_at:` term. `field` is the name of the new field where the result of the join is inserted.
### API Documentation:
The field name where the joined data should be injected into the returned document.
"""
@spec inject_at(Join.t(), field_name) :: %Join{}
def inject_at(%Join{} = join, field),
do: %Join{join | params: Map.put(join.params, "inject_at", field)}
@doc """
Adds an `outer:` term. Note: where the API docs specify `1`, `true` should be passed, and `false` in place of `0`.
### API Documentation:
1 if you wish to do an outer join (include non-matches), 0 if you wish to do an inner join (exclude non-matches).
Defaults to 1 (do an outer join, include non-matches).
"""
@spec outer(Join.t(), boolean()) :: %Join{}
def outer(%Join{} = join, boolean),
do: %Join{join | params: Map.put(join.params, "outer", PS2.Utils.boolean_to_integer(boolean))}
# ~~Tree specific functions~~
@doc """
Adds a `start:` term.
### API Documentation:
Used to tell the tree where to start. By default, the list of objects at the root will be formatted as a tree.
"""
@spec start_field(Tree.t(), field_name) :: %Tree{}
def start_field(%Tree{} = tree, field),
do: %Tree{tree | terms: Map.put(tree.terms, :start, field)}
@doc """
Adds a `field:` term.
### API Documentation:
The field to remove and use as in the data structure, or tree.
"""
@spec field(Tree.t(), field_name) :: %Tree{}
def field(%Tree{} = tree, field), do: %Tree{tree | terms: Map.put(tree.terms, :field, field)}
@doc """
Add a `prefix:` term.
### API Documentation:
A prefix to add to the field value to make it more readable. For example, if the field is "faction_id" and prefix
is "f_", path will be f_1, f_2, f_3 etc.
"""
@spec prefix(Tree.t(), String.t()) :: %Tree{}
def prefix(%Tree{} = tree, prefix),
do: %Tree{tree | terms: Map.put(tree.terms, :prefix, prefix)}
end
|
lib/ps2/api/query_builder.ex
| 0.897319
| 0.679724
|
query_builder.ex
|
starcoder
|
defmodule Movielist.Reports do
@moduledoc """
The Reports context.
"""
import Ecto.Query, warn: false
alias Movielist.Repo
alias Movielist.Admin
# alias Movielist.Admin.Genre
alias Movielist.Admin.Movie
alias Movielist.Admin.Rating
@doc """
Returns map with count of movies for genre id and their average pre-rating
"""
def movie_stats_for_genre(genre_id) do
from(m in Movie, where: m.genre_id == ^genre_id, select: %{movie_count: count(m), average_pre_rating: avg(m.pre_rating)})
|> Repo.one!
end
@doc """
Returns map with count of rated movies for genre id and their average score
"""
def rating_stats_for_genre(genre_id) do
from(r in Rating, join: m in assoc(r, :movie), where: m.genre_id == ^genre_id, select: %{rating_count: count(r), average_score: avg(r.score)})
|> Repo.one!
end
@doc """
Returns map with count of rated movies for year and their average score
"""
def rating_stats_for_year(year) do
from(r in Rating, join: m in assoc(r, :movie), where: fragment("EXTRACT(year FROM ?)", r.date_scored) == ^year, select: %{rating_count: count(r), average_score: coalesce(avg(r.score), 0)})
|> Repo.one!
end
@doc """
Base query for ratings by year
"""
def list_ratings_for_year_base_query(year) do
Admin.list_ratings_base_query()
|> where([r], fragment("EXTRACT(year FROM ?)", r.date_scored) == ^year)
end
@doc """
Returns list of ratings by year
"""
def list_ratings_for_year(year, :date) do
list_ratings_for_year_base_query(year)
|> order_by(asc: :date_scored, asc: :id, desc: :score)
|> Repo.all
end
def list_ratings_for_year(year, :score) do
list_ratings_for_year_base_query(year)
|> order_by(desc: :score, asc: :date_scored, asc: :id)
|> Repo.all
end
@doc """
Gets the month number and number of ratings (movies watched) in that month for the given year
"""
def get_ratings_count_by_month_base_query(year) do
# need to create a rating subquery or months with 0 ratings won't be returned
rating_subquery = from(
r in Rating,
where: fragment("EXTRACT(year FROM ?)", r.date_scored) == ^year,
select: %{
id: r.id,
date_scored: r.date_scored,
}
)
from(
r in subquery(rating_subquery),
right_join: month_number in fragment("SELECT generate_series(1,12) AS month_number"),
on: fragment("month_number = extract(month FROM ?)", r.date_scored),
group_by: [fragment("month_number")],
select: %{
month_number: fragment("month_number"),
count: count(r.id)
},
order_by: [fragment("month_number")]
)
end
def get_ratings_count_by_month_query(year, should_limit) do
case should_limit do
# limit results to months in the current year
true -> get_ratings_count_by_month_base_query(year) |> limit(fragment("SELECT EXTRACT(MONTH FROM current_timestamp)"))
false -> get_ratings_count_by_month_base_query(year)
end
end
@doc """
Gets the month number and number of ratings (movies watched) in that month for the given year
"""
def get_ratings_count_by_month(year, should_limit) when is_boolean(should_limit) do
get_ratings_count_by_month_query(year, should_limit)
|> Repo.all
end
end
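# Usage sketch (these run against the database through `Repo`, so results
# depend entirely on stored data; values below are illustrative):
#
#     Movielist.Reports.rating_stats_for_year(2021)
#     # => %{rating_count: 42, average_score: ...}
#
#     Movielist.Reports.get_ratings_count_by_month(2021, true)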
|
lib/movielist/admin/reports.ex
| 0.636353
| 0.478773
|
reports.ex
|
starcoder
|
defmodule TwelveDays do
@doc """
Given a `number`, return the song's verse for that specific day, including
all gifts for previous days in the same line.
"""
@days_and_gifts [
{1, "first", "a Partridge in a Pear Tree."},
{2, "second", "two Turtle Doves"},
{3, "third", "three French Hens"},
{4, "fourth", "four Calling Birds"},
{5, "fifth", "five Gold Rings"},
{6, "sixth", "six Geese-a-Laying"},
{7, "seventh", "seven Swans-a-Swimming"},
{8, "eighth", "eight Maids-a-Milking"},
{9, "ninth", "nine Ladies Dancing"},
{10, "tenth", "ten Lords-a-Leaping"},
{11, "eleventh", "eleven Pipers Piping"},
{12, "twelfth", "twelve Drummers Drumming"}
]
@spec verse(number :: integer) :: String.t()
def verse(number) do
"On the #{get_day(number)} day of Christmas my true love gave to me: #{get_phrase(number)}"
end
@doc """
Given a `starting_verse` and an `ending_verse`, return the verses for each
included day, one per line.
"""
@spec verses(starting_verse :: integer, ending_verse :: integer) :: String.t()
def verses(starting_verse, ending_verse) do
Enum.map(starting_verse..ending_verse, & verse(&1))
|> Enum.join("\n")
end
@doc """
Sing all 12 verses, in order, one verse per line.
"""
@spec sing() :: String.t()
def sing do
verses(1, 12)
end
defp get_day_and_gift(number) do
List.keyfind(@days_and_gifts, number, 0)
end
defp get_day(number) do
{_, day, _} = get_day_and_gift(number)
day
end
defp get_gift(number) do
{_, _, gift} = get_day_and_gift(number)
gift
end
defp get_phrase(number) do
Enum.map(1..number, & get_gift(&1))
|> Enum.reverse
|> phrase_gifts([])
end
defp phrase_gifts([head], []), do: "#{head}"
defp phrase_gifts([head | []], acc), do: "#{acc}, and #{head}"
defp phrase_gifts([head | tail], []), do: phrase_gifts(tail, "#{head}")
defp phrase_gifts([head | tail], acc), do: phrase_gifts(tail, "#{acc}, #{head}")
end
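# Example output (derived from the data and clauses above):
#
#     iex> TwelveDays.verse(2)
#     "On the second day of Christmas my true love gave to me: two Turtle Doves, and a Partridge in a Pear Tree."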
|
elixir/twelve-days/lib/twelve_days.ex
| 0.728459
| 0.433022
|
twelve_days.ex
|
starcoder
|
defmodule BlockchainAPI.Geocoder do
@max_retries 5
require Logger
alias BlockchainAPI.Util
def reverse_geocode(loc) do
reverse_geocode(loc, @max_retries)
end
def reverse_geocode(loc, 0) do
Logger.error("Exceeded google maps lookup for #{inspect(loc)}")
{:error, :retries_exceeded}
end
def reverse_geocode(loc, retry) do
{lat, lng} = Util.h3_to_lat_lng(Util.h3_to_string(loc))
api_key = Application.get_env(:blockchain_api, :google_maps_secret)
url = "https://maps.googleapis.com/maps/api/geocode/json?latlng=#{lat},#{lng}&key=#{api_key}"
case HTTPoison.get(url, [], ssl: [{:honor_cipher_order, :undefined}]) do
{:ok, %{status_code: 200, body: body}} ->
decoded_body = Jason.decode!(body)
results = Map.get(decoded_body, "results")
case results do
nil ->
reverse_geocode(loc, retry - 1)
[] ->
reverse_geocode(loc, retry - 1)
res ->
parse_results(res)
end
_ ->
reverse_geocode(loc, retry - 1)
end
end
def parse_results(results) do
case hd(results) do
%{"address_components" => address_components} ->
{:ok,
%{
long_street: parse_street(address_components, :long),
short_street: parse_street(address_components, :short),
long_city: parse_city(address_components, :long),
short_city: parse_city(address_components, :short),
long_state: parse_state(address_components, :long),
short_state: parse_state(address_components, :short),
long_country: parse_country(address_components, :long),
short_country: parse_country(address_components, :short)
}}
end
end
defp parse_street(address_components, variant) do
find_component(address_components, variant, ["route"])
end
defp parse_city(address_components, variant) do
find_component(address_components, variant, ["locality", "sublocality"])
end
defp parse_state(address_components, variant) do
find_component(address_components, variant, ["administrative_area_level_1"])
end
defp parse_country(address_components, variant) do
find_component(address_components, variant, ["country"])
end
defp find_component(_components, _variant, types) when length(types) == 0 do
"Unknown"
end
defp find_component(components, variant, [type | rest]) do
case Enum.find(components, fn c -> type in c["types"] end) do
nil ->
find_component(components, variant, rest)
component ->
parse_component(component, variant)
end
end
defp parse_component(component, variant) do
case variant do
:short -> component["short_name"]
:long -> component["long_name"]
end
end
end
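# Sketch: parse_results/1 with a trimmed, hypothetical Google-style payload;
# per find_component/3 above, absent components fall back to "Unknown".
#
#     BlockchainAPI.Geocoder.parse_results([
#       %{"address_components" => [
#         %{"types" => ["route"], "long_name" => "Market Street", "short_name" => "Market St"},
#         %{"types" => ["locality"], "long_name" => "San Francisco", "short_name" => "SF"}
#       ]}
#     ])
#     # => {:ok, %{long_street: "Market Street", short_city: "SF",
#     #            long_state: "Unknown", long_country: "Unknown", ...}}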
|
lib/blockchain_api/geocoder.ex
| 0.582729
| 0.430746
|
geocoder.ex
|
starcoder
|
defmodule Liquex do
@moduledoc """
Liquid template renderer for Elixir with a goal of 100% compatibility with the
[Liquid](https://shopify.github.io/liquid/) gem by [Shopify](https://www.shopify.com/).
## Basic Usage
iex> {:ok, template_ast} = Liquex.parse("Hello {{ name }}!")
iex> context = Liquex.Context.new(%{"name" => "World"})
iex> {content, _context} = Liquex.render(template_ast, context)
iex> content |> to_string()
"Hello World!"
## Supported features
Currently, all standard Liquid tags, filters, and types are fully supported. Liquex can
be considered a drop in replacement of the Liquid gem, but in Elixir.
There is a caveat that must be noted:
***Whitespace control is partially supported.***
[Whitespace control](https://shopify.github.io/liquid/basics/whitespace/) is only partially
supported. Whitespace is successfully removed after `-%}` and `-}}` tags. However, whitespace
isn't removed from the left side yet, before `{%-` and `{{-`. This is because we're using
[NimbleParsec](https://github.com/dashbitco/nimble_parsec) which does not support greedy matches.
Instead, we will need to do post processing to properly remove spaces. As whitespace control is
deemed of low importance for most applications, this has not been prioritized.
## Lazy variables
Liquex allows resolver functions for variables that may require some extra
work to generate. For example, Shopify has variables for things like
available products. Pulling all products every time would be too expensive
to do on every render. Instead, it would be better to lazily pull that
information as needed.
Instead of adding the product list to the context variable map, you can add
a function to the variable map. If a function is accessed in the variable
map, it is executed.
products_resolver = fn _parent -> Product.all() end
with context <- Liquex.Context.new(%{products: products_resolver}),
{:ok, document} <- Liquex.parse("There are {{ products.size }} products"),
{result, _} <- Liquex.render(document, context) do
result
end
iex> "There are 5 products"
## Indifferent access
By default, Liquex accesses your maps and structs that may have atom or
string (or other type) keys. Liquex will try a string key first. If that
fails, it will fall back to using an atom keys. This is similar to how
Ruby on Rails handles many of its hashes.
This allows you to pass in your structs without having to replace all your
keys with string keys.
iex> {:ok, template_ast} = Liquex.parse("Hello {{ name }}!")
iex> context = Liquex.Context.new(%{name: "World"})
iex> {content, _context} = Liquex.render(template_ast, context)
iex> content |> to_string()
"Hello World!"
## Custom filters
Liquex contains the full suite of standard Liquid filters, but you may find that there are still
filters that you may want to add.
Liquex supports adding your own custom filters to the render pipeline. When creating the context
for the renderer, set the filter module to your own module.
defmodule CustomFilter do
# Import all the standard liquid filters
use Liquex.Filter
def scream(value, _), do: String.upcase(value) <> "!"
end
context = Liquex.Context.new(%{}, filter_module: CustomFilter)
{:ok, template_ast} = Liquex.parse("{{'Hello World' | scream}}"
{result, _} = Liquex.render(template_ast, context)
result |> to_string()
iex> "HELLO WORLD!"
## Custom tags
One of the strong points for Liquex is that the tag parser can be extended to support non-standard
tags. For example, Liquid used internally for the Shopify site includes a large range of tags that
are not supported by the base Ruby gem. These tags could also be added to Liquex by extending the
liquid parser.
defmodule CustomTag do
import NimbleParsec
alias Liquex.Parser.Base
# Parse <<Custom Tag>>
def custom_tag(combinator \\\\ empty()) do
text =
lookahead_not(string(">>"))
|> utf8_char([])
|> times(min: 1)
|> reduce({Kernel, :to_string, []})
|> tag(:text)
combinator
|> ignore(string("<<"))
|> optional(text)
|> ignore(string(">>"))
|> tag(:custom_tag)
end
def element(combinator \\\\ empty()) do
# Add the `custom_tag/1` parsing function to the supported element tag list
combinator
|> choice([custom_tag(), Base.base_element()])
end
end
defmodule CustomParser do
@moduledoc false
import NimbleParsec
defcombinatorp(:document, repeat(CustomTag.element()))
defparsec(:parse, parsec(:document) |> eos())
end
iex> Liquex.parse("<<Hello World!>>", CustomParser)
iex> {:ok, [custom_tag: [text: ["Hello World!"]]]}
## Custom renderer
In many cases, if you are building custom tags for your Liquid documents, you probably want to
use a custom renderer. Just like the custom filters, you add your module to the context object.
defmodule CustomTagRender do
def render({:custom_tag, contents}, context) do
{result, context} = Liquex.render(contents, context)
{["Custom Tag: ", result], context}
end
# Ignore this tag if we don't match
def render(_, _), do: false
end
context = Liquex.Context.new(%{}, render_module: CustomTagRender)
{:ok, document} = Liquex.parse("<<Hello World!>>", CustomParser)
{result, _} = Liquex.render(document, context)
result |> to_string()
iex> "Custom Tag: Hello World!"
## Installation
Add the package to your `mix.exs` file.
def deps do
[{:liquex, "~> 0.5"}]
end
"""
alias Liquex.Context
@type document_t :: [
{:control_flow, nonempty_maybe_improper_list}
| {:iteration, [...]}
| {:object, [...]}
| {:text, any}
| {:variable, [...]}
]
@spec parse(String.t(), module) :: {:ok, document_t} | {:error, String.t(), pos_integer()}
@doc """
Parses a liquid `template` string using the given `parser`.
Returns a Liquex AST document or the parser error.
"""
def parse(template, parser \\ Liquex.Parser) do
case parser.parse(template) do
{:ok, content, _, _, _, _} -> {:ok, content}
{:error, reason, _, _, {line, _}, _} -> {:error, reason, line}
end
end
@spec render(document_t, Context.t()) :: {iolist, Context.t()}
@doc """
Render a Liquex AST `document` with the given `context`
"""
def render(document, context \\ %Context{}),
do: Liquex.Render.render([], document, context)
end
|
lib/liquex.ex
| 0.765593
| 0.610802
|
liquex.ex
|
starcoder
|
defmodule Traktor do
@moduledoc """
`Traktor` is a library to execute actions in a traceable manner by applying a two-phase-commit pattern.
It is mainly defined by two behaviours:
- `Traktor.Action` for the business logic;
- `Traktor.Store` for the persistence layer.
### Entity
An action or a group of actions can be applied to an entity. The action is a behaviour that transforms an entity from one version to another.
E.g. **booking** and **payment** actions are applied to the **Checkout** entity.
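A minimal usage sketch. The action and store modules here, `MyApp.Checkout.Book` and `Traktor.Store.Memory`, are hypothetical placeholders for your own implementations of the two behaviours:
```elixir
# Runs both phases (prepare/6 then commit/6) via apply/6.
{:ok, new_state, revision, result} =
  Traktor.apply(
    MyApp.Checkout.Book,  # implements Traktor.Action (hypothetical)
    current_state,        # entity state at `current_revision`
    %{seat: "12A"},       # action-specific args
    Traktor.Store.Memory, # implements Traktor.Store (name assumed)
    "checkout-42",        # entity ref
    current_revision
  )
```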
"""
@typedoc "Module implementing the `Traktor.Store` behaviour. An in memory store is provided with the package."
@type store :: module()
@typedoc "Module implementing the `Traktor.Action` behaviour."
@type action :: module()
@typedoc "Unique reference of an entity."
@type ref :: any()
@typedoc "The revision of the state of an entity."
@type revision :: term()
@typedoc "The state of an entity at a specific revision."
@type state :: term()
@typedoc "A transaction contains all the information required to commit an action."
@type transaction :: term()
@typedoc "Unique reference of a transaction."
@type transaction_ref :: term()
@typedoc "Specific arguments required to apply an action to an entity."
@type args :: any()
@type error_reason ::
{:conflict, revision()}
| {:pending, transaction_ref()}
| term()
@type on_error :: {:error, error_reason}
@doc "Prepares the execution of the businses logic and persists it."
@spec prepare(action(), state(), args(), store(), ref(), revision()) ::
{:ok, transaction(), transaction_ref()}
| on_error()
def prepare(action, state, args, store, ref, revision) do
with {:ok, transaction} <- action.prepare(state, args),
{:ok, transaction_ref} <- store.prepare(ref, revision, action, transaction) do
{:ok, transaction, transaction_ref}
end
end
@doc "Executes the business logic and persists the result."
@spec commit(action(), state(), transaction(), store(), ref(), transaction_ref()) ::
{:ok, state(), revision(), result :: any()}
| on_error()
def commit(action, state, transaction, store, ref, transaction_ref) do
with {:ok, new_state, result} <- action.commit(state, transaction),
{:ok, revision} <- store.commit(ref, transaction_ref, new_state) do
{:ok, new_state, revision, result}
end
end
@doc "Runs both phases of the two phase commit: `prepare/6` and `commit/6`."
@spec apply(action(), state(), args(), store(), ref(), revision()) ::
{:ok, state(), revision(), result :: any()}
| on_error()
def apply(action, state, args, store, ref, revision) do
with {:ok, transaction, transaction_ref} <-
prepare(action, state, args, store, ref, revision),
{:ok, new_state, revision, result} <-
commit(action, state, transaction, store, ref, transaction_ref) do
{:ok, new_state, revision, result}
end
end
end
|
lib/traktor.ex
| 0.913295
| 0.737371
|
traktor.ex
|
starcoder
|
defmodule Lab42.F.Parser do
use Lab42.F.Types
import Lab42.F.Time, only: [make_time: 1]
@moduledoc """
```elixir
defstruct wildcard: "*",
type: nil, # e.g. "vid", "elixir", ...
mgt: nil, # date modification date greater than
mlt: nil, # date modification date less than
sgt: nil, # size greater than
slt: nil, # size less than
rgx: ~r{.}, # additional regex filter
transform: "%p" # see for details in the README or in Lab42.F.Transform
```
"""
defstruct wildcard: "*",
type: nil, # e.g. "vid", "elixir", ...
mgt: nil, # date modification date greater than
mlt: nil, # date modification date less than
sgt: nil, # size greater than
slt: nil, # size less than
rgx: ~r{.}, # additional regex filter
transform: "%p" # see for details in the README or in Lab42.F.Transform
@type t :: %__MODULE__{
wildcard: binary(),
transform: binary(),
type: binary?(),
mgt: binary?(),
mlt: binary?(),
sgt: binary?(),
slt: binary?(),
rgx: binary?(),
}
@spec parse(binaries()) :: t()
def parse(argv) do
_parse(argv, struct!(__MODULE__))
end
@spec _check_values!(t()) :: t()
defp _check_values!(parser) do
if parser.wildcard == "boom" do
raise "oh no"
else
parser
end
end
@spec _parse(binaries(), t()) :: t()
defp _parse(argv, result)
defp _parse([], result) do
result |> _check_values!()
end
defp _parse(["w", wildcard|rest], result) do
_parse(rest, %{result | wildcard: wildcard})
end
defp _parse(["t", type | rest], result) do
_parse(rest, %{result | type: type})
end
defp _parse(["mgt", mgt | rest], result) do
_parse(rest, %{result | mgt: make_time(mgt)})
end
defp _parse(["mlt", mlt | rest], result) do
_parse(rest, %{result | mlt: make_time(mlt)})
end
defp _parse(["sgt", sgt | rest], result) do
_parse(rest, %{result | sgt: make_time(sgt)})
end
defp _parse(["slt", slt | rest], result) do
_parse(rest, %{result | slt: make_time(slt)})
end
defp _parse(["x", rgx | rest], result) do
_parse(rest, %{result | rgx: Regex.compile!(rgx)})
end
defp _parse(rest, result) do
%{result | transform: Enum.join(rest, " ")} |> _check_values!()
end
end
|
lib/lab42/f/parser.ex
| 0.761893
| 0.801392
|
parser.ex
|
starcoder
|
defmodule Aoc1 do
@moduledoc """
Advent of Code 2018, Day 1
"""
@doc """
Parse an integer from a string, defaulting to 0
## Examples
iex> Aoc1.parse_int "-5"
-5
iex> Aoc1.parse_int "+8"
8
iex> Aoc1.parse_int "15"
15
iex> Aoc1.parse_int "3.14159"
3
Unparsable values will be calculated as zeroes:
iex> Aoc1.parse_int ""
0
iex> Aoc1.parse_int "not a number"
0
"""
def parse_int(str) do
case Integer.parse str do
{int, _} -> int
:error -> 0
end
end
@doc """
Accumulate one frequency change and halt once a previously seen total repeats.
## Examples
iex> Aoc1.find_first_repeat 3, {[3, 1], false}
{:cont, {[6, 3, 1], false}}
iex> Aoc1.find_first_repeat -2, {[3, 1], false}
{:halt, {[1, 3, 1], true}}
"""
def find_first_repeat(x, state) do
{acc, _} = state
[ov | _] = acc
nv = x + ov
if Enum.member? acc, nv do
{:halt, {[nv | acc], true}}
else
{:cont, {[nv | acc], false}}
end
end
@doc """
Reduce frequency variations list to single accumulated value.
## Examples
iex> Aoc1.process_data [1, 1, -2]
0
iex> Aoc1.process_data [1, -1]
0
iex> Aoc1.process_data [3, 3, 4, -2, -4]
10
iex> Aoc1.process_data [-6, 3, 8, 5, -6]
5
iex> Aoc1.process_data [7, 7, -2, -7, -4]
14
"""
def process_data(data, state \\ {[0], false}) do
{acc, is_finished} = Enum.reduce_while(data, state, &find_first_repeat/2)
case is_finished do
true -> List.first acc
_ -> process_data(data, {acc, is_finished})
end
end
@doc """
Compute the resulting frequency from an input file of variations.
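Illustrative call (the path is hypothetical):
    Aoc1.main("data/day1_input.txt")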
"""
def main(filepath) do
filepath
|> File.open!([:read])
|> IO.read(:all)
|> String.split(~r/\n/)
|> Enum.filter(fn x -> String.length(x) > 0 end)
|> Enum.map(&parse_int/1)
|> process_data
end
def main do
IO.puts "Error: Please provide the path to the data file."
end
end
|
01/lib/aoc1.ex
| 0.824709
| 0.451568
|
aoc1.ex
|
starcoder
|
defmodule PersistentEts do
@external_resource "README.md"
@moduledoc File.read!("README.md")
|> String.split(~r/<!-- MDOC !-->/)
|> Enum.fetch!(1)
@type tab :: :ets.tab()
# :ets.type() is private for some reason
@type type :: :set | :ordered_set | :bag | :duplicate_bag
@type access :: :public | :protected
@type tweaks ::
{:write_concurrency, boolean}
| {:read_concurrency, boolean}
| :compressed
@type persist_opt :: {:extended_info, [:md5sum | :object_count]} | {:sync, boolean}
@type persistence :: {:persist_every, pos_integer} | {:persist_opts, [persist_opt]}
@type option :: type | access | :named_table | {:keypos, pos_integer} | tweaks | persistence
@doc """
Creates a new table backed by the file `path`.
Starts a "table manager" process responsible for periodically persisting the
table to the file `path` and links the caller to the process.
Tries to re-read the table from the persistence file. If no such file exists,
a new table is created. Since the options a table was created with are
persisted alongside the table data, an error occurs if they differ from the
options given now. If a change of options is needed, it's advised to manually
transfer the data to a new table created with the new options.
If the table was created with extended info, it will be read using the verify
option. For information on what this means, refer to `:ets.file2tab/2`.
Changing the `:heir` option on the returned table is not supported, since it's
leveraged by the persistence mechanism for correct operation.
## Options
* `:path` (required) - where to store the table file,
* `:persist_every` - how often to write the table to the file
in milliseconds (default: 5_000),
* `:persist_opts` - options passed to `:ets.tab2file/3` when saving the table
For other options refer to the `:ets.new/2` documentation.
The `:heir` option is not supported as it's leveraged by the persistence system
to guarantee the best possible durability.
The `:private` option is not supported since the manager process needs access
to the table in order to save it to the file.
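A minimal sketch (per the `new/3` spec the path is positional; the table
name, file path, and interval below are illustrative):
    table = PersistentEts.new(:sessions, "sessions.tab", [:named_table, :set, persist_every: 10_000])
    :ets.insert(table, {:session_1, self()})
    PersistentEts.flush(table)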
"""
@spec new(atom, Path.t(), [option]) :: tab
def new(module, path, opts) do
child_spec = {PersistentEts.TableManager, {module, path, opts}}
{:ok, pid} = DynamicSupervisor.start_child(PersistentEts.Supervisor, child_spec)
PersistentEts.TableManager.borrow(pid)
end
@doc """
Make process `pid` the new owner of `table`.
If successful, message `{:"ETS-TRANSFER", table, manager_pid, data}` is sent
to the new owner.
This behaviour differs slightly from the behaviour of `:ets.give_away/3`,
where the pid in the transfer message is the pid of the process giving the
table away. This is not maintained, because the table manager process needs
to keep track of the owner.
The old owner is unlinked from the manager process and the new owner is linked.
See `:ets.give_away/3` for more information.
"""
@spec give_away(tab, pid, term) :: true
def give_away(table, pid, data) do
PersistentEts.TableManager.transfer(table, pid, data)
true
end
@doc """
Synchronously dumps the table `table` to disk.
This can be used to make sure all changes have been persisted, before continuing.
The persistence loop will be restarted.
"""
@spec flush(tab) :: :ok
def flush(table) do
PersistentEts.TableManager.flush(table)
end
@doc """
Deletes the entire table `table`.
See `:ets.delete/1` for more information.
"""
@spec delete(tab) :: true
def delete(table) do
PersistentEts.TableManager.return(table)
true
end
end
|
lib/persistent_ets.ex
| 0.830937
| 0.48121
|
persistent_ets.ex
|
starcoder
|
defmodule EpicenterWeb.Test.LiveViewAssertions do
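@moduledoc """
Assertion helpers for LiveView tests. Most helpers locate elements by CSS
selector or `data-role` attribute and flunk with the rendered markup when
the expectation does not hold.
"""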
import Euclid.Test.Extra.Assertions
import ExUnit.Assertions
import Phoenix.LiveViewTest
alias Epicenter.Test
alias Euclid.Extra
def assert_attribute(view, selector, attribute, expected) do
rendered = view |> element(selector) |> render()
if rendered |> Test.Html.parse_doc() |> Floki.attribute(attribute) == expected do
true
else
"""
Expected to find element with selector “#{selector}” with attribute “#{attribute}” as “#{expected}”, but found:
#{rendered}
"""
|> flunk()
end
end
def assert_disabled(%Phoenix.LiveViewTest.View{} = view, selector) do
assert_attribute(view, selector, "disabled", ["disabled"])
end
def assert_enabled(%Phoenix.LiveViewTest.View{} = view, selector) do
assert_attribute(view, selector, "disabled", [])
end
def assert_has_role(%Phoenix.LiveViewTest.View{} = view, data_role) do
if has_element?(view, "[data-role=#{data_role}]") do
view
else
"""
Expected to find element with data-role “#{data_role}” in:
#{render(view)}
"""
|> flunk()
end
end
def assert_has_role(html, data_role) when is_binary(html) do
if html |> Test.Html.parse_doc() |> Test.Html.has_role?(data_role) do
html
else
"""
Expected to find element with data-role “#{data_role}” in:
#{html}
"""
|> flunk()
end
end
def assert_redirects_to({_, {:live_redirect, %{to: destination_path}}}, expected_path) do
assert destination_path == expected_path
end
def assert_role_text(%Phoenix.LiveViewTest.View{} = view, data_role, expected_value) do
selector = "[data-role=#{data_role}]"
rendered = view |> element(selector) |> render() |> Test.Html.parse() |> Test.Html.text()
if rendered == expected_value do
true
else
"""
Expected to find element with data-role “#{data_role}” and text “#{expected_value}”, but found:
#{rendered}
"""
|> flunk()
end
end
def assert_role_list(%Phoenix.LiveViewTest.View{} = view, data_role, expected_values) do
view
|> render()
|> Test.Html.parse()
|> Test.Html.all("[data-role=#{data_role}] li", as: :text)
|> assert_eq(expected_values, returning: view)
end
def assert_role_attribute_value(%Phoenix.LiveViewTest.View{} = view, data_role, expected_value) do
selector = "[data-role=#{data_role}]"
rendered = view |> element(selector) |> render() |> Test.Html.parse() |> Test.Html.attr("*", "value") |> Extra.List.only!()
if rendered == expected_value do
true
else
"""
Expected to find input with data-role “#{data_role}” and value “#{expected_value}”, but found:
#{rendered}
"""
|> flunk()
end
end
def assert_select_dropdown_options(view: %Phoenix.LiveViewTest.View{} = view, data_role: data_role, expected: expected_values) do
rendered = view |> render() |> Test.Html.parse_doc() |> Floki.find("[data-role=#{data_role}] option") |> Enum.map(&Test.Html.text(&1))
if rendered == expected_values do
true
else
"""
Expected to find element with data-role “#{data_role}” and options “#{inspect(expected_values)}”, but found:
#{inspect(rendered)}
"""
|> flunk()
end
end
def assert_selected_dropdown_option(view: %Phoenix.LiveViewTest.View{} = view, data_role: data_role, expected: expected_value) do
rendered = view |> render() |> Test.Html.parse_doc() |> Floki.find("[data-role=#{data_role}] option[selected]") |> Enum.map(&Test.Html.text(&1))
if rendered == expected_value do
true
else
"""
Expected to find element with data-role “#{data_role}” and options “#{inspect(expected_value)}”, but found:
#{inspect(rendered)}
"""
|> flunk()
end
end
end
|
test/support/live_view_assertions.ex
| 0.61659
| 0.563948
|
live_view_assertions.ex
|
starcoder
|
defmodule EctoVista do
@moduledoc """
Provides the macros and functions to define and manage
PostgreSQL views with Ecto.
## Using EctoVista
To use `EctoVista`, you need to add `use EctoVista` to your Elixir
files. This gives you access to the functions and macros defined
in the module.
example:
defmodule App.Catalog do
use Ecto.Schema
use EctoVista,
repo: App.Repo,
table_name: "catalog"
schema @table_name do
field(:name, :string)
field(:product_count, :integer)
end
end
The `@table_name` will be defined in macro as `{table_name}_v{version}` (version is 1 by default)
This naming convention facilitates 0-downtime view updates and will be handled automagically in future versions.
## Generating Views
Views can be generated via regular migration, just put the definition inside `change` or `up` migration methods.
For the schema definition like the one above, view can be generated as:
execute(\"\"\"
CREATE MATERIALIZED VIEW catalog_v1 AS
SELECT c.*, count(p.id) AS product_count
FROM categories c
LEFT JOIN products p ON c.id = p.category_id
GROUP BY c.id
;
\"\"\")
## Updating Views
If you need to update the view, generate a new migration and then just update the version number in the schema definition:
defmodule App.Catalog do
use Ecto.Schema
use EctoVista,
repo: App.Repo,
table_name: "catalog",
version: 2
...
end
## Refreshing Views
Use the `refresh/0` function.
It will run the `REFRESH MATERIALIZED VIEW [view_name];` query.
iex> Catalog.refresh
:ok
"""
require Ecto.Query
defmacro __using__(opts \\ []) do
unless repo = Keyword.get(opts, :repo) do
raise ArgumentError,
"""
expected :repo to be given as an option. Example:
use EctoVista, repo: App.Repo
"""
end
unless table_name = Keyword.get(opts, :table_name) do
raise ArgumentError,
"""
expected :table_name to be given as an option. Example:
use EctoVista, table_name: "categories"
"""
end
version = Keyword.get(opts, :version, 1)
quote do
import Ecto
@table_name "#{unquote(table_name)}_v#{unquote(version)}"
def repo, do: unquote(repo)
def source, do: __MODULE__.__struct__().__meta__.source
@doc """
A function that refreshes a current version of the view,
defined in module.
Currently support only materialized views.
iex> Catalog.refresh
:ok
"""
@spec refresh() :: :ok | {:error, String.t()}
def refresh do
@table_name
|> refresh_query()
|> repo().query()
|> handle_refresh_result()
end
defp refresh_query(name) do
"REFRESH MATERIALIZED VIEW #{name};"
end
defp handle_refresh_result({:ok, _}), do: :ok
defp handle_refresh_result({:error, %Postgrex.Error{postgres: error_hash}}) do
{:error, error_hash.message}
end
end
end
end
|
lib/ecto_vista.ex
| 0.767429
| 0.423816
|
ecto_vista.ex
|
starcoder
|
defprotocol StathamLogger.Loggable do
@moduledoc """
Implement this protocol for structs, that require custom sanitization.
1. Implement `StathamLogger.Loggable` protocol. Most flexible approach.
```elixir
defimpl StathamLogger.Loggable, for: YourStruct do
@impl true
def sanitize(struct, opts) do
# your sanitization logic
end
end
```
2. Derive StathamLogger.Loggable, possibly overriding options.
```elixir
defmodule YourStruct do
@derive {StathamLogger.Loggable, filter_keys: {:discard, [:phone_number]}}
defstruct [:phone_number, ...]
end
```
"""
@doc """
Sanitize term, according to given options.
Built-in implementations accept only `sensitive_keys` and `max_string_size` options.
"""
@fallback_to_any true
def sanitize(term, opts \\ [])
end
defimpl StathamLogger.Loggable, for: Any do
defmacro __deriving__(module, _struct, options) do
quote do
defimpl StathamLogger.Loggable, for: unquote(module) do
def sanitize(data, options) do
options =
Keyword.merge(
options,
unquote(options)
)
data
|> Map.from_struct()
|> StathamLogger.Loggable.sanitize(options)
end
end
end
end
@impl true
def sanitize(%{__struct__: Ecto.Association.NotLoaded}, _), do: :not_loaded
@impl true
def sanitize(%_struct{} = data, opts) do
if jason_implemented?(data) do
data
else
data
|> Map.from_struct()
|> StathamLogger.Loggable.sanitize(opts)
end
end
@impl true
def sanitize(data, _), do: inspect(data)
defp jason_implemented?(data) do
impl = Jason.Encoder.impl_for(data)
impl && impl != Jason.Encoder.Any
end
end
defimpl StathamLogger.Loggable, for: Atom do
@impl true
def sanitize(data, _), do: data
end
defimpl StathamLogger.Loggable, for: PID do
@impl true
def sanitize(data, _), do: inspect(data)
end
defimpl StathamLogger.Loggable, for: Reference do
@impl true
def sanitize(data, _), do: inspect(data)
end
defimpl StathamLogger.Loggable, for: Boolean do
@impl true
def sanitize(data, _), do: data
end
defimpl StathamLogger.Loggable, for: Integer do
@impl true
def sanitize(data, _), do: data
end
defimpl StathamLogger.Loggable, for: Map do
@impl true
def sanitize(map, opts) do
{mode, keys} = Keyword.get(opts, :filter_keys, {nil, []})
filter_keys_option = {
mode,
Enum.map(keys, &to_string/1)
}
map
|> Map.drop([:__struct__, :__meta__])
|> Map.new(fn {field, value} ->
if should_be_filtered(field, filter_keys_option) do
{sanitize_map_key(field), "[FILTERED]"}
else
{sanitize_map_key(field), StathamLogger.Loggable.sanitize(value, opts)}
end
end)
end
defp sanitize_map_key(key) when is_binary(key) or is_atom(key) or is_number(key), do: key
defp sanitize_map_key(key), do: inspect(key)
defp should_be_filtered(field, _) when not is_atom(field) and not is_binary(field), do: false
defp should_be_filtered(_field, {nil, _}), do: false
defp should_be_filtered(field, {:keep, keys}) do
field = to_string(field)
Enum.all?(keys, fn key -> key != field end)
end
defp should_be_filtered(field, {:discard, keys = [_h | _t]}) do
field = to_string(field)
Enum.any?(keys, fn key -> key == field end)
end
end
defimpl StathamLogger.Loggable, for: BitString do
@impl true
def sanitize(string, opts) do
max_string_size = Keyword.get(opts, :max_string_size)
if string_valid?(string) do
if max_string_size && String.length(string) > max_string_size do
"#{String.slice(string, 0..(max_string_size - 1))}..."
else
string
end
else
inspect(string)
end
end
defp string_valid?(string), do: String.valid?(string) && String.printable?(string)
end
defimpl StathamLogger.Loggable, for: List do
@impl true
def sanitize(list, opts) do
if Keyword.keyword?(list) do
list
|> Map.new()
|> StathamLogger.Loggable.sanitize(opts)
else
list
|> Enum.map(&StathamLogger.Loggable.sanitize(&1, opts))
end
end
end
defimpl StathamLogger.Loggable, for: Tuple do
@impl true
def sanitize(tuple, opts) do
tuple
|> Tuple.to_list()
|> StathamLogger.Loggable.sanitize(opts)
end
end
|
lib/loggable.ex
| 0.859369
| 0.681859
|
loggable.ex
|
starcoder
|
defmodule Erl2ex.Cli do
@moduledoc """
This module provides the command line interface for the erl2ex binary and
the mix erl2ex task.
"""
alias Erl2ex.Results
@doc """
Runs the erl2ex binary, given a set of command line arguments.
Returns the OS result code, which is 0 for success or nonzero for failure.
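A minimal sketch (the input path is illustrative):
    # Converts one file; with no --output, the .ex file is written
    # alongside the input.
    exit_code = Erl2ex.Cli.run(["src/my_module.erl"])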
"""
@spec run([String.t]) :: non_neg_integer
def run(argv) do
{options, args, errors} =
OptionParser.parse(argv,
strict: [
output: :string,
include_dir: [:string, :keep],
lib_dir: [:string, :keep],
emit_file_headers: :boolean,
define_prefix: :string,
defines_from_config: :string,
verbose: [:boolean, :keep],
help: :boolean
],
aliases: [
v: :verbose,
"?": :help,
o: :output,
"I": :include_dir
]
)
{options, errors2} = decode_options(options)
errors = errors ++ errors2
cond do
not Enum.empty?(errors) ->
display_errors(errors)
Keyword.get(options, :help) ->
display_help()
true ->
run_conversion(args, options)
end
end
@doc """
Runs the erl2ex binary, given a set of command line arguments.
Does not return. Instead, halts the VM on completion with the appropriate
OS result code.
"""
@spec main([String.t]) :: no_return
def main(argv) do
argv
|> run
|> System.halt
end
@doc """
Returns the usage documentation for the command line.
The argument specifies how to invoke the binary (e.g. "erl2ex" or
"mix erl2ex"). The returned text is in markdown form, but may be rendered
as plain text as well.
"""
@spec usage_text(String.t) :: String.t
def usage_text(invocation \\ "erl2ex") do
"""
Usage: `#{invocation} [options] [input path]`
Command line options:
--output, -o "path" (Set the output file or directory path)
--include-dir, -I "dir" (Add a directory to the include path)
--[no-]emit-file-headers (Emit a header comment in each file)
--define-prefix "prefix" (Prefix for variables used to define macros)
--defines-from-config "app" (Define macros from this application's config)
--verbose, -v (Display verbose status)
--help, -? (Display help text)
erl2ex is an Erlang to Elixir transpiler.
When no input path is provided, erl2ex reads from stdin and writes to
stdout. Any output path is ignored.
When the input path is a file, erl2ex reads from the file and writes to
the specified output path. If no output path is present, erl2ex creates
an output file in the same directory as the input file.
When the input path is a directory, erl2ex recursively searches the
directory and reads from every Erlang (*.erl) file it finds. It writes
the results in the same directory structure under the given output path,
which must also be a directory. If no output path is provided, the
results are written in the same directories as the input files.
"""
end
defp decode_options(options) do
verbose_count = options
|> Keyword.get_values(:verbose)
|> Enum.count
{lib_dirs, errors} = options
|> Keyword.get_values(:lib_dir)
|> Enum.reduce({%{}, []}, fn (str, {map, errs}) ->
case Regex.run(~r{^([^=]+)=([^=]+)$}, str) do
nil ->
{map, [{:lib_dir, str} | errs]}
[_, key, val] ->
{Map.put(map, String.to_atom(key), val), errs}
end
end)
options = options
|> Keyword.put(:verbosity, verbose_count)
|> Keyword.put(:lib_dir, lib_dirs)
{options, errors}
end
defp run_conversion([], options) do
:all
|> IO.read
|> Erl2ex.convert_str!(options)
|> IO.write
0
end
defp run_conversion([path], options) do
output = Keyword.get(options, :output)
cond do
File.dir?(path) ->
result = Erl2ex.convert_dir(path, output, options)
handle_result(result)
File.regular?(path) ->
result = Erl2ex.convert_file(path, output, options)
handle_result(result)
true ->
IO.puts(:stderr, "Could not find input: #{path}")
1
end
end
defp run_conversion(paths, _) do
IO.puts(:stderr, "Got too many input paths: #{inspect(paths)}\n")
display_help()
1
end
defp handle_result(results) do
error = Results.get_error(results)
if error == nil do
0
else
IO.puts(:stderr, "Error converting #{error.file}, line #{error.line}: #{error.description}")
1
end
end
defp display_errors(errors) do
Enum.each(errors, fn
{switch, val} ->
IO.puts(:stderr, "Unrecognized or malformed switch: #{switch}=#{val}")
end)
IO.puts(:stderr, "")
display_help()
1
end
defp display_help do
IO.write(:stderr, usage_text("erl2ex"))
end
end
|
lib/erl2ex/cli.ex
| 0.576423
| 0.442637
|
cli.ex
|
starcoder
|
defmodule Rex2048.Board do
@doc """
iex> Rex2048.Board.can_move?([2, 4, 2, 8])
true
iex> Rex2048.Board.can_move?([2, 4, 8, 16])
false
"""
def can_move?(board) do
[:left, :right, :up, :down]
|> Enum.map(&({board, push(board, &1)}))
|> Enum.any?(fn {b1, b2} -> b1 != b2 end)
end
@doc """
iex> Rex2048.Board.reached_2048?([1, 2048, 0, 0])
true
iex> Rex2048.Board.reached_2048?([2, 0, 4, 2])
false
"""
def reached_2048?(board) do
Enum.any?(board, &(&1 == 2048))
end
@doc """
iex> Rex2048.Board.push([0, 0, 2, 4, 0, 4, 4, 8, 4], :left)
[2, 0, 0, 8, 0, 0, 4, 8, 4]
iex> Rex2048.Board.push([0, 0, 2, 4, 0, 4, 4, 8, 4], :right)
[0, 0, 2, 0, 0, 8, 4, 8, 4]
iex> Rex2048.Board.push([0, 0, 2, 4, 0, 4, 4, 8, 4], :up)
[8, 8, 2, 0, 0, 8, 0, 0, 0]
iex> Rex2048.Board.push([0, 0, 2, 4, 0, 4, 4, 8, 4], :down)
[0, 0, 0, 0, 0, 2, 8, 8, 8]
"""
def push(board, :left) do
board
|> collapse_left_and_pad_
end
def push(board, :right) do
board
|> mirror
|> push(:left)
|> mirror
end
def push(board, :up) do
board
|> transpose
|> push(:left)
|> transpose
end
def push(board, :down) do
board
|> transpose
|> push(:right)
|> transpose
end
@doc """
iex> Rex2048.Board.empty(2)
[0, 0, 0, 0]
iex> Rex2048.Board.empty(3)
[0, 0, 0, 0, 0, 0, 0, 0, 0]
"""
def empty(size) when size > 1 do
for _ <- 1..(size * size), do: 0
end
@doc """
iex> Rex2048.Board.calculate_points([0, 1, 1, 0], [1, 0, 1, 0])
0
iex> Rex2048.Board.calculate_points([1, 1, 2, 2], [2, 0, 4, 0])
6
iex> Rex2048.Board.calculate_points([4, 4, 2, 2], [8, 0, 4, 0])
12
"""
def calculate_points(before_push, after_push) do
_calculate_points(
Enum.reverse(Enum.sort(before_push)),
Enum.reverse(Enum.sort(after_push))
)
end
defp _calculate_points([x | b_rest], [x | a_rest]) do
_calculate_points(b_rest, a_rest)
end
defp _calculate_points([x, x | b_rest], [y | a_rest]) do
y + _calculate_points(b_rest, a_rest)
end
defp _calculate_points([], _), do: 0
@doc """
iex> Rex2048.Board.transpose([1, 2, 3, 4])
[1, 3, 2, 4]
iex> Rex2048.Board.transpose([1, 2, 3, 4, 5, 6, 7, 8, 9])
[1, 4, 7, 2, 5, 8, 3, 6, 9]
"""
def transpose(board) do
for offset <- 0..(size(board) - 1) do
Enum.take_every(Enum.drop(board, offset), size(board))
end
|> Enum.concat
end
@doc """
iex> Rex2048.Board.mirror([1, 2, 3, 4])
[2, 1, 4, 3]
iex> Rex2048.Board.mirror([1, 2, 3, 4, 5, 6, 7, 8, 9])
[3, 2, 1, 6, 5, 4, 9, 8, 7]
"""
def mirror(board) do
board
|> rows
|> Enum.map(&Enum.reverse/1)
|> Enum.concat
end
@doc """
iex> Rex2048.Board.collapse_left_and_pad_([0, 2, 2, 0, 0, 0, 1, 1, 1])
[4, 0, 0, 0, 0, 0, 2, 1, 0]
"""
def collapse_left_and_pad_(board) do
board
|> rows
|> Enum.map(&collapse_row/1)
|> Enum.map(&(pad_row(&1, size(board))))
|> Enum.concat
end
@doc """
iex> Rex2048.Board.rows([0, 2, 2, 0])
[[0, 2], [2, 0]]
"""
def rows(board) do
Enum.chunk_every(board, size(board))
end
@doc """
iex> Rex2048.Board.collapse_row([0, 2, 2, 1, 0, 1, 1, 4, 0])
[4, 2, 1, 4]
"""
def collapse_row(row) do
row
|> Enum.reject(&(&1 == 0))
|> _collapse_row
end
defp _collapse_row([]), do: []
defp _collapse_row([num, num | rest]) do
[num + num] ++ _collapse_row(rest)
end
defp _collapse_row([num | rest]) do
[num] ++ _collapse_row(rest)
end
defp pad_row(row, size) do
row ++ List.duplicate(0, (size - length(row)))
end
@doc """
iex> Rex2048.Board.insert_at_random([2, 0, 2, 4, 0])
[2, 2, 2, 4, 0]
iex> Rex2048.Board.insert_at_random([2, 8, 2, 4, 0])
[2, 8, 2, 4, 2]
iex> Rex2048.Board.insert_at_random([2, 8, 2, 4, 2])
[2, 8, 2, 4, 2]
"""
def insert_at_random(board) do
indexes = board
|> Enum.with_index
|> Enum.filter(fn {x, _} -> x == 0 end)
|> Enum.map(fn {_, i} -> i end)
if length(indexes) > 0 do
number = if(:rand.uniform() < 0.9, do: 2, else: 4)
List.replace_at(board, Enum.random(indexes), number)
else
board
end
end
defp size(board) do
length(board)
|> :math.sqrt
|> round
end
end
|
lib/rex2048/board.ex
| 0.603581
| 0.504761
|
board.ex
|
starcoder
|
defmodule Hades.Arguments.HostDiscovery do
@moduledoc """
One of the very first steps in any network reconnaissance mission is to reduce a (sometimes huge) set of IP ranges into a list of active or interesting hosts. Scanning every port of every single IP address is slow and usually unnecessary. Of course what makes a host interesting depends greatly on the scan purposes. Network administrators may only be interested in hosts running a certain service, while security auditors may care about every single device with an IP address. An administrator may be comfortable using just an ICMP ping to locate hosts on his internal network, while an external penetration tester may use a diverse set of dozens of probes in an attempt to evade firewall restrictions.
Because host discovery needs are so diverse, Nmap offers a wide variety of options for customizing the techniques used. Host discovery is sometimes called ping scan, but it goes well beyond the simple ICMP echo request packets associated with the ubiquitous ping tool. Users can skip the ping step entirely with a list scan (-sL) or by disabling ping (-Pn), or engage the network with arbitrary combinations of multi-port TCP SYN/ACK, UDP, SCTP INIT and ICMP probes. The goal of these probes is to solicit responses which demonstrate that an IP address is actually active (is being used by a host or network device). On many networks, only a small percentage of IP addresses are active at any given time. This is particularly common with private address space such as 10.0.0.0/8. That network has 16 million IPs, but I have seen it used by companies with less than a thousand machines. Host discovery can find those machines in a sparsely allocated sea of IP addresses.
If no host discovery options are given, Nmap sends an ICMP echo request, a TCP SYN packet to port 443, a TCP ACK packet to port 80, and an ICMP timestamp request. These defaults are equivalent to the -PE -PS443 -PA80 -PP options. An exception to this is that an ARP scan is used for any targets which are on a local ethernet network. For unprivileged Unix shell users, the default probes are a SYN packet to ports 80 and 443 using the connect system call. This host discovery is often sufficient when scanning local networks, but a more comprehensive set of discovery probes is recommended for security auditing.
The -P* options (which select ping types) can be combined. You can increase your odds of penetrating strict firewalls by sending many probe types using different TCP ports/flags and ICMP codes. Also note that ARP discovery (-PR) is done by default against targets on a local ethernet network even if you specify other -P* options, because it is almost always faster and more effective.
By default, Nmap does host discovery and then performs a port scan against each host it determines is online. This is true even if you specify non-default host discovery types such as UDP probes (-PU). Read about the -sn option to learn how to perform only host discovery, or use -Pn to skip host discovery and port scan all target hosts.
"""
alias Hades.Argument
require Hades.Helpers
@known_options %{
sL: %Argument{
name: "-sL",
context: :scan_type,
desc: "List Scan - simply list targets to scan"
},
sn: %Argument{name: "-sn", context: :scan_type, desc: "Ping Scan - disable port scan"},
Pn: %Argument{
name: "-Pn",
context: :scan_type,
desc: "Treat all hosts as online -- skip host discovery"
},
PS: %Argument{
name: "-PS",
context: :scan_type,
options: true,
desc: "TCP SYN discovery to given ports"
},
PA: %Argument{
name: "-PA",
context: :scan_type,
options: true,
desc: "ACK discovery to given ports"
},
PU: %Argument{
name: "-PU",
context: :scan_type,
options: true,
desc: "UDP discovery to given ports"
},
PY: %Argument{
name: "-PY",
context: :scan_type,
options: true,
desc: "SCTP discovery to given ports"
},
PE: %Argument{
name: "-PE",
context: :scan_type,
desc: "ICMP echo request discovery probes"
},
PP: %Argument{
name: "-PP",
context: :scan_type,
desc: "Timestamp request discovery probes"
},
PM: %Argument{
name: "-PM",
context: :scan_type,
desc: "Netmask request discovery probes"
},
PO: %Argument{
name: "-PO",
context: :scan_type,
options: true,
desc: "IP Protocol Ping"
},
n: %Argument{
name: "-n",
context: :scan_type,
desc: "Never do DNS resolution [default: sometimes]"
},
R: %Argument{
name: "-R",
context: :scan_type,
desc: "Always resolve [default: sometimes]"
},
dns_servers: %Argument{
name: "--dns-servers",
context: :option,
options: true,
desc: "Specify custom DNS servers"
},
system_dns: %Argument{
name: "--system-dns",
context: :option,
desc: "Use OS's DNS resolver"
},
traceroute: %Argument{
name: "--traceroute",
context: :option,
desc: "Trace hop path to each host"
}
}
Hades.Helpers.option_functions(@known_options)
end
|
lib/arguments/host_discovery.ex
| 0.768081
| 0.662531
|
host_discovery.ex
|
starcoder
|
defmodule Gim.Index do
@moduledoc """
Internal helper module to handle indexes.
"""
@type id :: pos_integer()
@type index :: list(id)
@doc """
Turns a given list into an index
iex> new([10, 13, 6, 60, 2])
[60, 13, 10, 6, 2]
iex> new([2, 2, 2, 3])
[3, 2]
"""
@spec new(list :: index) :: index
def new(list) do
Enum.reduce(list, [], &add(&2, &1))
end
@doc """
Adds an id to the given index
iex> add([], 42)
[42]
iex> add([404, 23, 13], 42)
[404, 42, 23, 13]
Since ids have to be unique, duplicates are prevented
iex> add([404, 42, 23, 13], 42)
[404, 42, 23, 13]
"""
@spec add(index :: index, id :: id) :: index
def add(index, id)
def add([], id) do
[id]
end
def add([head | rest], id) when id < head do
[head | add(rest, id)]
end
def add([head | _] = list, id) when id > head do
[id | list]
end
def add([head | _] = list, head) do
list
end
@doc """
Removes an id from the given index
iex> remove([], 42)
[]
iex> remove([404, 42, 23, 13], 42)
[404, 23, 13]
"""
@spec remove(index :: index, id :: id) :: index
def remove(index, id) do
List.delete(index, id)
end
@doc """
Intersects the given indexes
iex> intersect([99, 66, 77, 50, 30, 10, 0], [99, 70, 60, 50, 11, 10, 1, 0])
[99, 50, 10, 0]
iex> intersect([], [99, 70, 60, 50, 11, 10, 1, 0])
[]
"""
@spec intersect(index_a :: index, index_b :: index) :: index
def intersect([head | rest_a], [head | rest_b]) do
[head | intersect(rest_a, rest_b)]
end
def intersect([head_a | rest_a], [head_b | _] = index_b) when head_a > head_b do
intersect(rest_a, index_b)
end
def intersect([head_a | _] = index_a, [head_b | rest_b]) when head_a < head_b do
intersect(index_a, rest_b)
end
def intersect([], _) do
[]
end
def intersect(_, []) do
[]
end
@doc """
Intersects a list of indexes
iex> intersect([[99, 77, 66, 50, 30, 10, 0], [99, 70, 60, 50, 11, 10, 1, 0], [99, 50, 10]])
[99, 50, 10]
"""
@spec intersect(indexes :: list(index)) :: index
def intersect(lists) do
lists
|> Enum.sort()
|> Enum.reduce(&intersect/2)
end
@doc """
Joins the given indexes
iex> join([99, 50, 13], [90, 50, 10])
[99, 90, 50, 13, 10]
iex> join([], [99, 70, 60, 50, 11, 10, 1, 0])
[99, 70, 60, 50, 11, 10, 1, 0]
"""
@spec join(index_a :: index, index_b :: index) :: index
def join([head | rest_a], [head | rest_b]) do
[head | join(rest_a, rest_b)]
end
def join([head_a | rest_a], [head_b | _] = index_b) when head_a > head_b do
[head_a | join(rest_a, index_b)]
end
def join([head_a | _] = index_a, [head_b | rest_b]) when head_a < head_b do
[head_b | join(index_a, rest_b)]
end
def join([], rest) do
rest
end
def join(rest, []) do
rest
end
@doc """
Joins a list of indexes
iex> join([[99, 55, 42, 11], [98, 54, 42, 10], [90, 50, 42]])
[99, 98, 90, 55, 54, 50, 42, 11, 10]
"""
@spec join(indexes :: list(index)) :: index
def join(lists) do
lists
|> Enum.sort()
|> Enum.reduce(&join/2)
end
@doc """
Returns the difference of two given indexes
iex> difference([99, 66, 77, 50, 30, 10, 0], [99, 70, 60, 50, 11, 10, 1, 0])
{[77, 66, 30], [70, 60, 11, 1]}
iex> difference([5, 4, 3, 2, 1], [5, 4, 3, 2, 1])
{[], []}
iex> difference([], [])
{[], []}
"""
def difference(index_a, index_b) do
difference(index_a, index_b, {[], []})
end
defp difference(index_a, [], {diff_a, diff_b}) do
{join(index_a, diff_a), diff_b}
end
defp difference([], index_b, {diff_a, diff_b}) do
{diff_a, join(index_b, diff_b)}
end
defp difference([head | rest_a], [head | rest_b], diff) do
difference(rest_a, rest_b, diff)
end
defp difference([head_a | rest_a], [head_b | _] = index_b, {diff_a, diff_b})
when head_a > head_b do
difference(rest_a, index_b, {add(diff_a, head_a), diff_b})
end
defp difference([head_a | _] = index_a, [head_b | rest_b], {diff_a, diff_b})
when head_a < head_b do
difference(index_a, rest_b, {diff_a, add(diff_b, head_b)})
end
end
|
lib/gim/index.ex
| 0.787646
| 0.458409
|
index.ex
|
starcoder
|
defmodule HomeBot.EnergyStream.Watercooker.WatercookerDetector do
use GenStage
alias HomeBot.EnergyStream.Watercooker.State
def start_link(_) do
GenStage.start_link(__MODULE__, :ok)
end
@impl GenStage
def init(:ok) do
# Starts a permanent subscription to the broadcaster
# which will automatically start requesting items.
{:consumer, %State{}, subscribe_to: [HomeBot.EnergyStream.Producer.EnergyProducer]}
end
@impl GenStage
@spec handle_events(any, any, %State{}) :: {:noreply, [], any}
def handle_events(events, _from, state) do
state =
Enum.reduce(events, state, fn event, state ->
handle_event(event, state)
end)
{:noreply, [], state}
end
defp handle_event(%{"current_energy_usage" => nil}, state), do: state
defp handle_event(event, state) do
energy_difference = event["current_energy_usage"] - state.previous_usage
{:ok, now, _} = DateTime.from_iso8601(event["time"])
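# Detect the watercooker from step changes in power draw: a jump of
# ~2.1..2.3 while idle marks the start of a run; a matching drop while
# active marks the end, notifying only for runs lasting 60..300 seconds.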
cond do
detection_inactive?(state) && watercooker_on?(energy_difference) ->
%{
state
| previous_usage: event["current_energy_usage"],
active_since: now,
usage: energy_difference
}
detection_active?(state) && watercooker_off?(energy_difference) &&
active_long_enough?(state.active_since, now) ->
HomeBot.Bot.notify_users(
"Watercooker used at #{in_timezone(state.active_since)} for #{
seconds_active(state.active_since, now)
} seconds with usage: #{state.usage}"
)
%{state | previous_usage: event["current_energy_usage"], active_since: nil, usage: 0}
detection_active?(state) && watercooker_off?(energy_difference) &&
!active_long_enough?(state.active_since, now) ->
%{state | previous_usage: event["current_energy_usage"], active_since: nil, usage: 0}
detection_active?(state) && event["current_energy_usage"] < 2 ->
%{state | previous_usage: event["current_energy_usage"], active_since: nil, usage: 0}
true ->
%{state | previous_usage: event["current_energy_usage"]}
end
end
defp watercooker_on?(energy_difference), do: energy_difference >= 2.1 && energy_difference <= 2.3
defp watercooker_off?(energy_difference), do: energy_difference <= -2.1 && energy_difference >= -2.3
defp detection_inactive?(state), do: state.active_since == nil
defp detection_active?(state), do: state.active_since != nil
defp seconds_active(start_time, end_time), do: Timex.diff(end_time, start_time, :seconds)
defp active_long_enough?(start_time, end_time) do
seconds = seconds_active(start_time, end_time)
seconds >= 60 && seconds <= 5 * 60
end
defp in_timezone(time) do
time
|> Timex.Timezone.convert("Europe/Amsterdam")
|> DateTime.truncate(:second)
|> DateTime.to_iso8601()
end
end
|
lib/home_bot/energy_stream/watercooker/watercooker_detector.ex
| 0.851305
| 0.469034
|
watercooker_detector.ex
|
starcoder
|
defmodule Astro.Math do
@moduledoc false
import Kernel, except: [min: 2, max: 2, ceil: 1, floor: 1]
alias Astro.Time
@radians_to_degrees 180.0 / :math.pi()
@au_to_km 149_597_870.7
@au_to_m 149_597_870.7 * 1_000
defmacro to_degrees(radians) do
radians_to_degrees = @radians_to_degrees
quote do
unquote(radians) * unquote(radians_to_degrees)
end
end
defmacro to_radians(degrees) do
radians_to_degrees = @radians_to_degrees
quote do
unquote(degrees) / unquote(radians_to_degrees)
end
end
def au_to_km(au) do
au * @au_to_km
end
def au_to_m(au) do
au * @au_to_m
end
def m_to_au(m) do
m / @au_to_m
end
def degrees(degrees) do
mod(degrees, 360.0)
end
defmacro mt(x) do
quote do
unquote(x)
end
end
defmacro deg(x) do
quote do
unquote(x)
end
end
defmacro angle(d, m, s) do
angle = d + (m + s / Time.seconds_per_minute()) / Time.minutes_per_hour()
quote do
unquote(angle)
end
end
defmacro degrees_minutes_seconds(d, m, s) do
quote do
{unquote(d), unquote(m), unquote(s)}
end
end
def cos(degrees) do
degrees
|> to_radians
|> :math.cos()
end
def sin(degrees) do
degrees
|> to_radians
|> :math.sin()
end
def asin(degrees) do
degrees
|> to_radians
|> :math.asin()
end
def acos(degrees) do
degrees
|> to_radians
|> :math.acos()
end
def tan(degrees) do
degrees
|> to_radians
|> :math.tan()
end
def atan(0, 0) do
:undefined
end
def atan(y, x) do
cond do
x == 0 && y != 0 -> signum(y) * deg(90.0)
x >= 0 -> to_degrees(:math.atan(y / x))
x < 0 -> to_degrees(:math.atan(y / x)) + signum(y) * deg(180.0)
end
|> mod(360.0)
end
def atan_r(0, 0) do
:NaN
end
def atan_r(y, x) do
cond do
x == 0 && y != 0 -> signum(y) * :math.pi() / 2.0
x >= 0 -> :math.atan(y / x)
x < 0 -> :math.atan(y / x) + signum(y) * :math.pi()
end
end
@doc """
Returns the minimum number for which
the given function returns a `truthy`
value.
"""
@spec min(number(), function()) :: number()
def min(i, fun) when is_number(i) and is_function(fun) do
if fun.(i), do: i, else: min(i + 1, fun)
end
@doc """
Returns the maximum number for which
the given function returns a `truthy`
value.
"""
@spec max(number(), function()) :: number()
def max(i, fun) when is_number(i) and is_function(fun) do
if fun.(i), do: max(i + 1, fun), else: i - 1
end
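@doc """
Evaluates a polynomial with coefficients `a_s` (constant term first) at `x`,
via Horner's rule.

    iex> Astro.Math.poly(2, [1, 2, 3])
    17
"""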
@spec poly(number(), [number()]) :: number()
def poly(_, []), do: 0
def poly(x, [a | a_s]), do: a + x * poly(x, a_s)
def amod(x, y) when y != 0 do
y + mod(x, -y)
end
@spec signum(number()) :: -1 | 0 | 1
def signum(x) when x > 0, do: 1
def signum(x) when x < 0, do: -1
def signum(_), do: 0
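@doc """
Applies `fun` to successive "columns" of the given lists and sums the
results, returned as a float.

    iex> Astro.Math.sigma([[1, 2], [10, 20]], fn [a, b] -> a * b end)
    50.0
"""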
@spec sigma([[number(), ...]], function()) :: number()
def sigma(list_of_lists, fun) do
if Enum.all?(list_of_lists, &(&1 == [])) do
0
else
# [hd(l) || l <- list_of_lists]
current = Enum.map(list_of_lists, &hd/1)
# [tl(l) || l <- list_of_lists]
next = Enum.map(list_of_lists, &tl/1)
fun.(current) + sigma(next, fun)
end
|> Kernel.*(1.0)
end
@doc """
Calculates the modulo of a number (integer, float).
Note that this function uses `floored division` whereas the builtin `rem`
function uses `truncated division`.
See [Wikipedia](https://en.wikipedia.org/wiki/Modulo_operation) for an
explanation of the difference.
## Examples
iex> Astro.Math.mod(1234.0, 5)
4.0
"""
@spec mod(number, number) :: number
def mod(number, modulus) when is_float(number) and is_number(modulus) do
number - Float.floor(number / modulus) * modulus
end
def mod(number, modulus) when is_integer(number) and is_integer(modulus) do
modulo =
number
|> Integer.floor_div(modulus)
|> Kernel.*(modulus)
number - modulo
end
def mod(number, modulus) when is_integer(number) and is_number(modulus) do
modulo =
number
|> Kernel./(modulus)
|> Float.floor()
|> Kernel.*(modulus)
number - modulo
end
@doc """
Returns the quotient and remainder of two numbers.
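## Example

    iex> Astro.Math.div_mod(9, 4)
    {2, 1}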
"""
@spec div_mod(number, number) :: {number, number}
def div_mod(n1, n2) when is_integer(n1) and is_integer(n2) do
div = div(n1, n2)
mod = n1 - div * n2
{div, mod}
end
def div_mod(n1, n2) when is_number(n1) and is_number(n2) do
div = Float.floor(n1 / n2)
mod = n1 - div * n2
{div, mod}
end
def final(k, pred) do
new_k = k + 1
if !pred.(new_k) do
k
else
final(new_k, pred)
end
end
def next(k, pred) do
if pred.(k) do
k
else
next(k + 1, pred)
end
end
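@doc """
Returns an `x` with `a <= x <= b` for which `f.(x)` is congruent to `y`
modulo 360 degrees, located by bisection to a tolerance of 1/100_000.

Illustrative sketch (the result is approximate, so this is not a doctest):

    Astro.Math.invert_angular(fn x -> 2.0 * x end, 90.0, 0.0, 90.0)
    #=> roughly 45.0
"""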
@spec invert_angular(function(), number(), number(), number()) :: number()
def invert_angular(f, y, a, b) do
# returns X such that A =< X =< B for which f(X) = Y
# where |X-X0| < Tolerance
tolerance = 1 / 100_000.0
phi = fn l, u -> u - l < tolerance end
psi = fn x -> mod(f.(x) - y, 360.0) < deg(180.0) end
bisection_search(a, b, phi, psi)
end
@spec bisection_search(number(), number(), function(), function()) :: number()
def bisection_search(u, v, phi, psi) do
x = (v + u) / 2.0
if phi.(u, v) do
x
else
if psi.(x) do
bisection_search(u, x, phi, psi)
else
bisection_search(x, v, phi, psi)
end
end
end
end
|
lib/astro/math.ex
| 0.894525
| 0.737513
|
math.ex
|
starcoder
|
defmodule DiscUnion.Utils.Constructors do
@moduledoc false
require DiscUnion.Utils.Case
alias DiscUnion.Utils
alias DiscUnion.Utils.{Constructors, Case}
defmacro build_constructor_functions(mod, cases) do
from_constructors = build_from_constructors(cases, mod)
c_constructors = build_c_constructors(cases, mod)
named_constructors = build_named_constructors(cases, mod)
[from_constructors, c_constructors, named_constructors]
end
def build_from_constructors(cases, mod) do
quote bind_quoted: [cases: cases, mod: mod] do
ext_cases = Enum.map(cases, &Constructors.extended_case_tag_definition/1)
for {variant_case, case_tag, case_tag_match_ast, _case_tag_str, count} <- ext_cases do
case_tuple_match_ast = Constructors.case_tuple_match_ast variant_case
case_tuple_spec_ast = Constructors.case_tuple_spec_ast variant_case
@spec from!(unquote(case_tuple_spec_ast)) :: %__MODULE__{
case: unquote(case_tuple_spec_ast)
}
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. This works at run-time, to have a compile-time guarantees, use
`#{mod}.from/1` macro. When an undefined union case is supplied it will
raise an error at run-time.
"""
def from!(case_tuple = unquote(case_tuple_match_ast) ) do
%__MODULE__{case: case_tuple}
end
@spec from!(unquote(case_tuple_spec_ast), any) :: %__MODULE__{
case: unquote(case_tuple_spec_ast)
}
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. This works at run-time, to have a compile-time guarantees, use
`#{Utils.module_name mod}.from/1` macro. When an undefined union case
is supplied it will return second argument.
"""
def from!(case_tuple = unquote(case_tuple_match_ast), _) do
%__MODULE__{case: case_tuple}
end
case count do
0 ->
defmacro from(unquote(case_tag_match_ast)) do
case_clause = unquote(case_tag)
mod = unquote(mod)
quote do: %{__struct__: unquote(mod), case: unquote(case_clause)}
end
1 ->
defmacro from({unquote(case_tag_match_ast), arg}) do
case_clause = {unquote(case_tag), arg}
mod = unquote(mod)
quote do: %{__struct__: unquote(mod), case: unquote(case_clause)}
end
_ ->
args = for i <- 1..count, do: Macro.var(:"v#{i}", nil)
defmacro from(case_tuple = {:{}, _, [unquote(case_tag_match_ast) | args]})
when length(args) == unquote(count) do
case_clause = case_tuple
mod = unquote(mod)
quote do: %{__struct__: unquote(mod), case: unquote(case_clause)}
end
end
end
# default fallbacks raising errors
def from!(_case_tuple, ret), do: ret
def from!(case_tuple) when is_tuple case_tuple do
ctl = Tuple.to_list case_tuple
case_tuple = quote do {unquote_splicing(ctl)} end
Case.raise_undefined_union_case case_tuple, at: :runtime
end
def from!(case_tag) when is_atom case_tag do
Case.raise_undefined_union_case case_tag, at: :runtime
end
defmacro from(case_tag) do
Case.raise_undefined_union_case case_tag, at: :compiletime
end
end
end
defp build_c_constructors(cases, mod) do
quote bind_quoted: [cases: cases, mod: mod] do
# construct `c` and `from` macros and `c!` functions
ext_cases = Enum.map(cases, &Constructors.extended_case_tag_definition/1)
for {variant_case, case_tag, case_tag_match_ast, case_tag_str, 0} <- ext_cases do
case_params_spec_ast = Constructors.case_spec_ast_params_list variant_case
@doc """
Constructs a valid `#{case_tag_str}` case for `#{Utils.module_name mod}`
discriminated union. Works at compile-time and will raise an error when
unknown union case is used.
"""
defmacro c(unquote(case_tag_match_ast)) do
case_clause = unquote(case_tag)
mod = unquote(mod)
quote do
%{__struct__: unquote(mod), case: unquote(case_clause)}
end
end
@spec c!(unquote_splicing(case_params_spec_ast)) :: %__MODULE__{
case: (unquote_splicing(case_params_spec_ast))
}
def c!(case_tag = unquote(case_tag)) do
from!(case_tag)
end
end
for {variant_case, case_tag, case_tag_match_ast, case_tag_str, count} <- ext_cases,
count > 0 do
case_params_spec_ast = Constructors.case_spec_ast_params_list variant_case
args = for i <- 1..count, do: Macro.var(:"v#{i}", nil)
defmacro c(unquote(case_tag_match_ast), unquote_splicing(args)) do
case_tag = unquote(case_tag)
args = unquote(args)
mod = unquote(mod)
quote do: %{__struct__: unquote(mod),
case: {unquote(case_tag), unquote_splicing(args)}}
end
@spec c!(unquote_splicing(case_params_spec_ast)) :: %__MODULE__{
case: {unquote_splicing(case_params_spec_ast)}
}
def c!(case_tag = unquote(case_tag), unquote_splicing(args)) do
from!({case_tag, unquote_splicing(args)})
end
end
# default fallbacks raising errors
ext_cases_grouped_by_arity = Enum.group_by(ext_cases, &elem(&1, 4))
for {0, _} <- ext_cases_grouped_by_arity do
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. Works at compile-time and will raise an error when unknown union
case is used.
"""
defmacro c(case_tag) do
Case.raise_undefined_union_case case_tag, at: :compiletime
end
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. Works at run-time and will raise an error when unknown union
case is used.
"""
def c!(case_tag) do
from!(case_tag)
end
end
for {count, _} <- ext_cases_grouped_by_arity, count > 0 do
args = for i <- 1..count, do: Macro.var(:"v#{i}", nil)
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. Works at compile-time and will raise an error when unknown union
case is used.
"""
defmacro c(case_tag, unquote_splicing(args)) do
args = unquote(args)
case_tuple = quote do: {unquote(case_tag), unquote_splicing(args)}
Case.raise_undefined_union_case case_tuple, at: :compiletime
end
@doc """
Constructs a valid case for `#{Utils.module_name mod}` discriminated
union. Works at run-time and will raise an error when unknown union
case is used.
"""
def c!(case_tag, unquote_splicing(args)) do
# let from! do the fallback
from!({case_tag, unquote_splicing(args)})
end
end
defoverridable [from!: 1, from!: 2]
end
end
def build_named_constructors(cases, mod) do
quote bind_quoted: [cases: cases, mod: mod] do
if true == Module.get_attribute __MODULE__, :named_constructors do
ext_cases = Enum.map(cases, &Constructors.extended_case_tag_definition/1)
for {_variant_case, case_tag, _case_tag_match_ast, case_tag_str, 0} <- ext_cases do
named_constructors_name = Constructors.named_constructors_name(case_tag)
@doc """
Constructs a valid `#{case_tag_str}` case for
`#{Utils.module_name mod}` discriminated union. Works at
compile-time and will raise an error when unknown union case is used.
"""
defmacro unquote(named_constructors_name)() do
# {:%, [], [{:__aliases__, [alias: false], [__MODULE__]}, {:%{}, [], [case: unquote(case_tag)]}]}
mod = unquote(mod)
case_tag = unquote(case_tag)
quote do: %{__struct__: unquote(mod), case: unquote(case_tag)}
end
end
for {_variant_case, case_tag, _case_tag_match_ast, case_tag_str, count} <- ext_cases,
count > 0 do
args = for i <- 1..count, do: Macro.var(:"v#{i}", nil)
named_constructors_name = Constructors.named_constructors_name(case_tag)
@doc """
Constructs a valid `#{case_tag_str}` case for
`#{Utils.module_name mod}` discriminated union. Works at compile-time
and will raise an error when unknown union case is used.
"""
defmacro unquote(named_constructors_name)(unquote_splicing(args)) do
mod = unquote(mod)
args = unquote(args)
case_tag = unquote(case_tag)
quote do: %{__struct__: unquote(mod), case: {unquote(case_tag), unquote_splicing(args)}}
end
end
end
end
end
@doc """
Builds AST for matching a case. When the case is a single atom, the AST
looks the same. For tuples, the first element (the union tag) is kept, but
the rest is replaced with underscore `_` to match anything.
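For example (illustrative):

    case_tuple_match_ast(:None)      #=> :None
    case_tuple_match_ast({:Some, 1}) #=> AST for `{:Some, _}`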
"""
@spec case_tuple_match_ast(Macro.expr) :: Macro.expr
def case_tuple_match_ast(union_case) do
union_case = union_case |> Macro.escape
case union_case do
x when is_atom(x) ->
x
{op, ctx, [c | cs]} when op in [:{}, :__aliases__] and is_atom(c) ->
cs = cs |> Enum.map(fn _ ->
quote do: _
end)
{:{}, ctx, [c |cs]}
{c, _} when c |> is_atom -> # 2-tuple
cs = [quote do: _]
{:{}, [], [c | cs ]}
end
end
def case_spec_ast_params_list(union_case) do
union_case = union_case |> Macro.escape
case union_case do
x when is_atom(x) ->
[x]
{op, ctx, [c | cs]} when op in [:{}, :__aliases__] and is_atom(c) ->
cs = cs |> Enum.map(fn arg ->
"quote do #{arg} end"
|> Code.eval_string([], ctx)
|> elem(0)
end)
[c | cs]
{c, cs} when c |> is_atom -> # 2-tuple
cs =
"quote do #{cs} end"
|> Code.eval_string([], __ENV__)
|> elem(0)
[c, cs]
end
end
@spec case_tuple_spec_ast(Macro.expr) :: Macro.expr
def case_tuple_spec_ast(union_case) do
specs = union_case |> case_spec_ast_params_list
case specs do
[x] ->
x
_ -> # 2-tuple
{:{}, [], specs}
end
end
def named_constructors_name(variant_case) do
case_tag = case variant_case |> to_string do
"Elixir." <> case_tag -> case_tag
case_tag -> case_tag
end
case_tag
|> Macro.underscore
|> String.to_atom
end
def extended_case_tag_definition(variant_case) do
{case_tag, count} = case variant_case do
variant_case when is_atom variant_case ->
{variant_case, 0}
variant_case when is_tuple(variant_case)
and is_atom(elem variant_case, 0) ->
{variant_case |> elem(0), tuple_size(variant_case) - 1 }
end
case_tag_str = case_tag |> Macro.to_string
case_tag_match_ast = case case_tag |> to_string do
"Elixir." <> m ->
{:{}, [], [:__aliases__,
{:_, [], Elixir},
[m |> String.to_atom]]}
_ -> case_tag
end
{variant_case, case_tag, case_tag_match_ast, case_tag_str, count}
end
end
|
lib/utils/constructors.ex
| 0.778355
| 0.494324
|
constructors.ex
|
starcoder
|
defmodule VendingMachine.MakeItemStock do
@item %{
A1: %{name: "Coke", display: "🍹", department: "Soft Drink", price: 1.0, quantity: 20},
A2: %{name: "Sprite", display: "🍹", department: "Soft Drink", price: 0.5, quantity: 20},
A3: %{name: "Pepsi", display: "🍹", department: "Soft Drink", price: 0.65, quantity: 20},
A4: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 1.0, quantity: 20},
A5: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 0.5, quantity: 20},
A6: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 0.65, quantity: 20},
A7: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 1.0, quantity: 20},
A8: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 0.5, quantity: 20},
A9: %{name: "Fanta", display: "🍹", department: "Soft Drink", price: 0.65, quantity: 20},
A10: %{name: "<NAME>", display: "🍹", department: "Soft Drink", price: 2.65, quantity: 20},
B1: %{name: "Cheetos", display: "🍿", department: "Snacks", price: 1.0, quantity: 20},
B2: %{name: "Lays", display: "🍿", department: "Snacks", price: 0.5, quantity: 20},
B3: %{name: "Pingles", display: "🍿", department: "Snacks", price: 0.65, quantity: 20},
B4: %{name: "Daritos", display: "🍿", department: "Snacks", price: 1.0, quantity: 20},
B5: %{name: "<NAME>", display: "🍿", department: "Snacks", price: 0.5, quantity: 20},
B6: %{name: "Fritoos", display: "🍿", department: "Snacks", price: 0.65, quantity: 20},
B7: %{name: "Ruffles", display: "🍿", department: "Snacks", price: 1.0, quantity: 20},
B8: %{name: "Bugles", display: "🍿", department: "Snacks", price: 0.5, quantity: 20},
B9: %{name: "Chesters", display: "🍿", department: "Snacks", price: 0.65, quantity: 20},
B10: %{name: "Tostitos", display: "🍿", department: "Snacks", price: 1.0, quantity: 20},
C1: %{name: "Essentia", display: "🌊", department: "Water", price: 1.0, quantity: 20},
C2: %{name: "Propel", display: "🌊", department: "Water", price: 0.5, quantity: 20},
C3: %{name: "<NAME>", display: "🌊", department: "Water", price: 0.65, quantity: 20},
C4: %{name: "Uniquely", display: "🌊", department: "Water", price: 1.0, quantity: 20},
C5: %{name: "Fiji", display: "🌊", department: "Water", price: 0.5, quantity: 20},
C6: %{name: "Laugh", display: "🌊", department: "Water", price: 0.65, quantity: 20},
C7: %{name: "<NAME>", display: "🌊", department: "Water", price: 1.0, quantity: 20},
C8: %{name: "Ozarka", display: "🌊", department: "Water", price: 0.5, quantity: 20},
C9: %{name: "Aquafina", display: "🌊", department: "Water", price: 0.65, quantity: 20},
C10: %{name: "<NAME>", display: "🌊", department: "Water", price: 1.0, quantity: 20},
D1: %{name: "Hershey's", display: "🍫", department: "Chocolate", price: 1.0, quantity: 20},
D2: %{name: "Kisses", display: "🍫", department: "Chocolate", price: 0.5, quantity: 20},
D3: %{name: "Reese's", display: "🍫", department: "Chocolate", price: 0.65, quantity: 20},
D4: %{name: "M&M's", display: "🍫", department: "Chocolate", price: 1.0, quantity: 20},
D5: %{name: "Dove", display: "🍫", department: "Chocolate", price: 0.5, quantity: 20},
D6: %{name: "Ghirardelli", display: "🍫", department: "Chocolate", price: 0.65, quantity: 20},
D7: %{name: "KitKat", display: "🍫", department: "Chocolate", price: 1.0, quantity: 20},
D8: %{name: "Snickers", display: "🍫", department: "Chocolate", price: 0.5, quantity: 20},
D9: %{
name: "<NAME>",
display: "🍫",
department: "Chocolate",
price: 0.65,
quantity: 20
},
D10: %{name: "Lindt", display: "🍫", department: "Chocolate", price: 1.0, quantity: 20},
E1: %{name: "Bisquick", display: "🍪", department: "Cookies", price: 1.0, quantity: 20},
E2: %{name: "<NAME>", display: "🍪", department: "Cookies", price: 0.5, quantity: 20},
E3: %{name: "Britannia", display: "🍪", department: "Cookies", price: 0.65, quantity: 20},
E4: %{name: "Glico", display: "🍪", department: "Cookies", price: 1.0, quantity: 20},
E5: %{name: "<NAME>", display: "🍪", department: "Cookies", price: 0.5, quantity: 20},
E6: %{name: "McVitie's", display: "🍪", department: "Cookies", price: 0.65, quantity: 20},
E7: %{name: "Leibniz", display: "🍪", department: "Cookies", price: 1.0, quantity: 20},
E8: %{name: "Kedem", display: "🍪", department: "Cookies", price: 0.5, quantity: 20},
E9: %{name: "Walkers", display: "🍪", department: "Cookies", price: 0.65, quantity: 20},
E10: %{name: "Biscoff", display: "🍪", department: "Cookies", price: 1.0, quantity: 20},
F1: %{name: "Planters", display: "🌰", department: "Dry Fruits", price: 1.0, quantity: 20},
F2: %{name: "Emerald", display: "🌰", department: "Dry Fruits", price: 0.5, quantity: 20},
F3: %{name: "Medallion", display: "🌰", department: "Dry Fruits", price: 0.65, quantity: 20},
F4: %{name: "Wonderful", display: "🌰", department: "Dry Fruits", price: 1.0, quantity: 20},
F5: %{name: "<NAME>", display: "🌰", department: "Dry Fruits", price: 0.5, quantity: 20},
F6: %{name: "Fisher", display: "🌰", department: "Dry Fruits", price: 0.65, quantity: 20},
F7: %{
name: "<NAME>",
display: "🌰",
department: "Dry Fruits",
price: 1.0,
quantity: 20
},
F8: %{name: "<NAME>", display: "🌰", department: "Dry Fruits", price: 0.5, quantity: 20},
F9: %{name: "GourmetNut", display: "🌰", department: "Dry Fruits", price: 0.65, quantity: 20},
F10: %{name: "<NAME>", display: "🌰", department: "Dry Fruits", price: 1.0, quantity: 20},
G1: %{name: "<NAME>", display: "🍺", department: "Beer", price: 1.0, quantity: 20},
G2: %{name: "Corona", display: "🍺", department: "Beer", price: 0.5, quantity: 20},
G3: %{name: "Heinenken", display: "🍺", department: "Beer", price: 0.65, quantity: 20},
G4: %{name: "Michelob", display: "🍺", department: "Beer", price: 1.0, quantity: 20},
G5: %{name: "Coors", display: "🍺", department: "Beer", price: 0.5, quantity: 20},
G6: %{name: "Modelo", display: "🍺", department: "Beer", price: 0.65, quantity: 20},
G7: %{name: "Budweiser", display: "🍺", department: "Beer", price: 1.0, quantity: 20},
G8: %{name: "<NAME>", display: "🍺", department: "Beer", price: 0.5, quantity: 20},
G9: %{name: "Busch", display: "🍺", department: "Beer", price: 0.65, quantity: 20},
G10: %{name: "Pabst", display: "🍺", department: "Beer", price: 1.0, quantity: 20},
H1: %{name: "<NAME>", display: "🍠", department: "Bar", price: 1.0, quantity: 20},
H2: %{name: "Kashi", display: "🍠", department: "Bar", price: 0.5, quantity: 20},
H3: %{name: "Chewy", display: "🍠", department: "Bar", price: 0.65, quantity: 20},
H4: %{name: "Kind", display: "🍠", department: "Bar", price: 1.0, quantity: 20},
H5: %{name: "Larabar", display: "🍠", department: "Bar", price: 0.5, quantity: 20},
H6: %{name: "Kellogg's", display: "🍠", department: "Bar", price: 0.65, quantity: 20},
H7: %{name: "<NAME>", display: "🍠", department: "Bar", price: 1.0, quantity: 20},
H8: %{name: "Newtons", display: "🍠", department: "Bar", price: 0.5, quantity: 20},
H9: %{name: "<NAME>", display: "🍠", department: "Bar", price: 0.65, quantity: 20},
H10: %{name: "<NAME>", display: "🍠", department: "Bar", price: 1.0, quantity: 20},
I1: %{name: "Carnation", display: "🥛", department: "Milk", price: 1.0, quantity: 20},
I2: %{name: "Lala", display: "🥛", department: "Milk", price: 0.5, quantity: 20},
I3: %{name: "<NAME>", display: "🥛", department: "Milk", price: 0.65, quantity: 20},
I4: %{name: "Atkins", display: "🥛", department: "Milk", price: 1.0, quantity: 20},
I5: %{name: "Enfagrow", display: "🥛", department: "Milk", price: 0.5, quantity: 20},
I6: %{name: "Go & Grow", display: "🥛", department: "Milk", price: 0.65, quantity: 20},
I7: %{name: "<NAME>", display: "🥛", department: "Milk", price: 1.0, quantity: 20},
I8: %{name: "<NAME>", display: "🥛", department: "Milk", price: 0.5, quantity: 20},
I9: %{name: "Mars", display: "🥛", department: "Milk", price: 0.65, quantity: 20},
I10: %{name: "Vitasoy", display: "🥛", department: "Milk", price: 1.0, quantity: 20},
J1: %{name: "Starbucks", display: "☕", department: "Coffee", price: 1.0, quantity: 20},
J2: %{name: "Kitu", display: "☕", department: "Coffee", price: 0.5, quantity: 20},
J3: %{name: "Califia", display: "☕", department: "Coffee", price: 0.65, quantity: 20},
J4: %{name: "Rise", display: "☕", department: "Coffee", price: 1.0, quantity: 20},
J5: %{name: "Bulletproof", display: "☕", department: "Coffee", price: 0.5, quantity: 20},
J6: %{name: "<NAME>", display: "☕", department: "Coffee", price: 0.65, quantity: 20},
J7: %{name: "McCafe", display: "☕", department: "Coffee", price: 1.0, quantity: 20},
J8: %{name: "<NAME>", display: "☕", department: "Coffee", price: 0.5, quantity: 20},
J9: %{name: "Foro", display: "☕", department: "Coffee", price: 0.65, quantity: 20},
J10: %{name: "Click", display: "☕", department: "Coffee", price: 1.0, quantity: 20}
}
def item, do: @item
end
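# Illustrative usage sketch (not part of the original file):
#
#   VendingMachine.MakeItemStock.item()[:A1].name  #=> "Coke"
#   VendingMachine.MakeItemStock.item()[:B2].price #=> 0.5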
|
lib/vending_machine/stock/make_item_stock.ex
| 0.648132
| 0.695829
|
make_item_stock.ex
|
starcoder
|
defmodule InterceptConfig do
@config %{
################# `Interceptor.intercept do ... end` tests
# on before tests
{InterceptedOnBefore1, :to_intercept, 0} => [before: {Before.Callback, :before, 1}],
{InterceptedOnBefore2, :to_intercept, 0} => [before: {Before.Callback, :before, 1}],
{InterceptedOnBefore2, :other_to_intercept, 0} => [before: {Before.Callback, :before, 1}],
{InterceptedOnBefore3, :other_to_intercept, 1} => [before: {Before.Callback, :before, 1}],
{InterceptedOnBefore4, :to_intercept, 0} => [before: {Before.Callback, :before, 1}],
# on after tests
{InterceptedOnAfter1, :to_intercept, 0} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter2, :to_intercept, 0} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter2, :other_to_intercept, 0} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter3, :other_to_intercept, 1} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter4, :to_intercept_guarded, 1} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter5, :it_has_threes, 1} => [after: {After.Callback, :right_after, 2}],
{InterceptedOnAfter5, :its_abc, 1} => [after: {After.Callback, :right_after, 2}],
# on success tests
{InterceptedOnSuccess1, :to_intercept, 0} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess2, :to_intercept, 0} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess2, :other_to_intercept, 0} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess3, :other_to_intercept, 1} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess3, :trickier_args_function, 6} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess4, :with_struct, 1} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess4, :with_structs, 2} => [on_success: {OnSuccess.Callback, :on_success, 3}],
{InterceptedOnSuccess4, :with_struct_already_assigned, 1} => [on_success: {OnSuccess.Callback, :on_success, 3}],
# on error tests
{InterceptedOnError1, :to_intercept, 0} => [on_error: {OnError.Callback, :on_error, 3}],
{InterceptedOnError2, :to_intercept, 0} => [on_error: {OnError.Callback, :on_error, 3}],
{InterceptedOnError2, :other_to_intercept, 0} => [on_error: {OnError.Callback, :on_error, 3}],
{InterceptedOnError3, :other_to_intercept, 1} => [on_error: {OnError.Callback, :on_error, 3}],
# wrapper tests
{InterceptedByWrapper1, :to_intercept, 0} => [wrapper: {Wrapper.Callback, :wrap_returns_result, 2}],
{InterceptedByWrapper2, :to_intercept, 0} => [wrapper: {Wrapper.Callback, :wrap_returns_result, 2}],
{InterceptedByWrapper2, :other_to_intercept, 0} => [wrapper: {Wrapper.Callback, :wrap_returns_result, 2}],
{InterceptedByWrapper3, :other_to_intercept, 1} => [wrapper: {Wrapper.Callback, :wrap_returns_result, 2}],
{InterceptedByWrapper4, :to_intercept, 0} => [wrapper: {Wrapper.Callback, :wrap_returns_hello, 2}],
# edge cases
{InterceptedEdgeCases1, :to_intercept, 3} => [on_success: {EdgeCases.Callbacks, :success_cb, 3}, on_error: {EdgeCases.Callbacks, :error_cb, 3}],
{InterceptedEdgeCases1, :intercept_with_prefix, 1} => [on_success: {EdgeCases.Callbacks, :success_cb, 3}, on_error: {EdgeCases.Callbacks, :error_cb, 3}],
{InterceptedEdgeCases1, :intercept_pattern_match_atom_argument, 2} => [on_success: {EdgeCases.Callbacks, :success_cb, 3}, on_error: {EdgeCases.Callbacks, :error_cb, 3}],
# wildcarded callbacks
{InterceptedWildcardedMfa1, :foo, :*} => [on_success: {WildcardedMfa.Callbacks, :success_cb, 3}, on_error: {WildcardedMfa.Callbacks, :error_cb, 3}],
{InterceptedWildcardedMfa2, :*, :*} => [on_success: {WildcardedMfa.Callbacks, :success_cb, 3}, on_error: {WildcardedMfa.Callbacks, :error_cb, 3}],
# these configs will be overridden by the module's own configuration
{InterceptedOnAfterOwnConfiguration1, :to_intercept, 0} => [after: {After.Callback, :right_after, 2}],
################# `@intercept :true` tests
# on before tests
{AnnotatedInterceptedOnBefore1, :to_intercept, 0} => [before: {AnnotatedBefore.Callback, :before, 1}],
{AnnotatedInterceptedOnBefore2, :to_intercept, 0} => [before: {AnnotatedBefore.Callback, :before, 1}],
{AnnotatedInterceptedOnBefore2, :other_to_intercept, 0} => [before: {AnnotatedBefore.Callback, :before, 1}],
{AnnotatedInterceptedOnBefore3, :other_to_intercept, 1} => [before: {AnnotatedBefore.Callback, :before, 1}],
{AnnotatedInterceptedOnBefore4, :to_intercept, 0} => [before: {AnnotatedBefore.Callback, :before, 1}],
# on after tests
{AnnotatedInterceptedOnAfter1, :to_intercept, 0} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter2, :to_intercept, 0} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter2, :other_to_intercept, 0} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter3, :other_to_intercept, 1} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter4, :to_intercept_guarded, 1} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter5, :it_has_threes, 1} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
{AnnotatedInterceptedOnAfter5, :its_abc, 1} => [after: {AnnotatedAfter.Callback, :right_after, 2}],
# on success tests
{AnnotatedInterceptedOnSuccess1, :to_intercept, 0} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess2, :to_intercept, 0} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess2, :other_to_intercept, 0} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess3, :other_to_intercept, 1} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess3, :trickier_args_function, 6} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess4, :with_struct, 1} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess4, :with_structs, 2} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
{AnnotatedInterceptedOnSuccess4, :with_struct_already_assigned, 1} => [on_success: {AnnotatedOnSuccess.Callback, :on_success, 3}],
# on error tests
{AnnotatedInterceptedOnError1, :to_intercept, 0} => [on_error: {AnnotatedOnError.Callback, :on_error, 3}],
{AnnotatedInterceptedOnError2, :to_intercept, 0} => [on_error: {AnnotatedOnError.Callback, :on_error, 3}],
{AnnotatedInterceptedOnError2, :other_to_intercept, 0} => [on_error: {AnnotatedOnError.Callback, :on_error, 3}],
{AnnotatedInterceptedOnError3, :other_to_intercept, 1} => [on_error: {AnnotatedOnError.Callback, :on_error, 3}],
# wrapper tests
{AnnotatedInterceptedByWrapper1, :to_intercept, 0} => [wrapper: {AnnotatedWrapper.Callback, :wrap_returns_result, 2}],
{AnnotatedInterceptedByWrapper2, :to_intercept, 0} => [wrapper: {AnnotatedWrapper.Callback, :wrap_returns_result, 2}],
{AnnotatedInterceptedByWrapper2, :other_to_intercept, 0} => [wrapper: {AnnotatedWrapper.Callback, :wrap_returns_result, 2}],
{AnnotatedInterceptedByWrapper3, :other_to_intercept, 1} => [wrapper: {AnnotatedWrapper.Callback, :wrap_returns_result, 2}],
{AnnotatedInterceptedByWrapper4, :to_intercept, 0} => [wrapper: {AnnotatedWrapper.Callback, :wrap_returns_hello, 2}],
# edge cases
{AnnotatedInterceptedEdgeCases1, :to_intercept, 3} => [on_success: {AnnotatedEdgeCases.Callbacks, :success_cb, 3}, on_error: {AnnotatedEdgeCases.Callbacks, :error_cb, 3}],
{AnnotatedInterceptedEdgeCases1, :intercept_with_prefix, 1} => [on_success: {AnnotatedEdgeCases.Callbacks, :success_cb, 3}, on_error: {AnnotatedEdgeCases.Callbacks, :error_cb, 3}],
{AnnotatedInterceptedEdgeCases1, :intercept_pattern_match_atom_argument, 2} => [on_success: {AnnotatedEdgeCases.Callbacks, :success_cb, 3}, on_error: {AnnotatedEdgeCases.Callbacks, :error_cb, 3}],
{AnnotatedInterceptedEdgeCases2, :to_intercept, 3} => [on_success: {AnnotatedEdgeCases.Callbacks, :success_cb, 3}, on_error: {AnnotatedEdgeCases.Callbacks, :error_cb, 3}],
{AnnotatedInterceptedEdgeCases2, :intercept_with_prefix, 1} => [on_success: {AnnotatedEdgeCases.Callbacks, :success_cb, 3}, on_error: {AnnotatedEdgeCases.Callbacks, :error_cb, 3}],
# wildcarded callbacks
{AnnotatedInterceptedWildcardedMfa1, :foo, :*} => [on_success: {AnnotatedWildcardedMfa.Callbacks, :success_cb, 3}, on_error: {AnnotatedWildcardedMfa.Callbacks, :error_cb, 3}],
{AnnotatedInterceptedWildcardedMfa2, :*, :*} => [on_success: {AnnotatedWildcardedMfa.Callbacks, :success_cb, 3}, on_error: {AnnotatedWildcardedMfa.Callbacks, :error_cb, 3}],
# these configs will be overridden by the module's own configuration
{AnnotatedInterceptedOnAfterOwnConfiguration1, :to_intercept, 0} => [after: {After.Callback, :right_after, 2}]
}
def get_intercept_config(), do: @config
end
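# Illustrative lookup sketch (not part of the original file):
#
#   InterceptConfig.get_intercept_config()
#   |> Map.get({InterceptedOnBefore1, :to_intercept, 0})
#   #=> [before: {Before.Callback, :before, 1}]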
|
test/intercepted_modules/intercept_config.ex
| 0.665628
| 0.733285
|
intercept_config.ex
|
starcoder
|
defmodule Surface.AST do
@type t ::
Surface.AST.Text.t()
| Surface.AST.Interpolation.t()
| Surface.AST.Expr.t()
| Surface.AST.Tag.t()
| Surface.AST.Template.t()
| Surface.AST.Slot.t()
| Surface.AST.If.t()
| Surface.AST.For.t()
| Surface.AST.Container.t()
| Surface.AST.Component.t()
| Surface.AST.SlotableComponent.t()
| Surface.AST.Error.t()
end
defmodule Surface.AST.Container do
@moduledoc """
An AST node representing a container of other nodes. This does not
have content itself, just contains children which have content, and
directives that operate on the entirety of the children (e.g. for, if, debug)
## Properties
* `:children` - children AST nodes
* `:directives` - directives associated with this container
* `:meta` - compile meta
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:children, :directives, :meta, debug: []]
@type t :: %__MODULE__{
children: list(Surface.AST.t()),
debug: list(atom()),
meta: Surface.AST.Meta.t(),
directives: list(Surface.AST.Directive.t())
}
end
defmodule Surface.AST.Expr do
@moduledoc """
An AST node representing an expression which does not resolve to a value printed out to the final DOM.
## Properties
* `:value` - a quoted expression
* `:meta` - compile meta
"""
defstruct [:value, :meta]
@type t :: %__MODULE__{
# quoted expression
value: any(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Meta do
@moduledoc """
A container for metadata about compilation.
## Properties
* `:line` - the line from the source code where the parent was extracted
* `:module` - the component module (e.g. `Surface.Components.LivePatch`)
* `:node_alias` - the alias used inside the source code (e.g. `LivePatch`)
* `:file` - the file from which the source was extracted
* `:line_offset` - the line offset from the caller's line to the start of this source
* `:caller` - a Macro.Env struct representing the caller
"""
@derive {Inspect, only: [:line, :module, :node_alias, :file]}
defstruct [:line, :module, :node_alias, :line_offset, :file, :caller]
@type t :: %__MODULE__{
line: non_neg_integer(),
line_offset: non_neg_integer(),
module: atom(),
node_alias: binary() | nil,
caller: Macro.Env.t(),
file: binary()
}
def quoted_caller_cid(meta) do
cond do
Module.open?(meta.caller.module) and
Module.get_attribute(meta.caller.module, :component_type) == Surface.LiveComponent ->
quote generated: true do
@myself
end
true ->
nil
end
end
end
defmodule Surface.AST.Directive do
@moduledoc """
An AST node representing a directive
## Properties
* `:module` - the module which implements logic for this directive (e.g. `Surface.Directive.Let`)
* `:name` - the name of the directive (e.g. `:let`)
* `:value` - the code/configuration for this directive. typically a quoted expression
* `:meta` - compilation meta data
"""
defstruct [:module, :name, :value, :meta]
@type t :: %__MODULE__{
module: atom(),
name: atom(),
# the value here is defined by the individual directive
value: Surface.AST.AttributeExpr.t() | Surface.AST.Text.t() | nil,
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.For do
@moduledoc """
An AST node representing a for comprehension.
## Properties
* `:generator` - a quoted expression
* `:children` - the children to collect over the generator
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:generator, :children, :meta, debug: []]
@type t :: %__MODULE__{
generator: any(),
debug: list(atom()),
children: list(Surface.AST.t()),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.If do
@moduledoc """
An AST node representing an if expression
## Properties
* `:condition` - a quoted expression
* `:children` - the children to insert into the dom if the condition evaluates truthy
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:condition, :children, :meta, debug: []]
@type t :: %__MODULE__{
condition: any(),
debug: list(atom()),
children: list(Surface.AST.t()),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Attribute do
@moduledoc """
An AST node representing an attribute or property
## Properties
* `:type` - an atom representing the type of attribute. See Surface.API for the list of supported types
* `:name` - the name of the attribute (e.g. `:class`)
* `:value` - a list of nodes that can be concatenated to form the value for this attribute. Potentially contains dynamic data
* `:meta` - compilation meta data
"""
defstruct [:name, :type, :value, :meta]
@type t :: %__MODULE__{
type: atom(),
name: atom(),
value: Surface.AST.Text.t() | Surface.AST.AttributeExpr.t(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.DynamicAttribute do
@moduledoc """
An AST node representing a dynamic attribute (or attributes).
## Properties
* `:expr` - an expression which will generate a keyword list of attributes and value tuples of the form {type, value}
* `:meta` - compilation meta data
"""
defstruct [:name, :expr, :meta]
@type t :: %__MODULE__{
expr: Surface.AST.AttributeExpr.t(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.AttributeExpr do
@moduledoc """
An AST node representing an attribute expression (i.e. a dynamic value for an attribute, directive, or property)
## Properties
* `:original` - the original text, useful for debugging and error messages
* `:value` - a quoted expression
* `:meta` - compilation meta data
"""
defstruct [:original, :value, :meta]
@type t :: %__MODULE__{
# quoted
value: any(),
original: binary(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Interpolation do
@moduledoc """
An AST node representing interpolation within a node
## Properties
* `:original` - the original text, useful for debugging and error messages
* `:value` - a quoted expression
* `:meta` - compilation meta data
"""
defstruct [:original, :value, :meta]
@type t :: %__MODULE__{
original: binary(),
# quoted
value: any(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Slot do
@moduledoc """
An AST node representing a <slot /> element
## Properties
* `:name` - the slot name
* `:default` - a list of AST nodes representing the default content for this slot
* `:props` - either an atom or a quoted expression representing bindings for this slot
* `:meta` - compilation meta data
"""
defstruct [:name, :props, :default, :meta]
@type t :: %__MODULE__{
name: binary(),
meta: Surface.AST.Meta.t(),
# quoted ?
props: Surface.AST.Directive.t(),
default: list(Surface.AST.t())
}
end
# TODO differentiate between raw HTML and plain text ?
defmodule Surface.AST.Text do
@moduledoc """
An AST node representing static text
## Properties
* `:value` - the text
"""
defstruct [:value]
@type t :: %__MODULE__{
value: binary | boolean
}
end
defmodule Surface.AST.Tag do
@moduledoc """
An AST node representing a standard HTML tag
## Properties
* `:element` - the element name
* `:attributes` - the attributes for this tag
* `:directives` - any directives to be applied to this tag
* `:children` - the tag children
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:element, :attributes, :directives, :children, :meta, debug: []]
@type t :: %__MODULE__{
element: binary(),
debug: list(atom()),
attributes: list(Surface.AST.Attribute.t() | Surface.AST.DynamicAttribute.t()),
directives: list(Surface.AST.Directive.t()),
children: list(Surface.AST.t()),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.VoidTag do
@moduledoc """
An AST node representing a void (empty) HTML tag
## Properties
* `:element` - the element name
* `:attributes` - the attributes for this tag
* `:directives` - any directives to be applied to this tag
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:element, :attributes, :directives, :meta, debug: []]
@type t :: %__MODULE__{
element: binary(),
debug: list(atom()),
attributes: list(Surface.AST.Attribute.t() | Surface.AST.DynamicAttribute.t()),
directives: list(Surface.AST.Directive.t()),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Template do
@moduledoc """
An AST node representing a <template> element. This is used to provide content for slots
## Properties
* `:name` - the template name
* `:let` - the bindings for this template
* `:children` - the template children
* `:meta` - compilation meta data
"""
defstruct [:name, :children, :let, :meta]
@type t :: %__MODULE__{
name: atom(),
children: list(Surface.AST.t()),
# quoted?
let: Surface.AST.Directive.t(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Error do
@moduledoc """
An AST node representing an error. This will be rendered as an html element.
## Properties
* `:message` - the error message
* `:meta` - compilation meta data
"""
defstruct [:message, :meta]
@type t :: %__MODULE__{
message: binary(),
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.Component do
@moduledoc """
An AST node representing a component
## Properties
* `:module` - the component module
* `:type` - the type of component (i.e. Surface.LiveComponent vs Surface.LiveView)
* `:props` - the props for this component
* `:directives` - any directives to be applied to this tag
* `:templates` - the templates provided for this component's slots
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [:module, :type, :props, :dynamic_props, :directives, :templates, :meta, debug: []]
@type t :: %__MODULE__{
module: module(),
debug: list(atom()),
type: module(),
props: list(Surface.AST.Attribute.t()),
dynamic_props: Surface.AST.DynamicAttribute.t(),
directives: list(Surface.AST.Directive.t()),
templates: %{
:default => list(Surface.AST.Template.t() | Surface.AST.SlotableComponent.t()),
optional(atom()) => list(Surface.AST.Template.t())
},
meta: Surface.AST.Meta.t()
}
end
defmodule Surface.AST.SlotableComponent do
@moduledoc """
An AST node representing a slotable component, i.e. a component that provides content for a named slot of its parent
## Properties
* `:module` - the component module
* `:type` - the type of component (i.e. Surface.LiveComponent vs Surface.Component)
* `:slot` - the name of the slot that this component is for
* `:let` - the bindings for this template
* `:props` - the props for this component
* `:directives` - any directives to be applied to this tag
* `:templates` - the templates provided for this component's slots
* `:meta` - compilation meta data
* `:debug` - keyword list indicating when debug information should be printed during compilation
"""
defstruct [
:module,
:slot,
:type,
:let,
:props,
:dynamic_props,
:directives,
:templates,
:meta,
debug: []
]
@type t :: %__MODULE__{
module: module(),
debug: list(atom()),
type: module(),
slot: atom(),
let: Surface.AST.Directive.t(),
props: list(Surface.AST.Attribute.t()),
dynamic_props: Surface.AST.DynamicAttribute.t(),
directives: list(Surface.AST.Directive.t()),
templates: %{
:default => list(Surface.AST.Template.t()),
optional(atom()) => list(Surface.AST.Template.t())
},
meta: Surface.AST.Meta.t()
}
end
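# Illustrative construction sketch (not part of the original file); `meta` is
# assumed to be a %Surface.AST.Meta{} built by the compiler for the node:
#
#   %Surface.AST.Text{value: "hello"}
#   %Surface.AST.Interpolation{original: "@name", value: quote(do: @name), meta: meta}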
|
lib/surface/ast.ex
| 0.903166
| 0.61144
|
ast.ex
|
starcoder
|
defmodule Phoenix.Socket.Transport do
@moduledoc """
API for building transports.
This module describes what is required to build a Phoenix transport.
The transport sits between the socket and channels, forwarding client
messages to channels and vice-versa.
A transport is responsible for:
* Implementing the transport behaviour
* Establishing the socket connection
* Handling of incoming messages
* Handling of outgoing messages
* Managing channels
* Providing secure defaults
## The transport behaviour
The transport requires two functions:
* `default_config/0` - returns the default transport configuration
to be merged when the transport is declared in the socket module
* `handlers/0` - returns a map of handlers. For example, if the
transport is implemented via Cowboy, it just needs to specify the
appropriate Cowboy handler
## Socket connections
Once a connection is established, the transport is responsible
for invoking the `Phoenix.Socket.connect/2` callback and acting
accordingly. Once connected, the transport should request the
`Phoenix.Socket.id/1` and subscribe to the topic if one exists.
Once subscribed, the transport must be able to handle "disconnect"
broadcasts on the given id topic.
The `connect/6` function in this module can be used as a
convenience or as documentation of these steps.
## Incoming messages
Incoming messages are encoded in whatever way the transport
chooses. Those messages must be decoded in the transport into a
`Phoenix.Socket.Message` before being forwarded to a channel.
Most of those messages are user messages except:
* "heartbeat" events in the "phoenix" topic - should just emit
an OK reply
* "phx_join" on any topic - should join the topic
* "phx_leave" on any topic - should leave the topic
The function `dispatch/3` can be used to handle these messages.
## Outgoing messages
Channels can send two types of message back to a transport:
`Phoenix.Socket.Message` and `Phoenix.Socket.Reply`. Those
messages are encoded in the channel into a format defined by
the transport. That's why transports are required to pass a
serializer that abides by the behaviour described in
`Phoenix.Transports.Serializer`.
## Managing channels
Because channels are spawned from the transport process, transports
must trap exits and correctly handle the `{:EXIT, _, _}` messages
arriving from channels, relaying the proper response to the client.
The following events are sent by the transport when a channel exits:
* `"phx_close"` - The channel has exited gracefully
* `"phx_error"` - The channel has crashed
The `on_exit_message/3` function aids in constructing these messages.
## Duplicate Join Subscriptions
For a given topic, the client may only establish a single channel
subscription. When attempting to create a duplicate subscription,
`dispatch/3` will close the existing channel, log a warning, and
spawn a new channel for the topic. When sending the `"phx_close"`
event from the closed channel, the message will contain the `ref` the
client sent when joining. This allows the client to uniquely identify
`"phx_close"` and `"phx_error"` messages when force-closing a channel
on duplicate joins.
## Security
This module also provides functions to enable a secure environment
on transports that, at some point, have access to a `Plug.Conn`.
The functionality provided by this module helps in performing "origin"
header checks and ensuring only SSL connections are allowed.
## Remote Client
Channels can reply, synchronously, to any `handle_in/3` event. To match
pushes with replies, clients must include a unique `ref` with every
message and the channel server will reply with a matching ref where
the client can pick up the callback for the matching reply.
Phoenix includes a JavaScript client for WebSocket and Longpolling
support using JSON encodings.
Clients can be implemented for other protocols and encodings by
abiding by the `Phoenix.Socket.Message` format.
## Protocol Versioning
Clients are expected to send the Channel Transport protocol version that they
expect to be talking to. The version can be retrieved on the server from
`Phoenix.Socket.Transport.protocol_version/0`. If no version is provided, the
transport adapters should assume a `"1.0.0"` version number by default.
See `web/static/js/phoenix.js` for an example transport client
implementation.
"""
require Logger
alias Phoenix.Socket
alias Phoenix.Socket.Message
alias Phoenix.Socket.Reply
@protocol_version "1.0.0"
@client_vsn_requirements "~> 1.0"
@doc """
Provides a keyword list of default configuration for socket transports.
"""
@callback default_config() :: Keyword.t
@doc """
Returns the Channel Transport protocol version.
"""
def protocol_version, do: @protocol_version
@doc """
Handles the socket connection.
It builds a new `Phoenix.Socket`, invokes the handler
`connect/2` callback and returns the result.
If the connection was successful, generates `Phoenix.PubSub`
topic from the `id/1` callback.
"""
def connect(endpoint, handler, transport_name, transport, serializer, params) do
vsn = params["vsn"] || "1.0.0"
if Version.match?(vsn, @client_vsn_requirements) do
connect_vsn(endpoint, handler, transport_name, transport, serializer, params)
else
Logger.error "The client's requested channel transport version \"#{vsn}\" " <>
"does not match server's version requirements of \"#{@client_vsn_requirements}\""
:error
end
end
defp connect_vsn(endpoint, handler, transport_name, transport, serializer, params) do
socket = %Socket{endpoint: endpoint,
transport: transport,
transport_pid: self(),
transport_name: transport_name,
handler: handler,
pubsub_server: endpoint.__pubsub_server__,
serializer: serializer}
case handler.connect(params, socket) do
{:ok, socket} ->
case handler.id(socket) do
nil -> {:ok, socket}
id when is_binary(id) -> {:ok, %Socket{socket | id: id}}
invalid ->
Logger.error "#{inspect handler}.id/1 returned invalid identifier #{inspect invalid}. " <>
"Expected nil or a string."
:error
end
:error ->
:error
invalid ->
Logger.error "#{inspect handler}.connect/2 returned invalid value #{inspect invalid}. " <>
"Expected {:ok, socket} or :error"
:error
end
end
@doc """
Dispatches `Phoenix.Socket.Message` to a channel.
All serialized, remote client messages should be deserialized and
forwarded through this function by adapters.
The following returns must be handled by transports:
* `:noreply` - Nothing to be done by the transport
* `{:reply, reply}` - The reply to be sent to the client
* `{:joined, channel_pid, reply}` - The channel was joined
and the reply must be sent as result
* `{:error, reason, reply}` - An error occurred and the reply
must be sent as result
## Parameter filtering on join
When logging parameters, Phoenix can filter out sensitive parameters
such as passwords and tokens. Parameters to be filtered can be added
via the `:filter_parameters` option:
config :phoenix, :filter_parameters, ["password", "<PASSWORD>"]
With the configuration above, Phoenix will filter any parameter
that contains the terms `password` or `secret`. The match is
case sensitive.
Phoenix's default is `["password"]`.
"""
def dispatch(msg, channels, socket)
def dispatch(%{ref: ref, topic: "phoenix", event: "heartbeat"}, _channels, _socket) do
{:reply, %Reply{ref: ref, topic: "phoenix", status: :ok, payload: %{}}}
end
def dispatch(%Message{} = msg, channels, socket) do
channels
|> Map.get(msg.topic)
|> do_dispatch(msg, socket)
end
defp do_dispatch(nil, %{event: "phx_join", topic: topic} = msg, socket) do
if channel = socket.handler.__channel__(topic, socket.transport_name) do
socket = %Socket{socket | topic: topic, channel: channel}
case Phoenix.Channel.Server.join(socket, msg.payload) do
{:ok, response, pid} ->
log_info topic, fn -> "Replied #{topic} :ok" end
{:joined, pid, %Reply{ref: msg.ref, topic: topic, status: :ok, payload: response}}
{:error, reason} ->
log_info topic, fn -> "Replied #{topic} :error" end
{:error, reason, %Reply{ref: msg.ref, topic: topic, status: :error, payload: reason}}
end
else
reply_ignore(msg, socket)
end
end
defp do_dispatch(pid, %{event: "phx_join"} = msg, socket) when is_pid(pid) do
Logger.debug "Duplicate channel join for topic \"#{msg.topic}\" in #{inspect(socket.handler)}. " <>
"Closing existing channel for new join."
:ok = Phoenix.Channel.Server.close(pid)
do_dispatch(nil, msg, socket)
end
defp do_dispatch(nil, msg, socket) do
reply_ignore(msg, socket)
end
defp do_dispatch(channel_pid, msg, _socket) do
send(channel_pid, msg)
:noreply
end
defp log_info("phoenix" <> _, _func), do: :noop
defp log_info(_topic, func), do: Logger.info(func)
defp reply_ignore(msg, socket) do
Logger.warn fn -> "Ignoring unmatched topic \"#{msg.topic}\" in #{inspect(socket.handler)}" end
{:error, :unmatched_topic, %Reply{ref: msg.ref, topic: msg.topic, status: :error,
payload: %{reason: "unmatched topic"}}}
end
@doc """
Returns the message to be relayed when a channel exits.
"""
# TODO remove 2-arity on next major release
def on_exit_message(topic, reason) do
IO.write :stderr, "Phoenix.Transport.on_exit_message/2 is deprecated. Use on_exit_message/3 instead."
on_exit_message(topic, nil, reason)
end
def on_exit_message(topic, join_ref, reason) do
case reason do
:normal -> %Message{ref: join_ref, topic: topic, event: "phx_close", payload: %{}}
:shutdown -> %Message{ref: join_ref, topic: topic, event: "phx_close", payload: %{}}
{:shutdown, _} -> %Message{ref: join_ref, topic: topic, event: "phx_close", payload: %{}}
_ -> %Message{ref: join_ref, topic: topic, event: "phx_error", payload: %{}}
end
end
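# Illustrative sketch (not part of the original file):
#
#   on_exit_message("room:lobby", "1", :normal)
#   #=> %Message{ref: "1", topic: "room:lobby", event: "phx_close", payload: %{}}
#
#   on_exit_message("room:lobby", "1", {:error, :boom})
#   #=> %Message{ref: "1", topic: "room:lobby", event: "phx_error", payload: %{}}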
@doc """
Forces SSL in the socket connection.
Uses the endpoint configuration to decide so. It is a
noop if the connection has been halted.
"""
def force_ssl(%Plug.Conn{halted: true} = conn, _socket, _endpoint, _opts) do
conn
end
def force_ssl(conn, socket, endpoint, opts) do
if force_ssl = force_ssl_config(socket, endpoint, opts) do
Plug.SSL.call(conn, force_ssl)
else
conn
end
end
defp force_ssl_config(socket, endpoint, opts) do
Phoenix.Config.cache(endpoint, {:force_ssl, socket}, fn _ ->
opts =
if force_ssl = Keyword.get(opts, :force_ssl, endpoint.config(:force_ssl)) do
force_ssl
|> Keyword.put_new(:host, endpoint.config(:url)[:host] || "localhost")
|> Plug.SSL.init()
end
{:cache, opts}
end)
end
@doc """
Logs the transport request.
Available for transports that generate a connection.
"""
def transport_log(conn, level) do
if level do
Plug.Logger.call(conn, Plug.Logger.init(log: level))
else
conn
end
end
@doc """
Checks the origin request header against the list of allowed origins.
Should be called by transports before connecting when appropriate.
If the origin header matches the allowed origins, no origin header was
sent or no origin was configured, it will return the given connection.
Otherwise a 403 Forbidden response will be sent and the connection halted.
It is a noop if the connection has been halted.
"""
def check_origin(conn, handler, endpoint, opts, sender \\ &Plug.Conn.send_resp/1)
def check_origin(%Plug.Conn{halted: true} = conn, _handler, _endpoint, _opts, _sender),
do: conn
def check_origin(conn, handler, endpoint, opts, sender) do
import Plug.Conn
origin = get_req_header(conn, "origin") |> List.first
check_origin = check_origin_config(handler, endpoint, opts)
cond do
is_nil(origin) or check_origin == false ->
conn
origin_allowed?(check_origin, URI.parse(origin), endpoint) ->
conn
true ->
Logger.error """
Could not check origin for Phoenix.Socket transport.
This happens when you are attempting a socket connection to
a different host than the one configured in your config/
files. For example, in development the host is configured
to "localhost" but you may be trying to access it from
"127.0.0.1". To fix this issue, you may either:
1. update [url: [host: ...]] to your actual host in the
config file for your current environment (recommended)
2. pass the :check_origin option when configuring your
endpoint or when configuring the transport in your
UserSocket module, explicitly outlining which origins
are allowed:
check_origin: ["https://example.com",
"//another.com:888", "//other.com"]
"""
resp(conn, :forbidden, "")
|> sender.()
|> halt()
end
end
defp check_origin_config(handler, endpoint, opts) do
Phoenix.Config.cache(endpoint, {:check_origin, handler}, fn _ ->
check_origin =
case Keyword.get(opts, :check_origin, endpoint.config(:check_origin)) do
origins when is_list(origins) ->
Enum.map(origins, &parse_origin/1)
boolean when is_boolean(boolean) ->
boolean
end
{:cache, check_origin}
end)
end
defp parse_origin(origin) do
case URI.parse(origin) do
%{host: nil} ->
raise ArgumentError,
"invalid check_origin: #{inspect origin} (expected an origin with a host)"
%{scheme: scheme, port: port, host: host} ->
{scheme, host, port}
end
end
defp origin_allowed?(_check_origin, %URI{host: nil}, _endpoint),
do: true
defp origin_allowed?(true, uri, endpoint),
do: compare?(uri.host, endpoint.config(:url)[:host])
defp origin_allowed?(check_origin, uri, _endpoint) when is_list(check_origin),
do: origin_allowed?(uri, check_origin)
defp origin_allowed?(uri, allowed_origins) do
%{scheme: origin_scheme, host: origin_host, port: origin_port} = uri
Enum.any?(allowed_origins, fn {allowed_scheme, allowed_host, allowed_port} ->
compare?(origin_scheme, allowed_scheme) and
compare?(origin_port, allowed_port) and
compare_host?(origin_host, allowed_host)
end)
end
defp compare?(request_val, allowed_val) do
is_nil(allowed_val) or request_val == allowed_val
end
defp compare_host?(_request_host, nil),
do: true
defp compare_host?(request_host, "*." <> allowed_host),
do: String.ends_with?(request_host, allowed_host)
defp compare_host?(request_host, allowed_host),
do: request_host == allowed_host
end
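# Illustrative sketch (not part of the original file), using the private
# helpers above:
#
#   parse_origin("https://example.com")
#   #=> {"https", "example.com", 443}   (URI.parse/1 fills in the default port)
#
#   compare_host?("sub.example.com", "*.example.com")
#   #=> true, since the request host ends with "example.com"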
|
lib/phoenix/socket/transport.ex
| 0.90777
| 0.623148
|
transport.ex
|
starcoder
|
require Logger
use Timex
defmodule PonyFactor do
@moduledoc """
Calculates the [Pony Factor](https://ke4qqq.wordpress.com/2015/02/08/pony-factor-math/) of a GitHub
repository.
The Pony Factor of a repository is the smallest number of contributors that, when totalling the
number of commits for each contributor, equals 50% or more of the commits in the repository.
This is a measure of the "health" of a project based on the idea that a higher number means:
* More people understand the codebase, increasing resiliency in the face of long-term contributor
turnover
* More people contributing significantly means that the codebase is more approachable
There is also an Augmented Pony Factor that only takes into account the contributors that are
considered "active". By default, the definition of active is that they have added a commit in the
last year.
**Note:** In neither case is the size of the commits taken into account.
"""
@commit_percentage 0.5
@doc """
Calculates the Pony Factor of the `location`.
The `location` can either be a GitHub `owner/repo` name or a path to a local repository if the
`:directory` option is specified. If a GitHub repository is specified, the repo will be cloned
to the local filesystem for the calculations and then deleted afterward.
It returns a list of three-element tuples of the form `{name, date, commit_count}` where:
* `name` is the display name of the contributor
* `date` is the date of the latest commit from that contributor
* `commit_count` is the number of commits that person has contributed
The list is sorted from most commits to least.
You can get the length of the list to determine the Pony Factor number.
### Options
The following options are supported:
* `:directory` - Set to `true` to supply the path to a local repository as the location
"""
def calculate(location, options \\ [])
def calculate(path, directory: true), do: calculate_from({:path, path})
def calculate(nwo, _) when is_binary(nwo), do: calculate_from({:github, nwo})
def calculate([], _), do: {:error, "No commits were found!"}
def calculate(commits, []) when is_list(commits) do
Logger.info("Calculate Pony Factor")
commits
|> collect_committers
|> sort_committers
|> filter_committers
|> pony(Enum.count(commits))
end
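# Illustrative sketch (not part of the original file): each commit is a
# three-element tuple whose first element is ignored (a SHA, say), with the
# date in git's "%F %T %z" format:
#
#   PonyFactor.calculate([
#     {"abc123", "2020-01-01 10:00:00 +0000", "Alice"},
#     {"def456", "2020-06-01 12:30:00 +0000", "Bob"}
#   ])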
@doc """
Displays the results of the calculation.
"""
def display(list_or_error), do: display(list_or_error, Kernel)
@doc false
def display(list_or_error, kernel_module)
def display(pony_list, _) when is_list(pony_list) do
display_pony_list(pony_list)
IO.puts(nil)
IO.puts("Augmented Pony Factor = #{Enum.count(pony_list)}")
end
def display({pony_list, {commits, target}}, _) do
IO.puts("Augmented Pony Factor is undefined: only #{Float.round(commits / (target / @commit_percentage) * 100, 2)}% of commits are covered by committers who are still active")
IO.puts(nil)
display_pony_list(pony_list)
IO.puts(nil)
IO.puts("Pony Factor = #{Enum.count(pony_list)}")
end
def display({:error, message}, kernel_module) do
IO.puts(message)
kernel_module.exit({:shutdown, 1})
end
defp calculate_from({:github, nwo}) do
{clone_dir, 0} = PonyFactor.Git.clone(nwo)
pony_list = calculate_from({:path, clone_dir})
File.rm_rf!(clone_dir)
pony_list
end
defp calculate_from({:path, clone_dir}) do
clone_dir
|> PonyFactor.Git.commit_list
|> calculate
end
defp collect_committers(commits), do: collect_committers(%{}, commits)
defp collect_committers(committers, []), do: committers
defp collect_committers(committers, [{_, date, name} | commits]) do
date = parse_git_date(date)
{_, new_committers} = Map.get_and_update(committers, name, fn
nil -> {name, {name, date, 1}}
{_, commit_date, count} -> {name, {name, max_date(date, commit_date), count + 1}}
end)
collect_committers(new_committers, commits)
end
defp display_pony_list(list), do: Enum.each(list, fn({name, date, count}) -> IO.puts("#{name}\t#{count}\t#{date}") end)
defp filter_committers(committers) do
one_year_ago = Timex.shift(Timex.now, years: -1)
Enum.filter(committers, fn({_, commit_date, _}) ->
Timex.compare(one_year_ago, commit_date) == -1
end)
end
defp max_date(a, b) do
case Timex.compare(a, b) do
-1 -> b
_ -> a
end
end
defp parse_git_date(date), do: Timex.parse!(date, "%F %T %z", :strftime)
defp pony(committers, commit_count) do
pony(0, [], committers, commit_count * @commit_percentage)
end
defp pony(sum, list, _, count) when sum >= count, do: Enum.reverse(list)
defp pony(sum, _, [], count),
do: {:error, "Augmented Pony Factor undefined: only #{Float.round(sum / (count / @commit_percentage) * 100, 2)}% of commits are covered by committers who are still active"}
defp pony(sum, list, [committer = {_, _, author_count} | committers], count),
do: pony(sum + author_count, [committer | list], committers, count)
defp sort_committers(committers) do
committers
|> Map.values
|> Enum.sort_by(fn({_, _, count}) -> count end, &>=/2)
end
end
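# Worked example (not part of the original file): for a repository with 10
# commits split A: 4, B: 3, C: 2, D: 1, the target is 10 * 0.5 = 5 commits.
# Walking the committers from most to least active, A alone covers 4 commits
# (< 5), but A + B covers 7 (>= 5), so the Pony Factor is 2.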
|
lib/pony_factor.ex
| 0.794544
| 0.70156
|
pony_factor.ex
|
starcoder
|
defmodule Still.Compiler.PassThroughCopy do
@moduledoc """
Copies a file from the input path to the output directory without changing it.
## Matching parameters
You can configure matching parameters by setting
config :still,
pass_through_copy: ["img/logo.png"]
In the example above, the file `logo.png` inside the `img` folder will be copied
to the `img` folder in the output. But if you write something like this:
config :still,
pass_through_copy: ["img"]
**Any file or folder that starts with the string `img` will be copied, which may
include an `img` folder or a file named `img.png`.** So you need to be mindful
of that.
You can also use regular expressions:
config :still,
pass_through_copy: [~r/.*\.jpe?g/]
**Sometimes you want to alter the file name or path but keep the content of the
files.** The configuration allows this by using tuples. The key will be used
to match the input folder, and the value will be used to transform the input
path:
config :still,
pass_through_copy: [css: "styles"]
# this is also valid:
# config :still,
# pass_through_copy: [{"css", "styles"}]
In the example above, the `css` folder from the input folder will be copied,
but renamed to `styles` in the output folder.
"""
import Still.Utils
require Logger
@doc """
Attempts to copy a file from the input path to the output directory without changing it.
If the file doesn't match any configured name, `:no_match` is returned.
See the [Matching Parameters](#module-matching-parameters) section.
"""
def try(file) do
case get_pass_through_copy_match(file) do
{input_file, output_file} ->
run(file, replace_match(file, input_file, output_file))
output_file when not is_nil(output_file) ->
run(file)
_ ->
:no_match
end
end
defp run(file), do: run(file, file)
defp run(file, output_file) do
with :ok <- do_run(file, output_file) do
Logger.info("Pass through copy #{file}")
:ok
else
_ ->
Logger.error("Failed to process #{file} in #{__MODULE__}")
:error
end
end
defp do_run(file, output_file) do
get_output_path(output_file)
|> Path.dirname()
|> File.mkdir_p!()
if File.dir?(get_input_path(file)) do
process_folder(file, output_file)
else
process_file(file, output_file)
end
end
defp process_file(file, output_file) do
File.cp(get_input_path(file), get_output_path(output_file))
end
defp process_folder(folder, output_folder) do
with {:ok, _} <- File.cp_r(get_input_path(folder), get_output_path(output_folder)) do
:ok
end
end
defp get_pass_through_copy_match(file) do
config(:pass_through_copy, [])
|> Enum.find(&match_pass_through_copy(file, &1))
end
defp match_pass_through_copy(file, {match, _output}) when is_binary(match),
do: match_pass_through_copy(file, match)
defp match_pass_through_copy(file, {match, _output}) when is_atom(match),
do: match_pass_through_copy(file, match |> Atom.to_string())
defp match_pass_through_copy(file, match) when is_binary(match),
do: String.starts_with?(file, match)
defp match_pass_through_copy(file, match) do
cond do
Regex.regex?(match) -> String.match?(file, match)
true -> false
end
end
defp replace_match(file, input_match, output_match) when is_atom(output_match),
do: replace_match(file, input_match, output_match |> Atom.to_string())
defp replace_match(file, input_match, output_match) when is_atom(input_match),
do: replace_match(file, input_match |> Atom.to_string(), output_match)
defp replace_match(file, input_match, output_match) do
String.replace_prefix(file, input_match, output_match)
end
end
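# Illustrative usage sketch (not part of the original file), assuming
# `pass_through_copy: ["img/logo.png"]` is configured:
#
#   Still.Compiler.PassThroughCopy.try("img/logo.png")   #=> :ok (when the copy succeeds)
#   Still.Compiler.PassThroughCopy.try("posts/hello.md") #=> :no_match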
|
lib/still/compiler/pass_through_copy.ex
| 0.7324
| 0.602734
|
pass_through_copy.ex
|
starcoder
|
defmodule Stackdelivery do
@moduledoc """
Main Stackdelivery entry point.
This app starts a number of stacks as building blocks; they can be joined in any combination, transferring elements between two stacks.
The stacks act as sources to one or more sinks in numerous ways.
The main operations these stacks support are:
- Transfer from Stack A to Stack B
- Copy from Stack A to [Stack B, C, D, .... so on ]
- Copy & Transfer, in which case the following concurrent operations happen:
a. Copy from Stack A to [Stack B, C, D, .... so on ]
b. Transfer from Stack A to a single stack not included in the list above.
Consider a Subscriber point A that has subscribed to a broadcast channel.
When A receives the input, it can pass it down to further pipes, based on how
the user wants to consume this information. This way any information packet <#Package{}> can be piped like
A |> B |> {C, D, E} |> .. or
{A, B, C} |> {D, E, F}
So, if there are companies like:
A
/ \
B C
/ \ / \
D E F G
Then A could subscribe to updates from {D, E, F, G} AND
B could subscribe to updates from {D, E} AND
D could subscribe to updates from {A, B} and even {A, B, E}.
It really depends upon the constraint on how <#Package{}> is
consumed.
In fact, one could implement a well-defined struct like:
defmodule Package do
defstruct [:txnID, :dt, :packetname]
end
and implement a protocol that implements operations on a series
of such Package flows:
defprotocol TxnBlock do
end
defimpl TxnBlock, for: Package do
..
end
Or simply:
defprotocol TxnBlock do
@spec new(String.t) :: Package.t
def new(stack_name)
@spec transfer(String.t(), String.t()) :: atom()
def transfer(stack_a, stack_b)
end
defmodule Package do
defstruct [:txnID, :dt, :packetname]
defimpl TxnBlock do
..
end
end
"""
alias Stackdelivery.StackGen.Supervisor, as: StackSup
# alias Stackdelivery.StoreGen.Supervisor, as: StoreSup
alias Stackdelivery.{
Stack,
Store
}
@doc """
Create a new deque stack.
Each time a stack is created, its name and GenServer pid are
stored in an ETS table; if the name already exists, an error is returned.
"""
def new(stack_name) do
case shoot(stack_name) do
{:ok, pid} -> Store.push([{stack_name, pid}])
{:error, _} ->
IO.puts "stack name already exists"
:not_allowed
end
end
defp shoot(stack_name), do: StackSup.shoot(stack_name)
@doc """
Adds a value to the given stack.
"""
def add(stack, val), do: Stack.push(stack, val)
@doc """
Transfers the top element from stack1 to stack2.
"""
@spec transfer(String.t(), String.t()) :: atom()
def transfer(stack1, stack2) do
cond do
!!Store.fetch(stack1) && !!Store.fetch(stack2) ->
Stack.push(stack2, Stack.pop(stack1))
:commit
true ->
IO.puts "either of the stacks does not exist."
:invalid_transfer_rollback
end
end
@spec copy(String.t(), list(String.t())) :: atom()
def copy(stack1, stack_list) do
with true <- Enum.all?(stack_list, &(!!Store.fetch(&1))),
[{k1, v1}] <- Stack.peek(stack1),
do: (
Enum.each(stack_list, &(Stack.push(&1, [{k1, v1}])))
:commit
),
else: (other->
case other do
false ->
IO.puts "Undefined stack in list or Empty list"
:invalid_list_rollback
nil ->
IO.puts "Empty source stack"
:empty_source
end)
end
@spec copy_transfer(String.t(), String.t(), list(String.t())) :: atom()
def copy_transfer(stack1, stack2, stack_list) do
# check if stack2 not exists in stack_list
with true <- Enum.all?(stack_list, &(&1 != stack2)),
# then copy stack1 first key to stack_list
:commit <- copy(stack1, stack_list),
# then transfer from stack1 to stack2
do: transfer(stack1, stack2),
else: (other ->
case other do
false ->
IO.puts "#{stack2} exists in stack list provided"
:stack_overlap
reason -> reason
end
)
end
end
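# Illustrative usage sketch (not part of the original file), assuming stacks
# are addressed by the names they were created with:
#
#   Stackdelivery.new("a")
#   Stackdelivery.new("b")
#   Stackdelivery.add("a", 1)
#   Stackdelivery.transfer("a", "b") #=> :commit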
|
stackdelivery/lib/stackdelivery.ex
| 0.688992
| 0.682448
|
stackdelivery.ex
|
starcoder
|
defmodule Zaryn.TransactionChain.MemTables.PendingLedger do
@moduledoc """
Represents a memory table for all the transactions which are in a pending state,
awaiting signatures to be counter-validated
"""
@table_name :zaryn_pending_ledger
use GenServer
require Logger
@doc """
Initialize the memory table
## Examples
iex> PendingLedger.start_link()
iex> :ets.info(:zaryn_pending_ledger)[:type]
:bag
"""
@spec start_link(list()) :: {:ok, pid()}
def start_link(args \\ []) do
GenServer.start_link(__MODULE__, args)
end
def init(_args) do
Logger.info("Initialize InMemory Pending Ledger...")
:ets.new(@table_name, [:bag, :named_table, :public, read_concurrency: true])
{:ok, %{table_name: @table_name}}
end
@doc """
Add a transaction address as pending.
## Examples
iex> {:ok, pid} = PendingLedger.start_link()
iex> :ok = PendingLedger.add_address("@Alice2")
iex> %{table_name: table} = :sys.get_state(pid)
iex> :ets.tab2list(table)
[
{"@Alice2", "@Alice2"}
]
"""
@spec add_address(address :: binary()) :: :ok
def add_address(address) when is_binary(address) do
true = :ets.insert(@table_name, {address, address})
:ok
end
@doc """
Add a signature to a pending transaction.
The address of the signing transaction acts as the signature.
The previous public key is used to determine the previous signer.
## Examples
iex> {:ok, _} = PendingLedger.start_link()
iex> :ok = PendingLedger.add_address("@Alice2")
iex> :ok = PendingLedger.add_signature("@Alice2", "@Bob3")
iex> :ets.tab2list(:zaryn_pending_ledger)
[
{"@Alice2", "@Alice2"},
{"@Alice2", "@Bob3"}
]
"""
@spec add_signature(pending_tx_address :: binary(), signature_address :: binary()) :: :ok
def add_signature(pending_tx_address, signature_address)
when is_binary(pending_tx_address) and is_binary(signature_address) do
true = :ets.insert(@table_name, {pending_tx_address, signature_address})
:ok
end
@doc """
Determines if a public key has already signed for the pending transaction address
## Examples
iex> {:ok, _pid} = PendingLedger.start_link()
iex> :ok = PendingLedger.add_address("@Alice2")
iex> :ok = PendingLedger.add_signature("@Alice2", "@Bob3")
iex> PendingLedger.already_signed?("@Alice2", "@Bob3")
true
"""
@spec already_signed?(binary(), binary()) :: boolean()
def already_signed?(address, signature_address) do
case :ets.lookup(@table_name, address) do
[] ->
false
res ->
res
|> Enum.map(fn {_, signature} -> signature end)
|> Enum.any?(&(&1 == signature_address))
end
end
@doc """
Get the list of counter signatures for the pending transaction address.
The counter signatures are transaction addresses validating the pending transaction
## Examples
iex> {:ok, _pid} = PendingLedger.start_link()
iex> :ok = PendingLedger.add_address("@Alice2")
iex> :ok = PendingLedger.add_signature("@Alice2", "@Bob3")
iex> PendingLedger.list_signatures("@Alice2")
["@Alice2", "@Bob3"]
"""
@spec list_signatures(binary()) :: list(binary())
def list_signatures(address) when is_binary(address) do
Enum.map(:ets.lookup(@table_name, address), fn {_, sig} -> sig end)
end
@doc """
Remove a transaction from the pending list
## Examples
iex> {:ok, _pid} = PendingLedger.start_link()
iex> :ok = PendingLedger.add_address("@Alice2")
iex> :ok = PendingLedger.add_signature("@Alice2", "@Bob3")
iex> PendingLedger.remove_address("@Alice2")
iex> :ets.tab2list(:zaryn_pending_ledger)
[]
"""
@spec remove_address(binary()) :: :ok
def remove_address(address) when is_binary(address) do
true = :ets.delete(@table_name, address)
:ok
end
end
|
lib/zaryn/transaction_chain/mem_tables/pending_ledger.ex
| 0.808181
| 0.410727
|
pending_ledger.ex
|
starcoder
|
defmodule Riffed.Server do
@moduledoc ~S"""
Provides a server and data structure mappings to help you build Thrift servers in Elixir. Macros
dutifully work behind the scenes to give you near-seamless access to Thrift structures.
*Riffed: Bridging the divide between Thrift and Elixir.*
## Usage
The Thrift Erlang implementation doesn't provide the ability to enumerate all defined RPC functions,
so you need to tell Riffed which functions you'd like to expose in your server. After doing this, the
Thrift metadata is interrogated and your return types are figured out and built for you. They're
available for you to use in a module of your choosing.
The example below assumes a Thrift service called database defined in src/database_thrift.erl. The
database exports select, delete and insert as functions. These functions take a string and a list of
strings and return a ResultSet Thrift object.
You can also define an `after_start` function that will execute after the server has been started. The
function takes a server_pid and the server_opts as arguments.
Lastly, you can optionally define your own error handler to perform logic when clients disconnect,
timeout, or do any other bad things.
defmodule Server do
use Riffed.Server, service: :database_thrift,
structs: DB,
functions: [select: &Handlers.select/2,
insert: &Handlers.insert/2,
delete: &Handlers.delete/2],
server: {:thrift_socket_server,
port: 3306,
framed: true,
max: 5000,
socket_opts: [recv_timeout: 3000]
},
after_start: fn(server_pid, server_opts) ->
ZKRegister.death_pact(server_pid, server_opts[:port])
end,
error_handler: &Handlers.handle_error/2
end
defmodule Handlers do
def select(query, args) do
DB.ResultSet.new(num_rows: 0, results: [])
end
def insert(query, args) do
DB.ResultSet.new(num_rows: 0, results: [])
end
def delete(query, args) do
DB.ResultSet.new(num_rows: 0, results: [])
end
def handle_error(_, :closed) do
"Oh no, the client left!"
end
def handle_error(_, :timeout) do
"Woah, the client disappeared!"
end
def handle_error(function_name, reason) do
"Thrift exploded"
end
end
### Usage:
Server.start_link
"""
import Riffed.MacroHelpers
alias Riffed.ThriftMeta, as: ThriftMeta
alias Riffed.ThriftMeta.Meta, as: Meta
defmacro __using__(opts) do
quote do
use Riffed.Callbacks
use Riffed.Enumeration
require Riffed.Server
require Riffed.Struct
import Riffed.Server
@thrift_module unquote(opts[:service])
@functions unquote(opts[:functions])
@struct_module unquote(opts[:structs])
@server unquote(opts[:server])
@after_start unquote(Macro.escape(opts[:after_start]))
@error_handler unquote(opts[:error_handler])
@auto_import_structs unquote(Keyword.get(opts, :auto_import_structs, true))
@before_compile Riffed.Server
end
end
defp build_delegate_call(delegate_fn) do
delegate_info = :erlang.fun_info(delegate_fn)
arg_list = build_arg_list(delegate_info[:arity])
{{:., [], [{:__aliases__, [alias: false],
[delegate_info[:module]]}, delegate_info[:name]]}, [], arg_list}
end
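# Builds a handle_function/2 clause for one thrift function: casts the incoming
# erlang params to Elixir structs, invokes the user's delegate, converts the
# reply back to erlang, and returns :ok or {:reply, rsp} as thrift expects.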
defp build_handler(meta=%Meta{}, struct_module, thrift_fn_name, delegate_fn, fn_overrides) do
function_meta = Meta.metadata_for_function(meta, thrift_fn_name)
params_meta = function_meta[:params]
reply_meta = function_meta[:reply] |> Riffed.Struct.to_riffed_type_spec
tuple_args = build_handler_tuple_args(params_meta)
delegate_call = build_delegate_call(delegate_fn)
casts = build_casts(thrift_fn_name, struct_module, params_meta, fn_overrides, :to_elixir)
overridden_type = Riffed.Enumeration.get_overridden_type(thrift_fn_name, :return_type, fn_overrides, reply_meta)
quote do
def handle_function(unquote(thrift_fn_name), unquote(tuple_args)) do
unquote_splicing(casts)
rsp = unquote(delegate_call)
|> unquote(struct_module).to_erlang(unquote(overridden_type))
case rsp do
:ok -> :ok
_ -> {:reply, rsp}
end
end
end
end
defp build_error_handler(nil) do
quote do
def handle_error(_, :timeout) do
Lager.notice("Connection to client timed out.")
{:ok, :timeout}
end
def handle_error(_, :closed) do
{:ok, :closed}
end
def handle_error(name, reason) do
Lager.error("Unhandled thrift error: #{name}, #{reason}")
{:error, reason}
end
end
end
defp build_error_handler(delegate_fn) do
quote do
def handle_error(function_name, reason) do
unquote(delegate_fn).(function_name, reason)
end
end
end
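# Assembles the generated server module: one handle_function/2 clause per exposed
# thrift function, an optional auto-generated struct module, a start_link/1 that
# merges server options, and the error handler clauses.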
defmacro __before_compile__(env) do
functions = Module.get_attribute(env.module, :functions)
struct_module = Module.get_attribute(env.module, :struct_module)
thrift_module = Module.get_attribute(env.module, :thrift_module)
function_names = Enum.map(functions, fn({name, _}) -> name end)
thrift_meta = ThriftMeta.extract(thrift_module, function_names)
{server, server_opts} = Module.get_attribute(env.module, :server)
overrides = Riffed.Enumeration.get_overrides(env.module)
after_start = Module.get_attribute(env.module, :after_start) || quote do: fn(_, _) -> nil end
error_handler = Module.get_attribute(env.module, :error_handler) |> build_error_handler
function_handlers = Enum.map(functions,
fn({fn_name, delegate}) ->
build_handler(thrift_meta, struct_module, fn_name, delegate, overrides.functions) end)
structs_keyword = ThriftMeta.Meta.structs_to_keyword(thrift_meta)
struct_module =
  if Module.get_attribute(env.module, :auto_import_structs) do
    quote do
      defmodule unquote(struct_module) do
        @build_cast_to_erlang true
        use Riffed.Struct, unquote(structs_keyword)
        unquote_splicing(Riffed.Callbacks.reconstitute(env.module))
        unquote_splicing(Riffed.Enumeration.reconstitute(env.module))
      end
    end
  else
    quote do
    end
  end
quote do
unquote(struct_module)
require Lager
def start_link(cmd_opts \\ []) do
default_opts = [service: unquote(thrift_module),
handler: unquote(env.module),
name: unquote(env.module)]
opts = Keyword.merge(unquote(server_opts), default_opts)
|> Keyword.merge(cmd_opts)
{:ok, server_pid} = unquote(server).start(opts)
unquote(after_start).(server_pid, unquote(server_opts))
{:ok, server_pid}
end
unquote(Riffed.Callbacks.default_to_erlang)
unquote(Riffed.Callbacks.default_to_elixir)
unquote_splicing(function_handlers)
def handle_function(name, args) do
raise "Handler #{inspect(name)} #{inspect(args)} Not Implemented"
end
unquote(error_handler)
end
end
end
|
lib/riffed/server.ex
| 0.629547
| 0.442034
|
server.ex
|
starcoder
|
defmodule Krihelinator.ImportExport do
@moduledoc """
Tools to export all of the data from the DB into a single json, and
to populate the DB from an existing one.
"""
alias Krihelinator.Github, as: GH
@models [GH.Language, GH.Repo, Krihelinator.History.Language]
@doc """
Populate the DB with data from json string.
json: A string.
repo: An Ecto repo.
"""
def import_data(data, repo) do
decoded = decode_data(data)
do_import_data(decoded, repo)
Enum.each(decoded, &fix_postgres_next_val(&1.model, repo))
end
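# Example round trip (illustrative; assumes the application's repo is running):
#
#     json = Krihelinator.ImportExport.export_krihelinator_data()
#     :ok = Krihelinator.ImportExport.import_data(json, Krihelinator.Repo)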
defp decode_data(data) do
model_strings = Enum.map(@models, &Atom.to_string/1)
data
|> Poison.decode!(keys: :atoms!)
|> Enum.filter(fn model_data -> model_data.model in model_strings end)
|> Enum.map(fn model_data ->
%{model_data | model: String.to_existing_atom(model_data.model)}
end)
end
defp do_import_data(data, repo) do
data
|> Stream.flat_map(fn %{model: model, items: items} ->
Stream.map(items, &create_changeset(model, &1))
end)
|> Enum.each(&repo.insert!/1)
end
# Fix for #163, reset postgres next_val
defp fix_postgres_next_val(model, repo) do
table = model.__schema__(:source)
sql = "SELECT setval('#{table}_id_seq', (SELECT MAX(id) from \"#{table}\"));"
Ecto.Adapters.SQL.query(repo, sql, [])
end
defp create_changeset(model, item) do
associations = Enum.filter(item, fn {key, _} -> is_association_key?(key) end)
{id, params} = Map.pop(item, :id)
model
|> struct(id: id)
|> model.changeset(params)
|> Ecto.Changeset.change(associations)
end
defp is_association_key?(key) do
key
|> Atom.to_string()
|> String.ends_with?("_id")
end
@doc """
A wrapper around the `export_data` function with the relevant models and
repo.
"""
def export_krihelinator_data() do
export_data(@models, Krihelinator.Repo)
end
@doc """
Create a json string of the data.
models: A list of models (modules). The model is passed to `repo.all`.
Models can derive from `Poison.Encoder` with the `:only` option to restrict
serialized fields.
repo: A module that has an `all` function that is called with the models to
get all of the items for each model.
"""
def export_data(models, repo) do
models
|> Stream.map(fn (model) ->
%{model: Atom.to_string(model), items: repo.all(model)}
end)
|> Poison.encode!()
end
end
|
www/lib/krihelinator/import_export.ex
| 0.524395
| 0.460471
|
import_export.ex
|
starcoder
|
defmodule Sentry.Logger do
require Logger
@moduledoc """
This is based on the Erlang [error_logger](http://erlang.org/doc/man/error_logger.html).
To set this up, add `:ok = :error_logger.add_report_handler(Sentry.Logger)` to your application's start function. Example:
```elixir
def start(_type, _opts) do
children = [
supervisor(Task.Supervisor, [[name: Sentry.TaskSupervisor]]),
:hackney_pool.child_spec(Sentry.Client.hackney_pool_name(), [timeout: Config.hackney_timeout(), max_connections: Config.max_hackney_connections()])
]
opts = [strategy: :one_for_one, name: Sentry.Supervisor]
:ok = :error_logger.add_report_handler(Sentry.Logger)
Supervisor.start_link(children, opts)
end
```
Your application will then be running a `Sentry.Logger` event handler that receives error report messages and sends them to Sentry.
It is important to note that the same report handler can be added multiple times. If you run an umbrella app and add the report handler in multiple individual applications, the same error will be reported multiple times (once for each handler). There are two ways to fix this.
The first is to ensure that the handler is only added at the primary application entry-point. This will work, but can be brittle, and will not work for applications running the multiple release style.
The other solution is to check for existing handlers before trying to add another. Example:
```elixir
if !(Sentry.Logger in :gen_event.which_handlers(:error_logger)) do
:ok = :error_logger.add_report_handler(Sentry.Logger)
end
```
With this solution, if a `Sentry.Logger` handler is already running, it will not add another. One can add the code to each application, and there will only ever be one handler created. This solution is safer, but slightly more complex to manage.
"""
@behaviour :gen_event
def init(_mod), do: {:ok, []}
def handle_call({:configure, new_keys}, _state), do: {:ok, :ok, new_keys}
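# Error events arrive as {:error, group_leader, {pid, format, args}}; this clause
# handles the common crash shape where args is [pid, {exception, stacktrace}].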
def handle_event({:error, _gl, {_pid, _type, [pid, {exception, stack}]} = info}, state)
when is_list(stack) and is_pid(pid) do
try do
opts =
Keyword.put([], :event_source, :logger)
|> Keyword.put(:stacktrace, stack)
|> Keyword.put(:error_type, :error)
Sentry.capture_exception(exception, opts)
rescue
ex ->
Logger.warn(fn -> "Unable to notify Sentry due to #{inspect(ex)}! #{inspect(info)}" end)
end
{:ok, state}
end
def handle_event({:error_report, _gl, {_pid, _type, [message | _]}}, state)
when is_list(message) do
try do
{kind, exception, stacktrace, module} =
get_exception_and_stacktrace(message[:error_info])
|> get_initial_call_and_module(message)
opts =
(get_in(message, ~w[dictionary sentry_context]a) || %{})
|> Map.take(Sentry.Context.context_keys())
|> Map.to_list()
|> Keyword.put(:event_source, :logger)
|> Keyword.put(:stacktrace, stacktrace)
|> Keyword.put(:error_type, kind)
|> Keyword.put(:module, module)
Sentry.capture_exception(exception, opts)
rescue
ex ->
Logger.warn(fn -> "Unable to notify Sentry due to #{inspect(ex)}! #{inspect(message)}" end)
end
{:ok, state}
end
def handle_event(_, state) do
{:ok, state}
end
def handle_info(_msg, state) do
{:ok, state}
end
def code_change(_old, state, _extra) do
{:ok, state}
end
def terminate(_reason, _state) do
:ok
end
defp get_exception_and_stacktrace({kind, {exception, sub_stack}, _stack})
when is_list(sub_stack) do
{kind, exception, sub_stack}
end
defp get_exception_and_stacktrace({kind, exception, stacktrace}) do
{kind, exception, stacktrace}
end
# GenServer exits will usually only report a stacktrace containing core
# GenServer functions, which causes Sentry to group unrelated exits
# together. This gets the `:initial_call` to help disambiguate, as it contains
# the MFA for how the GenServer was started.
defp get_initial_call_and_module({kind, exception, stacktrace}, error_info) do
case Keyword.get(error_info, :initial_call) do
{module, function, arg} ->
{kind, exception, stacktrace ++ [{module, function, arg, []}], module}
_ ->
{kind, exception, stacktrace, nil}
end
end
end
|
lib/sentry/logger.ex
| 0.7586
| 0.793426
|
logger.ex
|
starcoder
|
defmodule Braintree.SettlementBatchSummary do
@moduledoc """
The settlement batch summary displays the total sales and credits for each
batch for a particular date. The transactions can be grouped by a single
custom field's values.
https://developers.braintreepayments.com/reference/request/settlement-batch-summary/generate/ruby
"""
use Braintree.Construction
import Braintree.Util, only: [atomize: 1]
alias Braintree.HTTP
alias Braintree.ErrorResponse, as: Error
defmodule Record do
@moduledoc """
A record contains details for a transaction in a summary.
"""
@type t :: %__MODULE__{
card_type: String.t(),
count: String.t(),
merchant_account_id: String.t(),
kind: String.t(),
amount_settled: String.t()
}
defstruct card_type: nil,
count: "0",
merchant_account_id: nil,
kind: nil,
amount_settled: nil
@doc """
Convert a list of records into structs, including any custom fields that
were used as the grouping value.
"""
def new(params) when is_map(params) do
atomized = atomize(params)
summary = Construction.new(__MODULE__, params)
case Map.keys(atomized) -- Map.keys(summary) do
[custom_key] -> Map.put(summary, custom_key, atomized[custom_key])
_ -> summary
end
end
def new(params) when is_list(params) do
Enum.map(params, &new/1)
end
end
@type t :: %__MODULE__{records: [Record.t()]}
defstruct records: []
@doc """
Generate a report of all settlements for a particular date. The
field used for custom grouping will always be set as
`custom_field`, regardless of its name.
## Example
Braintree.SettlementBatchSummary.generate("2016-9-5", "custom_field_1")
"""
@spec generate(binary, binary | nil, Keyword.t()) :: {:ok, [t]} | {:error, Error.t()}
def generate(settlement_date, custom_field \\ nil, opts \\ []) do
criteria = build_criteria(settlement_date, custom_field)
params = %{settlement_batch_summary: criteria}
with {:ok, payload} <- HTTP.post("settlement_batch_summary", params, opts) do
%{"settlement_batch_summary" => summary} = payload
{:ok, new(summary)}
end
end
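# On success the payload is unwrapped into structs, e.g. (illustrative values):
#
#     {:ok, %Braintree.SettlementBatchSummary{
#       records: [%Record{card_type: "Visa", count: "12", kind: "sale", ...}]
#     }}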
@spec build_criteria(binary, binary | nil) :: map
defp build_criteria(settlement_date, nil) do
%{settlement_date: settlement_date}
end
defp build_criteria(settlement_date, custom_field) do
%{settlement_date: settlement_date, group_by_custom_field: custom_field}
end
@doc """
Convert a map including records into a summary struct with a list
of record structs.
"""
def new(%{"records" => records}) do
struct(__MODULE__, records: Record.new(records))
end
end
|
lib/settlement_batch_summary.ex
| 0.920245
| 0.587825
|
settlement_batch_summary.ex
|
starcoder
|
defmodule DoubleCheck do
@moduledoc """
A module for distributing assignments to assignees.
Originally designed to distribute exams to graders, the goals were these:
* Have every exam graded twice, by two different graders.
* Have every grader grade at least one exam in common with every other grader,
so that any bias by a particular grader can be accounted for.
* Allow graders to be assigned multiple batches of tests to grade,
where the number of batches can differ between graders, and size of each batch is roughly equal.
The way it works is this:
1. Generate a list of unique pairs of graders
2. Go through the list of exams, and assign each next exam to both graders in a pair
3. Assign each remaining exam twice, to the next two graders who each have at least one batch whose current size is the smallest of all batches.
## Example
Say you have graders A, B, and C.
The list of unique pairs for these three would be
```elixir
{'A', 'B'}, {'A', 'C'}, {'B', 'C'}
```
Now let's say grader B is only part-time, so they are given one batch of exams to grade,
whereas graders A, and C are assigned two batches each.
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | []
A (batch 2) | []
B | []
C (batch 1) | []
C (batch 2) | []
Next, let's say there are six exams that need grading.
```elixir
[1,2,3,4,5,6]
```
We first iterate through the list of unique pairs,
popping exams to be assigned off the list,
and assigning them to both graders in the pairs,
and to the next batch with the least assignments.
> ```elixir
> [{'A', 'B'}, ...
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1]
A (batch 2) | []
B | [1]
C (batch 1) | []
C (batch 2) | []
> ```elixir
> ..., {'A', 'C'}, ...
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1]
A (batch 2) | [2]
B | [1]
C (batch 1) | [2]
C (batch 2) | []
> ```elixir
> ..., {'B', 'C'}]
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1]
A (batch 2) | [2]
B | [1, 3]
C (batch 1) | [2]
C (batch 2) | [3]
Finally, we finish iterating across the list of exams,
and assign each remaining exam to the next two graders
who have a batch with a current size less than or equal to every other batch.
> ```elixir
> # Graders A(batch 1), and C(batch 1)
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1, 4]
A (batch 2) | [2]
B | [1, 3]
C (batch 1) | [2, 4]
C (batch 2) | [3]
> ```elixir
> # Graders A(batch 2), and C(batch 2)
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1, 4]
A (batch 2) | [2, 5]
B | [1, 3]
C (batch 1) | [2, 4]
C (batch 2) | [3, 5]
> ```elixir
> # Graders A(batch 1), and B
> [1,2,3,4,5,6]
> ^
> ```
Grader | Assigned exams
:---------- | -------------:
A (batch 1) | [1, 4, 6]
A (batch 2) | [2, 5]
B | [1, 3, 6]
C (batch 1) | [2, 4]
C (batch 2) | [3, 5]
Once every exam has been assigned to a grader, the program returns a list of maps, each with a grader and their assignments:
```elixir
[
%{grader: "A", assignments: [1, 2, 4, 5, 6]},
%{grader: "B", assignments: [1, 3, 6]},
%{grader: "C", assignments: [2, 3, 4, 5]}
]
```
"""
@doc """
Generates a list of tuples, representing each unique pair from a given list of objects.
## Examples
iex> DoubleCheck.get_pairs(['A', 'B', 'C'])
[{'A', 'B'}, {'A', 'C'}, {'B', 'C'}]
"""
@spec get_pairs([any]) :: [{any, any}]
def get_pairs(list) do
{_sublist, pairs} =
  Enum.reduce(list, {list, []}, fn x, {sublist, pairs} ->
    tail = Enum.drop(sublist, 1)
    new_pairs = Enum.map(tail, &{x, &1})
    {tail, pairs ++ new_pairs}
  end)
pairs
end
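# Illustrative sketch (not part of the original API): a simplified version of the
# distribution step described in the moduledoc. Unique pairs are cycled so every
# exam is assigned to exactly two graders; per-grader batches and least-loaded
# selection are deliberately omitted to keep the sketch short.
def assign(graders, exams) do
  pairs = graders |> get_pairs() |> Stream.cycle()

  exams
  |> Enum.zip(pairs)
  |> Enum.reduce(Map.new(graders, &{&1, []}), fn {exam, {a, b}}, acc ->
    acc
    |> Map.update!(a, &[exam | &1])
    |> Map.update!(b, &[exam | &1])
  end)
  |> Enum.map(fn {grader, assigned} ->
    %{grader: grader, assignments: Enum.reverse(assigned)}
  end)
end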
end
|
lib/double_check.ex
| 0.883393
| 0.951997
|
double_check.ex
|
starcoder
|
defmodule Makeup.Lexer.Combinators do
import NimbleParsec
@doc """
Wraps the given combinator into a token of the given `ttype`.
Instead of a combinator, the first argument can also be a string literal.
"""
def token(literal, token_type) when is_binary(literal) do
replace(string(literal), {token_type, %{}, literal})
end
def token(combinator, token_type) do
combinator |> traverse({__MODULE__, :__token__, [token_type]})
end
def token(literal, token_type, attrs) when is_binary(literal) and is_map(attrs) do
replace(string(literal), {token_type, attrs, literal})
end
def token(combinator, token_type, attrs) when is_map(attrs) do
combinator |> traverse({__MODULE__, :__token__, [token_type, attrs]})
end
@doc """
Joins the result of the given combinator into a single string.
"""
def lexeme(combinator) do
combinator |> traverse({__MODULE__, :__lexeme__, []})
end
@doc false
def __token__(_rest, [arg], context, _line, _offset, token_type) do
{[{token_type, %{}, arg}], context}
end
def __token__(_rest, arg, context, _line, _offset, token_type) when is_binary(arg) do
{[{token_type, %{}, arg}], context}
end
def __token__(_rest, args, context, _line, _offset, token_type) do
{[{token_type, %{}, args |> :lists.reverse}], context}
end
@doc false
def __token__(_rest, [arg], context, _line, _offset, token_type, attrs) do
{[{token_type, attrs, arg}], context}
end
def __token__(_rest, arg, context, _line, _offset, token_type, attrs) when is_binary(arg) do
{[{token_type, attrs, arg}], context}
end
def __token__(_rest, args, context, _line, _offset, token_type, attrs) do
{[{token_type, attrs, args |> :lists.reverse}], context}
end
@doc false
def __lexeme__(_rest, args, context, _line, _offset) do
result = args |> List.wrap |> :lists.reverse |> IO.iodata_to_binary
{[result], context}
end
defp reverse_sort(items) do
Enum.sort(items, fn a, b -> {byte_size(a), a} > {byte_size(b), b} end)
end
@doc """
Matches one of the literal strings in the list.
"""
def word_from_list(words) do
choice(for word <- reverse_sort(words), do: string(word))
end
@doc """
Matches one of the literal strings in the list and wraps it in a token of the given type.
"""
def word_from_list(words, ttype) do
choice(for word <- reverse_sort(words), do: string(word)) |> token(ttype)
end
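# e.g. word_from_list(~w(def defp), :keyword) — reverse_sort/1 puts longer words
# first, so "defp" is tried before "def" and is not shadowed by the shorter match
# (illustrative).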
@doc """
Matches one of the literal strings in the list and wraps it in a token of the given `type`, with the given `attrs`.
"""
def word_from_list(words, ttype, attrs) do
choice(for word <- reverse_sort(words), do: string(word)) |> token(ttype, attrs)
end
@doc """
Matches a given combinator, repeated 0 or more times, surrounded by left and right delimiters.
Delimiters can be combinators or literal strings (either both combinators or both literal strings).
"""
def many_surrounded_by(combinator, left, right) when is_binary(left) and is_binary(right) do
token(left, :punctuation)
|> concat(
repeat_until(
combinator,
[string(right)]))
|> concat(token(right, :punctuation))
end
def many_surrounded_by(combinator, left, right) do
left
|> concat(
repeat_until(
combinator,
[right]))
|> concat(right)
end
@doc """
Matches a given combinator, repeated 0 or more times, surrounded by left and right delimiters,
and wraps the `right` and `left` delimiters into a token of the given `ttype`.
"""
def many_surrounded_by(combinator, left, right, ttype) do
token(left, ttype)
|> concat(
repeat_until(
combinator,
[string(right)]))
|> concat(token(right, ttype))
end
def collect_raw_chars_and_binaries(_rest, args, context, _line, _offset, ttype, attrs) do
result = merge_chars_helper(ttype, attrs, [], args)
{result, context}
end
defp merge_chars_helper(_ttype, _attrs, [], []), do: []
defp merge_chars_helper(ttype, attrs, acc, [next | rest]) when is_integer(next) or is_binary(next) do
merge_chars_helper(ttype, attrs, [next | acc], rest)
end
defp merge_chars_helper(ttype, attrs, [], [element | rest]) do
[element | merge_chars_helper(ttype, attrs, [], rest)]
end
defp merge_chars_helper(ttype, attrs, acc, list) do
tok = {ttype, attrs, acc}
[tok | merge_chars_helper(ttype, attrs, [], list)]
end
@doc """
TODO
"""
def string_like(left, right, middle, ttype, attrs \\ %{}) do
left_combinator =
case is_binary(left) do
true -> string(left)
false -> left
end
right_combinator =
case is_binary(right) do
true -> string(right)
false -> right
end
choices = middle ++ [utf8_char([])]
left_combinator
|> repeat_until(choice(choices), [right_combinator])
|> concat(right_combinator)
|> traverse({__MODULE__, :collect_raw_chars_and_binaries, [ttype, attrs]})
end
end
|
lib/makeup/lexer/combinators.ex
| 0.812459
| 0.507141
|
combinators.ex
|
starcoder
|
defmodule Wargaming.Warships.Encyclopedia do
@moduledoc """
Encyclopedia provides functions for interacting with the WarGaming.net World of Warships Encyclopedia API.
"""
use Wargaming.ApiEndpoint, api: Wargaming.Warships
@encyclopedia_info "/encyclopedia/info/"
@encyclopedia_ships "/encyclopedia/ships/"
@encyclopedia_achievements "/encyclopedia/achievements/"
@encyclopedia_ship_parameters "/encyclopedia/shipprofile/"
@doc """
Encyclopedia.info/1 searches WarGaming Warships Encyclopedia (in the configured region) and returns general encyclopedia info.
[Official Reference](https://api.worldofwarships.com/wows/encyclopedia/info/?application_id=123456)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def info(opts \\ %{}) do
constructed_get(@encyclopedia_info, opts)
end
@doc """
Encyclopedia.ships/1 searches WarGaming Warships Encyclopedia (in the configured region) and returns ship info.
[Official Reference](https://api.worldofwarships.com/wows/encyclopedia/ships/?application_id=123456)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
* `limit` : Number of returned entries (fewer can be returned, but not more than 100). If the limit sent exceeds 100, a limit of 100 is applied (by default).
* `nation` : Comma separated list of nations. Max limit is 100.
* `page_no` : Result page number. Default is 1. Min value is 1.
* `ship_id` : Comma separated list of ship IDs. Max limit is 100.
* `type` : Comma separated list of ship types. Max limit is 100. Valid values:
- "AirCarrier" — Aircraft carrier
- "Battleship" — Battleship
- "Destroyer" — Destroyer
- "Cruiser" — Cruiser
"""
def ships(opts \\ %{}) do
constructed_get(@encyclopedia_ships, opts)
end
@doc """
Encyclopedia.achievements/1 searches WarGaming Warships Encyclopedia (in the configured region) and returns achievement info.
[Official Reference](https://api.worldofwarships.com/wows/encyclopedia/achievements/?application_id=123456)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def achievements(opts \\ %{}) do
constructed_get(@encyclopedia_achievements, opts)
end
@doc """
Encyclopedia.ship_parameters/2 searches WarGaming Warships Encyclopedia (in the configured region) and returns ship configuration options info.
[Official Reference](https://api.worldofwarships.com/wows/encyclopedia/shipprofile/?application_id=123456)
Returns `{:ok, response_map}` or `{:error, error_map}`
## Available Options
* `fields` : Comma separated list of fields. Embedded fields separated by periods. To exclude a field, prefix it with a `-`. Returns all fields if one of the fields is undefined.
* `language` : Default "en". Available options:
- "cs" — Čeština
- "de" — Deutsch
- "en" — English (by default)
- "es" — Español
- "fr" — Français
- "ja" — 日本語
- "pl" — Polski
- "ru" — Русский
- "th" — ไทย
- "zh-tw" — 繁體中文
"""
def ship_parameters(ship_id, opts \\ %{}) do
constructed_get(:ship_id, ship_id, @encyclopedia_ship_parameters, opts)
end
end
|
lib/wargaming/warships/encyclopedia.ex
| 0.862685
| 0.641303
|
encyclopedia.ex
|
starcoder
|
defmodule ELA.Vector do
alias :math, as: Math
@moduledoc"""
Contains operations for working with vectors.
"""
@doc"""
Returns a zero vector of the provided dimension.
## Examples
iex> Vector.new(3)
[0, 0, 0]
"""
@spec new(number) :: [number]
def new(n) when not is_number(n),
do: raise(ArgumentError, "Size provide has to be a number.")
def new(n) do
for _ <- 1..n, do: 0
end
@doc"""
Performs elementwise addition.
## Examples
iex> Vector.add([1, 2, 1], [2, 2, 2])
[3, 4, 3]
"""
@spec add([number], [number]) :: [number]
def add(u, v) when length(u) !== length(v),
do: raise(ArgumentError, "The number of elements in the vectors must match.")
def add(u, v) do
for {a, b} <- Enum.zip(u, v), do: a + b
end
@doc"""
Performs elementwise subtraction.
## Examples
iex> Vector.sub([1, 2, 1], [2, 2, 2])
[-1, 0, -1]
"""
@spec sub([number], [number]) :: [number]
def sub(u, v) when length(u) !== length(v),
do: raise(ArgumentError, "The number of elements in the vectors must match.")
def sub(u, v) do
add(u, Enum.map(v, fn(x) -> -x end))
end
@doc"""
Performs elementwise multiplication between two vectors.
This is the Hadamard product, but for vectors.
## Examples
iex> Vector.hadmard([1, 2], [2, 2])
[2, 4]
"""
@spec hadmard([number], [number]) :: [number]
def hadmard(u, v) when length(u) !== length(v),
do: raise(ArgumentError, "The number of elements in the vectors must match.")
def hadmard(u, v) do
Enum.zip(u, v) |> Enum.map(fn({a, b}) -> a*b end)
end
@doc"""
Calculates the cross product.
Is only defined for vectors with size three.
## Examples
iex> Vector.cross([1, 2, 1], [2, 2, 2])
[2, 0, -2]
"""
@spec cross([number], [number]) :: [number]
def cross(u, v) when length(u) !== 3 or length(v) !== 3,
do: raise(ArgumentError, "The cross product is only defined for vectors with three elements.")
def cross(u, v) do
u = List.to_tuple(u)
v = List.to_tuple(v)
[elem(u, 1)*elem(v, 2) - elem(u, 2)*elem(v, 1),
elem(u, 2)*elem(v, 0) - elem(u, 0)*elem(v, 2),
elem(u, 0)*elem(v, 1) - elem(u, 1)*elem(v, 0)]
end
@doc"""
Elementwise multiplication with a scalar.
## Examples
iex> Vector.scalar([2, 2, 2], 2)
[4, 4, 4]
"""
@spec scalar([number], number) :: [number]
def scalar(v, s) do
Enum.map(v, fn(x) -> x*s end)
end
@doc"""
Calculates the dot product.
Multiplying empty vectors returns 0.
## Examples
iex> Vector.dot([1, 2, 1], [2, 2, 2])
8
"""
@spec dot([number], [number]) :: number
def dot(u, v) when length(u) !== length(v),
do: raise(ArgumentError, "The number of elements in the vectors must match.")
def dot(u, v) do
Enum.zip(u, v) |> Enum.reduce(0, fn({a, b}, acc) -> acc + a*b end)
end
@doc"""
Transposes the vector. Column vectors are two-dimensional.
## Examples
iex> Vector.transp([1, 1, 1])
[[1],
[1],
[1]]
"""
def transp(v) when is_number(hd(v)) do
Enum.map(v, fn(x) -> [x] end)
end
def transp(v) when is_list(hd(v)) do
List.flatten(v)
end
@doc"""
Calculates the Euclidean norm of a vector.
## Examples
iex> Vector.norm([3, 4])
5.0
"""
@spec norm([number]) :: number
def norm(v) do
Enum.reduce(v, 0, fn(e, acc) -> acc + Math.pow(e, 2) end)
|> Math.sqrt()
end
end
|
lib/vector.ex
| 0.895353
| 0.722086
|
vector.ex
|
starcoder
|
defmodule Workflows.Workflow do
@moduledoc false
alias Workflows.Activity
alias Workflows.Command
alias Workflows.Event
alias Workflows.State
@type activities :: %{Activity.name() => Activity.t()}
@type t :: %__MODULE__{
start_at: Activity.name(),
activities: activities()
}
@type project_result :: {:continue, State.t()} | {:succeed, Activity.args()} | {:error, term()}
defstruct [:activities, :start_at]
@spec parse(map()) :: {:ok, t()} | {:error, term()}
def parse(definition) do
do_parse(definition)
end
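# Example (illustrative; each state definition must be whatever
# `Workflows.Activity.parse/2` accepts):
#
#     {:ok, workflow} =
#       Workflows.Workflow.parse(%{
#         "StartAt" => "hello",
#         "States" => %{"hello" => hello_state_definition}
#       })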
@spec create(Activity.name(), activities()) :: t()
def create(start_at, activities) do
%__MODULE__{
start_at: start_at,
activities: activities
}
end
@spec starting_activity(t()) :: {:ok, Activity.t()} | {:error, term()}
def starting_activity(workflow) do
activity(workflow, workflow.start_at)
end
@spec activity(t(), Activity.name()) :: {:ok, Activity.t()} | {:error, term()}
def activity(workflow, name) do
Map.fetch(workflow.activities, name)
end
@spec execute(t(), State.t(), Activity.ctx()) :: State.execute_result()
def execute(workflow, state, ctx) do
with {:ok, current_activity} <- activity(workflow, state.activity) do
State.execute(state, current_activity, ctx)
end
end
@spec execute(t(), State.t(), Activity.ctx(), Command.t()) :: State.execute_command_result()
def execute(workflow, state, ctx, cmd) do
with {:ok, current_activity} <- activity(workflow, state.activity) do
State.execute(state, current_activity, ctx, cmd)
end
end
@spec project(t(), State.t() | nil, list(Event.t()) | Event.t()) :: project_result()
def project(workflow, state, events) do
do_project(workflow, state, events)
end
## Private
defp do_parse(%{"StartAt" => start_at, "States" => states}) do
with {:ok, states} <- parse_states(Map.to_list(states), []) do
{:ok, create(start_at, states)}
end
end
defp do_parse(_definition), do: {:error, "Definition requires StartAt and States fields"}
defp parse_states([], acc), do: {:ok, Enum.into(acc, %{})}
defp parse_states([{state_name, state_def} | states], acc) do
with {:ok, state} <- Activity.parse(state_name, state_def) do
parse_states(states, [{state_name, state} | acc])
end
end
defp do_project(_workflow, state, []) do
{:continue, state}
end
defp do_project(workflow, state, [event | events]) do
  case do_project(workflow, state, event) do
    {:continue, new_state} -> do_project(workflow, new_state, events)
    {:succeed, result} -> {:succeed, result}
    {:error, reason} -> {:error, reason}
  end
end
defp do_project(workflow, state, event) do
with {:ok, current_activity} <- activity(workflow, state.activity) do
case State.project(state, current_activity, event) do
{:stay, new_state} ->
{:continue, new_state}
{:transition, {:next, activity_name}, args} ->
with {:ok, new_activity} <- activity(workflow, activity_name) do
new_state = State.create(new_activity, args)
{:continue, new_state}
end
{:transition, :end, result} ->
{:succeed, result}
{:succeed, result} ->
{:succeed, result}
end
end
end
end
|
lib/workflows/workflow.ex
| 0.760428
| 0.460713
|
workflow.ex
|
starcoder
|
defmodule ExChangeRate.Utils.Currency do
@moduledoc """
Functions for calculation, casting, conversion and formatting of currencies
"""
@doc """
Creates a new currency representation
"""
def new(value, currency), do: Money.new(value, currency)
@doc """
Extracts the value from a currency representation
"""
def get_value(%Money{amount: amount}), do: amount
@doc """
Given two exchange rates in a common base (EUR by default), calculates
an exchange rate between the two
"""
@spec calculate_exchange_rate(from_rate :: float() | integer(), to_rate :: float() | integer()) ::
Decimal.t()
def calculate_exchange_rate(from_rate, to_rate)
when is_float(from_rate) and is_float(to_rate) do
from_rate = from_rate |> Decimal.from_float() |> Decimal.round(5)
to_rate = to_rate |> Decimal.from_float() |> Decimal.round(5)
to_rate
|> Decimal.div(from_rate)
|> Decimal.round(5)
end
def calculate_exchange_rate(from_rate, to_rate)
when is_integer(from_rate) or is_integer(to_rate) do
from_rate = if is_integer(from_rate), do: from_rate / 1, else: from_rate
to_rate = if is_integer(to_rate), do: to_rate / 1, else: to_rate
calculate_exchange_rate(from_rate, to_rate)
end
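# e.g. with EUR-based rates (illustrative values): calculate_exchange_rate(1.1, 5.5)
# divides 5.50000 by 1.10000 and returns a Decimal equal to 5.00000.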
@doc """
Given an amount, a conversion rate and a target currency, returns
amount in target currency
"""
@spec convert(amount :: integer(), rate :: Decimal.t(), target_currency :: String.t()) ::
Money.t()
def convert(amount, rate, target_currency) do
amount
|> new(target_currency)
|> Money.multiply(rate)
end
@doc """
Formats given currencies or rates to a string representation
"""
@spec format_to_string(amount_currency_pair :: Money.t() | Decimal.t()) :: String.t()
def format_to_string(%Money{} = amount_currency_pair) do
Money.to_string(amount_currency_pair, separator: ".", delimiter: ",")
end
def format_to_string(%Decimal{} = rate) do
Decimal.to_string(rate, :normal)
end
@doc """
Returns a supported list of currencies
"""
def supported_currencies_list do
[
"AED",
"AFN",
"ALL",
"AMD",
"ANG",
"AOA",
"ARS",
"AUD",
"AWG",
"AZN",
"BAM",
"BBD",
"BDT",
"BGN",
"BHD",
"BIF",
"BMD",
"BND",
"BOB",
"BRL",
"BSD",
"BTC",
"BTN",
"BWP",
"BYN",
"BYR",
"BZD",
"CAD",
"CDF",
"CHF",
"CLF",
"CLP",
"CNY",
"COP",
"CRC",
"CUC",
"CUP",
"CVE",
"CZK",
"DJF",
"DKK",
"DOP",
"DZD",
"EGP",
"ERN",
"ETB",
"EUR",
"FJD",
"FKP",
"GBP",
"GEL",
"GGP",
"GHS",
"GIP",
"GMD",
"GNF",
"GTQ",
"GYD",
"HKD",
"HNL",
"HRK",
"HTG",
"HUF",
"IDR",
"ILS",
"IMP",
"INR",
"IQD",
"IRR",
"ISK",
"JEP",
"JMD",
"JOD",
"JPY",
"KES",
"KGS",
"KHR",
"KMF",
"KPW",
"KRW",
"KWD",
"KYD",
"KZT",
"LAK",
"LBP",
"LKR",
"LRD",
"LSL",
"LTL",
"LVL",
"LYD",
"MAD",
"MDL",
"MGA",
"MKD",
"MMK",
"MNT",
"MOP",
"MRO",
"MUR",
"MVR",
"MWK",
"MXN",
"MYR",
"MZN",
"NAD",
"NGN",
"NIO",
"NOK",
"NPR",
"NZD",
"OMR",
"PAB",
"PEN",
"PGK",
"PHP",
"PKR",
"PLN",
"PYG",
"QAR",
"RON",
"RSD",
"RUB",
"RWF",
"SAR",
"SBD",
"SCR",
"SDG",
"SEK",
"SGD",
"SHP",
"SLL",
"SOS",
"SRD",
"STD",
"SVC",
"SYP",
"SZL",
"THB",
"TJS",
"TMT",
"TND",
"TOP",
"TRY",
"TTD",
"TWD",
"TZS",
"UAH",
"UGX",
"USD",
"UYU",
"UZS",
"VEF",
"VND",
"VUV",
"WST",
"XAF",
"XAG",
"XAU",
"XCD",
"XDR",
"XOF",
"XPF",
"YER",
"ZAR",
"ZMK",
"ZMW",
"ZWL"
]
end
end
|
lib/ex_change_rate/utils/currency.ex
| 0.862887
| 0.632247
|
currency.ex
|
starcoder
|
defmodule Mix.Tasks.Xref do
use Mix.Task
alias Mix.Tasks.Compile.Elixir, as: E
import Mix.Compilers.Elixir, only: [read_manifest: 2, source: 1, source: 2, module: 1]
@shortdoc "Performs cross reference checks"
@recursive true
@moduledoc """
Performs cross reference checks between modules.
## Xref modes
The `xref` task expects a mode as first argument:
mix xref MODE
All available modes are discussed below.
### warnings
Prints warnings for violated cross reference checks:
mix xref warnings
This is the mode used by Mix during compilation.
### unreachable
Prints all unreachable "file:line: module.function/arity" entries:
mix xref unreachable
The "file:line" represents the file and line a call to an unknown
"module.function/arity" is made.
### callers CALLEE
Prints all callers of the given `CALLEE`, which can be one of: `Module`,
`Module.function`, or `Module.function/arity`. Examples:
mix xref callers MyMod
mix xref callers MyMod.fun
mix xref callers MyMod.fun/3
### graph
Prints a file dependency graph where an edge from `A` to `B` indicates
that `A` depends on `B`.
mix xref graph --format dot
The following options are accepted:
* `--exclude` - paths to exclude
* `--source` - displays all files that the given source file references (directly or indirectly)
* `--sink` - displays all files that reference the given file (directly or indirectly)
* `--format` - can be set to one of:
* `pretty` - uses Unicode codepoints for formatting the graph.
This is the default except on Windows
* `plain` - does not use Unicode codepoints for formatting the graph.
This is the default on Windows
* `dot` - produces a DOT graph description in `xref_graph.dot` in the
current directory. Warning: this will override any previously generated file
The `--source` and `--sink` options are particularly useful when trying to understand how
the modules in a particular file interact with the whole system.
## Shared options
Those options are shared across all modes:
* `--no-compile` - does not compile even if files require compilation
* `--no-deps-check` - does not check dependencies
* `--no-archives-check` - does not check archives
* `--no-elixir-version-check` - does not check the Elixir version from mix.exs
## Configuration
All configuration for Xref should be placed under the key `:xref`.
* `:exclude` - a list of modules and `{module, function, arity}` tuples to ignore when checking
cross references. For example: `[MissingModule, {MissingModule2, :missing_func, 2}]`
"""
@switches [compile: :boolean, deps_check: :boolean, archives_check: :boolean,
elixir_version_check: :boolean, exclude: :keep, format: :string,
source: :string, sink: :string]
@doc """
Runs this task.
"""
@spec run(OptionParser.argv) :: :ok | :error
def run(args) do
{opts, args} =
OptionParser.parse!(args, strict: @switches)
Mix.Task.run("loadpaths")
if Keyword.get(opts, :compile, true) do
Mix.Task.run("compile")
end
case args do
["warnings"] ->
warnings()
["unreachable"] ->
unreachable()
["callers", callee] ->
callers(callee)
["graph"] ->
graph(opts)
_ ->
Mix.raise "xref doesn't support this command. For more information run \"mix help xref\""
end
end
## Modes
defp warnings() do
if unreachable(&print_warnings/2) == [] do
:ok
else
:error
end
end
defp unreachable() do
if unreachable(&print_entry/2) == [] do
:ok
else
:error
end
end
defp callers(callee) do
callee
|> filter_for_callee()
|> do_callers()
:ok
end
defp graph(opts) do
write_graph(file_references(), excluded(opts), opts)
:ok
end
## Unreachable
defp unreachable(pair_fun) do
excludes = excludes()
each_source_entries(&source_warnings(&1, excludes), pair_fun)
end
defp source_warnings(source, excludes) do
source(runtime_dispatches: runtime_dispatches) = source
for {module, func_arity_lines} <- runtime_dispatches,
exports = load_exports(module),
{{func, arity}, lines} <- func_arity_lines,
warning = unreachable_mfa(exports, module, func, arity, lines, excludes),
do: warning
end
defp load_exports(module) do
if :code.is_loaded(module) do
# If the module is loaded, we will use the faster function_exported?/3 check
module
else
# Otherwise we get all exports from :beam_lib to avoid loading modules
with file when is_list(file) <- :code.which(module),
{:ok, {^module, [exports: exports]}} <- :beam_lib.chunks(file, [:exports]) do
exports
else
_ -> :unknown_module
end
end
end
defp unreachable_mfa(exports, module, func, arity, lines, excludes) do
cond do
excluded?(module, func, arity, excludes) ->
nil
skip?(module, func, arity) ->
nil
exports == :unknown_module ->
{Enum.sort(lines), :unknown_module, module, func, arity, nil}
is_atom(exports) and not function_exported?(module, func, arity) ->
{Enum.sort(lines), :unknown_function, module, func, arity, nil}
is_list(exports) and {func, arity} not in exports ->
{Enum.sort(lines), :unknown_function, module, func, arity, exports}
true ->
nil
end
end
## Print entries
defp print_entry(file, entries) do
entries
|> Enum.sort()
|> Enum.each(&IO.write(format_entry(file, &1)))
end
defp format_entry(file, {lines, _, module, function, arity, _}) do
for line <- lines do
[Exception.format_file_line(file, line), ?\s, Exception.format_mfa(module, function, arity), ?\n]
end
end
## Print warnings
defp print_warnings(file, entries) do
prefix = IO.ANSI.format([:yellow, "warning: "])
entries
|> Enum.sort()
|> Enum.each(&IO.write(:stderr, [prefix, format_warning(file, &1), ?\n]))
end
defp format_warning(file, {lines, :unknown_function, module, function, arity, exports}) do
message =
[module: module, function: function, arity: arity, reason: :"function not exported", exports: exports]
|> UndefinedFunctionError.exception()
|> Exception.message()
[message, "\n", format_file_lines(file, lines)]
end
defp format_warning(file, {lines, :unknown_module, module, function, arity, _}) do
["function ", Exception.format_mfa(module, function, arity),
" is undefined (module #{inspect module} is not available)\n" | format_file_lines(file, lines)]
end
defp format_file_lines(file, [line]) do
format_file_line(file, line)
end
defp format_file_lines(file, lines) do
["Found at #{length(lines)} locations:\n" |
Enum.map(lines, &format_file_line(file, &1))]
end
defp format_file_line(file, line) do
[" ", file, ?:, Integer.to_string(line), ?\n]
end
## "Unreachable" helpers
@protocol_builtins for {_, type} <- Protocol.__builtin__(), do: type
defp skip?(:erlang, func, 2) when func in [:andalso, :orelse] do
true
end
defp skip?(module, :__impl__, 1) do
{maybe_protocol, maybe_builtin} = module |> Module.split() |> Enum.split(-1)
maybe_protocol = Module.concat(maybe_protocol)
maybe_builtin = Module.concat(maybe_builtin)
maybe_builtin in @protocol_builtins and
Code.ensure_loaded?(maybe_protocol) and
function_exported?(maybe_protocol, :__protocol__, 1)
end
defp skip?(_, _, _) do
false
end
defp excludes() do
Mix.Project.config()
|> Keyword.get(:xref, [])
|> Keyword.get(:exclude, [])
|> MapSet.new()
end
defp excluded?(module, func, arity, excludes) do
MapSet.member?(excludes, module) or MapSet.member?(excludes, {module, func, arity})
end
## Callers
defp do_callers(filter) do
each_source_entries(&source_calls_for_filter(&1, filter), &print_calls/2)
end
defp source_calls_for_filter(source, filter) do
runtime_dispatches = source(source, :runtime_dispatches)
compile_dispatches = source(source, :compile_dispatches)
dispatches = runtime_dispatches ++ compile_dispatches
calls =
for {module, func_arity_lines} <- dispatches,
{{func, arity}, lines} <- func_arity_lines,
filter.({module, func, arity}),
do: {module, func, arity, lines}
Enum.reduce calls, %{}, fn {module, func, arity, lines}, merged_calls ->
lines = MapSet.new(lines)
Map.update(merged_calls, {module, func, arity}, lines, &MapSet.union(&1, lines))
end
end
## Print callers
defp print_calls(file, calls) do
calls
|> Enum.sort()
|> Enum.each(&IO.write(format_call(file, &1)))
end
defp format_call(file, {{module, func, arity}, lines}) do
for line <- Enum.sort(lines),
do: [file, ":", to_string(line), ": ", Exception.format_mfa(module, func, arity), ?\n]
end
## "Callers" helpers
defp filter_for_callee(callee) do
case Mix.Utils.parse_mfa(callee) do
{:ok, mfa_list} ->
mfa_list_length = length(mfa_list)
fn {module, function, arity} ->
mfa_list == Enum.take([module, function, arity], mfa_list_length)
end
:error ->
Mix.raise "xref callers CALLEE expects Module, Module.function, or Module.function/arity, " <>
"got: " <> callee
end
end
## Graph helpers
defp excluded(opts) do
opts
|> Keyword.get_values(:exclude)
|> Enum.flat_map(&[{&1, nil}, {&1, "(compile)"}, {&1, "(runtime)"}])
end
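# Builds a map of source file -> referenced files by joining every compiled
# module to its source entry; references are tagged "(compile)" or left untagged
# for runtime, and references to modules outside the project are dropped.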
defp file_references() do
module_sources =
for manifest <- E.manifests(),
manifest_data = read_manifest(manifest, ""),
module(module: module, source: source) <- manifest_data,
source = Enum.find(manifest_data, &match?(source(source: ^source), &1)),
do: {module, source},
into: %{}
all_modules = MapSet.new(module_sources, &elem(&1, 0))
Map.new module_sources, fn {module, source} ->
source(runtime_references: runtime, compile_references: compile, source: file) = source
compile_references =
compile
|> MapSet.new()
|> MapSet.delete(module)
|> MapSet.intersection(all_modules)
|> Enum.filter(&module_sources[&1] != source)
|> Enum.map(&{source(module_sources[&1], :source), "(compile)"})
runtime_references =
runtime
|> MapSet.new()
|> MapSet.delete(module)
|> MapSet.intersection(all_modules)
|> Enum.filter(&module_sources[&1] != source)
|> Enum.map(&{source(module_sources[&1], :source), nil})
{file, compile_references ++ runtime_references}
end
end
defp write_graph(file_references, excluded, opts) do
{root, file_references} =
case {opts[:source], opts[:sink]} do
{nil, nil} ->
{Enum.map(file_references, &{elem(&1, 0), nil}) -- excluded, file_references}
{source, nil} ->
if file_references[source] do
{[{source, nil}], file_references}
else
Mix.raise "Source could not be found: #{source}"
end
{nil, sink} ->
if file_references[sink] do
file_references = filter_for_sink(file_references, sink)
roots =
file_references
|> Map.delete(sink)
|> Enum.map(&{elem(&1, 0), nil})
{roots -- excluded, file_references}
else
Mix.raise "Sink could not be found: #{sink}"
end
{_, _} ->
Mix.raise "mix xref graph expects only one of --source and --sink"
end
callback =
fn {file, type} ->
children = Map.get(file_references, file, [])
{{file, type}, children -- excluded}
end
if opts[:format] == "dot" do
Mix.Utils.write_dot_graph!("xref_graph.dot", "xref graph",
root, callback, opts)
"""
Generated "xref_graph.dot" in the current directory. To generate a PNG:
dot -Tpng xref_graph.dot -o xref_graph.png
For more options see http://www.graphviz.org/.
"""
|> String.trim_trailing()
|> Mix.shell.info()
else
Mix.Utils.print_tree(root, callback, opts)
end
end
defp filter_for_sink(file_references, sink) do
file_references
|> invert_references()
|> do_filter_for_sink([{sink, nil}], %{})
|> invert_references()
end
defp do_filter_for_sink(file_references, new_nodes, acc) do
Enum.reduce new_nodes, acc, fn {new_node_name, _type}, acc ->
new_nodes = file_references[new_node_name]
if acc[new_node_name] || !new_nodes do
acc
else
do_filter_for_sink(file_references, new_nodes, Map.put(acc, new_node_name, new_nodes))
end
end
end
defp invert_references(file_references) do
Enum.reduce file_references, %{}, fn {file, references}, acc ->
Enum.reduce references, acc, fn {reference, type}, acc ->
Map.update(acc, reference, [{file, type}], &[{file, type} | &1])
end
end
end
## Helpers
defp each_source_entries(entries_fun, pair_fun) do
for manifest <- E.manifests(),
source(source: file) = source <- read_manifest(manifest, ""),
entries = entries_fun.(source),
entries != [] and entries != %{},
do: pair_fun.(file, entries)
end
end
|
lib/mix/lib/mix/tasks/xref.ex
| 0.837952
| 0.544741
|
xref.ex
|
starcoder
|
defmodule BrazilianUtils.Phone do
@moduledoc false
alias BrazilianUtils.Helper
@spec is_valid?(String.t()) :: boolean()
def is_valid?(phone) when is_binary(phone) do
digits = Helper.only_numbers(phone)
is_valid_phone_length?(digits) and is_valid_first_number?(digits) and is_valid_ddd?(digits)
end
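# Illustrative example: is_valid?("(11) 98765-4321") strips the input to
# "11987654321", then checks the length, the first digit after the area code,
# and the area code ("11") against the known DDD list.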
@spec is_valid_mobile_phone?(String.t()) :: boolean()
def is_valid_mobile_phone?(phone) when is_binary(phone) do
digits = Helper.only_numbers(phone)
is_valid_mobile_phone_length?(digits) and is_valid_mobile_first_number?(digits) and
is_valid_ddd?(digits)
end
@spec is_valid_landline_phone?(String.t()) :: boolean()
def is_valid_landline_phone?(phone) when is_binary(phone) do
digits = Helper.only_numbers(phone)
is_valid_landline_phone_length?(digits) and is_valid_landline_first_number?(digits) and
is_valid_ddd?(digits)
end
@spec phone_min_length :: 10
defp phone_min_length, do: 10
@spec phone_max_length :: 11
defp phone_max_length, do: 11
@spec mobile_valid_first_numbers :: [integer()]
defp mobile_valid_first_numbers, do: [6, 7, 8, 9]
@spec landline_valid_first_numbers :: [integer()]
defp landline_valid_first_numbers, do: [2, 3, 4, 5]
@spec is_valid_ddd?(String.t()) :: boolean()
defp is_valid_ddd?(phone) when is_binary(phone) do
ddd = phone |> String.slice(0..1)
BrazilianUtils.DataSource.get_valid_area_codes()
|> Enum.member?(ddd)
end
@spec is_valid_mobile_phone_length?(String.t()) :: boolean()
defp is_valid_mobile_phone_length?(phone) when is_binary(phone) do
String.length(phone) == phone_max_length()
end
@spec is_valid_landline_phone_length?(String.t()) :: boolean()
defp is_valid_landline_phone_length?(phone) when is_binary(phone) do
l = String.length(phone)
l <= phone_max_length() and l >= phone_min_length()
end
@spec is_valid_phone_length?(String.t()) :: boolean()
defp is_valid_phone_length?(phone) when is_binary(phone) do
is_valid_landline_phone_length?(phone) or is_valid_mobile_phone_length?(phone)
end
@spec is_valid_mobile_first_number?(String.t()) :: boolean()
defp is_valid_mobile_first_number?(phone) when is_binary(phone) do
first_digit = String.at(phone, 2) |> String.to_integer()
mobile_valid_first_numbers()
|> Enum.member?(first_digit)
end
@spec is_valid_landline_first_number?(String.t()) :: boolean()
defp is_valid_landline_first_number?(phone) when is_binary(phone) do
first_digit = String.at(phone, 2) |> String.to_integer()
landline_valid_first_numbers()
|> Enum.member?(first_digit)
end
@spec is_valid_first_number?(String.t()) :: boolean()
defp is_valid_first_number?(phone) when is_binary(phone) do
if String.length(phone) == phone_min_length() do
is_valid_landline_first_number?(phone)
else
is_valid_mobile_first_number?(phone)
end
end
end
|
lib/brazilian_utils/phone.ex
| 0.759493
| 0.413921
|
phone.ex
|
starcoder
|
defmodule Mix.Tasks.Liberator.Chart do
@shortdoc "Generates source text for a chart of Liberator's decision tree"
@moduledoc """
Generates source text for a decision tree chart for a Liberator resource.
The chart is compatible with the [Graphviz](https://graphviz.org/) graph visualization software.
```sh
mix liberator.chart
```
By default, this function will print the default decision tree to standard output.
This task can also take a module argument for any module that `use`s `Liberator.Resource`,
in which case the decision tree for the given module will be printed.
```sh
mix liberator.chart MyApp.MyResource
```
You can also provide the `--output` or `-o` option to print the chart source to a file.
```sh
mix liberator.chart -o myresource.dot MyApp.MyResource
```
## Generating a chart with the returned source code
Unfortunately, there is no Graphviz binding for Elixir.
If you want to create an actual image of your chart,
you will have to install [Graphviz](https://graphviz.org/),
or use one of its language bindings for another language.
Once you have installed Graphviz, you can run a command like the following to generate an image
```sh
dot myresource.dot -Tsvg -o myresource.svg
```
"""
use Mix.Task
def run(args) do
{opts, argv, _errors} =
OptionParser.parse(args, aliases: [o: :output], strict: [output: :string])
if length(argv) > 1 do
IO.puts(:stderr, "More than one module name given, ignoring all after the first")
end
base_module =
if Enum.empty?(argv) do
Liberator.Default.DecisionTree
else
"Elixir.#{List.first(argv)}"
|> String.to_existing_atom()
end
Code.ensure_loaded(base_module)
unless function_exported?(base_module, :decisions, 0) and
function_exported?(base_module, :actions, 0) and
function_exported?(base_module, :handlers, 0) do
raise "The given module, #{base_module}, does not implement " <>
"the required functions from Liberator.Resource. " <>
"Make sure that module has `use Liberator.Resource` in it."
end
chart = dot(base_module)
if filename = Keyword.get(opts, :output) do
File.write!(filename, chart)
IO.puts("Chart saved to #{filename}")
else
IO.puts(chart)
end
end
defp dot(base_module) do
handler_rank_group =
base_module.handlers()
|> Map.keys()
|> Enum.map(fn handler ->
~s("#{handler}")
end)
|> Enum.join(" ")
handler_shapes =
base_module.handlers()
|> Map.keys()
|> Enum.flat_map(fn handler ->
[
~s("#{handler}" [shape=box])
]
end)
|> Enum.join("\n")
decisions =
base_module.decisions()
|> Enum.flat_map(fn {decision_fn, {true_step, false_step}} ->
[
~s("#{decision_fn}" -> "#{true_step}" [label="yes"]),
~s("#{decision_fn}" -> "#{false_step}" [label="no"])
]
end)
|> Enum.join("\n")
actions =
base_module.actions()
|> Enum.flat_map(fn {action, after_action} ->
[
~s("#{action}" [shape=box]),
~s("#{action}" -> "#{after_action}")
]
end)
|> Enum.join("\n")
"""
strict digraph G {
{ rank=same #{handler_rank_group}}
#{handler_shapes}
#{decisions}
#{actions}
}
"""
end
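# For a single decision the emitted body contains edges like (illustrative):
#
#   "service_available?" -> "known_method?" [label="yes"]
#   "service_available?" -> "handle_service_unavailable" [label="no"]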
end
|
lib/mix/tasks/liberator.chart.ex
| 0.830147
| 0.814754
|
liberator.chart.ex
|
starcoder
|
defmodule ExAdvent.Day01 do
@moduledoc """
# Day 1: Chronal Calibration
"We've detected some temporal anomalies," one of Santa's Elves at the Temporal Anomaly Research and Detection Instrument Station tells you. She sounded pretty worried when she called you down here. "At 500-year intervals into the past, someone has been changing Santa's history!"
"The good news is that the changes won't propagate to our time stream for another 25 days, and we have a device" - she attaches something to your wrist - "that will let you fix the changes with no such propagation delay. It's configured to send you 500 years further into the past every few days; that was the best we could do on such short notice."
"The bad news is that we are detecting roughly fifty anomalies throughout time; the device will indicate fixed anomalies with stars. The other bad news is that we only have one device and you're the best person for the job! Good lu--" She taps a button on the device and you suddenly feel like you're falling. To save Christmas, you need to get all fifty stars by December 25th.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
"""
@input_path Path.join(:code.priv_dir(:ex_advent), "day_01_input")
@doc """
I am given a text file with a newline-separated set of frequency
changes - e.g.
+3
+3
+4
-2
-4
To make my life easier, clean that up by converting the
file input to a list of signed ints.
"""
def clean_sequence(input_path \\ @input_path) do
input_path
|> File.stream!()
|> Stream.map(fn f ->
f
|> String.trim_trailing()
|> String.to_integer()
end)
|> Enum.into([])
end
@doc """
# Part 1
After feeling like you've been falling for a few minutes, you look at the device's tiny screen. "Error: Device must be calibrated before first use. Frequency drift detected. Cannot maintain destination lock." Below the message, the device shows a sequence of changes in frequency (your puzzle input). A value like +6 means the current frequency increases by 6; a value like -3 means the current frequency decreases by 3.
For example, if the device displays frequency changes of +1, -2, +3, +1, then starting from a frequency of zero, the following changes would occur:
Current frequency 0, change of +1; resulting frequency 1.
Current frequency 1, change of -2; resulting frequency -1.
Current frequency -1, change of +3; resulting frequency 2.
Current frequency 2, change of +1; resulting frequency 3.
In this example, the resulting frequency is 3.
Here are other example situations:
+1, +1, +1 results in 3
+1, +1, -2 results in 0
-1, -2, -3 results in -6
Starting with a frequency of zero, what is the resulting frequency after all of the changes in frequency have been applied?
"""
def calculate_frequency(input_list) do
Enum.sum(input_list)
end
@doc """
# Part Two
You notice that the device repeats the same frequency change list over and over. To calibrate the device, you need to find the first frequency it reaches twice.
For example, using the same list of changes above, the device would loop as follows:
Current frequency 0, change of +1; resulting frequency 1.
Current frequency 1, change of -2; resulting frequency -1.
Current frequency -1, change of +3; resulting frequency 2.
Current frequency 2, change of +1; resulting frequency 3.
(At this point, the device continues from the start of the list.)
Current frequency 3, change of +1; resulting frequency 4.
Current frequency 4, change of -2; resulting frequency 2, which has already been seen.
In this example, the first frequency reached twice is 2. Note that your device might need to repeat its list of frequency changes many times before a duplicate frequency is found, and that duplicates might be found while in the middle of processing the list.
Here are other examples:
+1, -1 first reaches 0 twice.
+3, +3, +4, -2, -4 first reaches 10 twice.
-6, +3, +8, +5, -6 first reaches 5 twice.
+7, +7, -2, -7, -4 first reaches 14 twice.
What is the first frequency your device reaches twice?
"""
def first_repeated(input_list) do
:ets.new(:hash, [:named_table, :set, :private])
input_list
|> Stream.cycle()
|> Enum.reduce_while({0, :ets.insert(:hash, {0, nil})}, fn f, {acc, _} ->
acc = acc + f
case :ets.lookup(:hash, acc) do
[] ->
{:cont, {acc, :ets.insert(:hash, {acc, nil})}}
[_] ->
:ets.delete(:hash)
{:halt, acc}
end
end)
end
end
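For comparison, here is a minimal sketch of the same duplicate search using a `MapSet` accumulator instead of a named ETS table. The module name is hypothetical; this is not part of the original solution, just an alternative that avoids the shared named table:

```
defmodule ExAdvent.Day01.MapSetVariant do
  # Same semantics as first_repeated/1 above, but the set of seen
  # frequencies travels in the accumulator rather than in ETS.
  def first_repeated(input_list) do
    input_list
    |> Stream.cycle()
    |> Enum.reduce_while({0, MapSet.new([0])}, fn change, {freq, seen} ->
      freq = freq + change

      if MapSet.member?(seen, freq) do
        {:halt, freq}
      else
        {:cont, {freq, MapSet.put(seen, freq)}}
      end
    end)
  end
end
```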
|
lib/ex_advent/day_01.ex
| 0.844168
| 0.78968
|
day_01.ex
|
starcoder
|
defmodule EctoMnesia.Table do
@moduledoc """
This module provides interface to perform CRUD and select operations on a Mnesia table.
"""
alias :mnesia, as: Mnesia
@doc """
Insert a record into Mnesia table.
"""
def insert(table, record, _opts \\ []) when is_tuple(record) do
table = get_name(table)
transaction(fn ->
key = elem(record, 1)
case _get(table, key) do
nil -> _insert(table, record)
_ -> {:error, :already_exists}
end
end)
end
defp _insert(table, record) do
case Mnesia.write(table, record, :write) do
:ok -> {:ok, record}
error -> {:error, error}
end
end
@doc """
Read record from Mnesia table.
"""
def get(table, key, _opts \\ []) do
table = get_name(table)
transaction(fn ->
_get(table, key)
end)
end
defp _get(table, key, lock \\ :read) do
case Mnesia.read(table, key, lock) do
[] -> nil
[res] -> res
end
end
@doc """
Update a record in a Mnesia table by key.
Supports partial updates: only the fields referenced in the `changes` list are modified.
This function automatically merges the changes into the record stored in Mnesia and writes the result back.
"""
def update(table, key, changes, _opts \\ []) when is_list(changes) do
table = get_name(table)
transaction(fn ->
case _get(table, key, :write) do
nil ->
{:error, :not_found}
stored_record ->
_insert(table, update_record(stored_record, changes))
end
end)
end
defp update_record(record, changes) do
data = Tuple.to_list(record)
changes
|> Enum.reduce(data, fn
{index, value}, record ->
List.replace_at(record, index + 1, value)
{index, :inc, value}, record ->
List.update_at(record, index + 1, fn
numeric when is_number(numeric) ->
numeric + value
nil ->
value
end)
{index, :push, value}, record ->
List.update_at(record, index + 1, fn
list when is_list(list) -> list ++ [value]
nil -> [value]
end)
{index, :pull, value}, record ->
List.update_at(record, index + 1, fn
list when is_list(list) -> Enum.filter(list, &(&1 != value))
nil -> []
end)
end)
|> List.to_tuple()
end
@doc """
Delete record from Mnesia table by key.
"""
def delete(table, key, _opts \\ []) do
table = get_name(table)
transaction(fn ->
:ok = Mnesia.delete(table, key, :write)
{:ok, key}
end)
end
@doc """
Select all records that match MatchSpec. You can limit result by passing third optional argument.
"""
def select(table, match_spec, limit \\ nil)
def select(table, match_spec, nil) do
table = get_name(table)
transaction(fn ->
Mnesia.select(table, match_spec, :read)
end)
end
def select(table, match_spec, limit) do
table = get_name(table)
transaction(fn ->
{result, _context} = Mnesia.select(table, match_spec, limit, :read)
result
end)
end
@doc """
Get count of records in a Mnesia table.
"""
def count(table), do: table |> get_name() |> Mnesia.table_info(:size)
@doc """
Get list of attributes that defined in Mnesia schema.
"""
def attributes(table) do
attributes =
table
|> get_name()
|> Mnesia.table_info(:attributes)
{:ok, attributes}
catch
:exit, {:aborted, {:no_exists, _, :attributes}} ->
{:error, :no_exists}
end
@doc """
Returns auto-incremented integer ID for table in Mnesia.
Sequence auto-generation is implemented as `mnesia:dirty_update_counter`.
"""
def next_id(table, inc \\ 1)
def next_id(table, inc) when is_binary(table), do: table |> get_name() |> next_id(inc)
def next_id(table, inc) when is_atom(table), do: Mnesia.dirty_update_counter({:id_seq, table}, inc)
@doc """
Run function `fun` inside a Mnesia transaction with a specific context.
Make sure you don't run any code with side effects inside a transaction,
because the transaction may be retried many times.
By default, the context is `:transaction`.
"""
def transaction(fun, context \\ :transaction) do
case activity(context, fun) do
{:error, {%{} = reason, stack}, _} ->
reraise reason, stack
{:error, reason, stack} ->
:erlang.raise(:error, reason, stack)
{:raise, err} ->
raise err
{:error, reason} ->
{:error, reason}
result ->
result
end
end
# Activity is hard to deal with because in a dirty context it exits instead of
# returning a value, while in a transactional context it returns normally
defp activity(context, fun) do
do_activity(context, fun)
catch
:exit, {:aborted, {:no_exists, [schema, _id]}} -> {:raise, "Schema #{inspect(schema)} does not exist"}
:exit, {:aborted, {:no_exists, schema}} -> {:raise, "Schema #{inspect(schema)} does not exist"}
:exit, {:aborted, :rollback} -> {:error, :rollback}
:exit, {:aborted, reason} -> {:error, reason, __STACKTRACE__}
:exit, reason -> {:error, reason, __STACKTRACE__}
end
defp do_activity(context, fun) do
case Mnesia.activity(context, fun) do
{:aborted, {:no_exists, [schema, _id]}} -> {:raise, "Schema #{inspect(schema)} does not exist"}
{:aborted, reason} -> {:error, reason}
{:atomic, result} -> result
result -> result
end
end
@doc """
Get the first key in the table, see `mnesia:first`.
"""
@spec first(atom) :: any | nil | no_return
def first(table) do
table = get_name(table)
case Mnesia.first(table) do
:"$end_of_table" -> nil
value -> value
end
end
@doc """
Get the next key in the table starting from the given key, see `mnesia:next`.
"""
@spec next(atom, any) :: any | nil | no_return
def next(table, key) do
table = get_name(table)
case Mnesia.next(table, key) do
:"$end_of_table" -> nil
value -> value
end
end
@doc """
Get the previous key in the table starting from the given key, see
`mnesia:prev`.
"""
@spec prev(atom, any) :: any | nil | no_return
def prev(table, key) do
table = get_name(table)
case Mnesia.prev(table, key) do
:"$end_of_table" -> nil
value -> value
end
end
@doc """
Get the last key in the table, see `mnesia:last`.
"""
@spec last(atom) :: any | nil | no_return
def last(table) do
table = get_name(table)
case Mnesia.last(table) do
:"$end_of_table" -> nil
value -> value
end
end
@doc """
Get Mnesia table name by binary or atom representation.
"""
def get_name(table) when is_atom(table), do: table
def get_name(table) when is_binary(table), do: String.to_atom(table)
end
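A hedged usage sketch of the API above. It assumes Mnesia is running and a table `:users` already exists with attributes `[:id, :name, :age]`, so records are `{:users, id, name, age}` tuples; all names and values are illustrative:

```
# Insert fails with {:error, :already_exists} if the key is taken.
{:ok, _record} = EctoMnesia.Table.insert(:users, {:users, 1, "Alice", 30})

# Read back by primary key.
{:users, 1, "Alice", 30} = EctoMnesia.Table.get(:users, 1)

# Partial update: each change is {index, value}; the element at position
# index + 1 of the record tuple is replaced, so index 1 here replaces "Alice".
{:ok, _record} = EctoMnesia.Table.update(:users, 1, [{1, "Alicia"}])
```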
|
lib/ecto_mnesia/table.ex
| 0.771413
| 0.612136
|
table.ex
|
starcoder
|
defmodule DataLogger.Destination.Supervisor do
@moduledoc """
Supervisor of a group of `DataLogger.Destination.Controller` workers for given `topic`.
For every configured destination, there will be a worker (unless prefixes are used).
For example if we configured a NoSQL destination and a relational destination,
for given document/schema used as `topic` a new `DataLogger.Destination.Supervisor` will be
created and it will be supervising two `DataLogger.Destination.Controller` workers.
If the destinations specify prefixes, by including the option `prefix: <prefix>`,
the supervisor will create and supervise loggers only for those destinations
whose prefix matches the beginning of the `topic` given to `start_link/2`.
If the destinations are:
destinations: [
{MemoryDestination, %{destination: 1, prefix: :blue}},
{MemoryDestination, %{destination: 2, prefix: :purple}}
]
and the supervisor is started with a `topic` of `"purple_1"`, it will only
start and supervise a process for `{MemoryDestination, %{destination: 2, prefix: :purple}}`.
Using destinations with prefixes, we can send part of our data to only a
subset of the configured destinations.
"""
use Supervisor
alias __MODULE__, as: Mod
alias DataLogger.Destination.Controller
@default_destinations []
@default_config [
destinations: @default_destinations
]
@doc false
def start_link(config \\ @default_config, topic: topic, name: name) do
Supervisor.start_link(Mod, {topic, config}, name: name)
end
@impl true
def init({topic, config}) do
all_destinations = Keyword.get(config, :destinations, @default_destinations)
destinations =
if Enum.any?(all_destinations, fn {_, options} -> Map.has_key?(options, :prefix) end) do
all_destinations
|> Enum.filter(fn {_, options} ->
options[:prefix] && String.starts_with?(to_string(topic), to_string(options[:prefix]))
end)
else
all_destinations
end
children =
destinations
|> Enum.map(fn {mod, options} ->
name = {:via, Registry, {DataLogger.Registry, {Controller, {topic, mod, options}}}}
%{
id: {topic, mod, options},
start:
{Controller, :start_link,
[[topic: topic, name: name, destination: %{module: mod, options: options}]]},
restart: :permanent,
shutdown: 5000,
type: :worker
}
end)
Supervisor.init(children, strategy: :one_for_one)
end
end
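A minimal start sketch. `MemoryDestination` is hypothetical here, and the children assume `DataLogger.Registry` is already running; note that the options are matched as `[topic: ..., name: ...]`, so the keyword order matters:

```
config = [
  destinations: [
    {MemoryDestination, %{destination: 1, prefix: :blue}},
    {MemoryDestination, %{destination: 2, prefix: :purple}}
  ]
]

# Only the :purple destination prefixes "purple_1", so a single
# DataLogger.Destination.Controller is started underneath.
{:ok, _pid} =
  DataLogger.Destination.Supervisor.start_link(config,
    topic: "purple_1",
    name: :purple_sup
  )
```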
|
lib/data_logger/destination/supervisor.ex
| 0.835484
| 0.648828
|
supervisor.ex
|
starcoder
|
defmodule APIDoc.Doc.Schema do
@moduledoc ~S"""
A schema definition for use as in and output type.
"""
require Logger
@typedoc ~S"""
The schema type.
"""
@type type ::
:integer
| :string
| :object
| :array
@typedoc ~S"""
The format for types that have different formats.
Example: `:int32` for `:integer`s.
"""
@type format ::
:int32
| :int64
@typedoc @moduledoc
@type t :: %__MODULE__{
name: atom,
type: type,
format: format | nil,
required: list(atom) | nil,
properties: map | nil,
example: any,
minimum: integer | nil,
maximum: integer | nil,
items: map
}
@enforce_keys [:name, :type]
defstruct [
:name,
:type,
format: nil,
required: nil,
properties: nil,
example: nil,
minimum: nil,
maximum: nil,
items: nil
]
@doc false
@spec validate!(t) :: :ok
def validate!(schema) do
with %{errors: errors, warnings: warnings} <- validate(schema) do
name = Macro.to_string(schema.name)
if errors != [], do: Enum.each(errors, &Logger.error("APIDoc: Schema '#{name}': #{&1}"))
if warnings != [], do: Enum.each(warnings, &Logger.warn("APIDoc: Schema '#{name}': #{&1}"))
end
:ok
end
@doc false
@spec validate(t) :: :ok | %{errors: [String.t()], warnings: [String.t()]}
def validate(schema = %__MODULE__{type: type}) do
validated =
schema
|> Map.from_struct()
|> Enum.map(fn {k, v} -> validate_property(type, k, v) end)
|> Enum.reject(&is_nil/1)
|> Enum.group_by(&elem(&1, 0))
if validated[:error] || validated[:warn] do
%{
errors: Enum.map(validated[:error] || [], &elem(&1, 1)),
warnings: Enum.map(validated[:warn] || [], &elem(&1, 1))
}
else
:ok
end
end
@spec validate_property(type, atom, any) :: nil | {:error, String.t()} | {:warn, String.t()}
# Integer
defp validate_property(:integer, :format, nil),
do: {:warn, "No format set for integer. Recommend setting `:int32` or `:int64`."}
# Object
defp validate_property(:object, :properties, nil), do: {:error, "No properties set for object."}
defp validate_property(:object, :required, nil),
do: {:error, "No required properties set for object. Recommend setting required fields."}
# All
defp validate_property(_type, :example, nil),
do: {:warn, "No example supplied. Strongly recommend setting an example."}
defp validate_property(_type, _property, _value), do: nil
end
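A hedged sketch of exercising the validations above (field values are illustrative only):

```
# A fully specified integer schema validates cleanly.
:ok =
  APIDoc.Doc.Schema.validate(%APIDoc.Doc.Schema{
    name: :age,
    type: :integer,
    format: :int32,
    example: 42
  })

# Omitting :format and :example on an integer schema yields two warnings.
%{errors: [], warnings: [_, _]} =
  APIDoc.Doc.Schema.validate(%APIDoc.Doc.Schema{name: :count, type: :integer})
```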
|
lib/doc/schema.ex
| 0.892445
| 0.457985
|
schema.ex
|
starcoder
|
defmodule Andi.InputSchemas.InputConverter do
@moduledoc """
Used to convert between SmartCity.Datasets, form data (defined by Andi.InputSchemas.DatasetInput), and Ecto.Changesets.
"""
alias SmartCity.Dataset
alias Andi.InputSchemas.DatasetInput
@type dataset :: map() | Dataset.t()
@spec changeset_from_dataset(dataset) :: Ecto.Changeset.t()
def changeset_from_dataset(%{"id" => _} = dataset) do
dataset
|> atomize_dataset_map()
|> changeset_from_dataset()
end
def changeset_from_dataset(%{id: id, business: business, technical: technical}) do
from_business =
get_business(business)
|> fix_modified_date()
from_technical =
get_technical(technical)
|> convert_key_values_for_fields()
|> convert_source_url()
%{id: id}
|> Map.merge(from_business)
|> Map.merge(from_technical)
|> AtomicMap.convert(safe: false, underscore: false)
|> DatasetInput.full_validation_changeset()
end
@spec changeset_from_dataset(Dataset.t(), map()) :: Ecto.Changeset.t()
def changeset_from_dataset(%SmartCity.Dataset{} = original_dataset, changes) do
adjusted_changes = adjust_form_input(changes)
original_dataset_flattened =
original_dataset
|> changeset_from_dataset()
|> Ecto.Changeset.apply_changes()
all_changes = Map.merge(original_dataset_flattened, adjusted_changes)
DatasetInput.full_validation_changeset(all_changes)
end
@spec form_changeset(map()) :: Ecto.Changeset.t()
def form_changeset(params \\ %{}) do
params
|> adjust_form_input()
|> DatasetInput.light_validation_changeset()
end
defp adjust_form_input(params) do
params
|> AtomicMap.convert(safe: false, underscore: false)
|> Map.update(:keywords, nil, &keywords_to_list/1)
|> fix_modified_date()
|> reset_key_values()
end
@spec restruct(map(), Dataset.t()) :: Dataset.t()
def restruct(changes, dataset) do
formatted_changes =
changes
|> Map.update(:issuedDate, nil, &date_to_iso8601_datetime/1)
|> Map.update(:modifiedDate, nil, &date_to_iso8601_datetime/1)
|> Map.update(:sourceUrl, nil, &Andi.URI.clear_query_params/1)
|> restruct_key_values()
business = Map.merge(dataset.business, get_business(formatted_changes)) |> Map.from_struct()
technical = Map.merge(dataset.technical, get_technical(formatted_changes)) |> Map.from_struct()
%{}
|> Map.put(:id, dataset.id)
|> Map.put(:business, business)
|> Map.put(:technical, technical)
|> SmartCity.Dataset.new()
|> (fn {:ok, dataset} -> dataset end).()
end
defp atomize_dataset_map(dataset) when is_map(dataset) do
dataset
|> atomize_top_level()
|> Map.update(:business, nil, &atomize_top_level/1)
|> Map.update(:technical, nil, &atomize_top_level/1)
|> update_in([:technical, :schema], fn schema -> Enum.map(schema, &atomize_top_level/1) end)
end
defp atomize_top_level(map) do
Map.new(map, fn {key, val} -> {SmartCity.Helpers.safe_string_to_atom(key), val} end)
end
defp get_business(map) when is_map(map) do
Map.take(map, DatasetInput.business_keys())
end
defp get_technical(map) when is_map(map) do
Map.take(map, DatasetInput.technical_keys())
end
defp keywords_to_list(nil), do: []
defp keywords_to_list(""), do: []
defp keywords_to_list(keywords) when is_binary(keywords) do
keywords
|> String.split(", ")
|> Enum.map(&String.trim/1)
end
defp keywords_to_list(keywords) when is_list(keywords), do: keywords
defp date_to_iso8601_datetime(date) do
time_const = "00:00:00Z"
"#{Date.to_iso8601(date)} #{time_const}"
end
defp fix_modified_date(map) do
map
|> Map.get_and_update(:modifiedDate, fn
"" -> {"", nil}
current_value -> {current_value, current_value}
end)
|> elem(1)
end
defp reset_key_values(map) do
Enum.reduce(DatasetInput.key_value_keys(), map, fn field, acc ->
Map.put_new(acc, field, %{})
end)
end
defp convert_key_values_for_fields(map) do
Enum.reduce(DatasetInput.key_value_keys(), map, fn field, acc -> convert_key_values_for_fields(acc, field) end)
end
defp convert_key_values_for_fields(map, field) do
Map.update(map, field, [], &convert_key_values/1)
end
defp convert_key_values(key_values) do
Enum.map(key_values, fn {k, v} -> %{key: to_string(k), value: v} end)
end
defp convert_source_url(map) do
source_url = Map.get(map, :sourceUrl)
source_query_params = Map.get(map, :sourceQueryParams)
{url, params} = Andi.URI.merge_url_and_params(source_url, source_query_params)
map
|> Map.put(:sourceUrl, url)
|> Map.put(:sourceQueryParams, convert_key_values(params))
end
defp restruct_key_values(map) do
Enum.reduce(DatasetInput.key_value_keys(), map, fn field, acc -> restruct_key_values(acc, field) end)
end
defp restruct_key_values(map, field) do
Map.update(map, field, %{}, fn key_values ->
Enum.reduce(key_values, %{}, fn entry, acc -> Map.put(acc, entry.key, entry.value) end)
end)
end
end
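A hedged sketch of the form-data path (parameter keys and values are illustrative; the exact changeset shape depends on `Andi.InputSchemas.DatasetInput`):

```
params = %{"keywords" => "traffic, transit", "modifiedDate" => ""}

# keywords is split into ["traffic", "transit"] and the empty
# modifiedDate is normalised to nil before light validation runs.
%Ecto.Changeset{} = Andi.InputSchemas.InputConverter.form_changeset(params)
```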
|
apps/andi/lib/andi/input_schemas/input_converter.ex
| 0.681621
| 0.405184
|
input_converter.ex
|
starcoder
|
defmodule Onnx.AttributeProto do
@moduledoc """
A named attribute containing either singular float, integer, string
and tensor values, or repeated float, integer, string and tensor values.
An AttributeProto MUST contain the name field, and *only one* of the
following content fields, effectively enforcing a C/C++ union equivalent.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t(),
doc_string: String.t(),
type: integer,
f: float,
i: integer,
s: String.t(),
t: Onnx.TensorProto.t(),
g: Onnx.GraphProto.t(),
floats: [float],
ints: [integer],
strings: [String.t()],
tensors: [Onnx.TensorProto.t()],
graphs: [Onnx.GraphProto.t()]
}
defstruct [
:name,
:doc_string,
:type,
:f,
:i,
:s,
:t,
:g,
:floats,
:ints,
:strings,
:tensors,
:graphs
]
field(:name, 1, optional: true, type: :string)
field(:doc_string, 13, optional: true, type: :string)
field(:type, 20, optional: true, type: Onnx.AttributeProto.AttributeType, enum: true)
field(:f, 2, optional: true, type: :float)
field(:i, 3, optional: true, type: :int64)
field(:s, 4, optional: true, type: :bytes)
field(:t, 5, optional: true, type: Onnx.TensorProto)
field(:g, 6, optional: true, type: Onnx.GraphProto)
field(:floats, 7, repeated: true, type: :float)
field(:ints, 8, repeated: true, type: :int64)
field(:strings, 9, repeated: true, type: :bytes)
field(:tensors, 10, repeated: true, type: Onnx.TensorProto)
field(:graphs, 11, repeated: true, type: Onnx.GraphProto)
end
defmodule Onnx.AttributeProto.AttributeType do
@moduledoc """
Note: this enum is structurally identical to the OpSchema::AttrType
enum defined in schema.h. If you rev one, you likely need to rev the other.
"""
use Protobuf, enum: true, syntax: :proto2
field(:UNDEFINED, 0)
field(:FLOAT, 1)
field(:INT, 2)
field(:STRING, 3)
field(:TENSOR, 4)
field(:GRAPH, 5)
field(:FLOATS, 6)
field(:INTS, 7)
field(:STRINGS, 8)
field(:TENSORS, 9)
field(:GRAPHS, 10)
end
defmodule Onnx.ValueInfoProto do
@moduledoc """
Defines information on value, including the name, the type, and
the shape of the value.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
name: String.t(),
type: Onnx.TypeProto.t(),
doc_string: String.t()
}
defstruct [:name, :type, :doc_string]
field(:name, 1, optional: true, type: :string)
field(:type, 2, optional: true, type: Onnx.TypeProto)
field(:doc_string, 3, optional: true, type: :string)
end
defmodule Onnx.NodeProto do
@moduledoc """
NodeProto stores a node that is similar to the notion of "layer"
or "operator" in many deep learning frameworks. For example, it can be a
node of type "Conv" that takes in an image, a filter tensor and a bias
tensor, and produces the convolved output.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
input: [String.t()],
output: [String.t()],
name: String.t(),
op_type: String.t(),
domain: String.t(),
attribute: [Onnx.AttributeProto.t()],
doc_string: String.t()
}
defstruct [:input, :output, :name, :op_type, :domain, :attribute, :doc_string]
field(:input, 1, repeated: true, type: :string)
field(:output, 2, repeated: true, type: :string)
field(:name, 3, optional: true, type: :string)
field(:op_type, 4, optional: true, type: :string)
field(:domain, 7, optional: true, type: :string)
field(:attribute, 5, repeated: true, type: Onnx.AttributeProto)
field(:doc_string, 6, optional: true, type: :string)
end
defmodule Onnx.ModelProto do
@moduledoc """
ModelProto is a top-level file/container format for bundling a ML model.
The semantics of the model are described by the GraphProto that represents
a parameterized computation graph against a set of named operators that are
defined independently from the graph.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
ir_version: integer,
opset_import: [Onnx.OperatorSetIdProto.t()],
producer_name: String.t(),
producer_version: String.t(),
domain: String.t(),
model_version: integer,
doc_string: String.t(),
graph: Onnx.GraphProto.t(),
metadata_props: [Onnx.StringStringEntryProto.t()]
}
defstruct [
:ir_version,
:opset_import,
:producer_name,
:producer_version,
:domain,
:model_version,
:doc_string,
:graph,
:metadata_props
]
field(:ir_version, 1, optional: true, type: :int64)
field(:opset_import, 8, repeated: true, type: Onnx.OperatorSetIdProto)
field(:producer_name, 2, optional: true, type: :string)
field(:producer_version, 3, optional: true, type: :string)
field(:domain, 4, optional: true, type: :string)
field(:model_version, 5, optional: true, type: :int64)
field(:doc_string, 6, optional: true, type: :string)
field(:graph, 7, optional: true, type: Onnx.GraphProto)
field(:metadata_props, 14, repeated: true, type: Onnx.StringStringEntryProto)
end
defmodule Onnx.StringStringEntryProto do
@moduledoc """
StringStringEntryProto follows the pattern for cross-proto-version maps.
See https://developers.google.com/protocol-buffers/docs/proto3#maps
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
key: String.t(),
value: String.t()
}
defstruct [:key, :value]
field(:key, 1, optional: true, type: :string)
field(:value, 2, optional: true, type: :string)
end
defmodule Onnx.GraphProto do
@moduledoc """
GraphProto defines a parameterized series of nodes to form a directed acyclic graph.
This is the equivalent of the "network" and "graph" in many deep learning
frameworks.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
node: [Onnx.NodeProto.t()],
name: String.t(),
initializer: [Onnx.TensorProto.t()],
doc_string: String.t(),
input: [Onnx.ValueInfoProto.t()],
output: [Onnx.ValueInfoProto.t()],
value_info: [Onnx.ValueInfoProto.t()]
}
defstruct [:node, :name, :initializer, :doc_string, :input, :output, :value_info]
field(:node, 1, repeated: true, type: Onnx.NodeProto)
field(:name, 2, optional: true, type: :string)
field(:initializer, 5, repeated: true, type: Onnx.TensorProto)
field(:doc_string, 10, optional: true, type: :string)
field(:input, 11, repeated: true, type: Onnx.ValueInfoProto)
field(:output, 12, repeated: true, type: Onnx.ValueInfoProto)
field(:value_info, 13, repeated: true, type: Onnx.ValueInfoProto)
end
defmodule Onnx.TensorProto do
@moduledoc """
A message defined to store a tensor in its serialized format.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
dims: [integer],
data_type: integer,
segment: Onnx.TensorProto.Segment.t(),
float_data: [float],
int32_data: [integer],
string_data: [String.t()],
int64_data: [integer],
name: String.t(),
doc_string: String.t(),
raw_data: String.t(),
double_data: [float],
uint64_data: [non_neg_integer]
}
defstruct [
:dims,
:data_type,
:segment,
:float_data,
:int32_data,
:string_data,
:int64_data,
:name,
:doc_string,
:raw_data,
:double_data,
:uint64_data
]
field(:dims, 1, repeated: true, type: :int64)
field(:data_type, 2, optional: true, type: Onnx.TensorProto.DataType, enum: true)
field(:segment, 3, optional: true, type: Onnx.TensorProto.Segment)
field(:float_data, 4, repeated: true, type: :float, packed: true)
field(:int32_data, 5, repeated: true, type: :int32, packed: true)
field(:string_data, 6, repeated: true, type: :bytes)
field(:int64_data, 7, repeated: true, type: :int64, packed: true)
field(:name, 8, optional: true, type: :string)
field(:doc_string, 12, optional: true, type: :string)
field(:raw_data, 9, optional: true, type: :bytes)
field(:double_data, 10, repeated: true, type: :double, packed: true)
field(:uint64_data, 11, repeated: true, type: :uint64, packed: true)
end
defmodule Onnx.TensorProto.Segment do
@moduledoc """
For very large tensors, we may want to store them in chunks, in which
case the following fields will specify the segment that is stored in
the current TensorProto.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
begin: integer,
end: integer
}
defstruct [:begin, :end]
field(:begin, 1, optional: true, type: :int64)
field(:end, 2, optional: true, type: :int64)
end
defmodule Onnx.TensorProto.DataType do
@moduledoc false
use Protobuf, enum: true, syntax: :proto2
field(:UNDEFINED, 0)
field(:FLOAT, 1)
field(:UINT8, 2)
field(:INT8, 3)
field(:UINT16, 4)
field(:INT16, 5)
field(:INT32, 6)
field(:INT64, 7)
field(:STRING, 8)
field(:BOOL, 9)
field(:FLOAT16, 10)
field(:DOUBLE, 11)
field(:UINT32, 12)
field(:UINT64, 13)
field(:COMPLEX64, 14)
field(:COMPLEX128, 15)
end
defmodule Onnx.TensorShapeProto do
@moduledoc """
Defines a tensor shape.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
dim: [Onnx.TensorShapeProto.Dimension.t()]
}
defstruct [:dim]
field(:dim, 1, repeated: true, type: Onnx.TensorShapeProto.Dimension)
end
defmodule Onnx.TensorShapeProto.Dimension do
@moduledoc """
A dimension can be either an integer value
or a symbolic variable. A symbolic variable represents an unknown
dimension.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
value: {atom, any}
}
defstruct [:value]
oneof(:value, 0)
field(:dim_value, 1, optional: true, type: :int64, oneof: 0)
field(:dim_param, 2, optional: true, type: :string, oneof: 0)
end
defmodule Onnx.TypeProto do
@moduledoc """
Define the types.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
value: {atom, any}
}
defstruct [:value]
oneof(:value, 0)
field(:tensor_type, 1, optional: true, type: Onnx.TypeProto.Tensor, oneof: 0)
end
defmodule Onnx.TypeProto.Tensor do
@moduledoc false
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
elem_type: integer,
shape: Onnx.TensorShapeProto.t()
}
defstruct [:elem_type, :shape]
field(:elem_type, 1, optional: true, type: Onnx.TensorProto.DataType, enum: true)
field(:shape, 2, optional: true, type: Onnx.TensorShapeProto)
end
defmodule Onnx.OperatorSetIdProto do
@moduledoc """
OperatorSets are uniquely identified by a (domain, opset_version) pair.
"""
use Protobuf, syntax: :proto2
@type t :: %__MODULE__{
domain: String.t(),
version: integer
}
defstruct [:domain, :version]
field(:domain, 1, optional: true, type: :string)
field(:version, 2, optional: true, type: :int64)
end
defmodule Onnx.Version do
@moduledoc """
To be compatible with both proto2 and proto3, we will use a version number
that is not defined by the default value but an explicit enum number.
"""
use Protobuf, enum: true, syntax: :proto2
field(:_START_VERSION, 0)
field(:IR_VERSION_2017_10_10, 1)
field(:IR_VERSION_2017_10_30, 2)
field(:IR_VERSION, 3)
end
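These modules are generated with the `protobuf` Elixir library, which defines `decode/1` and `encode/1` on every message module. A hedged loading sketch ("model.onnx" is a placeholder path, and `graph` may be `nil` for a model without one):

```
# Decode a serialized ONNX model from disk and poke at the graph.
model = Onnx.ModelProto.decode(File.read!("model.onnx"))
IO.inspect(model.ir_version, label: "IR version")
IO.inspect(length(model.graph.node), label: "node count")
```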
|
lib/onnx.pb.ex
| 0.894528
| 0.574216
|
onnx.pb.ex
|
starcoder
|
defmodule Cryppo.EncryptedDataWithDerivedKey do
@moduledoc """
A struct for a derived key and data encrypted with this derived key
"""
import Cryppo.Base64
import Cryppo.Strategies, only: [find_key_derivation_strategy: 1]
alias Cryppo.{DerivedKey, EncryptedData, EncryptedDataWithDerivedKey, Serialization}
@typedoc """
Struct `Cryppo.EncryptedDataWithDerivedKey`
A `Cryppo.EncryptedDataWithDerivedKey` struct contains
* `encrypted_data`: a `Cryppo.EncryptedData` struct
* `derived_key`: a `Cryppo.DerivedKey` struct
"""
@type t :: %__MODULE__{
encrypted_data: EncryptedData.t(),
derived_key: DerivedKey.t()
}
@enforce_keys [:encrypted_data, :derived_key]
defstruct [:encrypted_data, :derived_key]
@doc false
@spec load(String.t(), String.t(), String.t(), String.t(), String.t()) ::
{:ok, t()}
| {:error,
:invalid_base64
| :invalid_derivation_artefacts
| :invalid_bson
| String.t()}
| {:unsupported_encryption_strategy, binary}
| {:unsupported_key_derivation_strategy, binary}
def load(
strategy_name,
encrypted_data_base64,
encryption_artefacts_base64,
key_derivation_strategy,
derivation_artefacts_base64
) do
with {:ok, key_derivation_mod} <-
find_key_derivation_strategy(key_derivation_strategy),
{:ok, derivation_artefacts} <- decode_base64(derivation_artefacts_base64),
{:ok, salt, iterations, length} <- DerivedKey.load_artefacts(derivation_artefacts),
{:ok, encrypted_data} <-
EncryptedData.load(strategy_name, encrypted_data_base64, encryption_artefacts_base64) do
hash = apply(key_derivation_mod, :hash_function, [])
derived_key = %DerivedKey{
encryption_key: nil,
key_derivation_strategy: key_derivation_mod,
salt: salt,
iter: iterations,
length: length,
hash: hash
}
key = %__MODULE__{encrypted_data: encrypted_data, derived_key: derived_key}
{:ok, key}
end
end
defimpl Serialization do
@spec serialize(EncryptedDataWithDerivedKey.t()) :: binary
def serialize(%EncryptedDataWithDerivedKey{
derived_key: %DerivedKey{} = derived_key,
encrypted_data: %EncryptedData{} = encrypted_data
}) do
[encrypted_data, derived_key]
|> Enum.map(fn v -> Serialization.serialize(v) end)
|> Enum.join(".")
end
end
end
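A hedged deserialization sketch. The serialized form joins the encrypted data and the derived key with `"."` (see `serialize/1` above), so a full string should split into five segments that line up with the arguments of `load/5`; the strategy names and the `serialized` binary below are assumptions for illustration, not values taken from this module:

```
["Aes256Gcm", data_b64, artefacts_b64, "Pbkdf2Hmac", derivation_b64] =
  String.split(serialized, ".")

{:ok, %Cryppo.EncryptedDataWithDerivedKey{}} =
  Cryppo.EncryptedDataWithDerivedKey.load(
    "Aes256Gcm",
    data_b64,
    artefacts_b64,
    "Pbkdf2Hmac",
    derivation_b64
  )
```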
|
lib/cryppo/encrypted_data_with_derived_key.ex
| 0.877227
| 0.45175
|
encrypted_data_with_derived_key.ex
|
starcoder
|
defmodule AOC.Day5.Intcode do
@moduledoc false
@type memory :: %{
integer => integer,
pointer: integer,
inputs: list(integer),
outputs: list(integer)
}
def part1(path, user_inputs) do
stream_puzzle_input(path)
|> puzzle_input_to_map(user_inputs)
|> compute
end
def part2(path, user_inputs) do
stream_puzzle_input(path)
|> puzzle_input_to_map(user_inputs)
|> compute
end
def stream_puzzle_input(path) do
File.read!(path)
|> String.trim()
|> String.split(",")
end
@spec puzzle_input_to_map(list(integer), list(integer)) :: memory
def puzzle_input_to_map(puzzle_input, user_input \\ []) do
puzzle_input
|> Stream.with_index()
|> Stream.map(fn {value, index} ->
{index, String.to_integer(value)}
end)
|> Map.new()
|> (&Map.put(&1, :pointer, 0)).()
|> (&Map.put(&1, :inputs, user_input)).()
|> (&Map.put(&1, :outputs, [])).()
end
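# Note: brute_force/4 below is not called by part1/part2 in this module;
# it appears to be carried over from the Day 2 noun/verb search.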
@spec brute_force(memory, integer, integer, integer) :: {integer, integer} | :error
defp brute_force(memory, output, noun, verb) do
result =
memory
|> update(1, noun)
|> update(2, verb)
|> compute()
cond do
output == result -> {noun, verb}
verb <= 99 -> brute_force(memory, output, noun, verb + 1)
noun <= 99 -> brute_force(memory, output, noun + 1, 0)
true -> :error
end
end
@spec compute(memory) :: list(integer)
def compute(memory) do
0..map_size(memory)
|> Enum.reduce_while(memory, fn _i, memory ->
with {parameter_modes, opcode} <- process_address(memory),
{operation, num_params} <- instruction(memory, opcode),
false <- is_atom(operation),
params <- read_params(memory, num_params),
params <- Enum.zip(params, parameter_modes),
result <- apply(operation, [{memory, num_params} | params]),
false <- is_atom(result) do
{:cont, result}
else
_ ->
{:halt, Enum.reverse(read_outputs(memory))}
end
end)
end
def process_address(memory) do
address = read_instruction_pointer(memory)
{parameter_modes, op} =
read(memory, address)
|> Integer.digits()
|> Enum.split(-2)
padding = List.duplicate(0, 5 - length(parameter_modes))
parameter_modes = Enum.reverse(parameter_modes) ++ padding
op = Integer.undigits(op)
{parameter_modes, op}
end
@spec read_params(memory, integer) :: list(integer)
def read_params(memory, num_params) do
address = read_instruction_pointer(memory)
num_params = num_params - 2
if num_params >= 0 do
Enum.map(0..num_params, fn i -> read(memory, i + address + 1) end)
else
[]
end
end
@spec read(memory, integer) :: integer
@spec read(memory, {integer, integer}) :: integer
def read(memory, {address, mode}) do
cond do
mode == 0 -> read(memory, address)
mode == 1 -> address
true -> :error_read
end
end
def read(memory, address) do
Map.get(memory, address)
end
@spec read_instruction_pointer(memory) :: integer
def read_instruction_pointer(memory) do
memory.pointer
end
@spec update_instruction_pointer(memory, integer) :: memory
def update_instruction_pointer(memory, value) do
%{memory | :pointer => value}
end
@spec increment_instruction_pointer(memory, integer) :: memory
def increment_instruction_pointer(memory, value) do
pointer = read_instruction_pointer(memory)
update_instruction_pointer(memory, pointer + value)
end
@spec pop_input(memory) :: {integer, memory}
def pop_input(memory) do
[head | tail] = memory.inputs
memory = %{memory | :inputs => tail}
{head, memory}
end
@spec append_input(memory, integer) :: memory
def append_input(memory, value) do
inputs = memory.inputs
%{memory | :inputs => inputs ++ [value]}
end
@spec read_outputs(memory) :: list(integer)
def read_outputs(memory) do
memory.outputs
end
@spec push_output(memory, integer) :: memory
def push_output(memory, value) do
outputs = memory.outputs
%{memory | :outputs => [value | outputs]}
end
@spec update(memory, integer, integer) :: memory
def update(memory, address, value) do
%{memory | address => value}
end
@spec instruction(memory, integer) :: {(... -> memory), integer} | :error
def instruction(_memory, opcode) do
instructions = %{
1 => &add/4,
2 => &multiply/4,
3 => &input/2,
4 => &output/2,
5 => &jump_if_true/3,
6 => &jump_if_false/3,
7 => &less_than/4,
8 => &equals/4,
99 => &terminate/1
}
func = Map.get(instructions, opcode)
if func == nil do
:error
else
{:arity, num_params} = Function.info(func, :arity)
{func, num_params}
end
end
@spec add({memory, integer}, {integer, integer}, {integer, integer}, {integer, integer}) ::
memory
def add({memory, num_params}, param_and_mode1, param_and_mode2, {param3, 0}) do
value = read(memory, param_and_mode1) + read(memory, param_and_mode2)
update(memory, param3, value)
|> increment_instruction_pointer(num_params)
end
@spec multiply({memory, integer}, {integer, integer}, {integer, integer}, {integer, integer}) ::
memory
def multiply({memory, num_params}, param_and_mode1, param_and_mode2, {param3, 0}) do
value = read(memory, param_and_mode1) * read(memory, param_and_mode2)
update(memory, param3, value)
|> increment_instruction_pointer(num_params)
end
@spec input({memory, integer}, {integer, integer}) :: memory
def input({memory, num_params}, {param1, 0}) do
{value, memory} = pop_input(memory)
update(memory, param1, value)
|> increment_instruction_pointer(num_params)
end
@spec output({memory, integer}, {integer, integer}) :: memory
def output({memory, num_params}, param1) do
read(memory, param1)
# |> (&(IO.puts("Output: #{&1}"))).()
|> (&push_output(memory, &1)).()
|> increment_instruction_pointer(num_params)
end
def jump_if_true({memory, num_params}, param_and_mode1, param_and_mode2) do
v1 = read(memory, param_and_mode1)
v2 = read(memory, param_and_mode2)
if v1 != 0 do
update_instruction_pointer(memory, v2)
else
increment_instruction_pointer(memory, num_params)
end
end
def jump_if_false({memory, num_params}, param_and_mode1, param_and_mode2) do
v1 = read(memory, param_and_mode1)
v2 = read(memory, param_and_mode2)
if v1 == 0 do
update_instruction_pointer(memory, v2)
else
increment_instruction_pointer(memory, num_params)
end
end
def less_than({memory, num_params}, param_and_mode1, param_and_mode2, {param3, 0}) do
v1 = read(memory, param_and_mode1)
v2 = read(memory, param_and_mode2)
if v1 < v2 do
update(memory, param3, 1)
else
update(memory, param3, 0)
end
|> increment_instruction_pointer(num_params)
end
def equals({memory, num_params}, param_and_mode1, param_and_mode2, {param3, 0}) do
v1 = read(memory, param_and_mode1)
v2 = read(memory, param_and_mode2)
if v1 == v2 do
update(memory, param3, 1)
else
update(memory, param3, 0)
end
|> increment_instruction_pointer(num_params)
end
@spec terminate({memory, integer}) :: :terminate
def terminate(_memory) do
:terminate
end
end
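A hedged run-through using one of the published Day 5 example programs. The program string below is the position-mode "equals 8" test; the input value is illustrative:

```
# Outputs [1] when the single input equals 8, [0] otherwise.
"3,9,8,9,10,9,4,9,99,-1,8"
|> String.split(",")
|> AOC.Day5.Intcode.puzzle_input_to_map([8])
|> AOC.Day5.Intcode.compute()
# => [1]
```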
|
aoc-2019/lib/aoc/day5/intcode.ex
| 0.848722
| 0.452052
|
intcode.ex
|
starcoder
|
defmodule Mix.Tasks.Dialyze do
@moduledoc """
Analyses the current Mix project using success typing.
## Examples
# Build or check a PLT and use it to analyse a project
mix dialyze
# Use the existing PLT to analyse a project
mix dialyze --no-check
# Build or check the PLT for current environment but don't analyse
mix dialyze --no-analyse
# Skip compiling the project
mix dialyze --no-compile
# Find extra warnings during analysis
mix dialyze --unmatched-returns --error-handling --race-conditions --underspecs
The `--no-check` switch should only be used when the PLT for the current
build environment (including Erlang and Elixir) has been checked, and
no changes have been made to dependencies (including Erlang and Elixir). Checking
the PLT is not strictly required even after dependencies change, but the
success typing analysis will then be less accurate and may produce incorrect warnings.
Below is a common pattern of use:
## Examples
# Fetch deps
mix deps.get
# Possibly make changes to current application and then compile project
mix compile
# Run Dialyze for the first time to build a PLT and analyse
mix dialyze
# Fix Dialyzer warnings and analyse again (assuming same build
# environment, Elixir version, Erlang version and deps)
mix dialyze --no-check
This task will automatically find all dependencies for the current build
environment and add them to a PLT. The most common dependencies from
Erlang/OTP and Elixir will be cached for each version of Erlang and Elixir and
re-used between projects. If a PLT exists for the active versions of Erlang
and Elixir, and the current build environment the PLT will be checked for
consistency before analysis.
This task tries to be as efficient as possible in reusing PLTs. If Erlang or
Elixir is changed (including changing directories) without their versions
changing, the next consistency check for each project and build environment
will take longer as the PLT will need to be updated.
The default warning flags are:
--return --unused --improper-lists --fun-app --match --opaque
--fail-call --contracts --behaviours --undefined-callbacks
--no-unmatched-returns --no-error-handling --no-race-conditions
--no-overspecs --no-underspecs --no-unknown --no-specdiffs
For more information on `dialyzer` and success typing see:
`http://www.erlang.org/doc/apps/dialyzer/index.html`
"""
@shortdoc "Analyses the current Mix project using success typing"
use Mix.Task
@no_warnings [:return, :unused, :improper_lists, :fun_app,
:match, :opaque, :fail_call, :contracts, :behaviours, :undefined_callbacks]
@warnings [:unmatched_returns, :error_handling, :race_conditions, :overspecs,
:underspecs, :unknown, :specdiffs]
@spec run(OptionParser.argv) :: :ok
def run(args) do
{make, prepare, analysis, warnings} = parse_args(args)
info("Finding applications for analysis")
{mods, deps} = get_info(make)
try do
{plt, plt_beams} = prepare.(plts_list(deps))
analysis.(plt, mods, plt_beams, warnings)
else
[] -> :ok
[_|_] = warnings ->
print_warnings(warnings)
Mix.raise "Dialyzer reported #{length(warnings)} warnings"
catch
:throw, {:dialyzer_error, reason} ->
Mix.raise "Dialyzer error: " <> IO.chardata_to_string(reason)
end
end
defp parse_args(args) do
warn_switches = Enum.map(@no_warnings ++ @warnings, &{&1, :boolean})
switches = [compile: :boolean, check: :boolean, analyse: :boolean] ++
warn_switches
{opts, _, _} = OptionParser.parse(args, [strict: switches])
{make_fun(opts), prepare_fun(opts), analysis_fun(opts), warnings_list(opts)}
end
defp make_fun(opts) do
case Keyword.get(opts, :compile, true) do
true -> &compile/0
false -> &no_compile/0
end
end
defp prepare_fun(opts) do
case Keyword.get(opts, :check, true) do
true -> &check/1
false -> &no_check/1
end
end
defp analysis_fun(opts) do
case Keyword.get(opts, :analyse, true) do
true -> &analyse/4
false -> &no_analyse/4
end
end
defp warnings_list(opts) do
warnings = Enum.filter(@warnings, &Keyword.get(opts, &1, false))
no_warnings = Enum.filter_map(@no_warnings,
&(not Keyword.get(opts, &1, true)), &String.to_atom("no_#{&1}"))
warnings ++ no_warnings
end
defp no_compile(), do: :ok
defp compile(), do: Mix.Task.run("compile", [])
defp get_info(make) do
infos = app_info_list(make)
apps = Keyword.keys(infos)
mods = Enum.flat_map(infos, fn({_, {mods, _deps}}) -> mods end)
deps = Enum.flat_map(infos, fn({_, {_mods, deps}}) -> deps end)
# Ensure apps not in deps.
{mods, Enum.uniq(deps) -- apps}
end
defp app_info_list(make) do
case Mix.Project.umbrella?() do
true -> get_umbrella_info(make)
false -> [get_app_info(make)]
end
end
defp get_umbrella_info(make) do
config = [build_path: Mix.Project.build_path()]
for %Mix.Dep{app: app, opts: opts} <- Mix.Dep.Umbrella.loaded() do
path = opts[:path]
Mix.Project.in_project(app, path, config, fn(_) -> get_app_info(make) end)
end
end
defp get_app_info(make) do
make.()
Keyword.fetch!(Mix.Project.config(), :app)
|> app_info()
end
defp plts_list(deps) do
[{deps_plt(), deps}, {elixir_plt(), [:elixir]},
{erlang_plt(), [:erts, :kernel, :stdlib, :crypto]}]
end
defp erlang_plt(), do: global_plt("erlang-" <> otp_vsn())
defp otp_vsn() do
major = :erlang.system_info(:otp_release)
vsn_file = Path.join([:code.root_dir(), "releases", major, "OTP_VERSION"])
try do
{:ok, contents} = File.read(vsn_file)
String.split(contents, "\n", trim: true)
else
[full] ->
full
_ ->
major
catch
:error, _ ->
major
end
end
defp elixir_plt() do
global_plt("erlang-#{otp_vsn()}_elixir-#{System.version()}")
end
defp deps_plt do
name = "erlang-#{otp_vsn()}_elixir-#{System.version()}_deps-#{build_env()}"
local_plt(name)
end
defp build_env() do
config = Mix.Project.config()
case Keyword.fetch!(config, :build_per_environment) do
true -> Atom.to_string(Mix.env())
false -> "shared"
end
end
defp global_plt(name) do
Path.join(Mix.Utils.mix_home(), "dialyze_" <> name <> ".plt")
end
defp local_plt(name) do
Path.join(Mix.Project.build_path(), "dialyze_" <> name <> ".plt")
end
defp no_analyse(_plts, _mods, _plt_beams, _warnings), do: []
defp analyse(plt, mods, plt_beams, warnings) do
info("Finding modules for analysis")
beams = resolve_modules(mods, HashSet.new())
clashes = HashSet.intersection(beams, plt_beams)
case HashSet.size(clashes) do
0 ->
plt_analyse(plt, beams, warnings)
_ ->
Mix.raise "Clashes with plt: " <>
inspect(HashSet.to_list(clashes))
end
end
defp no_check([{plt, _apps} | _plts]) do
case plt_files(plt) do
nil ->
Mix.raise "Could not open #{plt}: #{:file.format_error(:enoent)}"
beams ->
{plt, beams}
end
end
defp check(plts) do
info("Finding suitable PLTs")
find_plts(plts, [])
end
defp find_plts([{plt, apps} | plts], acc) do
case plt_files(plt) do
nil ->
find_plts(plts, [{plt, apps, nil} | acc])
beams ->
apps_rest = Enum.flat_map(plts, fn({_plt2, apps2}) -> apps2 end)
apps = Enum.uniq(apps ++ apps_rest)
check_plts([{plt, apps, beams} | acc])
end
end
defp find_plts([], acc) do
check_plts(acc)
end
defp check_plts(plts) do
{last_plt, beams, _cache} = Enum.reduce(plts, {nil, HashSet.new(), %{}},
fn({plt, apps, beams}, acc) ->
check_plt(plt, apps, beams, acc)
end)
{last_plt, beams}
end
defp check_plt(plt, apps, old_beams, {prev_plt, prev_beams, prev_cache}) do
info("Finding applications for #{Path.basename(plt)}")
cache = resolve_apps(apps, prev_cache)
mods = cache_mod_diff(cache, prev_cache)
info("Finding modules for #{Path.basename(plt)}")
beams = resolve_modules(mods, prev_beams)
check_beams(plt, beams, old_beams, prev_plt)
{plt, beams, cache}
end
defp cache_mod_diff(new, old) do
Enum.flat_map(new,
fn({app, {mods, _deps}}) ->
case Map.has_key?(old, app) do
true -> []
false -> mods
end
end)
end
defp resolve_apps(apps, cache) do
apps
|> Enum.uniq()
|> Enum.filter_map(&(not Map.has_key?(cache, &1)), &app_info/1)
|> Enum.into(cache)
end
defp app_info(app) do
app_file = Atom.to_char_list(app) ++ '.app'
case :code.where_is_file(app_file) do
:non_existing ->
error("Unknown application #{inspect(app)}")
{app, {[], []}}
app_file ->
Path.expand(app_file)
|> read_app_info(app)
end
end
defp read_app_info(app_file, app) do
case :file.consult(app_file) do
{:ok, [{:application, ^app, info}]} ->
parse_app_info(info, app)
{:error, reason} ->
Mix.raise "Could not read #{app_file}: #{:file.format_error(reason)}"
end
end
defp parse_app_info(info, app) do
mods = Keyword.get(info, :modules, [])
apps = Keyword.get(info, :applications, [])
inc_apps = Keyword.get(info, :included_applications, [])
runtime_deps = get_runtime_deps(info)
{app, {mods, runtime_deps ++ inc_apps ++ apps}}
end
defp get_runtime_deps(info) do
Keyword.get(info, :runtime_dependencies, [])
|> Enum.map(&parse_runtime_dep/1)
end
defp parse_runtime_dep(runtime_dep) do
runtime_dep = IO.chardata_to_string(runtime_dep)
regex = ~r/^(.+)\-\d+(?|\.\d+)*$/
[app] = Regex.run(regex, runtime_dep, [capture: :all_but_first])
String.to_atom(app)
end
defp resolve_modules(modules, beams) do
Enum.reduce(modules, beams, &resolve_module/2)
end
defp resolve_module(module, beams) do
beam = Atom.to_char_list(module) ++ '.beam'
case :code.where_is_file(beam) do
path when is_list(path) ->
path = Path.expand(path)
HashSet.put(beams, path)
:non_existing ->
error("Unknown module #{inspect(module)}")
beams
end
end
defp check_beams(plt, beams, nil, prev_plt) do
plt_ensure(plt, prev_plt)
case plt_files(plt) do
nil ->
Mix.raise("Could not open #{plt}: #{:file.format_error(:enoent)}")
old_beams ->
check_beams(plt, beams, old_beams)
end
end
defp check_beams(plt, beams, old_beams, _prev_plt) do
check_beams(plt, beams, old_beams)
end
defp check_beams(plt, beams, old_beams) do
remove = HashSet.difference(old_beams, beams)
plt_remove(plt, remove)
check = HashSet.intersection(beams, old_beams)
plt_check(plt, check)
add = HashSet.difference(beams, old_beams)
plt_add(plt, add)
end
defp plt_ensure(plt, nil), do: plt_new(plt)
defp plt_ensure(plt, prev_plt), do: plt_copy(prev_plt, plt)
defp plt_new(plt) do
info("Creating #{Path.basename(plt)}")
plt = erl_path(plt)
_ = plt_run([analysis_type: :plt_build, output_plt: plt,
apps: [:erts]])
:ok
end
defp plt_copy(plt, new_plt) do
info("Copying #{Path.basename(plt)} to #{Path.basename(new_plt)}")
File.cp!(plt, new_plt)
end
defp plt_add(plt, files) do
case HashSet.size(files) do
0 ->
:ok
n ->
(Mix.shell()).info("Adding #{n} modules to #{Path.basename(plt)}")
plt = erl_path(plt)
files = erl_files(files)
_ = plt_run([analysis_type: :plt_add, init_plt: plt,
files: files])
:ok
end
end
defp plt_remove(plt, files) do
case HashSet.size(files) do
0 ->
:ok
n ->
info("Removing #{n} modules from #{Path.basename(plt)}")
plt = erl_path(plt)
files = erl_files(files)
_ = plt_run([analysis_type: :plt_remove, init_plt: plt,
files: files])
:ok
end
end
defp plt_check(plt, files) do
case HashSet.size(files) do
0 ->
:ok
n ->
(Mix.shell()).info("Checking #{n} modules in #{Path.basename(plt)}")
plt = erl_path(plt)
_ = plt_run([analysis_type: :plt_check, init_plt: plt])
:ok
end
end
defp plt_analyse(plt, files, warnings) do
case HashSet.size(files) do
0 ->
[]
n ->
info("Analysing #{n} modules with #{Path.basename(plt)}")
plt = erl_path(plt)
files = Enum.map(files, &erl_path/1)
plt_run([analysis_type: :succ_typings, plts: [plt], files: files,
warnings: warnings])
end
end
defp plt_run(opts) do
:dialyzer.run([check_plt: false] ++ opts)
end
defp plt_info(plt) do
erl_path(plt)
|> :dialyzer.plt_info()
end
defp erl_files(files) do
Enum.reduce(files, [], &[erl_path(&1)|&2])
end
defp erl_path(path) do
encoding = :file.native_name_encoding()
:unicode.characters_to_list(path, encoding)
end
defp plt_files(plt) do
info("Looking up modules in #{Path.basename(plt)}")
case plt_info(plt) do
{:ok, info} ->
Keyword.fetch!(info, :files)
|> Enum.reduce(HashSet.new(), &HashSet.put(&2, Path.expand(&1)))
{:error, :no_such_file} ->
nil
{:error, reason} ->
Mix.raise("Could not open #{plt}: #{:file.format_error(reason)}")
end
end
defp print_warnings(warnings) do
_ = for warning <- warnings do
_ = error(format_warning(warning))
:ok
end
:ok
end
defp format_warning(warning) do
:dialyzer.format_warning(warning, :fullpath)
|> IO.chardata_to_string()
end
defp info(msg), do: apply(Mix.shell(), :info, [msg])
defp error(msg), do: apply(Mix.shell(), :error, [msg])
end
|
lib/mix/tasks/dialyze.ex
| 0.781831
| 0.579757
|
dialyze.ex
|
starcoder
|
defmodule RulEx.Fixtures.Value do
def test_cases do
[
%{
expr: [:val, "number", 10],
db: %{},
expected: {:ok, 10},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:val, "string", ""],
db: %{},
expected: {:ok, ""},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:val, "any", nil],
db: %{},
expected: {:ok, nil},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:val, "boolean", false],
db: %{},
expected: {:ok, false},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:var, "number", "x"],
db: %{"x" => 10},
expected: {:ok, 10},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:var, "string", "x"],
db: %{"x" => ""},
expected: {:ok, ""},
message: "yields stored value correctly if typing is correct"
},
%{
expr: [:var, "any", "x"],
db: %{"x" => nil},
expected: {:error, "invalid value 'nil' given for type 'any'"},
message: "`var` rejects if it is to yield `nil`"
},
%{
expr: [:var, "boolean", "x"],
db: %{"x" => false},
expected: {:ok, false},
message: "yields stored value correctly if typing is correct"
},
# We parse string values if they are valid date, time, or datetime values
%{
expr: [:val, "date", "2021-01-01"],
db: %{},
expected: {:ok, ~D[2021-01-01]},
message: "parses the value for date type if it is a string before yielding"
},
%{
expr: [:var, "date", "x"],
db: %{"x" => "2021-01-01"},
expected: {:ok, ~D[2021-01-01]},
message: "parses the value for date type if it is a string before yielding"
},
# We must reject any non `val` or `var` expressions
%{
expr: [:>, [:val, "number", 10], [:val, "number", 9]],
db: %{},
expected: {:error, "cannot get value for non `val` or `var` expression"},
message: "rejects arbitrary expressions that aren't `val` or `var`"
},
%{
expr: [:=, [:val, "number", 10], [:val, "number", 9]],
db: %{},
expected: {:error, "cannot get value for non `val` or `var` expression"},
message: "rejects arbitrary expressions that aren't `val` or `var`"
}
]
end
end
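A hedged sketch of how these fixtures might drive an ExUnit run; the `RulEx.value/2` call is an assumption about the evaluator under test, named here only for illustration:

```
# Inside an ExUnit test case:
for %{expr: expr, db: db, expected: expected, message: message} <-
      RulEx.Fixtures.Value.test_cases() do
  # Each fixture pins the exact {:ok, _} or {:error, _} result.
  assert RulEx.value(expr, db) == expected, message
end
```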
|
test/fixtures/value.ex
| 0.76145
| 0.506774
|
value.ex
|
starcoder
|
defmodule AWS.AutoScaling do
@moduledoc """
With Application Auto Scaling, you can configure automatic scaling for your
scalable resources. You can use Application Auto Scaling to accomplish the
following tasks:
* Define scaling policies to automatically scale your AWS or custom
  resources
* Scale your resources in response to CloudWatch alarms
* Schedule one-time or recurring scaling actions
* View the history of your scaling events

Application Auto Scaling can scale the following resources:

* Amazon ECS services. For more information, see [Service Auto
  Scaling](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-auto-scaling.html)
  in the *Amazon Elastic Container Service Developer Guide*.
* Amazon EC2 Spot fleets. For more information, see [Automatic Scaling for Spot
  Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/fleet-auto-scaling.html)
  in the *Amazon EC2 User Guide*.
* Amazon EMR clusters. For more information, see [Using Automatic Scaling in Amazon
  EMR](https://docs.aws.amazon.com/ElasticMapReduce/latest/ManagementGuide/emr-automatic-scaling.html)
  in the *Amazon EMR Management Guide*.
* AppStream 2.0 fleets. For more information, see [Fleet Auto Scaling for Amazon AppStream
  2.0](https://docs.aws.amazon.com/appstream2/latest/developerguide/autoscaling.html)
  in the *Amazon AppStream 2.0 Developer Guide*.
* Provisioned read and write capacity for Amazon DynamoDB tables and global
  secondary indexes. For more information, see [Managing Throughput Capacity
  Automatically with DynamoDB Auto
  Scaling](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/AutoScaling.html)
  in the *Amazon DynamoDB Developer Guide*.
* Amazon Aurora Replicas. For more information, see [Using Amazon Aurora Auto
  Scaling with Aurora
  Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Integrating.AutoScaling.html).
* Amazon SageMaker endpoint variants. For more information, see [Automatically
  Scaling Amazon SageMaker
  Models](https://docs.aws.amazon.com/sagemaker/latest/dg/endpoint-auto-scaling.html).
* Custom resources provided by your own applications or services. More
  information is available in our [GitHub
  repository](https://github.com/aws/aws-auto-scaling-custom-resource).

To learn more about Application Auto Scaling, including information about
granting IAM users required permissions for Application Auto Scaling actions,
see the [Application Auto Scaling User
Guide](https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html).
"""
@doc """
Deletes the specified Application Auto Scaling scaling policy.
Deleting a policy deletes the underlying alarm action, but does not delete
the CloudWatch alarm associated with the scaling policy, even if it no
longer has an associated action.
To create a scaling policy or update an existing one, see
`PutScalingPolicy`.
"""
def delete_scaling_policy(client, input, options \\ []) do
request(client, "DeleteScalingPolicy", input, options)
end
@doc """
Deletes the specified Application Auto Scaling scheduled action.
"""
def delete_scheduled_action(client, input, options \\ []) do
request(client, "DeleteScheduledAction", input, options)
end
@doc """
Deregisters a scalable target.
Deregistering a scalable target deletes the scaling policies that are
associated with it.
To create a scalable target or update an existing one, see
`RegisterScalableTarget`.
"""
def deregister_scalable_target(client, input, options \\ []) do
request(client, "DeregisterScalableTarget", input, options)
end
@doc """
Gets information about the scalable targets in the specified namespace.
You can filter the results using the `ResourceIds` and `ScalableDimension`
parameters.
To create a scalable target or update an existing one, see
`RegisterScalableTarget`. If you are no longer using a scalable target, you
can deregister it using `DeregisterScalableTarget`.
"""
def describe_scalable_targets(client, input, options \\ []) do
request(client, "DescribeScalableTargets", input, options)
end
@doc """
Provides descriptive information about the scaling activities in the
specified namespace from the previous six weeks.
You can filter the results using the `ResourceId` and `ScalableDimension`
parameters.
Scaling activities are triggered by CloudWatch alarms that are associated
with scaling policies. To view the scaling policies for a service
namespace, see `DescribeScalingPolicies`. To create a scaling policy or
update an existing one, see `PutScalingPolicy`.
"""
def describe_scaling_activities(client, input, options \\ []) do
request(client, "DescribeScalingActivities", input, options)
end
@doc """
Describes the scaling policies for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and
`PolicyNames` parameters.
To create a scaling policy or update an existing one, see
`PutScalingPolicy`. If you are no longer using a scaling policy, you can
delete it using `DeleteScalingPolicy`.
"""
def describe_scaling_policies(client, input, options \\ []) do
request(client, "DescribeScalingPolicies", input, options)
end
@doc """
Describes the scheduled actions for the specified service namespace.
You can filter the results using the `ResourceId`, `ScalableDimension`, and
`ScheduledActionNames` parameters.
To create a scheduled action or update an existing one, see
`PutScheduledAction`. If you are no longer using a scheduled action, you
can delete it using `DeleteScheduledAction`.
"""
def describe_scheduled_actions(client, input, options \\ []) do
request(client, "DescribeScheduledActions", input, options)
end
@doc """
Creates or updates a policy for an Application Auto Scaling scalable
target.
Each scalable target is identified by a service namespace, resource ID, and
scalable dimension. A scaling policy applies to the scalable target
identified by those three attributes. You cannot create a scaling policy
until you have registered the resource as a scalable target using
`RegisterScalableTarget`.
To update a policy, specify its policy name and the parameters that you
want to change. Any parameters that you don't specify are not changed by
this update request.
You can view the scaling policies for a service namespace using
`DescribeScalingPolicies`. If you are no longer using a scaling policy, you
can delete it using `DeleteScalingPolicy`.
Multiple scaling policies can be in force at the same time for the same
scalable target. You can have one or more target tracking scaling policies,
one or more step scaling policies, or both. However, there is a chance that
multiple policies could conflict, instructing the scalable target to scale
out or in at the same time. Application Auto Scaling gives precedence to
the policy that provides the largest capacity for both scale in and scale
out. For example, if one policy increases capacity by 3, another policy
increases capacity by 200 percent, and the current capacity is 10,
Application Auto Scaling uses the policy with the highest calculated
capacity (200% of 10 = 20) and scales out to 30.
Learn more about how to work with scaling policies in the [Application Auto
Scaling User
Guide](https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html).
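  ## Example

  A minimal sketch of creating a target tracking policy. The field names
  follow the Application Auto Scaling JSON API, the `client` map shape is
  assumed from `request/4` below, and the values are illustrative:

      input = %{
        "PolicyName" => "cpu75-target-tracking",
        "ServiceNamespace" => "ecs",
        "ResourceId" => "service/default/web",
        "ScalableDimension" => "ecs:service:DesiredCount",
        "PolicyType" => "TargetTrackingScaling"
      }

      {:ok, _result, _response} = put_scaling_policy(client, input)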
"""
def put_scaling_policy(client, input, options \\ []) do
request(client, "PutScalingPolicy", input, options)
end
@doc """
Creates or updates a scheduled action for an Application Auto Scaling
scalable target.
Each scalable target is identified by a service namespace, resource ID, and
scalable dimension. A scheduled action applies to the scalable target
identified by those three attributes. You cannot create a scheduled action
until you have registered the resource as a scalable target using
`RegisterScalableTarget`.
To update an action, specify its name and the parameters that you want to
change. If you don't specify start and end times, the old values are
deleted. Any other parameters that you don't specify are not changed by
this update request.
You can view the scheduled actions using `DescribeScheduledActions`. If you
are no longer using a scheduled action, you can delete it using
`DeleteScheduledAction`.
Learn more about how to work with scheduled actions in the [Application
Auto Scaling User
Guide](https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html).
"""
def put_scheduled_action(client, input, options \\ []) do
request(client, "PutScheduledAction", input, options)
end
@doc """
Registers or updates a scalable target. A scalable target is a resource
that Application Auto Scaling can scale out and scale in. Each scalable
target has a resource ID, scalable dimension, and namespace, as well as
values for minimum and maximum capacity.
After you register a scalable target, you do not need to register it again
to use other Application Auto Scaling operations. To see which resources
  have been registered, use `DescribeScalableTargets`. You can also view the
  scaling policies for a service namespace using `DescribeScalingPolicies`.
If you no longer need a scalable target, you can deregister it using
`DeregisterScalableTarget`.
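  ## Example

  A registration sketch with illustrative values:

      input = %{
        "ServiceNamespace" => "ecs",
        "ResourceId" => "service/default/web",
        "ScalableDimension" => "ecs:service:DesiredCount",
        "MinCapacity" => 1,
        "MaxCapacity" => 10
      }

      {:ok, _result, _response} = register_scalable_target(client, input)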
"""
def register_scalable_target(client, input, options \\ []) do
request(client, "RegisterScalableTarget", input, options)
end
@spec request(map(), binary(), map(), list()) ::
{:ok, Poison.Parser.t | nil, Poison.Response.t} |
{:error, Poison.Parser.t} |
{:error, HTTPoison.Error.t}
defp request(client, action, input, options) do
client = %{client | service: "autoscaling"}
host = get_host("autoscaling", client)
url = get_url(host, client)
headers = [{"Host", host},
{"Content-Type", "application/x-amz-json-1.1"},
{"X-Amz-Target", "AnyScaleFrontendService.#{action}"}]
payload = Poison.Encoder.encode(input, [])
headers = AWS.Request.sign_v4(client, "POST", url, headers, payload)
case HTTPoison.post(url, payload, headers, options) do
{:ok, response=%HTTPoison.Response{status_code: 200, body: ""}} ->
{:ok, nil, response}
{:ok, response=%HTTPoison.Response{status_code: 200, body: body}} ->
{:ok, Poison.Parser.parse!(body), response}
{:ok, _response=%HTTPoison.Response{body: body}} ->
error = Poison.Parser.parse!(body)
exception = error["__type"]
message = error["message"]
{:error, {exception, message}}
{:error, %HTTPoison.Error{reason: reason}} ->
{:error, %HTTPoison.Error{reason: reason}}
end
end
defp get_host(endpoint_prefix, client) do
if client.region == "local" do
"localhost"
else
"#{endpoint_prefix}.#{client.region}.#{client.endpoint}"
end
end
defp get_url(host, %{:proto => proto, :port => port}) do
"#{proto}://#{host}:#{port}/"
end
end
# file: lib/aws/autoscaling.ex
defmodule CoursePlanner.Courses.OfferedCourses do
@moduledoc false
alias CoursePlanner.{Courses.OfferedCourse, Repo, Attendances, Notifications.Notifier,
Notifications, Accounts.Students, Accounts.Teachers, Settings}
import Ecto.Query
alias Ecto.Changeset
@notifier Application.get_env(:course_planner, :notifier, Notifier)
def insert(params) do
student_ids = Map.get(params, "student_ids", [])
students = Repo.all(from s in Students.query(), where: s.id in ^student_ids)
teacher_ids = Map.get(params, "teacher_ids", [])
teachers = Repo.all(from s in Teachers.query(), where: s.id in ^teacher_ids)
%OfferedCourse{}
|> OfferedCourse.changeset(params)
|> Changeset.put_assoc(:students, students)
|> Changeset.put_assoc(:teachers, teachers)
|> Repo.insert()
end
def new do
OfferedCourse.changeset(%OfferedCourse{})
end
def get(id, preload \\ []) do
case Repo.get(OfferedCourse, id) do
nil -> {:error, :not_found}
course -> {:ok, Repo.preload(course, preload)}
end
end
def edit(id) do
case get(id, [:term, :course, :students, :teachers]) do
{:ok, offered_course} -> {:ok, offered_course, OfferedCourse.changeset(offered_course)}
error -> error
end
end
def update(id, params) do
student_ids = Map.get(params, "student_ids", [])
students = Repo.all(from s in Students.query(), where: s.id in ^student_ids)
teacher_ids = Map.get(params, "teacher_ids", [])
teachers = Repo.all(from s in Teachers.query(), where: s.id in ^teacher_ids)
case get(id, [:term, :course, :students, :teachers]) do
{:ok, offered_course} -> offered_course
|> OfferedCourse.changeset(params)
|> Changeset.put_assoc(:students, students)
|> Changeset.put_assoc(:teachers, teachers)
|> Repo.update()
|> format_error(offered_course, students)
error -> error
end
end
def update_syllabus(offered_course, syllabus) do
offered_course
|> OfferedCourse.changeset(%{syllabus: syllabus})
|> Repo.update()
|> format_error(offered_course, nil)
end
defp format_error({:ok, offered_course}, _, students), do: {:ok, offered_course, students}
defp format_error({:error, changeset}, offered_course, students),
do: {:error, offered_course, students, changeset}
def delete(id) do
case get(id) do
      {:error, :not_found} = error -> error
{:ok, offered_course} -> Repo.delete(offered_course)
end
end
def find_by_term_id(term_id) do
term_id
|> query_by_term_id()
|> select([oc], {oc.id, oc})
|> preload([:course, :students])
|> Repo.all()
|> Enum.into(%{})
end
def student_matrix(term_id) do
offered_courses = Repo.all(from oc in OfferedCourse,
join: c in assoc(oc, :course),
preload: [:students, course: c],
order_by: [asc: c.name],
where: oc.term_id == ^term_id)
offered_courses
|> Enum.map(fn(main_offered_course) ->
{main_offered_course.id, get_intersection(main_offered_course, offered_courses)}
end)
end
def query_by_term_id(term_id) do
from oc in OfferedCourse, where: oc.term_id == ^term_id
end
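  # For each offered course in the term, computes the overlap of its enrolled
  # students with every other offered course. Students are matched on their
  # "name family_name" string, so each intersection lists shared student names.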
def get_intersection(main_offered_course, all_offered_courses) do
all_offered_courses
|> Enum.map(fn(target_offered_course) ->
student_ids1 = Enum.map(main_offered_course.students, &("#{&1.name} #{&1.family_name}"))
student_ids2 = Enum.map(target_offered_course.students, &("#{&1.name} #{&1.family_name}"))
intersection = Enum.filter(student_ids1, &(&1 in student_ids2))
{target_offered_course.id, length(intersection), intersection}
end)
end
def get_subscribed_users(offered_courses) do
offered_courses
|> Enum.flat_map(fn(offered_course) ->
Map.get(offered_course, :teachers) ++ Map.get(offered_course, :students)
end)
|> Enum.uniq_by(fn %{id: id} -> id end)
end
def with_pending_attendances do
with_pending_attendances(Settings.utc_to_system_timezone(Timex.now()))
end
def with_pending_attendances(date) do
Repo.all(from oc in OfferedCourse,
join: c in assoc(oc, :classes),
join: a in assoc(c, :attendances),
preload: [:teachers, :course, :term, classes: {c, attendances: a}],
where: c.date < ^date and a.attendance_type == "Not filled")
end
def create_pending_attendance_notification_map(notifiable_users) do
notifiable_ids = Enum.map(notifiable_users, &(&1.id))
for oc <- with_pending_attendances(), t <- oc.teachers, t.id in notifiable_ids do
%{
user: t,
type: :attendance_missing,
path: Attendances.get_offered_course_fill_attendance_path(oc.id),
data: %{offered_course_name: "#{oc.term.name}-#{oc.course.name}"}
}
end
end
def create_missing_attendance_notifications(notifiable_users) do
notifiable_users
|> create_pending_attendance_notification_map()
|> Enum.each(fn(email_data) ->
email_data
|> Notifications.create_simple_notification()
|> @notifier.notify_later()
end)
end
end
# file: lib/course_planner/courses/offered_courses.ex
defmodule Mox do
@moduledoc """
Mox is a library for defining concurrent mocks in Elixir.
The library follows the principles outlined in
["Mocks and explicit contracts"](http://blog.plataformatec.com.br/2015/10/mocks-and-explicit-contracts/),
summarized below:
1. No ad-hoc mocks. You can only create mocks based on behaviours
2. No dynamic generation of modules during tests. Mocks are preferably defined
in your `test_helper.exs` or in a `setup_all` block and not per test
3. Concurrency support. Tests using the same mock can still use `async: true`
4. Rely on pattern matching and function clauses for asserting on the
input instead of complex expectation rules
## Example
As an example, imagine that your library defines a calculator behaviour:
defmodule MyApp.Calculator do
@callback add(integer(), integer()) :: integer()
@callback mult(integer(), integer()) :: integer()
end
If you want to mock the calculator behaviour during tests, the first step
is to define the mock, usually in your `test_helper.exs`:
Mox.defmock(MyApp.CalcMock, for: MyApp.Calculator)
Once the mock is defined, you can pass it to the system under the test.
If the system under test relies on application configuration, you should
  also set it before the tests start to keep the async property. Usually
in your config files:
config :my_app, :calculator, MyApp.CalcMock
Or in your `test_helper.exs`:
Application.put_env(:my_app, :calculator, MyApp.CalcMock)
Now in your tests, you can define expectations and verify them:
use ExUnit.Case, async: true
import Mox
# Make sure mocks are verified when the test exits
setup :verify_on_exit!
test "invokes add and mult" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end
All expectations are defined based on the current process. This
means multiple tests using the same mock can still run concurrently.
## Compile-time requirements
If the mock needs to be available during the project compilation, for
instance because you get undefined function warnings, then instead of
defining the mock in your `test_helper.exs`, you should instead define
it under `test/support/mocks.ex`:
Mox.defmock(MyApp.CalcMock, for: MyApp.Calculator)
Then you need to make sure that files in `test/support` get compiled
with the rest of the project. Edit your `mix.exs` file to add the
`test/support` directory to compilation paths:
def project do
[
...
elixirc_paths: elixirc_paths(Mix.env),
...
]
end
defp elixirc_paths(:test), do: ["test/support", "lib"]
defp elixirc_paths(_), do: ["lib"]
## Multi-process collaboration
Mox supports multi-process collaboration via two mechanisms:
1. explicit allowances
2. global mode
The allowance mechanism can still run tests concurrently while
the global one doesn't. We explore both next.
### Explicit allowances
An allowance permits a child process to use the expectations and stubs
defined in the parent process while still being safe for async tests.
test "invokes add and mult from a task" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
parent_pid = self()
Task.async(fn ->
MyApp.CalcMock |> allow(parent_pid, self())
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end)
|> Task.await
end
### Global mode
Mox supports global mode, where any process can consume mocks and stubs
defined in your tests. To manually switch to global mode use:
set_mox_global()
which can be done as a setup callback:
setup :set_mox_global
test "invokes add and mult from a task" do
MyApp.CalcMock
|> expect(:add, fn x, y -> x + y end)
|> expect(:mult, fn x, y -> x * y end)
Task.async(fn ->
assert MyApp.CalcMock.add(2, 3) == 5
assert MyApp.CalcMock.mult(2, 3) == 6
end)
|> Task.await
end
The default mode is `private` and the global mode must always be explicitly
set per test.
  You can also automatically choose global or private mode depending on
  whether your tests run in async mode. In that case, Mox will use
  private mode when `async: true` and global mode otherwise:
setup :set_mox_from_context
"""
defmodule UnexpectedCallError do
defexception [:message]
end
defmodule VerificationError do
defexception [:message]
end
@doc """
Sets the Mox to private mode, where mocks can be set and
consumed by the same process unless other processes are
explicitly allowed.
setup :set_mox_private
"""
def set_mox_private(_context \\ %{}), do: Mox.Server.set_mode(self(), :private)
@doc """
Sets the Mox to global mode, where mocks can be consumed
by any process.
setup :set_mox_global
"""
def set_mox_global(_context \\ %{}), do: Mox.Server.set_mode(self(), :global)
@doc """
Chooses the Mox mode based on context. When `async: true` is used
the mode is `:private`, otherwise `:global` is chosen.
setup :set_mox_from_context
"""
def set_mox_from_context(%{async: true} = _context), do: set_mox_private()
def set_mox_from_context(_context), do: set_mox_global()
@doc """
Defines a mock with the given name `:for` the given behaviour.
Mox.defmock MyMock, for: MyBehaviour
"""
def defmock(name, options) when is_atom(name) and is_list(options) do
behaviour = options[:for] || raise ArgumentError, ":for option is required on defmock"
validate_behaviour!(behaviour)
define_mock_module(name, behaviour)
name
end
defp validate_behaviour!(behaviour) do
cond do
not Code.ensure_compiled?(behaviour) ->
raise ArgumentError,
"module #{inspect(behaviour)} is not available, please pass an existing module to :for"
not function_exported?(behaviour, :behaviour_info, 1) ->
raise ArgumentError,
"module #{inspect(behaviour)} is not a behaviour, please pass a behaviour to :for"
true ->
:ok
end
end
defp define_mock_module(name, behaviour) do
funs =
for {fun, arity} <- behaviour.behaviour_info(:callbacks) do
args = 0..arity |> Enum.to_list() |> tl() |> Enum.map(&Macro.var(:"arg#{&1}", Elixir))
quote do
def unquote(fun)(unquote_splicing(args)) do
Mox.__dispatch__(__MODULE__, unquote(fun), unquote(arity), unquote(args))
end
end
end
info =
quote do
# Establish a compile time dependency between the mock and the behaviour
_ = unquote(behaviour).module_info(:module)
def __mock_for__ do
unquote(behaviour)
end
end
Module.create(name, [info | funs], Macro.Env.location(__ENV__))
end
@doc """
Expects the `name` in `mock` with arity given by `code`
to be invoked `n` times.
## Examples
To expect `MyMock.add/2` to be called once:
expect(MyMock, :add, fn x, y -> x + y end)
To expect `MyMock.add/2` to be called five times:
expect(MyMock, :add, 5, fn x, y -> x + y end)
`expect/4` can also be invoked multiple times for the same
name/arity, allowing you to give different behaviours on each
invocation.
"""
def expect(mock, name, n \\ 1, code)
when is_atom(mock) and is_atom(name) and is_integer(n) and n >= 1 and is_function(code) do
calls = List.duplicate(code, n)
add_expectation!(mock, name, code, {n, calls, nil})
mock
end
@doc """
Allows the `name` in `mock` with arity given by `code` to
be invoked zero or many times.
Unlike expectations, stubs are never verified.
If expectations and stubs are defined for the same function
and arity, the stub is invoked only after all expectations are
fulfilled.
## Examples
To allow `MyMock.add/2` to be called any number of times:
stub(MyMock, :add, fn x, y -> x + y end)
`stub/3` will overwrite any previous calls to `stub/3`.
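  A stub can back an expectation; once all expectations are fulfilled,
  later calls fall through to the stub:

      MyMock
      |> expect(:add, fn x, y -> x + y end)
      |> stub(:add, fn _x, _y -> 0 end)

      MyMock.add(2, 3) # => 5 (consumes the expectation)
      MyMock.add(4, 4) # => 0 (falls back to the stub)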
"""
def stub(mock, name, code)
when is_atom(mock) and is_atom(name) and is_function(code) do
add_expectation!(mock, name, code, {0, [], code})
mock
end
defp add_expectation!(mock, name, code, value) do
validate_mock!(mock)
arity = :erlang.fun_info(code)[:arity]
key = {mock, name, arity}
unless function_exported?(mock, name, arity) do
raise ArgumentError, "unknown function #{name}/#{arity} for mock #{inspect(mock)}"
end
case Mox.Server.add_expectation(self(), key, value) do
:ok ->
:ok
{:error, {:currently_allowed, owner_pid}} ->
inspected = inspect(self())
raise ArgumentError, """
cannot add expectations/stubs to #{inspect(mock)} in the current process (#{inspected}) \
because the process has been allowed by #{inspect(owner_pid)}. \
You cannot define expectations/stubs in a process that has been allowed
"""
{:error, {:not_global_owner, global_pid}} ->
inspected = inspect(self())
raise ArgumentError, """
cannot add expectations/stubs to #{inspect(mock)} in the current process (#{inspected}) \
because Mox is in global mode and the global process is #{inspect(global_pid)}. \
Only the process that set Mox to global can set expectations/stubs in global mode
"""
end
end
@doc """
Allows other processes to share expectations and stubs
  defined by the owner process.
## Examples
To allow `child_pid` to call any stubs or expectations defined for `MyMock`:
allow(MyMock, self(), child_pid)
"""
def allow(_mock, owner_pid, allowed_pid) when owner_pid == allowed_pid do
raise ArgumentError, "owner_pid and allowed_pid must be different"
end
def allow(mock, owner_pid, allowed_pid)
when is_atom(mock) and is_pid(owner_pid) and is_pid(allowed_pid) do
case Mox.Server.allow(mock, owner_pid, allowed_pid) do
:ok ->
mock
{:error, {:already_allowed, actual_pid}} ->
raise ArgumentError, """
cannot allow #{inspect(allowed_pid)} to use #{inspect(mock)} from #{inspect(owner_pid)} \
because it is already allowed by #{inspect(actual_pid)}.
If you are seeing this error message, it is because you are either \
setting up allowances from different processes or your tests have \
async: true and you found a race condition where two different tests \
are allowing the same process
"""
{:error, :expectations_defined} ->
raise ArgumentError, """
cannot allow #{inspect(allowed_pid)} to use #{inspect(mock)} from #{inspect(owner_pid)} \
because the process has already defined its own expectations/stubs
"""
{:error, :in_global_mode} ->
raise ArgumentError, """
cannot allow #{inspect(allowed_pid)} to use #{inspect(mock)} from #{inspect(owner_pid)} \
because Mox is in global mode, the process already has access to all \
defined expectations/stubs
"""
end
end
@doc """
Verifies the current process after it exits.
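  Usually set up as an ExUnit callback:

      setup :verify_on_exit!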
"""
def verify_on_exit!(_context \\ %{}) do
pid = self()
Mox.Server.verify_on_exit(pid)
ExUnit.Callbacks.on_exit(Mox, fn ->
verify_mock_or_all!(pid, :all)
Mox.Server.exit(pid)
end)
end
@doc """
Verifies that all expectations set by the current process
have been called.
"""
def verify! do
verify_mock_or_all!(self(), :all)
end
@doc """
Verifies that all expectations in `mock` have been called.
"""
def verify!(mock) do
validate_mock!(mock)
verify_mock_or_all!(self(), mock)
end
defp verify_mock_or_all!(pid, mock) do
pending = Mox.Server.verify(pid, mock)
messages =
for {{module, name, arity}, total, pending} <- pending do
mfa = Exception.format_mfa(module, name, arity)
called = total - pending
" * expected #{mfa} to be invoked #{times(total)} but it was invoked #{times(called)}"
end
if messages != [] do
raise VerificationError,
"error while verifying mocks for #{inspect(pid)}:\n\n" <> Enum.join(messages, "\n")
end
:ok
end
defp validate_mock!(mock) do
cond do
not Code.ensure_compiled?(mock) ->
raise ArgumentError, "module #{inspect(mock)} is not available"
not function_exported?(mock, :__mock_for__, 0) ->
raise ArgumentError, "module #{inspect(mock)} is not a mock"
true ->
:ok
end
end
@doc false
def __dispatch__(mock, name, arity, args) do
case Mox.Server.fetch_fun_to_dispatch(self(), {mock, name, arity}) do
:no_expectation ->
mfa = Exception.format_mfa(mock, name, arity)
raise UnexpectedCallError,
"no expectation defined for #{mfa} in process #{inspect(self())}"
{:out_of_expectations, count} ->
mfa = Exception.format_mfa(mock, name, arity)
raise UnexpectedCallError,
"expected #{mfa} to be called #{times(count)} but it has been " <>
"called #{times(count + 1)} in process #{inspect(self())}"
{:ok, fun_to_call} ->
apply(fun_to_call, args)
end
end
defp times(1), do: "once"
defp times(n), do: "#{n} times"
end
# file: lib/mox.ex
defmodule Guardian.Token.Verify do
@moduledoc """
Interface for verifying tokens.
This is intended to be used primarily by token modules
but allows for a custom verification module to be created
if the one that ships with your TokenModule is not quite what you want.
"""
@doc """
Verify a single claim
You should also include a fallback for claims that you are not validating
```elixir
def verify_claim(_mod, _key, claims, _opts), do: {:ok, claims}
```
"""
@callback verify_claim(
mod :: module,
claim_key :: String.t(),
claims :: Guardian.Token.claims(),
options :: Guardian.options()
) :: {:ok, Guardian.Token.claims()} | {:error, atom}
defmacro __using__(_opts \\ []) do
quote do
def verify_claims(mod, claims, opts) do
Enum.reduce(claims, {:ok, claims}, fn
        {k, _v}, {:ok, claims} -> verify_claim(mod, k, claims, opts)
        _, {:error, _reason} = err -> err
end)
end
def verify_claim(_mod, _claim_key, claims, _opts), do: {:ok, claims}
defoverridable verify_claim: 4
end
end
@spec time_within_drift?(mod :: module, time :: pos_integer) :: true | false
@doc """
Checks that a time value is within the `allowed_drift` as
configured for the provided module
Allowed drift is measured in seconds and represents the maximum amount
  of time a token may be expired for and still be considered valid.
This is to deal with clock skew.
"""
def time_within_drift?(mod, time) when is_integer(time) do
allowed_drift = apply(mod, :config, [:allowed_drift, 0]) / 1000
diff = abs(time - Guardian.timestamp())
diff <= allowed_drift
end
  def time_within_drift?(_mod, _time), do: true
@spec verify_literal_claims(
claims :: Guardian.Token.claims(),
claims_to_check :: Guardian.Token.claims() | nil,
opts :: Guardian.options()
) :: {:ok, Guardian.Token.claims()} | {:error, any}
@doc """
For claims, check the values against the values found in
`claims_to_check`. If there is a claim to check that does not pass
verification, it fails.
When the value of a claim is a list, it checks that all values of
the same claim in `claims_to_check` are members of the list.
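  For example (illustrative claims):

  ```elixir
  claims = %{"aud" => ["api", "web"], "iss" => "my_app"}
  Guardian.Token.Verify.verify_literal_claims(claims, %{"aud" => "api"}, [])
  # => {:ok, claims}
  ```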
"""
def verify_literal_claims(claims, nil, _opts), do: {:ok, claims}
def verify_literal_claims(claims, claims_to_check, _opts) do
errors =
Enum.reduce(claims_to_check, [], fn {k, v}, acc ->
case verify_literal_claim(claims, k, v) do
{:ok, _} -> acc
error -> [error | acc]
end
end)
if Enum.empty?(errors) do
{:ok, claims}
else
hd(errors)
end
end
  @spec verify_literal_claim(map(), binary(), [binary()] | binary()) ::
{:ok, [binary()] | binary()} | {:error, binary()}
defp verify_literal_claim(claims, key, value) do
claim_value = Map.get(claims, key)
if valid_claims?(claim_value, value) do
{:ok, claim_value}
else
{:error, key}
end
end
defp valid_claims?(claim_values, valid) when is_list(claim_values) and is_list(valid) do
Enum.all?(valid, &(&1 in claim_values))
end
defp valid_claims?(claim_values, valid) when is_list(claim_values), do: valid in claim_values
defp valid_claims?(claim_value, valid), do: claim_value == valid
end
# file: lib/guardian/token/verify.ex
defmodule MLLP.Envelope do
@moduledoc """
Helper functions encoding and decoding MLLP-framed messages.
Below is an example of an MLLP-framed HL7 payload. Note the <SB>, <CR>, and <EB> characters.
<SB>
MSH|^~\\\\&|MegaReg|XYZHospC|SuperOE|XYZImgCtr|20060529090131-0500||ADT^A01^ADT_A01|01052901|P|2.5<CR>
EVN||200605290901||||200605290900<CR>
PID|||56782445^^^UAReg^PI||KLEINSAMPLE^BARRY^Q^JR||19620910|M||||||||||0105I30001^^^99DEF^AN<CR>
PV1||I|W^389^1^UABH^^^^3||||12345^MORGAN^REX^J^^^MD^0010^UAMC^L||6|||||A0<CR>
OBX|1|NM|^Body Height||1.80|m^Meter^ISO+|||||F<CR>
OBX|2|NM|^Body Weight||79|kg^Kilogram^ISO+|||||F<CR>
AL1|1||^ASPIRIN<CR>
DG1|1||786.50^CHEST PAIN, UNSPECIFIED^I9|||A<CR>
<EB><CR>
"""
require Logger
# ^K - VT (Vertical Tab)
@sb <<0x0B>>
# ^\ - FS (File Separator)
@eb <<0x1C>>
# ^M - CR (Carriage Return)
@cr <<0x0D>>
@ending @eb <> @cr
@doc """
The MLLP Start-Block character. In documentation it is often represented as `<SB>`.
The `sb` is a single-byte character with the ASCII value 0x0B. This character is
  also known as "VT (Vertical Tab)" and may appear as `\\\\v` or `^K` in text editors.
## Examples
iex> MLLP.Envelope.sb
"\v"
"""
def sb, do: @sb
@doc """
The MLLP End-Block character. In documentation it is often represented as `<EB>`.
The `eb` is a single-byte character with the ASCII value 0x1C. This character is
  also known as "FS (File Separator)" and may appear as `^\\` in text editors.
## Examples
iex> MLLP.Envelope.eb
<<28>>
"""
def eb, do: @eb
@doc """
The MLLP Carriage Return character. In documentation it is often represented as `<CR>`.
The `cr` is a single-byte character with the ASCII value 0x0D. This character
may appear as "\\\\r" in text editors.
## Examples
      iex> MLLP.Envelope.cr
      "\r"
"""
def cr, do: @cr
@doc """
In MLLP, each message ends with `eb` and `cr`. The `eb_cr` is just a simple concatenation of `eb` and `cr`.
## Examples
iex> MLLP.Envelope.eb_cr
<<28, 13>>
"""
def eb_cr, do: @ending
@doc """
Wraps a string message in the MLLP start-of-block and end-of-block characters
Example:
iex> MLLP.Envelope.wrap_message "hi"
<<11, 104, 105, 28, 13>>
"""
@spec wrap_message(binary()) :: binary() | no_return
def wrap_message(<<11, _::binary>> = message) do
if String.ends_with?(message, @ending) do
Logger.debug("MLLP Envelope performed unnecessary wrapping of wrapped message")
message
else
raise(ArgumentError, message: "MLLP Envelope cannot wrap a partially wrapped message")
end
end
def wrap_message(message) when is_binary(message) do
@sb <> message <> @ending
end
@doc """
Unwraps an MLLP encoded message
Example:
iex> MLLP.Envelope.unwrap_message <<11, 104, 105, 28, 13>>
"hi"
"""
@spec unwrap_message(binary()) :: binary() | no_return
def unwrap_message(<<11, _::binary>> = wrapped_message) do
if String.ends_with?(wrapped_message, @ending) do
unwrap(wrapped_message)
else
raise(ArgumentError, message: "MLLP Envelope cannot unwrap a partially wrapped message")
end
end
def unwrap_message(message) when is_binary(message) do
if String.ends_with?(message, @ending) do
raise(ArgumentError, message: "cannot unwrap a partially wrapped message")
else
Logger.debug("MLLP Envelope performed unnecessary unwrapping of unwrapped message")
message
end
end
@spec unwrap(binary()) :: binary()
defp unwrap(wrapped) do
wrapped
|> String.trim_leading(@sb)
|> String.trim_trailing(@ending)
end
end
# file: lib/mllp/envelope.ex
defmodule HTS221.Calibration do
@moduledoc """
  The calibration for the HTS221.
  Each HTS221 is calibrated at the factory and has a unique calibration. The
calibration is used to calculate the temperature and humidity values that are
stored in the registers as those values are raw ADC values.
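  For example, a raw temperature sample can be converted to degrees Celsius by
  linear interpolation between the two factory calibration points (a sketch;
  `t_out_raw` is an assumed raw reading obtained from the output registers):

      t0 = HTS221.Calibration.t0(calibration)
      t1 = HTS221.Calibration.t1(calibration)

      degrees_c =
        t0 +
          (t1 - t0) * (t_out_raw - calibration.t0_out) /
            (calibration.t1_out - calibration.t0_out)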
"""
import Bitwise
@type t() :: %__MODULE__{
t0_degc_x8: byte(),
t1_degc_x8: byte(),
t0_msb: byte(),
t0_out: HTS221.s16(),
t1_msb: byte(),
t1_out: HTS221.s16(),
h0_rh_x2: byte(),
h0_t0_out: HTS221.s16(),
h1_rh_x2: byte(),
h1_t0_out: HTS221.s16()
}
defstruct [
:t0_degc_x8,
:t1_degc_x8,
:t0_msb,
:t0_out,
:t1_msb,
:t1_out,
:h0_rh_x2,
:h0_t0_out,
:h1_rh_x2,
:h1_t0_out
]
def from_binary(
<<h0_rh_x2, h1_rh_x2, t0_degc_x8, t1_degc_x8, _, _::size(4), t1_msb::size(2),
t0_msb::size(2), h0_t0_out::signed-little-integer-size(2)-unit(8), _::binary-size(2),
h1_t0_out::signed-little-integer-size(2)-unit(8),
t0_out::signed-little-integer-size(2)-unit(8),
t1_out::signed-little-integer-size(2)-unit(8)>>
) do
    %__MODULE__{
t0_degc_x8: t0_degc_x8,
t1_degc_x8: t1_degc_x8,
t0_msb: t0_msb,
t0_out: t0_out,
t1_msb: t1_msb,
t1_out: t1_out,
h0_rh_x2: h0_rh_x2,
h0_t0_out: h0_t0_out,
h1_rh_x2: h1_rh_x2,
h1_t0_out: h1_t0_out
}
end
def t0(%__MODULE__{t0_msb: t0_msb, t0_degc_x8: t0_degc_x8}) do
((t0_msb <<< 8) + t0_degc_x8) / 8
end
def t1(%__MODULE__{t1_msb: t1_msb, t1_degc_x8: t1_degc_x8}) do
((t1_msb <<< 8) + t1_degc_x8) / 8
end
def h0(%__MODULE__{h0_rh_x2: h0_rh_x2}) do
h0_rh_x2 / 2
end
def h1(%__MODULE__{h1_rh_x2: h1_rh_x2}) do
h1_rh_x2 / 2
end
defimpl HTS221.Register do
alias HTS221.IORead
def read(_calibration) do
{:ok, IORead.new(0x30, 16)}
end
def write(_calibration) do
{:error, :access_error}
end
end
end
# file: lib/hts221/calibration.ex
defmodule Tai.Markets.Quote do
@moduledoc """
Represents the inside bid & ask price point within the order book
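  A sketch of computing the mid price (the `PricePoint` fields shown here
  are assumed):

      market_quote = %Tai.Markets.Quote{
        venue_id: :venue_a,
        product_symbol: :btc_usd,
        bids: [%Tai.Markets.PricePoint{price: 100.0, size: 1.0}],
        asks: [%Tai.Markets.PricePoint{price: 101.0, size: 1.0}],
        last_received_at: System.monotonic_time()
      }

      {:ok, mid} = Tai.Markets.Quote.mid_price(market_quote)
      # mid is a Decimal equal to 100.5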
"""
alias __MODULE__
alias Tai.Markets.PricePoint
@type price_point :: PricePoint.t()
@type venue_id :: Tai.Venue.id()
@type product_symbol :: Tai.Venues.Product.symbol()
@type t :: %Quote{
venue_id: venue_id,
product_symbol: product_symbol,
bids: [price_point],
asks: [price_point],
last_received_at: integer,
last_venue_timestamp: DateTime.t() | nil
}
@enforce_keys ~w[venue_id product_symbol bids asks last_received_at]a
defstruct venue_id: nil,
product_symbol: nil,
bids: [],
asks: [],
last_received_at: nil,
last_venue_timestamp: nil
@spec inside_bid(t) :: price_point | nil
def inside_bid(market_quote), do: market_quote.bids |> List.first()
@spec inside_ask(t) :: price_point | nil
def inside_ask(market_quote), do: market_quote.asks |> List.first()
@spec mid_price(t) :: {:ok, Decimal.t()} | {:error, :no_inside_bid | :no_inside_ask}
@spec mid_price(bid :: price_point, ask :: price_point) ::
{:ok, Decimal.t()} | {:error, :no_inside_bid | :no_inside_ask | :no_inside_bid_or_ask}
def mid_price(%Quote{} = market_quote) do
bid = market_quote |> inside_bid
ask = market_quote |> inside_ask
mid_price(bid, ask)
end
@two Decimal.new(2)
def mid_price(%PricePoint{} = bid, %PricePoint{} = ask) do
ask_price = Tai.Utils.Decimal.cast!(ask.price)
bid_price = Tai.Utils.Decimal.cast!(bid.price)
mid =
ask_price
|> Decimal.sub(bid_price)
|> Decimal.div(@two)
|> Decimal.add(bid_price)
{:ok, mid}
end
def mid_price(nil, %PricePoint{}), do: {:error, :no_inside_bid}
def mid_price(%PricePoint{}, nil), do: {:error, :no_inside_ask}
def mid_price(nil, nil), do: {:error, :no_inside_bid_or_ask}
defimpl Stored.Item do
@type market_quote :: Tai.Markets.Quote.t()
@type venue_id :: Tai.Venue.id()
@type product_symbol :: Tai.Venues.Product.symbol()
@spec key(market_quote) :: {venue_id, product_symbol}
def key(q), do: {q.venue_id, q.product_symbol}
end
end
# file: apps/tai/lib/tai/markets/quote.ex
defmodule Mix.Tasks.Pix.ReadBrcode do
@moduledoc """
  Reads a BRCode, decoding and validating it.
It accepts the raw BRCode. Beware of valid spaces in its values! When using the command line,
wrap it with single quotes. Example:
      mix pix.read_brcode '00020126580014br.gov.bcb.pix0136123e4567-e12b-12d1-a456-4266554400005204000053039865802BR5913Fulano de Tal6008BRASILIA62070503***63041D3D'
Command line options:
`--citycode` - payer city code who will pay dynamic pix payment with due date
`--paymentdate` - payment date of dynamic pix payment with due date
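  Example with options (illustrative values; `<brcode>` stands for the raw code):

      mix pix.read_brcode '<brcode>' --citycode 1501402 --paymentdate 2021-01-29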
"""
  @shortdoc "Reads a BRCode, decoding and validating it."
@acceptable_args [citycode: :string, paymentdate: :string]
use Mix.Task
alias ExPixBRCode.{BRCodes, Payments}
@client Tesla.client(
[],
{Tesla.Adapter.Hackney, ssl_options: [versions: [:"tlsv1.2", :"tlsv1.3"]]}
)
@impl Mix.Task
def run([]) do
Mix.shell().info("Missing argument of a BRCode")
end
def run([brcode | args]) do
Mix.Tasks.App.Start.run([])
with {:parsed_args, {args, [], []}} <-
{:parsed_args, OptionParser.parse(args, strict: @acceptable_args)},
{:ok, opts} <- build_from_code_opts(args),
{:ok, brcode} <- BRCodes.decode_to(brcode),
{:ok, payment} <- Payments.from_brcode(@client, brcode, opts) do
Mix.shell().info("""
Got a valid BRCode of type: #{brcode.type}
Decoded BRCode:
#{inspect(brcode, pretty: true)}
Loaded payment:
#{inspect(payment, pretty: true)}
""")
else
{:parsed_args, {_args, [], unknown_args}} ->
for {unknown_arg, _} <- unknown_args do
Mix.shell().error("#{unknown_arg}: Not allowed argument")
end
err ->
Mix.shell().error("Got error! #{inspect(err)}")
end
end
  defp build_from_code_opts([]), do: {:ok, []}
defp build_from_code_opts(args) do
{:ok, [codemun: Keyword.get(args, :citycode), ddp: Keyword.get(args, :paymentdate)]}
end
end
# file: lib/mix/tasks/read.brcode.ex
defmodule TypeCheck.TypeError do
@moduledoc """
Exception to be returned or raised when a value is not of the expected type.
This exception has two fields:
- `:raw`, which will contain the problem tuple of the type check failure.
  - `:message`, which will contain the human-readable representation of the raw problem_tuple
`:message` is constructed from `:raw` using the TypeCheck.TypeError.DefaultFormatter.
(TODO at some point this might be configured to use your custom formatter instead)
"""
defexception [:message, :raw, :location]
@type t() :: %__MODULE__{message: String.t(), raw: problem_tuple(), location: location()}
@typedoc """
Any built-in TypeCheck struct (c.f. `TypeCheck.Builtin.*`), whose check(s) failed.
"""
@type type_checked_against :: TypeCheck.Type.t()
@typedoc """
The name of the particular check. Might be `:no_match` for simple types,
  but for more complex types that have multiple checks, it disambiguates between them.
For instance, for `TypeCheck.Builtin.List` we have `:not_a_list`, `:different_length`, and `:element_error`.
"""
@type check_name :: atom()
@type location :: [] | [file: binary(), line: non_neg_integer()]
@typedoc """
An extra map with any information related to the check that failed.
For instance, if the check was a compound check, will contain the field `problem:` with the child problem_tuple
as well as `:index` or `:key` etc. to indicate _where_ in the compound structure the check failed.
"""
@type extra_information :: %{optional(atom) => any()}
@typedoc """
The value that was passed in which failed the check.
It is included for the easy creation of `value did not match y`-style messages.
"""
@type problematic_value :: any()
@typedoc """
A problem_tuple contains all information about a failed type check.
c.f. TypeCheck.TypeError.Formatter.problem_tuple for a more precise definition
"""
@type problem_tuple ::
{type_checked_against(), check_name(), extra_information(), problematic_value()}
@impl true
def exception({problem_tuple, location}) do
message = TypeCheck.TypeError.DefaultFormatter.format(problem_tuple, location)
%__MODULE__{message: message, raw: problem_tuple, location: location}
end
def exception(problem_tuple) do
exception({problem_tuple, []})
end
end
# file: lib/type_check/type_error.ex